CombinedText stringlengths 4 3.42M |
|---|
import argparse
from rasa.cli.arguments.run import add_run_arguments
from rasa.cli.arguments.default_arguments import add_domain_param, add_stories_param
from rasa.cli.arguments.train import (
add_force_param,
add_data_param,
add_config_param,
add_out_param,
)
def set_interactive_args(parser):
    """Register every CLI argument used by ``rasa interactive``.

    :param parser: the argparse parser to extend
    """
    # Registration order is preserved because it fixes the --help ordering.
    registrars = (
        add_run_arguments,
        add_config_param,
        add_out_param,
        add_domain_param,
        add_data_param,
        add_force_param,
        add_skip_visualization_param,
    )
    for register in registrars:
        register(parser)
def set_interactive_core_args(parser):
    """Register every CLI argument used by ``rasa interactive core``.

    :param parser: the argparse parser to extend

    BUGFIX: the original called ``add_domain_param`` twice; registering the
    same option string a second time makes argparse raise ``ArgumentError``
    (conflicting option), so the parser could never be built.
    """
    add_config_param(parser)
    add_out_param(parser)
    add_domain_param(parser)
    add_stories_param(parser)
    add_run_arguments(parser)
    add_skip_visualization_param(parser)
def add_skip_visualization_param(parser: argparse.ArgumentParser):
    """Attach the ``--skip-visualization`` boolean flag to *parser*.

    The flag defaults to ``False`` and flips to ``True`` when supplied.
    """
    flag_options = {
        "default": False,
        "action": "store_true",
        "help": "Disables plotting the visualization during interactive learning",
    }
    parser.add_argument("--skip-visualization", **flag_options)
Refactor interactive args.
import argparse
from rasa.cli.arguments.run import add_run_arguments
from rasa.cli.arguments.default_arguments import add_domain_param, add_stories_param
from rasa.cli.arguments.train import (
add_force_param,
add_data_param,
add_config_param,
add_out_param,
)
def set_interactive_args(parser):
    """Register every CLI argument used by ``rasa interactive``.

    :param parser: the argparse parser to extend
    """
    # Keep the call order: argparse prints --help entries in
    # registration order.
    for register in (
        add_config_param,
        add_domain_param,
        add_data_param,
        add_out_param,
        add_force_param,
        add_skip_visualization_param,
        add_run_arguments,
    ):
        register(parser)
def set_interactive_core_args(parser):
    """Register every CLI argument used by ``rasa interactive core``.

    :param parser: the argparse parser to extend
    """
    # Keep the call order: argparse prints --help entries in
    # registration order.
    for register in (
        add_config_param,
        add_domain_param,
        add_stories_param,
        add_out_param,
        add_skip_visualization_param,
        add_run_arguments,
    ):
        register(parser)
def add_skip_visualization_param(parser: argparse.ArgumentParser):
    """Add the ``--skip-visualization`` flag (default ``False``) to *parser*."""
    parser.add_argument(
        "--skip-visualization",
        action="store_true",
        default=False,
        help="Disables plotting the visualization during interactive learning",
    )
|
# -*-coding:utf-8-*-
"""
application
~~~~~~~~~~~
Use this model to initialize web application.
Usage
=====
>>> from karlooper.web import IOModel
>>> from karlooper.web.application import Application
>>> application = Application(handlers={}, settings={}, port=8080, log_conf="./config.log")
>>> application.run(io_model=IOModel.POLL)
server run on port: 8080
run with poll
>>> application = Application(handlers={}, settings={}, log_conf="./config.log")
>>> application.listen(8000)
>>> application.run(io_model=IOModel.POLL)
server run on port: 8000
run with poll
"""
import socket
import select
from karlooper.logger.logger import init_logger
from karlooper.web import IOModel
from karlooper.web.__async_core_server import EchoServer, asyncore
from karlooper.web.http_connection import HttpConnection
from karlooper.web.http_io_buffer import HttpIOBuffer
from karlooper.web.http_io_routine_pool import HttpIORoutinePool
from karlooper.http_parser.http_parser import HttpParser
from karlooper.config import get_cli_data, set_cli_data
from karlooper.config.config import SOCKET_RECEIVE_SIZE, DEFAULT_PORT, CLIENT_CONNECT_TO_SERVER_NUM
__author__ = 'karlvorndoenitz@gmail.com'
class Application(object):
def __init__(self, handlers, settings=None, **kwargs):
    """Initialize the web application.

    :param handlers: url-to-handler mapping, dict type
    :param settings: settings mapping, dict type
    :param kwargs: extra options, e.g. ``port`` and ``log_conf``
    """
    self.settings = settings
    self.handlers = handlers
    # Both settings and kwargs are pushed into the global CLI-data store;
    # keys from kwargs are applied second, so presumably they win on
    # collision -- TODO confirm against set_cli_data's merge semantics.
    set_cli_data(self.settings)
    set_cli_data(kwargs)
    cli_data = get_cli_data()
    # Port comes from the merged CLI data, falling back to DEFAULT_PORT.
    self.port = int(cli_data.get("port", DEFAULT_PORT))
    # NOTE(review): log_conf is read from settings whenever settings is
    # truthy, so a "log_conf" kwarg is silently ignored when settings is
    # a non-empty dict without that key.
    log_conf = self.settings.get("log_conf", None) if self.settings else kwargs.get("log_conf", None)
    self.logger = init_logger(config_path=log_conf)
    # Byte sequences marking the end of an HTTP header block (blank line).
    self.EOL1 = b'\n\n'
    self.EOL2 = b'\n\r\n'
    # Initial (empty) response buffer content installed for new connections.
    self.response = ""
def listen(self, port):
    """Set the TCP port the application will serve on.

    :param port: port number, int or numeric string
    :return: None
    """
    port_number = int(port)
    self.port = port_number
def __run_epoll(self):
    """Serve requests with an epoll-based event loop (Linux).

    Accepts connections on ``self.port``, accumulates request bytes until a
    blank line (EOL1/EOL2) is seen, parses them with ``HttpParser``, then
    drives each socket through EPOLLIN -> EPOLLOUT -> EPOLLHUP.

    BUGFIX: the incomplete-request branch used to log ``str(e)`` where no
    name ``e`` existed.  The resulting NameError skipped the per-connection
    cleanup and fell into the outer handler, which closes and rebuilds the
    entire epoll object, dropping every registered connection.  The bogus
    log line is removed so the branch performs its cleanup as intended.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('0.0.0.0', self.port))
    server_socket.listen(CLIENT_CONNECT_TO_SERVER_NUM)  # accept backlog size
    server_socket.setblocking(0)  # non-blocking listener
    server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    epoll = select.epoll()
    epoll.register(server_socket.fileno(), select.EPOLLIN)
    try:
        http_connection = HttpConnection()
        http_io_buffer = HttpIOBuffer()
        http_io_routine_pool = HttpIORoutinePool()
        events_buf = []  # suspended-coroutine events replayed next iteration
        while True:
            events = epoll.poll(1) + events_buf
            events_buf = []
            for fileno, event in events:
                try:
                    if fileno == server_socket.fileno():  # new incoming connection
                        connection, address = server_socket.accept()
                        connection.setblocking(0)  # non-blocking client socket
                        epoll.register(connection.fileno(), select.EPOLLIN)
                        http_connection.add_connection(connection.fileno(), connection)
                        http_io_buffer.add_request(connection.fileno(), b'')
                        http_io_buffer.add_response(connection.fileno(), self.response)
                    elif event & select.EPOLLIN:  # data waiting in the read buffer
                        http_parser = http_io_routine_pool.get(file_no=fileno)
                        if http_parser:
                            # resume a previously suspended (coroutine) parser
                            data = http_parser.parse()
                            if isinstance(data, str) or isinstance(data, unicode):
                                http_io_buffer.add_response(
                                    fileno,
                                    http_io_buffer.get_response(fileno) + data
                                )
                                epoll.modify(fileno, select.EPOLLOUT)  # switch to write mode
                                http_io_routine_pool.remove(fileno)
                            else:  # still a coroutine; retry next loop
                                http_io_routine_pool.add(fileno, http_parser)
                                events_buf.append((fileno, event))
                        else:
                            http_request_buffer = http_connection.get_connection(fileno).recv(SOCKET_RECEIVE_SIZE)
                            http_io_buffer.add_request(
                                fileno,
                                http_io_buffer.get_request(fileno) + http_request_buffer
                            )
                            if self.EOL1 in http_io_buffer.get_request(fileno) \
                                    or self.EOL2 in http_io_buffer.get_request(fileno):
                                # header block complete: strip a trailing CRLF and parse
                                request_data = http_io_buffer.get_request(fileno)[:-2] \
                                    if http_io_buffer.get_request(fileno).endswith("\r\n") \
                                    else http_io_buffer.get_request(fileno)
                                http_parser = HttpParser(
                                    request_data,
                                    self.handlers,
                                    settings=self.settings
                                )
                                data = http_parser.parse()
                                if isinstance(data, str) or isinstance(data, unicode):
                                    http_io_buffer.add_response(
                                        fileno,
                                        http_io_buffer.get_response(fileno) + data
                                    )
                                    epoll.modify(fileno, select.EPOLLOUT)  # switch to write mode
                                    http_io_routine_pool.remove(fileno)
                                else:  # if coroutine
                                    http_io_routine_pool.add(fileno, http_parser)
                                    events_buf.append((fileno, event))
                            else:
                                # request incomplete: drop this connection's state
                                # (the undefined-name log line was removed here)
                                http_connection.remove_connection(fileno)
                                http_io_buffer.remove_request(fileno)
                                http_io_buffer.remove_response(fileno)
                                http_io_routine_pool.remove(fileno)
                                epoll.unregister(fileno)
                    elif event & select.EPOLLOUT:  # socket writable
                        bytes_written = http_connection.get_connection(fileno).send(
                            http_io_buffer.get_response(fileno)
                        )
                        http_io_buffer.add_response(fileno, http_io_buffer.get_response(fileno)[bytes_written:])
                        if len(http_io_buffer.get_response(fileno)) == 0:  # response fully sent
                            http_connection.get_connection(fileno).shutdown(socket.SHUT_RDWR)
                            epoll.modify(fileno, select.EPOLLHUP)
                    elif event & select.EPOLLHUP:  # peer hung up after send
                        epoll.unregister(fileno)
                        http_connection.get_connection(fileno).close()
                        http_connection.remove_connection(fileno)
                except Exception as e:
                    # last-resort recovery: drop the connection and rebuild epoll
                    self.logger.info("error in __run_epoll: %s", str(e))
                    http_connection.remove_connection(fileno)
                    http_io_buffer.remove_request(fileno)
                    http_io_buffer.remove_response(fileno)
                    http_io_routine_pool.remove(fileno)
                    self.logger.info("fileno is: %s", str(fileno))
                    epoll.close()
                    epoll = select.epoll()
                    epoll.register(server_socket.fileno(), select.EPOLLIN)
    finally:
        epoll.unregister(server_socket.fileno())
        epoll.close()
        server_socket.close()
def __run_kqueue(self):
    """
    run server use kqueue (BSD/macOS backend)

    Connections are keyed by a monotonically increasing ``index`` stored
    in each kevent's ``udata`` field, not by fileno as in the epoll/poll
    backends.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(('0.0.0.0', self.port))
    server_socket.listen(CLIENT_CONNECT_TO_SERVER_NUM)
    kq = select.kqueue()
    http_connection = HttpConnection()
    http_io_buffer = HttpIOBuffer()
    http_io_routine_pool = HttpIORoutinePool()
    # next udata key to assign; the listening socket itself carries no udata
    index = 1
    events = [
        select.kevent(server_socket.fileno(), select.KQ_FILTER_READ, select.KQ_EV_ADD),
        select.kevent(server_socket.fileno(), select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
    ]
    events_buf = []  # suspended-coroutine events replayed next iteration
    while True:
        try:
            event_list = kq.control(events, 128, 0.0001) + events_buf
            events_buf = []
        except select.error as e:
            self.logger.error("error in __run_kqueue: %s", str(e))
            break
        if event_list:
            for each in event_list:
                if each.ident == server_socket.fileno():
                    # new incoming connection: register a READ kevent for it
                    index += 1
                    conn, addr = server_socket.accept()
                    http_connection.add_connection(index, conn)
                    events.append(
                        select.kevent(
                            http_connection.get_connection(index).fileno(),
                            select.KQ_FILTER_READ,
                            select.KQ_EV_ADD,
                            udata=index
                        )
                    )
                else:
                    try:
                        if each.udata >= 1 and each.filter == select.KQ_FILTER_READ:
                            # readable client socket
                            http_parser = http_io_routine_pool.get(file_no=each.udata)
                            if http_parser:
                                # resume a previously suspended (coroutine) parser
                                data = http_parser.parse()
                                if isinstance(data, str) or isinstance(data, unicode):
                                    # parse finished: swap READ kevent for WRITE
                                    http_io_routine_pool.remove(each.udata)
                                    http_io_buffer.add_response(each.udata, data)
                                    events.append(
                                        select.kevent(
                                            http_connection.get_connection(each.udata).fileno(),
                                            select.KQ_FILTER_WRITE,
                                            select.KQ_EV_ADD,
                                            udata=each.udata
                                        )
                                    )
                                    events.remove(select.kevent(
                                        http_connection.get_connection(each.udata).fileno(),
                                        select.KQ_FILTER_READ,
                                        select.KQ_EV_ADD,
                                        udata=each.udata)
                                    )
                                else:  # if coroutine
                                    http_io_routine_pool.add(each.udata, http_parser)
                                    events_buf.append(each)
                            else:
                                # first read for this connection: recv and parse
                                conn = http_connection.get_connection(each.udata)
                                request_data = conn.recv(SOCKET_RECEIVE_SIZE)
                                request_data = request_data[:-2] if request_data.endswith("\r\n") else request_data
                                http_parser = HttpParser(
                                    request_data,
                                    handlers=self.handlers,
                                    settings=self.settings
                                )
                                data = http_parser.parse()
                                if isinstance(data, str) or isinstance(data, unicode):
                                    # parse finished: swap READ kevent for WRITE
                                    http_io_buffer.add_response(each.udata, data)
                                    events.append(
                                        select.kevent(
                                            http_connection.get_connection(each.udata).fileno(),
                                            select.KQ_FILTER_WRITE,
                                            select.KQ_EV_ADD,
                                            udata=each.udata
                                        )
                                    )
                                    events.remove(select.kevent(
                                        http_connection.get_connection(each.udata).fileno(),
                                        select.KQ_FILTER_READ,
                                        select.KQ_EV_ADD,
                                        udata=each.udata)
                                    )
                                else:  # if coroutine
                                    http_io_routine_pool.add(each.udata, http_parser)
                                    events_buf.append(each)
                        elif each.udata >= 1 and each.filter == select.KQ_FILTER_WRITE:
                            # writable client socket: flush the response and close
                            conn = http_connection.get_connection(each.udata)
                            data = http_io_buffer.get_response(each.udata)
                            conn.send(data)
                            events.remove(select.kevent(
                                http_connection.get_connection(each.udata).fileno(),
                                select.KQ_FILTER_WRITE,
                                select.KQ_EV_ADD,
                                udata=each.udata)
                            )
                            conn.close()
                            http_connection.remove_connection(each.udata)
                    except Exception as e:
                        # recovery: drop this connection's state and rebuild kqueue
                        self.logger.info("error in __run_kqueue event list: %s", str(e))
                        self.logger.info("each filter: %s", each.filter)
                        self.__remove_event(events, each)
                        http_connection.remove_connection(each.udata)
                        http_io_buffer.remove_request(each.udata)
                        http_io_buffer.remove_response(each.udata)
                        http_io_routine_pool.remove(each.udata)
                        kq.close()
                        kq = select.kqueue()
    server_socket.close()
def __run_poll(self):
    """Serve requests with a poll-based event loop.

    Mirrors ``__run_epoll`` using the portable ``select.poll`` API.

    BUGFIXES:
    * The incomplete-request branch logged ``str(e)`` where no name ``e``
      existed; the NameError skipped the intended per-connection cleanup
      (the equivalent line was already removed from ``__run_epoll``).
    * The ``finally`` block called ``poll.close()``, but ``select.poll``
      objects have no ``close()`` method, so shutdown raised
      AttributeError and ``server_socket.close()`` never ran.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('0.0.0.0', self.port))
    server_socket.listen(CLIENT_CONNECT_TO_SERVER_NUM)  # accept backlog size
    server_socket.setblocking(0)  # non-blocking listener
    server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    poll = select.poll()
    poll.register(server_socket.fileno(), select.POLLIN)
    try:
        http_connection = HttpConnection()
        http_io_buffer = HttpIOBuffer()
        http_io_routine_pool = HttpIORoutinePool()
        events_buf = []  # suspended-coroutine events replayed next iteration
        while True:
            events = poll.poll(1) + events_buf
            events_buf = []
            for fileno, event in events:
                try:
                    if fileno == server_socket.fileno():  # new incoming connection
                        connection, address = server_socket.accept()
                        connection.setblocking(0)  # non-blocking client socket
                        poll.register(connection.fileno(), select.POLLIN)
                        http_connection.add_connection(connection.fileno(), connection)
                        http_io_buffer.add_request(connection.fileno(), b'')
                        http_io_buffer.add_response(connection.fileno(), self.response)
                    elif event & select.POLLIN:  # data waiting in the read buffer
                        http_parser = http_io_routine_pool.get(file_no=fileno)
                        if http_parser:
                            # resume a previously suspended (coroutine) parser
                            data = http_parser.parse()
                            if isinstance(data, str) or isinstance(data, unicode):
                                http_io_buffer.add_response(
                                    fileno,
                                    http_io_buffer.get_response(fileno) + data
                                )
                                poll.modify(fileno, select.POLLOUT)  # switch to write mode
                                http_io_routine_pool.remove(fileno)
                            else:  # still a coroutine; retry next loop
                                http_io_routine_pool.add(fileno, http_parser)
                                events_buf.append((fileno, event))
                        else:
                            http_request_buffer = http_connection.get_connection(fileno).recv(SOCKET_RECEIVE_SIZE)
                            http_io_buffer.add_request(
                                fileno,
                                http_io_buffer.get_request(fileno) + http_request_buffer
                            )
                            if self.EOL1 in http_io_buffer.get_request(fileno) \
                                    or self.EOL2 in http_io_buffer.get_request(fileno):
                                # header block complete: strip a trailing CRLF and parse
                                request_data = http_io_buffer.get_request(fileno)[:-2] \
                                    if http_io_buffer.get_request(fileno).endswith("\r\n") \
                                    else http_io_buffer.get_request(fileno)
                                http_parser = HttpParser(
                                    request_data,
                                    self.handlers,
                                    settings=self.settings
                                )
                                data = http_parser.parse()
                                if isinstance(data, str) or isinstance(data, unicode):
                                    http_io_buffer.add_response(
                                        fileno,
                                        http_io_buffer.get_response(fileno) + data
                                    )
                                    poll.modify(fileno, select.POLLOUT)  # switch to write mode
                                    http_io_routine_pool.remove(fileno)
                                else:  # if coroutine
                                    http_io_routine_pool.add(fileno, http_parser)
                                    events_buf.append((fileno, event))
                            else:
                                # request incomplete: drop this connection's state
                                # (the undefined-name log line was removed here)
                                http_connection.remove_connection(fileno)
                                http_io_buffer.remove_request(fileno)
                                http_io_buffer.remove_response(fileno)
                                http_io_routine_pool.remove(fileno)
                                poll.unregister(fileno)
                    elif event & select.POLLOUT:  # socket writable
                        bytes_written = http_connection.get_connection(fileno).send(
                            http_io_buffer.get_response(fileno)
                        )
                        http_io_buffer.add_response(fileno, http_io_buffer.get_response(fileno)[bytes_written:])
                        if len(http_io_buffer.get_response(fileno)) == 0:  # response fully sent
                            http_connection.get_connection(fileno).shutdown(socket.SHUT_RDWR)
                            poll.modify(fileno, select.POLLHUP)
                    elif event & select.POLLHUP:  # peer hung up after send
                        poll.unregister(fileno)
                        http_connection.get_connection(fileno).close()
                        http_connection.remove_connection(fileno)
                except Exception as e:
                    # last-resort recovery: drop the connection's state
                    self.logger.info("error in __run_poll: %s", str(e))
                    http_connection.remove_connection(fileno)
                    http_io_buffer.remove_request(fileno)
                    http_io_buffer.remove_response(fileno)
                    http_io_routine_pool.remove(fileno)
                    self.logger.info("fileno is: %s", str(fileno))
                    poll.unregister(fileno)
    finally:
        poll.unregister(server_socket.fileno())
        server_socket.close()
def __run_async_io(self):
    """
    run server use asyncore (portable fallback backend)
    """
    # EchoServer registers itself with the asyncore machinery when
    # constructed; asyncore.loop() then blocks and dispatches events.
    EchoServer('0.0.0.0', self.port, self.handlers, self.settings)
    asyncore.loop()
def __remove_event(self, events, each):
    """Remove the first registered kevent whose ident matches *each*.

    :param events: the list containing registered events
    :param each: the event whose ident identifies what to remove
    :return: None
    """
    self.logger.warning("remove event with udata: %s", str(each.udata))
    stale = next((candidate for candidate in events if candidate.ident == each.ident), None)
    if stale is not None:
        events.remove(stale)
def run(self, io_model=None):
    """run the web server

    :param io_model: os io model, EPOLL 0 KQUEUE 1 POLL 2; when None the
        best available backend on this platform is auto-selected
    :return: None

    BUGFIX: IOModel.EPOLL is 0 (per the io_model doc above), so the old
    truthiness test ``if io_model:`` treated an explicit EPOLL request as
    "no preference" and fell into auto-selection.  Compare against None
    so 0 is honoured.
    """
    print("server run on port: %d" % self.port)
    self.logger.info("server run on port: %d" % self.port)
    if io_model is not None:
        # explicit backend request; honoured only if the platform has it
        if io_model == IOModel.EPOLL and hasattr(select, "epoll"):
            print("run with epoll")
            self.logger.info("run with epoll")
            self.__run_epoll()
        elif io_model == IOModel.KQUEUE and hasattr(select, "kqueue"):
            print("run with kqueue")
            self.logger.info("run with kqueue")
            self.__run_kqueue()
        elif io_model == IOModel.POLL and hasattr(select, "poll"):
            print("run with poll")
            self.logger.info("run with poll")
            self.__run_poll()
    else:
        # auto-select: epoll > kqueue > poll > asyncore
        if hasattr(select, "epoll"):
            print("run with epoll")
            self.logger.info("run with epoll")
            self.__run_epoll()
        elif hasattr(select, "kqueue"):
            print("run with kqueue")
            self.logger.info("run with kqueue")
            self.__run_kqueue()
        elif hasattr(select, "poll"):
            print("run with poll")
            self.logger.info("run with poll")
            self.__run_poll()
        else:
            print("run with asyncore")
            self.logger.info("run with asyncore")
            self.__run_async_io()
    # Reached when no backend matched, or when a backend loop returned.
    print("server start failed!")
    self.logger.info("server start failed!")
fix a bug
# -*-coding:utf-8-*-
"""
application
~~~~~~~~~~~
Use this model to initialize web application.
Usage
=====
>>> from karlooper.web import IOModel
>>> from karlooper.web.application import Application
>>> application = Application(handlers={}, settings={}, port=8080, log_conf="./config.log")
>>> application.run(io_model=IOModel.POLL)
server run on port: 8080
run with poll
>>> application = Application(handlers={}, settings={}, log_conf="./config.log")
>>> application.listen(8000)
>>> application.run(io_model=IOModel.POLL)
server run on port: 8000
run with poll
"""
import socket
import select
from karlooper.logger.logger import init_logger
from karlooper.web import IOModel
from karlooper.web.__async_core_server import EchoServer, asyncore
from karlooper.web.http_connection import HttpConnection
from karlooper.web.http_io_buffer import HttpIOBuffer
from karlooper.web.http_io_routine_pool import HttpIORoutinePool
from karlooper.http_parser.http_parser import HttpParser
from karlooper.config import get_cli_data, set_cli_data
from karlooper.config.config import SOCKET_RECEIVE_SIZE, DEFAULT_PORT, CLIENT_CONNECT_TO_SERVER_NUM
__author__ = 'karlvorndoenitz@gmail.com'
class Application(object):
def __init__(self, handlers, settings=None, **kwargs):
    """Initialize the web application.

    :param handlers: url-to-handler mapping, dict type
    :param settings: settings mapping, dict type
    :param kwargs: extra options, e.g. ``port`` and ``log_conf``
    """
    self.settings = settings
    self.handlers = handlers
    # Both settings and kwargs are pushed into the global CLI-data store;
    # keys from kwargs are applied second, so presumably they win on
    # collision -- TODO confirm against set_cli_data's merge semantics.
    set_cli_data(self.settings)
    set_cli_data(kwargs)
    cli_data = get_cli_data()
    # Port comes from the merged CLI data, falling back to DEFAULT_PORT.
    self.port = int(cli_data.get("port", DEFAULT_PORT))
    # NOTE(review): log_conf is read from settings whenever settings is
    # truthy, so a "log_conf" kwarg is silently ignored when settings is
    # a non-empty dict without that key.
    log_conf = self.settings.get("log_conf", None) if self.settings else kwargs.get("log_conf", None)
    self.logger = init_logger(config_path=log_conf)
    # Byte sequences marking the end of an HTTP header block (blank line).
    self.EOL1 = b'\n\n'
    self.EOL2 = b'\n\r\n'
    # Initial (empty) response buffer content installed for new connections.
    self.response = ""
def listen(self, port):
    """Record the TCP port the server should bind to.

    :param port: port number, int or numeric string
    :return: None
    """
    self.port = int(port)
def __run_epoll(self):
    """
    run the application use epoll (Linux backend)

    Accepts connections on ``self.port``, accumulates request bytes until
    a blank line (EOL1/EOL2) is seen, parses them with ``HttpParser``,
    then drives each socket through EPOLLIN -> EPOLLOUT -> EPOLLHUP.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('0.0.0.0', self.port))
    server_socket.listen(CLIENT_CONNECT_TO_SERVER_NUM)  # the number of client that connect to server
    server_socket.setblocking(0)  # set 0 not block other block
    server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    epoll = select.epoll()
    epoll.register(server_socket.fileno(), select.EPOLLIN)
    try:
        http_connection = HttpConnection()
        http_io_buffer = HttpIOBuffer()
        http_io_routine_pool = HttpIORoutinePool()
        events_buf = []  # suspended-coroutine events replayed next iteration
        while True:
            events = epoll.poll(1) + events_buf
            events_buf = []
            for fileno, event in events:
                try:
                    if fileno == server_socket.fileno():  # if request come
                        connection, address = server_socket.accept()  # waiting income connection
                        connection.setblocking(0)  # none block
                        epoll.register(connection.fileno(), select.EPOLLIN)  # register socket read event to epoll
                        http_connection.add_connection(connection.fileno(), connection)
                        http_io_buffer.add_request(connection.fileno(), b'')
                        http_io_buffer.add_response(connection.fileno(), self.response)
                    elif event & select.EPOLLIN:  # when data in os's read buffer area
                        http_parser = http_io_routine_pool.get(file_no=fileno)
                        if http_parser:
                            # resume a previously suspended (coroutine) parser
                            data = http_parser.parse()
                            if isinstance(data, str) or isinstance(data, unicode):
                                http_io_buffer.add_response(
                                    fileno,
                                    http_io_buffer.get_response(fileno) + data
                                )
                                epoll.modify(fileno, select.EPOLLOUT)  # change file number to epoll out mode
                                http_io_routine_pool.remove(fileno)
                            else:  # if coroutine
                                http_io_routine_pool.add(fileno, http_parser)
                                events_buf.append((fileno, event))
                        else:
                            http_request_buffer = http_connection.get_connection(fileno).recv(SOCKET_RECEIVE_SIZE)
                            http_io_buffer.add_request(
                                fileno,
                                http_io_buffer.get_request(fileno) + http_request_buffer
                            )
                            if self.EOL1 in http_io_buffer.get_request(fileno) \
                                    or self.EOL2 in http_io_buffer.get_request(fileno):
                                # header block complete: strip a trailing CRLF and parse
                                request_data = http_io_buffer.get_request(fileno)[:-2] \
                                    if http_io_buffer.get_request(fileno).endswith("\r\n") \
                                    else http_io_buffer.get_request(fileno)
                                http_parser = HttpParser(
                                    request_data,
                                    self.handlers,
                                    settings=self.settings
                                )
                                data = http_parser.parse()
                                if isinstance(data, str) or isinstance(data, unicode):
                                    http_io_buffer.add_response(
                                        fileno,
                                        http_io_buffer.get_response(fileno) + data
                                    )
                                    epoll.modify(fileno, select.EPOLLOUT)  # change file number to epoll out mode
                                    http_io_routine_pool.remove(fileno)
                                else:  # if coroutine
                                    http_io_routine_pool.add(fileno, http_parser)
                                    events_buf.append((fileno, event))
                            else:
                                # request incomplete: drop this connection's state
                                http_connection.remove_connection(fileno)
                                http_io_buffer.remove_request(fileno)
                                http_io_buffer.remove_response(fileno)
                                http_io_routine_pool.remove(fileno)
                                epoll.unregister(fileno)
                    elif event & select.EPOLLOUT:  # if out mode
                        bytes_written = http_connection.get_connection(fileno).send(
                            http_io_buffer.get_response(fileno)
                        )
                        http_io_buffer.add_response(fileno, http_io_buffer.get_response(fileno)[bytes_written:])
                        if len(http_io_buffer.get_response(fileno)) == 0:  # if file sent
                            http_connection.get_connection(fileno).shutdown(socket.SHUT_RDWR)
                            epoll.modify(fileno, select.EPOLLHUP)
                    elif event & select.EPOLLHUP:  # if message sent and file number in epoll is hup
                        epoll.unregister(fileno)  # remove file number from epoll
                        http_connection.get_connection(fileno).close()  # close connection
                        http_connection.remove_connection(fileno)  # delete connection from connections dict
                except Exception as e:
                    # last-resort recovery: drop the connection and rebuild epoll
                    self.logger.info("error in __run_epoll: %s", str(e))
                    http_connection.remove_connection(fileno)
                    http_io_buffer.remove_request(fileno)
                    http_io_buffer.remove_response(fileno)
                    http_io_routine_pool.remove(fileno)
                    self.logger.info("fileno is: %s", str(fileno))
                    epoll.close()
                    epoll = select.epoll()
                    epoll.register(server_socket.fileno(), select.EPOLLIN)
    finally:
        epoll.unregister(server_socket.fileno())
        epoll.close()
        server_socket.close()
def __run_kqueue(self):
    """
    run server use kqueue (BSD/macOS backend)

    Connections are keyed by a monotonically increasing ``index`` stored
    in each kevent's ``udata`` field, not by fileno as in the epoll/poll
    backends.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(('0.0.0.0', self.port))
    server_socket.listen(CLIENT_CONNECT_TO_SERVER_NUM)
    kq = select.kqueue()
    http_connection = HttpConnection()
    http_io_buffer = HttpIOBuffer()
    http_io_routine_pool = HttpIORoutinePool()
    # next udata key to assign; the listening socket itself carries no udata
    index = 1
    events = [
        select.kevent(server_socket.fileno(), select.KQ_FILTER_READ, select.KQ_EV_ADD),
        select.kevent(server_socket.fileno(), select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
    ]
    events_buf = []  # suspended-coroutine events replayed next iteration
    while True:
        try:
            event_list = kq.control(events, 128, 0.0001) + events_buf
            events_buf = []
        except select.error as e:
            self.logger.error("error in __run_kqueue: %s", str(e))
            break
        if event_list:
            for each in event_list:
                if each.ident == server_socket.fileno():
                    # new incoming connection: register a READ kevent for it
                    index += 1
                    conn, addr = server_socket.accept()
                    http_connection.add_connection(index, conn)
                    events.append(
                        select.kevent(
                            http_connection.get_connection(index).fileno(),
                            select.KQ_FILTER_READ,
                            select.KQ_EV_ADD,
                            udata=index
                        )
                    )
                else:
                    try:
                        if each.udata >= 1 and each.filter == select.KQ_FILTER_READ:
                            # readable client socket
                            http_parser = http_io_routine_pool.get(file_no=each.udata)
                            if http_parser:
                                # resume a previously suspended (coroutine) parser
                                data = http_parser.parse()
                                if isinstance(data, str) or isinstance(data, unicode):
                                    # parse finished: swap READ kevent for WRITE
                                    http_io_routine_pool.remove(each.udata)
                                    http_io_buffer.add_response(each.udata, data)
                                    events.append(
                                        select.kevent(
                                            http_connection.get_connection(each.udata).fileno(),
                                            select.KQ_FILTER_WRITE,
                                            select.KQ_EV_ADD,
                                            udata=each.udata
                                        )
                                    )
                                    events.remove(select.kevent(
                                        http_connection.get_connection(each.udata).fileno(),
                                        select.KQ_FILTER_READ,
                                        select.KQ_EV_ADD,
                                        udata=each.udata)
                                    )
                                else:  # if coroutine
                                    http_io_routine_pool.add(each.udata, http_parser)
                                    events_buf.append(each)
                            else:
                                # first read for this connection: recv and parse
                                conn = http_connection.get_connection(each.udata)
                                request_data = conn.recv(SOCKET_RECEIVE_SIZE)
                                request_data = request_data[:-2] if request_data.endswith("\r\n") else request_data
                                http_parser = HttpParser(
                                    request_data,
                                    handlers=self.handlers,
                                    settings=self.settings
                                )
                                data = http_parser.parse()
                                if isinstance(data, str) or isinstance(data, unicode):
                                    # parse finished: swap READ kevent for WRITE
                                    http_io_buffer.add_response(each.udata, data)
                                    events.append(
                                        select.kevent(
                                            http_connection.get_connection(each.udata).fileno(),
                                            select.KQ_FILTER_WRITE,
                                            select.KQ_EV_ADD,
                                            udata=each.udata
                                        )
                                    )
                                    events.remove(select.kevent(
                                        http_connection.get_connection(each.udata).fileno(),
                                        select.KQ_FILTER_READ,
                                        select.KQ_EV_ADD,
                                        udata=each.udata)
                                    )
                                else:  # if coroutine
                                    http_io_routine_pool.add(each.udata, http_parser)
                                    events_buf.append(each)
                        elif each.udata >= 1 and each.filter == select.KQ_FILTER_WRITE:
                            # writable client socket: flush the response and close
                            conn = http_connection.get_connection(each.udata)
                            data = http_io_buffer.get_response(each.udata)
                            conn.send(data)
                            events.remove(select.kevent(
                                http_connection.get_connection(each.udata).fileno(),
                                select.KQ_FILTER_WRITE,
                                select.KQ_EV_ADD,
                                udata=each.udata)
                            )
                            conn.close()
                            http_connection.remove_connection(each.udata)
                    except Exception as e:
                        # recovery: drop this connection's state and rebuild kqueue
                        self.logger.info("error in __run_kqueue event list: %s", str(e))
                        self.logger.info("each filter: %s", each.filter)
                        self.__remove_event(events, each)
                        http_connection.remove_connection(each.udata)
                        http_io_buffer.remove_request(each.udata)
                        http_io_buffer.remove_response(each.udata)
                        http_io_routine_pool.remove(each.udata)
                        kq.close()
                        kq = select.kqueue()
    server_socket.close()
def __run_poll(self):
    """Serve requests with a poll-based event loop.

    Mirrors ``__run_epoll`` using the portable ``select.poll`` API.

    BUGFIXES:
    * The incomplete-request branch logged ``str(e)`` where no name ``e``
      existed; the NameError skipped the intended per-connection cleanup
      (the equivalent line was already removed from ``__run_epoll``).
    * The ``finally`` block called ``poll.close()``, but ``select.poll``
      objects have no ``close()`` method, so shutdown raised
      AttributeError and ``server_socket.close()`` never ran.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('0.0.0.0', self.port))
    server_socket.listen(CLIENT_CONNECT_TO_SERVER_NUM)  # accept backlog size
    server_socket.setblocking(0)  # non-blocking listener
    server_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    poll = select.poll()
    poll.register(server_socket.fileno(), select.POLLIN)
    try:
        http_connection = HttpConnection()
        http_io_buffer = HttpIOBuffer()
        http_io_routine_pool = HttpIORoutinePool()
        events_buf = []  # suspended-coroutine events replayed next iteration
        while True:
            events = poll.poll(1) + events_buf
            events_buf = []
            for fileno, event in events:
                try:
                    if fileno == server_socket.fileno():  # new incoming connection
                        connection, address = server_socket.accept()
                        connection.setblocking(0)  # non-blocking client socket
                        poll.register(connection.fileno(), select.POLLIN)
                        http_connection.add_connection(connection.fileno(), connection)
                        http_io_buffer.add_request(connection.fileno(), b'')
                        http_io_buffer.add_response(connection.fileno(), self.response)
                    elif event & select.POLLIN:  # data waiting in the read buffer
                        http_parser = http_io_routine_pool.get(file_no=fileno)
                        if http_parser:
                            # resume a previously suspended (coroutine) parser
                            data = http_parser.parse()
                            if isinstance(data, str) or isinstance(data, unicode):
                                http_io_buffer.add_response(
                                    fileno,
                                    http_io_buffer.get_response(fileno) + data
                                )
                                poll.modify(fileno, select.POLLOUT)  # switch to write mode
                                http_io_routine_pool.remove(fileno)
                            else:  # still a coroutine; retry next loop
                                http_io_routine_pool.add(fileno, http_parser)
                                events_buf.append((fileno, event))
                        else:
                            http_request_buffer = http_connection.get_connection(fileno).recv(SOCKET_RECEIVE_SIZE)
                            http_io_buffer.add_request(
                                fileno,
                                http_io_buffer.get_request(fileno) + http_request_buffer
                            )
                            if self.EOL1 in http_io_buffer.get_request(fileno) \
                                    or self.EOL2 in http_io_buffer.get_request(fileno):
                                # header block complete: strip a trailing CRLF and parse
                                request_data = http_io_buffer.get_request(fileno)[:-2] \
                                    if http_io_buffer.get_request(fileno).endswith("\r\n") \
                                    else http_io_buffer.get_request(fileno)
                                http_parser = HttpParser(
                                    request_data,
                                    self.handlers,
                                    settings=self.settings
                                )
                                data = http_parser.parse()
                                if isinstance(data, str) or isinstance(data, unicode):
                                    http_io_buffer.add_response(
                                        fileno,
                                        http_io_buffer.get_response(fileno) + data
                                    )
                                    poll.modify(fileno, select.POLLOUT)  # switch to write mode
                                    http_io_routine_pool.remove(fileno)
                                else:  # if coroutine
                                    http_io_routine_pool.add(fileno, http_parser)
                                    events_buf.append((fileno, event))
                            else:
                                # request incomplete: drop this connection's state
                                # (the undefined-name log line was removed here)
                                http_connection.remove_connection(fileno)
                                http_io_buffer.remove_request(fileno)
                                http_io_buffer.remove_response(fileno)
                                http_io_routine_pool.remove(fileno)
                                poll.unregister(fileno)
                    elif event & select.POLLOUT:  # socket writable
                        bytes_written = http_connection.get_connection(fileno).send(
                            http_io_buffer.get_response(fileno)
                        )
                        http_io_buffer.add_response(fileno, http_io_buffer.get_response(fileno)[bytes_written:])
                        if len(http_io_buffer.get_response(fileno)) == 0:  # response fully sent
                            http_connection.get_connection(fileno).shutdown(socket.SHUT_RDWR)
                            poll.modify(fileno, select.POLLHUP)
                    elif event & select.POLLHUP:  # peer hung up after send
                        poll.unregister(fileno)
                        http_connection.get_connection(fileno).close()
                        http_connection.remove_connection(fileno)
                except Exception as e:
                    # last-resort recovery: drop the connection's state
                    self.logger.info("error in __run_poll: %s", str(e))
                    http_connection.remove_connection(fileno)
                    http_io_buffer.remove_request(fileno)
                    http_io_buffer.remove_response(fileno)
                    http_io_routine_pool.remove(fileno)
                    self.logger.info("fileno is: %s", str(fileno))
                    poll.unregister(fileno)
    finally:
        poll.unregister(server_socket.fileno())
        server_socket.close()
def __run_async_io(self):
    """
    run server use asyncore (portable fallback backend)
    """
    # EchoServer registers itself with the asyncore machinery when
    # constructed; asyncore.loop() then blocks and dispatches events.
    EchoServer('0.0.0.0', self.port, self.handlers, self.settings)
    asyncore.loop()
def __remove_event(self, events, each):
    """Remove the first registered kevent whose ident matches *each*.

    :param events: the list containing registered events
    :param each: the event whose ident identifies what to remove
    :return: None
    """
    self.logger.warning("remove event with udata: %s", str(each.udata))
    for position, candidate in enumerate(events):
        if candidate.ident == each.ident:
            del events[position]
            break
def run(self, io_model=None):
    """run the web server

    :param io_model: os io model, EPOLL 0 KQUEUE 1 POLL 2; when None the
        best available backend on this platform is auto-selected
    :return: None

    BUGFIX: IOModel.EPOLL is 0 (per the io_model doc above), so the old
    truthiness test ``if io_model:`` treated an explicit EPOLL request as
    "no preference" and fell into auto-selection.  Compare against None
    so 0 is honoured.
    """
    print("server run on port: %d" % self.port)
    self.logger.info("server run on port: %d" % self.port)
    if io_model is not None:
        # explicit backend request; honoured only if the platform has it
        if io_model == IOModel.EPOLL and hasattr(select, "epoll"):
            print("run with epoll")
            self.logger.info("run with epoll")
            self.__run_epoll()
        elif io_model == IOModel.KQUEUE and hasattr(select, "kqueue"):
            print("run with kqueue")
            self.logger.info("run with kqueue")
            self.__run_kqueue()
        elif io_model == IOModel.POLL and hasattr(select, "poll"):
            print("run with poll")
            self.logger.info("run with poll")
            self.__run_poll()
    else:
        # auto-select: epoll > kqueue > poll > asyncore
        if hasattr(select, "epoll"):
            print("run with epoll")
            self.logger.info("run with epoll")
            self.__run_epoll()
        elif hasattr(select, "kqueue"):
            print("run with kqueue")
            self.logger.info("run with kqueue")
            self.__run_kqueue()
        elif hasattr(select, "poll"):
            print("run with poll")
            self.logger.info("run with poll")
            self.__run_poll()
        else:
            print("run with asyncore")
            self.logger.info("run with asyncore")
            self.__run_async_io()
    # Reached when no backend matched, or when a backend loop returned.
    print("server start failed!")
    self.logger.info("server start failed!")
|
# coding=utf-8
import hashlib
import hmac
import requests
import time
from operator import itemgetter
from .helpers import date_to_milliseconds, interval_to_milliseconds
from .exceptions import BinanceAPIException, BinanceRequestException, BinanceWithdrawException
class Client(object):
    """Client for the Binance exchange REST API.

    Wraps the main REST API (``/api``), the withdraw API (``/wapi``) and a
    few website-only endpoints, signing private requests with the account's
    API secret.
    """

    # Endpoint roots and API versions.
    API_URL = 'https://api.binance.com/api'
    WITHDRAW_API_URL = 'https://api.binance.com/wapi'
    WEBSITE_URL = 'https://www.binance.com'
    PUBLIC_API_VERSION = 'v1'
    PRIVATE_API_VERSION = 'v3'
    WITHDRAW_API_VERSION = 'v3'

    SYMBOL_TYPE_SPOT = 'SPOT'

    # Possible values of an order's "status" field.
    ORDER_STATUS_NEW = 'NEW'
    ORDER_STATUS_PARTIALLY_FILLED = 'PARTIALLY_FILLED'
    ORDER_STATUS_FILLED = 'FILLED'
    ORDER_STATUS_CANCELED = 'CANCELED'
    ORDER_STATUS_PENDING_CANCEL = 'PENDING_CANCEL'
    ORDER_STATUS_REJECTED = 'REJECTED'
    ORDER_STATUS_EXPIRED = 'EXPIRED'

    # Candlestick intervals accepted by the klines endpoints.
    KLINE_INTERVAL_1MINUTE = '1m'
    KLINE_INTERVAL_3MINUTE = '3m'
    KLINE_INTERVAL_5MINUTE = '5m'
    KLINE_INTERVAL_15MINUTE = '15m'
    KLINE_INTERVAL_30MINUTE = '30m'
    KLINE_INTERVAL_1HOUR = '1h'
    KLINE_INTERVAL_2HOUR = '2h'
    KLINE_INTERVAL_4HOUR = '4h'
    KLINE_INTERVAL_6HOUR = '6h'
    KLINE_INTERVAL_8HOUR = '8h'
    KLINE_INTERVAL_12HOUR = '12h'
    KLINE_INTERVAL_1DAY = '1d'
    KLINE_INTERVAL_3DAY = '3d'
    KLINE_INTERVAL_1WEEK = '1w'
    KLINE_INTERVAL_1MONTH = '1M'

    # Order sides and types.
    SIDE_BUY = 'BUY'
    SIDE_SELL = 'SELL'
    ORDER_TYPE_LIMIT = 'LIMIT'
    ORDER_TYPE_MARKET = 'MARKET'
    ORDER_TYPE_STOP_LOSS = 'STOP_LOSS'
    ORDER_TYPE_STOP_LOSS_LIMIT = 'STOP_LOSS_LIMIT'
    ORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT'
    ORDER_TYPE_TAKE_PROFIT_LIMIT = 'TAKE_PROFIT_LIMIT'
    ORDER_TYPE_LIMIT_MAKER = 'LIMIT_MAKER'

    TIME_IN_FORCE_GTC = 'GTC'  # Good till cancelled
    TIME_IN_FORCE_IOC = 'IOC'  # Immediate or cancel
    TIME_IN_FORCE_FOK = 'FOK'  # Fill or kill

    # Response verbosity levels for order-placement endpoints.
    ORDER_RESP_TYPE_ACK = 'ACK'
    ORDER_RESP_TYPE_RESULT = 'RESULT'
    ORDER_RESP_TYPE_FULL = 'FULL'

    # For accessing the data returned by Client.aggregate_trades().
    AGG_ID = 'a'
    AGG_PRICE = 'p'
    AGG_QUANTITY = 'q'
    AGG_FIRST_TRADE_ID = 'f'
    AGG_LAST_TRADE_ID = 'l'
    AGG_TIME = 'T'
    AGG_BUYER_MAKES = 'm'
    AGG_BEST_MATCH = 'M'
def __init__(self, api_key, api_secret, requests_params=None):
"""Binance API Client constructor
:param api_key: Api Key
:type api_key: str.
:param api_secret: Api Secret
:type api_secret: str.
:param requests_params: optional - Dictionary of requests params to use for all calls
:type requests_params: dict.
"""
self.API_KEY = api_key
self.API_SECRET = api_secret
self.session = self._init_session()
self._requests_params = requests_params
# init DNS and SSL cert
self.ping()
def _init_session(self):
session = requests.session()
session.headers.update({'Accept': 'application/json',
'User-Agent': 'binance/python',
'X-MBX-APIKEY': self.API_KEY})
return session
def _create_api_uri(self, path, signed=True, version=PUBLIC_API_VERSION):
v = self.PRIVATE_API_VERSION if signed else version
return self.API_URL + '/' + v + '/' + path
def _create_withdraw_api_uri(self, path):
return self.WITHDRAW_API_URL + '/' + self.WITHDRAW_API_VERSION + '/' + path
def _create_website_uri(self, path):
return self.WEBSITE_URL + '/' + path
def _generate_signature(self, data):
ordered_data = self._order_params(data)
query_string = '&'.join(["{}={}".format(d[0], d[1]) for d in ordered_data])
m = hmac.new(self.API_SECRET.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256)
return m.hexdigest()
def _order_params(self, data):
"""Convert params to list with signature as last element
:param data:
:return:
"""
has_signature = False
params = []
for key, value in data.items():
if key == 'signature':
has_signature = True
else:
params.append((key, value))
# sort parameters by key
params.sort(key=itemgetter(0))
if has_signature:
params.append(('signature', data['signature']))
return params
    def _request(self, method, uri, signed, force_params=False, **kwargs):
        """Perform an HTTP request and decode the JSON response.

        :param method: lower-case HTTP verb; used to look up the matching
            ``requests.Session`` method
        :param uri: fully-built request URI
        :param signed: when True, append a millisecond timestamp and an
            HMAC signature to the request data
        :param force_params: send the payload as query params even for
            non-GET requests (required by the withdraw API)
        :returns: decoded JSON body via :meth:`_handle_response`
        """
        # set default requests timeout
        kwargs['timeout'] = 10
        # add our global requests params
        if self._requests_params:
            kwargs.update(self._requests_params)
        data = kwargs.get('data', None)
        if data and isinstance(data, dict):
            kwargs['data'] = data
        if signed:
            # generate signature; must happen after all user params are in
            # place so the signature covers the final parameter set
            kwargs['data']['timestamp'] = int(time.time() * 1000)
            kwargs['data']['signature'] = self._generate_signature(kwargs['data'])
        # sort get and post params to match signature order
        if data:
            # find any per-call requests params passed and apply them
            if 'requests_params' in kwargs['data']:
                # merge requests params into kwargs
                kwargs.update(kwargs['data']['requests_params'])
                del(kwargs['data']['requests_params'])
            # sort post params
            kwargs['data'] = self._order_params(kwargs['data'])
        # if get request assign data array to params value for requests lib
        if data and (method == 'get' or force_params):
            kwargs['params'] = kwargs['data']
            del(kwargs['data'])
        response = getattr(self.session, method)(uri, **kwargs)
        return self._handle_response(response)
def _request_api(self, method, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
uri = self._create_api_uri(path, signed, version)
return self._request(method, uri, signed, **kwargs)
def _request_withdraw_api(self, method, path, signed=False, **kwargs):
uri = self._create_withdraw_api_uri(path)
return self._request(method, uri, signed, True, **kwargs)
def _request_website(self, method, path, signed=False, **kwargs):
uri = self._create_website_uri(path)
return self._request(method, uri, signed, **kwargs)
def _handle_response(self, response):
"""Internal helper for handling API responses from the Binance server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
"""
if not str(response.status_code).startswith('2'):
raise BinanceAPIException(response)
try:
return response.json()
except ValueError:
raise BinanceRequestException('Invalid Response: %s' % response.text)
    def _get(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        """Issue a GET request against the main REST API."""
        return self._request_api('get', path, signed, version, **kwargs)
    def _post(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        """Issue a POST request against the main REST API."""
        return self._request_api('post', path, signed, version, **kwargs)
    def _put(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        """Issue a PUT request against the main REST API."""
        return self._request_api('put', path, signed, version, **kwargs)
    def _delete(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        """Issue a DELETE request against the main REST API."""
        return self._request_api('delete', path, signed, version, **kwargs)
# Exchange Endpoints
def get_products(self):
"""Return list of products currently listed on Binance
Use get_exchange_info() call instead
:returns: list - List of product dictionaries
:raises: BinanceResponseException, BinanceAPIException
"""
products = self._request_website('get', 'exchange/public/product')
return products
def get_exchange_info(self):
"""Return rate limits and list of symbols
:returns: list - List of product dictionaries
.. code-block:: python
{
"timezone": "UTC",
"serverTime": 1508631584636,
"rateLimits": [
{
"rateLimitType": "REQUESTS",
"interval": "MINUTE",
"limit": 1200
},
{
"rateLimitType": "ORDERS",
"interval": "SECOND",
"limit": 10
},
{
"rateLimitType": "ORDERS",
"interval": "DAY",
"limit": 100000
}
],
"exchangeFilters": [],
"symbols": [
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"orderTypes": ["LIMIT", "MARKET"],
"icebergAllowed": false,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
}, {
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
}, {
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00100000"
}
]
}
]
}
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('exchangeInfo')
def get_symbol_info(self, symbol):
"""Return information about a symbol
:param symbol: required e.g BNBBTC
:type symbol: str
:returns: Dict if found, None if not
.. code-block:: python
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"orderTypes": ["LIMIT", "MARKET"],
"icebergAllowed": false,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
}, {
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
}, {
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00100000"
}
]
}
:raises: BinanceResponseException, BinanceAPIException
"""
res = self._get('exchangeInfo')
for item in res['symbols']:
if item['symbol'] == symbol.upper():
return item
return None
# General Endpoints
def ping(self):
"""Test connectivity to the Rest API.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#test-connectivity
:returns: Empty array
.. code-block:: python
{}
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('ping')
def get_server_time(self):
"""Test connectivity to the Rest API and get the current server time.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#check-server-time
:returns: Current server time
.. code-block:: python
{
"serverTime": 1499827319559
}
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('time')
# Market Data Endpoints
def get_all_tickers(self):
"""Latest price for all symbols.
https://www.binance.com/restapipub.html#symbols-price-ticker
:returns: List of market tickers
.. code-block:: python
[
{
"symbol": "LTCBTC",
"price": "4.00000200"
},
{
"symbol": "ETHBTC",
"price": "0.07946600"
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('ticker/allPrices')
def get_orderbook_tickers(self):
"""Best price/qty on the order book for all symbols.
https://www.binance.com/restapipub.html#symbols-order-book-ticker
:returns: List of order book market entries
.. code-block:: python
[
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
},
{
"symbol": "ETHBTC",
"bidPrice": "0.07946700",
"bidQty": "9.00000000",
"askPrice": "100000.00000000",
"askQty": "1000.00000000"
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('ticker/allBookTickers')
def get_order_book(self, **params):
"""Get the Order Book for the market
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#order-book
:param symbol: required
:type symbol: str
:param limit: Default 100; max 100
:type limit: int
:returns: API response
.. code-block:: python
{
"lastUpdateId": 1027024,
"bids": [
[
"4.00000000", # PRICE
"431.00000000", # QTY
[] # Can be ignored
]
],
"asks": [
[
"4.00000200",
"12.00000000",
[]
]
]
}
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('depth', data=params)
def get_recent_trades(self, **params):
"""Get recent trades (up to last 500).
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"time": 1499865549590,
"isBuyerMaker": true,
"isBestMatch": true
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('trades', data=params)
def get_historical_trades(self, **params):
"""Get older trades.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:param fromId: TradeId to fetch from. Default gets most recent trades.
:type fromId: str
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"time": 1499865549590,
"isBuyerMaker": true,
"isBestMatch": true
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('historicalTrades', data=params)
def get_aggregate_trades(self, **params):
"""Get compressed, aggregate trades. Trades that fill at the time,
from the same order, with the same price will have the quantity aggregated.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
:param symbol: required
:type symbol: str
:param fromId: ID to get aggregate trades from INCLUSIVE.
:type fromId: str
:param startTime: Timestamp in ms to get aggregate trades from INCLUSIVE.
:type startTime: int
:param endTime: Timestamp in ms to get aggregate trades until INCLUSIVE.
:type endTime: int
:param limit: Default 500; max 500.
:type limit: int
:returns: API response
.. code-block:: python
[
{
"a": 26129, # Aggregate tradeId
"p": "0.01633102", # Price
"q": "4.70443515", # Quantity
"f": 27781, # First tradeId
"l": 27781, # Last tradeId
"T": 1498793709153, # Timestamp
"m": true, # Was the buyer the maker?
"M": true # Was the trade the best price match?
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('aggTrades', data=params)
def aggregate_trade_iter(self, symbol, start_str=None, last_id=None):
"""Iterate over aggregate trade data from (start_time or last_id) to
the end of the history so far.
If start_time is specified, start with the first trade after
start_time. Meant to initialise a local cache of trade data.
If last_id is specified, start with the trade after it. This is meant
for updating a pre-existing local trade data cache.
Only allows start_str or last_id—not both. Not guaranteed to work
right if you're running more than one of these simultaneously. You
will probably hit your rate limit.
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Symbol string e.g. ETHBTC
:type symbol: str
:param start_str: Start date string in UTC format. The iterator will
return the first trade occurring later than this time.
:type start_str: str
:param last_id: aggregate trade ID of the last known aggregate trade.
Not a regular trade ID. See https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list.
:returns: an iterator of JSON objects, one per trade. The format of
each object is identical to Client.aggregate_trades().
:type last_id: int
"""
if start_str is not None and last_id is not None:
raise ValueError(
'start_time and last_id may not be simultaneously specified.')
# If there's no last_id, get one.
if last_id is None:
# Without a last_id, we actually need the first trade. Normally,
# we'd get rid of it. See the next loop.
if start_str is None:
trades = self.get_aggregate_trades(symbol=symbol, fromId=0)
else:
# It doesn't matter what the end time is, as long as it's less
# than a day and the result set contains at least one trade.
# A half a day should be fine.
start_ts = date_to_milliseconds(start_str)
trades = self.get_aggregate_trades(
symbol=symbol,
startTime=start_ts,
endTime=start_ts + (1000 * 86400 / 2))
for t in trades:
yield t
last_id = trades[-1][self.AGG_ID]
while True:
# There is no need to wait between queries, to avoid hitting the
# rate limit. We're using blocking IO, and as long as we're the
# only thread running calls like this, Binance will automatically
# add the right delay time on their end, forcing us to wait for
# data. That really simplifies this function's job. Binance is
# fucking awesome.
trades = self.get_aggregate_trades(symbol=symbol, fromId=last_id)
# fromId=n returns a set starting with id n, but we already have
# that one. So get rid of the first item in the result set.
trades = trades[1:]
if len(trades) == 0:
return
for t in trades:
yield t
last_id = trades[-1][self.AGG_ID]
def get_klines(self, **params):
"""Kline/candlestick bars for a symbol. Klines are uniquely identified by their open time.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#klinecandlestick-data
:param symbol: required
:type symbol: str
:param interval: -
:type interval: str
:param limit: - Default 500; max 500.
:type limit: int
:param startTime:
:type startTime: int
:param endTime:
:type endTime: int
:returns: API response
.. code-block:: python
[
[
1499040000000, # Open time
"0.01634790", # Open
"0.80000000", # High
"0.01575800", # Low
"0.01577100", # Close
"148976.11427815", # Volume
1499644799999, # Close time
"2434.19055334", # Quote asset volume
308, # Number of trades
"1756.87402397", # Taker buy base asset volume
"28.46694368", # Taker buy quote asset volume
"17928899.62484339" # Can be ignored
]
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('klines', data=params)
    def get_historical_klines(self, symbol, interval, start_str, end_str=None):
        """Get Historical Klines from Binance

        Repeatedly pages through the klines endpoint (500 bars per call)
        until the end of the requested range is reached.

        See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/

        If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"

        :param symbol: Name of symbol pair e.g BNBBTC
        :type symbol: str
        :param interval: Binance Kline interval
        :type interval: str
        :param start_str: Start date string in UTC format
        :type start_str: str
        :param end_str: optional - end date string in UTC format
        :type end_str: str
        :return: list of OHLCV values
        """
        # init our list
        output_data = []
        # setup the max limit per request
        limit = 500
        # convert interval to useful value in milliseconds
        timeframe = interval_to_milliseconds(interval)
        # convert our date strings to milliseconds
        start_ts = date_to_milliseconds(start_str)
        # if an end time was passed convert it
        end_ts = None
        if end_str:
            end_ts = date_to_milliseconds(end_str)
        idx = 0
        # it can be difficult to know when a symbol was listed on Binance so allow start time to be before list date
        symbol_existed = False
        while True:
            # fetch the klines from start_ts up to max 500 entries or the end_ts if set
            temp_data = self.get_klines(
                symbol=symbol,
                interval=interval,
                limit=limit,
                startTime=start_ts,
                endTime=end_ts
            )
            # handle the case where our start date is before the symbol pair listed on Binance
            if not symbol_existed and len(temp_data):
                symbol_existed = True
            if symbol_existed:
                # append this loops data to our output data
                output_data += temp_data
                # update our start timestamp using the last value in the array and add the interval timeframe
                start_ts = temp_data[-1][0] + timeframe
            else:
                # it wasn't listed yet, increment our start date
                start_ts += timeframe
            idx += 1
            # check if we received less than the required limit and exit the loop
            if len(temp_data) < limit:
                # exit the while loop
                break
            # sleep after every 3rd call to be kind to the API
            if idx % 3 == 0:
                time.sleep(1)
        return output_data
def get_ticker(self, **params):
"""24 hour price change statistics.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"priceChange": "-94.99999800",
"priceChangePercent": "-95.960",
"weightedAvgPrice": "0.29628482",
"prevClosePrice": "0.10002000",
"lastPrice": "4.00000200",
"bidPrice": "4.00000000",
"askPrice": "4.00000200",
"openPrice": "99.00000000",
"highPrice": "100.00000000",
"lowPrice": "0.10000000",
"volume": "8913.30000000",
"openTime": 1499783499040,
"closeTime": 1499869899040,
"fristId": 28385, # First tradeId
"lastId": 28460, # Last tradeId
"count": 76 # Trade count
}
OR
.. code-block:: python
[
{
"priceChange": "-94.99999800",
"priceChangePercent": "-95.960",
"weightedAvgPrice": "0.29628482",
"prevClosePrice": "0.10002000",
"lastPrice": "4.00000200",
"bidPrice": "4.00000000",
"askPrice": "4.00000200",
"openPrice": "99.00000000",
"highPrice": "100.00000000",
"lowPrice": "0.10000000",
"volume": "8913.30000000",
"openTime": 1499783499040,
"closeTime": 1499869899040,
"fristId": 28385, # First tradeId
"lastId": 28460, # Last tradeId
"count": 76 # Trade count
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('ticker/24hr', data=params)
def get_symbol_ticker(self, **params):
"""Latest price for a symbol or symbols.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"symbol": "LTCBTC",
"price": "4.00000200"
}
OR
.. code-block:: python
[
{
"symbol": "LTCBTC",
"price": "4.00000200"
},
{
"symbol": "ETHBTC",
"price": "0.07946600"
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('ticker/price', data=params, version=self.PRIVATE_API_VERSION)
def get_orderbook_ticker(self, **params):
"""Latest price for a symbol or symbols.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#symbol-order-book-ticker
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
}
OR
.. code-block:: python
[
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
},
{
"symbol": "ETHBTC",
"bidPrice": "0.07946700",
"bidQty": "9.00000000",
"askPrice": "100000.00000000",
"askQty": "1000.00000000"
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('ticker/bookTicker', data=params, version=self.PRIVATE_API_VERSION)
# Account Endpoints
def create_order(self, **params):
"""Send in a new order
Any order with an icebergQty MUST have timeInForce set to GTC.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#new-order--trade
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param type: required
:type type: str
:param timeInForce: required if limit order
:type timeInForce: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT to create an iceberg order.
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
Response ACK:
.. code-block:: python
{
"symbol":"LTCBTC",
"orderId": 1,
"clientOrderId": "myOrder1" # Will be newClientOrderId
"transactTime": 1499827319559
}
Response RESULT:
.. code-block:: python
{
"symbol": "BTCUSDT",
"orderId": 28,
"clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
"transactTime": 1507725176595,
"price": "0.00000000",
"origQty": "10.00000000",
"executedQty": "10.00000000",
"status": "FILLED",
"timeInForce": "GTC",
"type": "MARKET",
"side": "SELL"
}
Response FULL:
.. code-block:: python
{
"symbol": "BTCUSDT",
"orderId": 28,
"clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
"transactTime": 1507725176595,
"price": "0.00000000",
"origQty": "10.00000000",
"executedQty": "10.00000000",
"status": "FILLED",
"timeInForce": "GTC",
"type": "MARKET",
"side": "SELL",
"fills": [
{
"price": "4000.00000000",
"qty": "1.00000000",
"commission": "4.00000000",
"commissionAsset": "USDT"
},
{
"price": "3999.00000000",
"qty": "5.00000000",
"commission": "19.99500000",
"commissionAsset": "USDT"
},
{
"price": "3998.00000000",
"qty": "2.00000000",
"commission": "7.99600000",
"commissionAsset": "USDT"
},
{
"price": "3997.00000000",
"qty": "1.00000000",
"commission": "3.99700000",
"commissionAsset": "USDT"
},
{
"price": "3995.00000000",
"qty": "1.00000000",
"commission": "3.99500000",
"commissionAsset": "USDT"
}
]
}
:raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
return self._post('order', True, data=params)
def order_limit(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit order
Any order with an icebergQty MUST have timeInForce set to GTC.
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT to create an iceberg order.
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'type': self.ORDER_TYPE_LIMIT,
'timeInForce': timeInForce
})
return self.create_order(**params)
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit buy order
Any order with an icebergQty MUST have timeInForce set to GTC.
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param stopPrice: Used with stop orders
:type stopPrice: decimal
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_BUY,
})
return self.order_limit(timeInForce=timeInForce, **params)
def order_limit_sell(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit sell order
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param stopPrice: Used with stop orders
:type stopPrice: decimal
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_SELL
})
return self.order_limit(timeInForce=timeInForce, **params)
def order_market(self, **params):
"""Send in a new market order
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param quantity: required
:type quantity: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'type': self.ORDER_TYPE_MARKET
})
return self.create_order(**params)
def order_market_buy(self, **params):
"""Send in a new market buy order
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_BUY
})
return self.order_market(**params)
def order_market_sell(self, **params):
"""Send in a new market sell order
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_SELL
})
return self.order_market(**params)
def create_test_order(self, **params):
"""Test new order creation and signature/recvWindow long. Creates and validates a new order but does not send it into the matching engine.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#test-new-order-trade
:param symbol: required
:type symbol: str
:param side: required
:type side: str
:param type: required
:type type: str
:param timeInForce: required if limit order
:type timeInForce: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: The number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{}
:raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
return self._post('order/test', True, data=params)
def get_order(self, **params):
    """Check an order's status. Either orderId or origClientOrderId must be sent.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#query-order-user_data

    :param symbol: required
    :type symbol: str
    :param orderId: the unique order id
    :type orderId: int
    :param origClientOrderId: optional
    :type origClientOrderId: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response dict describing the order (status, price, origQty, ...)

    :raises: BinanceRequestException, BinanceAPIException
    """
    # Signed GET; all keyword arguments are forwarded verbatim as query params.
    order = self._get('order', True, data=params)
    return order

def get_all_orders(self, **params):
    """Get all account orders; active, canceled, or filled.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#all-orders-user_data

    :param symbol: required
    :type symbol: str
    :param orderId: the unique order id
    :type orderId: int
    :param limit: Default 500; max 500.
    :type limit: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: list of order dicts (same shape as :meth:`get_order`)

    :raises: BinanceRequestException, BinanceAPIException
    """
    # Signed GET returning the full order history for the symbol.
    orders = self._get('allOrders', True, data=params)
    return orders
def cancel_order(self, **params):
    """Cancel an active order. Either orderId or origClientOrderId must be sent.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#cancel-order-trade

    :param symbol: required
    :type symbol: str
    :param orderId: the unique order id
    :type orderId: int
    :param origClientOrderId: optional
    :type origClientOrderId: str
    :param newClientOrderId: Used to uniquely identify this cancel. Automatically generated by default.
    :type newClientOrderId: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response dict echoing symbol and order ids

    :raises: BinanceRequestException, BinanceAPIException
    """
    # Cancellation is a signed DELETE against the order endpoint.
    result = self._delete('order', True, data=params)
    return result

def get_open_orders(self, **params):
    """Get all open orders on a symbol.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#current-open-orders-user_data

    :param symbol: optional - omit to fetch open orders across all symbols
    :type symbol: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: list of open order dicts (same shape as :meth:`get_order`)

    :raises: BinanceRequestException, BinanceAPIException
    """
    open_orders = self._get('openOrders', True, data=params)
    return open_orders
# Account Endpoints
def get_account(self, **params):
    """Get current account information.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data

    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response dict with commissions, permissions and a
        "balances" list of {"asset", "free", "locked"} entries

    :raises: BinanceRequestException, BinanceAPIException
    """
    account = self._get('account', True, data=params)
    return account
def get_asset_balance(self, asset, **params):
    """Get current asset balance.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data

    :param asset: required
    :type asset: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: balance dict {"asset", "free", "locked"}, or None if not found

    :raises: BinanceRequestException, BinanceAPIException
    """
    account = self.get_account(**params)
    # Scan the balances list for a case-insensitive asset match.
    wanted = asset.lower()
    for balance in account.get('balances', []):
        if balance['asset'].lower() == wanted:
            return balance
    return None
def get_my_trades(self, **params):
    """Get trades for a specific symbol.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-trade-list-user_data

    :param symbol: required
    :type symbol: str
    :param limit: Default 500; max 500.
    :type limit: int
    :param fromId: TradeId to fetch from. Default gets most recent trades.
    :type fromId: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: list of trade dicts (id, price, qty, commission, ...)

    :raises: BinanceRequestException, BinanceAPIException
    """
    trades = self._get('myTrades', True, data=params)
    return trades

def get_account_status(self, **params):
    """Get account status detail.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#account-status-user_data

    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response dict with "msg", "success" and "objs" keys

    :raises: BinanceWithdrawException
    """
    result = self._request_withdraw_api('get', 'accountStatus.html', True, data=params)
    # The wapi endpoints report failure in the payload rather than via HTTP status.
    if not result['success']:
        raise BinanceWithdrawException(result['msg'])
    return result
# Withdraw Endpoints
def withdraw(self, **params):
    """Submit a withdraw request.

    https://www.binance.com/restapipub.html

    Assumptions:

    - You must have Withdraw permissions enabled on your API key
    - You must have withdrawn to the address specified through the website and approved the transaction via email

    :param asset: required
    :type asset: str
    :param address: required
    :type address: str
    :param addressTag: optional - Secondary address identifier for coins like XRP,XMR etc.
    :type addressTag: str
    :param amount: required
    :type amount: decimal
    :param name: optional - Description of the address, default asset value passed will be used
    :type name: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response dict with "msg", "success" and "id" keys

    :raises: BinanceRequestException, BinanceAPIException, BinanceWithdrawException
    """
    # force a name for the withdrawal if one not set
    if 'asset' in params and 'name' not in params:
        params['name'] = params['asset']
    result = self._request_withdraw_api('post', 'withdraw.html', True, data=params)
    # wapi signals failure in the payload body, not the HTTP status code.
    if not result['success']:
        raise BinanceWithdrawException(result['msg'])
    return result
def get_deposit_history(self, **params):
    """Fetch deposit history.

    https://www.binance.com/restapipub.html

    :param asset: optional
    :type asset: str
    :param status: optional - 0 pending, 1 success
    :type status: int
    :param startTime: optional
    :type startTime: long
    :param endTime: optional
    :type endTime: long
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response dict with "depositList" and "success" keys

    :raises: BinanceRequestException, BinanceAPIException
    """
    history = self._request_withdraw_api('get', 'depositHistory.html', True, data=params)
    return history

def get_withdraw_history(self, **params):
    """Fetch withdraw history.

    https://www.binance.com/restapipub.html

    :param asset: optional
    :type asset: str
    :param status: optional - 0 Email Sent, 1 Cancelled, 2 Awaiting Approval,
        3 Rejected, 4 Processing, 5 Failure, 6 Completed
    :type status: int
    :param startTime: optional
    :type startTime: long
    :param endTime: optional
    :type endTime: long
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response dict with "withdrawList" and "success" keys

    :raises: BinanceRequestException, BinanceAPIException
    """
    history = self._request_withdraw_api('get', 'withdrawHistory.html', True, data=params)
    return history

def get_deposit_address(self, **params):
    """Fetch a deposit address for a symbol.

    https://www.binance.com/restapipub.html

    :param asset: required
    :type asset: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response dict with "address", "addressTag", "asset" and "success" keys

    :raises: BinanceRequestException, BinanceAPIException
    """
    address = self._request_withdraw_api('get', 'depositAddress.html', True, data=params)
    return address
# User Stream Endpoints
def stream_get_listen_key(self):
    """Start a new user data stream and return the listen key.

    If a stream already exists it should return the same key.
    If the stream becomes invalid a new key is returned.

    Can be used to keep the user stream alive.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#start-user-data-stream-user_stream

    :returns: the listen key string

    :raises: BinanceRequestException, BinanceAPIException
    """
    # Unsigned POST; an empty payload is still required by the endpoint.
    return self._post('userDataStream', False, data={})['listenKey']

def stream_keepalive(self, listenKey):
    """PING a user data stream to prevent a time out.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#keepalive-user-data-stream-user_stream

    :param listenKey: required
    :type listenKey: str

    :returns: API response (empty dict on success)

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._put('userDataStream', False, data={'listenKey': listenKey})

def stream_close(self, listenKey):
    """Close out a user data stream.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#close-user-data-stream-user_stream

    :param listenKey: required
    :type listenKey: str

    :returns: API response (empty dict on success)

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._delete('userDataStream', False, data={'listenKey': listenKey})
Add function to get system status
# coding=utf-8
import hashlib
import hmac
import requests
import time
from operator import itemgetter
from .helpers import date_to_milliseconds, interval_to_milliseconds
from .exceptions import BinanceAPIException, BinanceRequestException, BinanceWithdrawException
class Client(object):
    """Client for the Binance REST API (signed and public endpoints)."""

    # REST endpoint roots.
    API_URL = 'https://api.binance.com/api'
    WITHDRAW_API_URL = 'https://api.binance.com/wapi'
    WEBSITE_URL = 'https://www.binance.com'

    # API versions: public endpoints default to v1, signed endpoints use v3.
    PUBLIC_API_VERSION = 'v1'
    PRIVATE_API_VERSION = 'v3'
    WITHDRAW_API_VERSION = 'v3'

    SYMBOL_TYPE_SPOT = 'SPOT'

    # Order lifecycle states as returned by the API.
    ORDER_STATUS_NEW = 'NEW'
    ORDER_STATUS_PARTIALLY_FILLED = 'PARTIALLY_FILLED'
    ORDER_STATUS_FILLED = 'FILLED'
    ORDER_STATUS_CANCELED = 'CANCELED'
    ORDER_STATUS_PENDING_CANCEL = 'PENDING_CANCEL'
    ORDER_STATUS_REJECTED = 'REJECTED'
    ORDER_STATUS_EXPIRED = 'EXPIRED'

    # Kline/candlestick intervals accepted by get_klines().
    KLINE_INTERVAL_1MINUTE = '1m'
    KLINE_INTERVAL_3MINUTE = '3m'
    KLINE_INTERVAL_5MINUTE = '5m'
    KLINE_INTERVAL_15MINUTE = '15m'
    KLINE_INTERVAL_30MINUTE = '30m'
    KLINE_INTERVAL_1HOUR = '1h'
    KLINE_INTERVAL_2HOUR = '2h'
    KLINE_INTERVAL_4HOUR = '4h'
    KLINE_INTERVAL_6HOUR = '6h'
    KLINE_INTERVAL_8HOUR = '8h'
    KLINE_INTERVAL_12HOUR = '12h'
    KLINE_INTERVAL_1DAY = '1d'
    KLINE_INTERVAL_3DAY = '3d'
    KLINE_INTERVAL_1WEEK = '1w'
    KLINE_INTERVAL_1MONTH = '1M'

    # Order sides and types for create_order().
    SIDE_BUY = 'BUY'
    SIDE_SELL = 'SELL'

    ORDER_TYPE_LIMIT = 'LIMIT'
    ORDER_TYPE_MARKET = 'MARKET'
    ORDER_TYPE_STOP_LOSS = 'STOP_LOSS'
    ORDER_TYPE_STOP_LOSS_LIMIT = 'STOP_LOSS_LIMIT'
    ORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT'
    ORDER_TYPE_TAKE_PROFIT_LIMIT = 'TAKE_PROFIT_LIMIT'
    ORDER_TYPE_LIMIT_MAKER = 'LIMIT_MAKER'

    TIME_IN_FORCE_GTC = 'GTC'  # Good till cancelled
    TIME_IN_FORCE_IOC = 'IOC'  # Immediate or cancel
    TIME_IN_FORCE_FOK = 'FOK'  # Fill or kill

    # Levels of detail for the new-order response JSON.
    ORDER_RESP_TYPE_ACK = 'ACK'
    ORDER_RESP_TYPE_RESULT = 'RESULT'
    ORDER_RESP_TYPE_FULL = 'FULL'

    # For accessing the data returned by Client.aggregate_trades().
    AGG_ID = 'a'
    AGG_PRICE = 'p'
    AGG_QUANTITY = 'q'
    AGG_FIRST_TRADE_ID = 'f'
    AGG_LAST_TRADE_ID = 'l'
    AGG_TIME = 'T'
    AGG_BUYER_MAKES = 'm'
    AGG_BEST_MATCH = 'M'
def __init__(self, api_key, api_secret, requests_params=None):
    """Binance API Client constructor

    :param api_key: Api Key
    :type api_key: str.
    :param api_secret: Api Secret
    :type api_secret: str.
    :param requests_params: optional - Dictionary of requests params to use for all calls
    :type requests_params: dict.
    """
    self.API_KEY = api_key
    self.API_SECRET = api_secret
    # Session carries auth header and connection pooling for all calls.
    self.session = self._init_session()
    # Extra kwargs merged into every requests call (e.g. proxies, verify).
    self._requests_params = requests_params

    # init DNS and SSL cert
    # NOTE: this performs a network call at construction time.
    self.ping()
def _init_session(self):
    """Create the shared requests session with JSON accept and API-key headers."""
    session = requests.session()
    headers = {
        'Accept': 'application/json',
        'User-Agent': 'binance/python',
        'X-MBX-APIKEY': self.API_KEY,
    }
    session.headers.update(headers)
    return session
def _create_api_uri(self, path, signed=True, version=PUBLIC_API_VERSION):
v = self.PRIVATE_API_VERSION if signed else version
return self.API_URL + '/' + v + '/' + path
def _create_withdraw_api_uri(self, path):
return self.WITHDRAW_API_URL + '/' + self.WITHDRAW_API_VERSION + '/' + path
def _create_website_uri(self, path):
return self.WEBSITE_URL + '/' + path
def _generate_signature(self, data):
ordered_data = self._order_params(data)
query_string = '&'.join(["{}={}".format(d[0], d[1]) for d in ordered_data])
m = hmac.new(self.API_SECRET.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256)
return m.hexdigest()
def _order_params(self, data):
"""Convert params to list with signature as last element
:param data:
:return:
"""
has_signature = False
params = []
for key, value in data.items():
if key == 'signature':
has_signature = True
else:
params.append((key, value))
# sort parameters by key
params.sort(key=itemgetter(0))
if has_signature:
params.append(('signature', data['signature']))
return params
def _request(self, method, uri, signed, force_params=False, **kwargs):
    """Core request dispatcher: signs, orders params, and sends via the session.

    :param method: HTTP verb name matching a requests.Session method ('get', 'post', ...)
    :param uri: full URL to call
    :param signed: if True, append a timestamp and HMAC signature to the data
    :param force_params: send `data` as query params even for non-GET verbs
        (used by the wapi endpoints)
    """
    # set default requests timeout
    kwargs['timeout'] = 10

    # add our global requests params
    if self._requests_params:
        kwargs.update(self._requests_params)

    data = kwargs.get('data', None)
    if data and isinstance(data, dict):
        kwargs['data'] = data
    if signed:
        # generate signature; the timestamp must be in place before signing
        # NOTE(review): signed calls assume kwargs['data'] exists — every
        # caller in this file passes data=..., confirm before changing.
        kwargs['data']['timestamp'] = int(time.time() * 1000)
        kwargs['data']['signature'] = self._generate_signature(kwargs['data'])

    # sort get and post params to match signature order
    if data:
        # find any requests params passed and apply them
        if 'requests_params' in kwargs['data']:
            # merge requests params into kwargs (per-call override)
            kwargs.update(kwargs['data']['requests_params'])
            del(kwargs['data']['requests_params'])

        # sort post params so the wire order matches the signed order
        kwargs['data'] = self._order_params(kwargs['data'])

    # if get request assign data array to params value for requests lib
    if data and (method == 'get' or force_params):
        kwargs['params'] = kwargs['data']
        del(kwargs['data'])

    response = getattr(self.session, method)(uri, **kwargs)
    return self._handle_response(response)
def _request_api(self, method, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
    """Send a request to the main API, resolving path and version to a URL."""
    url = self._create_api_uri(path, signed, version)
    return self._request(method, url, signed, **kwargs)

def _request_withdraw_api(self, method, path, signed=False, **kwargs):
    """Send a request to the withdraw (wapi) API; data is always sent as query params."""
    url = self._create_withdraw_api_uri(path)
    return self._request(method, url, signed, True, **kwargs)

def _request_website(self, method, path, signed=False, **kwargs):
    """Send a request to the binance.com website."""
    url = self._create_website_uri(path)
    return self._request(method, url, signed, **kwargs)
def _handle_response(self, response):
"""Internal helper for handling API responses from the Binance server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
"""
if not str(response.status_code).startswith('2'):
raise BinanceAPIException(response)
try:
return response.json()
except ValueError:
raise BinanceRequestException('Invalid Response: %s' % response.text)
def _get(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
    # Convenience wrapper: HTTP GET against the main API.
    return self._request_api('get', path, signed, version, **kwargs)

def _post(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
    # Convenience wrapper: HTTP POST against the main API.
    return self._request_api('post', path, signed, version, **kwargs)

def _put(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
    # Convenience wrapper: HTTP PUT against the main API.
    return self._request_api('put', path, signed, version, **kwargs)

def _delete(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
    # Convenience wrapper: HTTP DELETE against the main API.
    return self._request_api('delete', path, signed, version, **kwargs)
# Exchange Endpoints
def get_products(self):
    """Return list of products currently listed on Binance.

    Use get_exchange_info() call instead.

    :returns: list - List of product dictionaries

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._request_website('get', 'exchange/public/product')

def get_exchange_info(self):
    """Return rate limits and list of symbols.

    :returns: dict with "timezone", "serverTime", "rateLimits",
        "exchangeFilters" and "symbols" keys; each symbol entry carries
        assets, precisions, order types and trading filters

    :raises: BinanceRequestException, BinanceAPIException
    """
    exchange_info = self._get('exchangeInfo')
    return exchange_info
def get_symbol_info(self, symbol):
    """Return information about a symbol.

    :param symbol: required e.g BNBBTC
    :type symbol: str

    :returns: symbol entry dict from get_exchange_info() (status, assets,
        precisions, orderTypes, filters) if found, None if not

    :raises: BinanceRequestException, BinanceAPIException
    """
    exchange_info = self._get('exchangeInfo')
    wanted = symbol.upper()
    # First matching entry, or None when the symbol is not listed.
    return next(
        (item for item in exchange_info['symbols'] if item['symbol'] == wanted),
        None,
    )
# General Endpoints
def ping(self):
    """Test connectivity to the Rest API.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#test-connectivity

    :returns: Empty array

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._get('ping')
    return response

def get_server_time(self):
    """Test connectivity to the Rest API and get the current server time.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#check-server-time

    :returns: dict with a "serverTime" key (milliseconds)

    :raises: BinanceRequestException, BinanceAPIException
    """
    response = self._get('time')
    return response
# Market Data Endpoints
def get_all_tickers(self):
    """Latest price for all symbols.

    https://www.binance.com/restapipub.html#symbols-price-ticker

    :returns: list of {"symbol", "price"} dicts

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._get('ticker/allPrices')

def get_orderbook_tickers(self):
    """Best price/qty on the order book for all symbols.

    https://www.binance.com/restapipub.html#symbols-order-book-ticker

    :returns: list of {"symbol", "bidPrice", "bidQty", "askPrice", "askQty"} dicts

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._get('ticker/allBookTickers')

def get_order_book(self, **params):
    """Get the Order Book for the market.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#order-book

    :param symbol: required
    :type symbol: str
    :param limit: Default 100; max 100
    :type limit: int

    :returns: dict with "lastUpdateId" plus "bids" and "asks" lists of
        [price, qty, ignored] triples

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._get('depth', data=params)
def get_recent_trades(self, **params):
    """Get recent trades (up to last 500).

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list

    :param symbol: required
    :type symbol: str
    :param limit: Default 500; max 500.
    :type limit: int

    :returns: list of trade dicts (id, price, qty, time, isBuyerMaker, isBestMatch)

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._get('trades', data=params)

def get_historical_trades(self, **params):
    """Get older trades.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list

    :param symbol: required
    :type symbol: str
    :param limit: Default 500; max 500.
    :type limit: int
    :param fromId: TradeId to fetch from. Default gets most recent trades.
    :type fromId: str

    :returns: list of trade dicts (same shape as :meth:`get_recent_trades`)

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._get('historicalTrades', data=params)

def get_aggregate_trades(self, **params):
    """Get compressed, aggregate trades. Trades that fill at the time,
    from the same order, with the same price will have the quantity aggregated.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list

    :param symbol: required
    :type symbol: str
    :param fromId: ID to get aggregate trades from INCLUSIVE.
    :type fromId: str
    :param startTime: Timestamp in ms to get aggregate trades from INCLUSIVE.
    :type startTime: int
    :param endTime: Timestamp in ms to get aggregate trades until INCLUSIVE.
    :type endTime: int
    :param limit: Default 500; max 500.
    :type limit: int

    :returns: list of dicts keyed by the short AGG_* field names
        (see the AGG_ID..AGG_BEST_MATCH class constants)

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._get('aggTrades', data=params)
def aggregate_trade_iter(self, symbol, start_str=None, last_id=None):
"""Iterate over aggregate trade data from (start_time or last_id) to
the end of the history so far.
If start_time is specified, start with the first trade after
start_time. Meant to initialise a local cache of trade data.
If last_id is specified, start with the trade after it. This is meant
for updating a pre-existing local trade data cache.
Only allows start_str or last_id—not both. Not guaranteed to work
right if you're running more than one of these simultaneously. You
will probably hit your rate limit.
See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/
If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
:param symbol: Symbol string e.g. ETHBTC
:type symbol: str
:param start_str: Start date string in UTC format. The iterator will
return the first trade occurring later than this time.
:type start_str: str
:param last_id: aggregate trade ID of the last known aggregate trade.
Not a regular trade ID. See https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list.
:returns: an iterator of JSON objects, one per trade. The format of
each object is identical to Client.aggregate_trades().
:type last_id: int
"""
if start_str is not None and last_id is not None:
raise ValueError(
'start_time and last_id may not be simultaneously specified.')
# If there's no last_id, get one.
if last_id is None:
# Without a last_id, we actually need the first trade. Normally,
# we'd get rid of it. See the next loop.
if start_str is None:
trades = self.get_aggregate_trades(symbol=symbol, fromId=0)
else:
# It doesn't matter what the end time is, as long as it's less
# than a day and the result set contains at least one trade.
# A half a day should be fine.
start_ts = date_to_milliseconds(start_str)
trades = self.get_aggregate_trades(
symbol=symbol,
startTime=start_ts,
endTime=start_ts + (1000 * 86400 / 2))
for t in trades:
yield t
last_id = trades[-1][self.AGG_ID]
while True:
# There is no need to wait between queries, to avoid hitting the
# rate limit. We're using blocking IO, and as long as we're the
# only thread running calls like this, Binance will automatically
# add the right delay time on their end, forcing us to wait for
# data. That really simplifies this function's job. Binance is
# fucking awesome.
trades = self.get_aggregate_trades(symbol=symbol, fromId=last_id)
# fromId=n returns a set starting with id n, but we already have
# that one. So get rid of the first item in the result set.
trades = trades[1:]
if len(trades) == 0:
return
for t in trades:
yield t
last_id = trades[-1][self.AGG_ID]
def get_klines(self, **params):
    """Kline/candlestick bars for a symbol. Klines are uniquely identified by their open time.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#klinecandlestick-data

    :param symbol: required
    :type symbol: str
    :param interval: one of the KLINE_INTERVAL_* constants
    :type interval: str
    :param limit: Default 500; max 500.
    :type limit: int
    :param startTime: optional, milliseconds
    :type startTime: int
    :param endTime: optional, milliseconds
    :type endTime: int

    :returns: list of OHLCV rows: [open time, open, high, low, close,
        volume, close time, quote volume, trade count, taker buy base
        volume, taker buy quote volume, ignored]

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._get('klines', data=params)
def get_historical_klines(self, symbol, interval, start_str, end_str=None):
    """Get Historical Klines from Binance

    See dateparser docs for valid start and end string formats http://dateparser.readthedocs.io/en/latest/

    If using offset strings for dates add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"

    :param symbol: Name of symbol pair e.g BNBBTC
    :type symbol: str
    :param interval: Binance Kline interval
    :type interval: str
    :param start_str: Start date string in UTC format
    :type start_str: str
    :param end_str: optional - end date string in UTC format
    :type end_str: str

    :return: list of OHLCV values
    """
    # init our list
    output_data = []

    # setup the max limit (maximum rows per API call)
    limit = 500

    # convert interval to its duration in milliseconds
    timeframe = interval_to_milliseconds(interval)

    # convert our date strings to milliseconds
    start_ts = date_to_milliseconds(start_str)

    # if an end time was passed convert it
    end_ts = None
    if end_str:
        end_ts = date_to_milliseconds(end_str)

    idx = 0
    # it can be difficult to know when a symbol was listed on Binance so allow start time to be before list date
    symbol_existed = False
    while True:
        # fetch the klines from start_ts up to max 500 entries or the end_ts if set
        temp_data = self.get_klines(
            symbol=symbol,
            interval=interval,
            limit=limit,
            startTime=start_ts,
            endTime=end_ts
        )

        # handle the case where our start date is before the symbol pair listed on Binance
        if not symbol_existed and len(temp_data):
            symbol_existed = True

        if symbol_existed:
            # append this loops data to our output data
            output_data += temp_data

            # update our start timestamp using the last value in the array and add the interval timeframe
            # (element 0 of a kline row is its open time in milliseconds)
            start_ts = temp_data[-1][0] + timeframe
        else:
            # it wasn't listed yet, increment our start date
            start_ts += timeframe

        idx += 1
        # check if we received less than the required limit and exit the loop
        if len(temp_data) < limit:
            # exit the while loop
            break

        # sleep after every 3rd call to be kind to the API
        if idx % 3 == 0:
            time.sleep(1)

    return output_data
def get_ticker(self, **params):
    """24 hour price change statistics.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics

    :param symbol: optional - omit to fetch statistics for every symbol
    :type symbol: str

    :returns: dict of 24h statistics (priceChange, weightedAvgPrice,
        lastPrice, volume, openTime, closeTime, count, ...) for one symbol,
        or a list of such dicts when no symbol is given

    :raises: BinanceRequestException, BinanceAPIException
    """
    return self._get('ticker/24hr', data=params)

def get_symbol_ticker(self, **params):
    """Latest price for a symbol or symbols.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics

    :param symbol: optional - omit to fetch prices for every symbol
    :type symbol: str

    :returns: {"symbol", "price"} dict for one symbol, or a list of such
        dicts when no symbol is given

    :raises: BinanceRequestException, BinanceAPIException
    """
    # This endpoint only exists in the v3 (private-version) API namespace.
    return self._get('ticker/price', data=params, version=self.PRIVATE_API_VERSION)

def get_orderbook_ticker(self, **params):
    """Best bid/ask price and quantity for a symbol or symbols.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#symbol-order-book-ticker

    :param symbol: optional - omit to fetch the book ticker for every symbol
    :type symbol: str

    :returns: {"symbol", "bidPrice", "bidQty", "askPrice", "askQty"} dict
        for one symbol, or a list of such dicts when no symbol is given

    :raises: BinanceRequestException, BinanceAPIException
    """
    # This endpoint only exists in the v3 (private-version) API namespace.
    return self._get('ticker/bookTicker', data=params, version=self.PRIVATE_API_VERSION)
# Account Endpoints
def create_order(self, **params):
    """Send in a new order.

    Any order with an icebergQty MUST have timeInForce set to GTC.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#new-order--trade

    :param symbol: required
    :type symbol: str
    :param side: required
    :type side: str
    :param type: required
    :type type: str
    :param timeInForce: required if limit order
    :type timeInForce: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT to create an iceberg order.
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response; the payload depends on newOrderRespType -- ACK
        carries only order identifiers, RESULT adds the order state, and FULL
        additionally includes the list of fills.

    :raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Orders are signed requests, hence the True flag.
    return self._post('order', True, data=params)
def order_limit(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit order.

    Any order with an icebergQty MUST have timeInForce set to GTC.

    :param symbol: required
    :type symbol: str
    :param side: required
    :type side: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param timeInForce: default Good till cancelled
    :type timeInForce: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT to create an iceberg order.
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- see the ``create_order`` endpoint for the full
        set of response options

    :raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Pin the order type and time-in-force, then delegate to create_order.
    params['type'] = self.ORDER_TYPE_LIMIT
    params['timeInForce'] = timeInForce
    return self.create_order(**params)
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit buy order.

    Any order with an icebergQty MUST have timeInForce set to GTC.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param timeInForce: default Good till cancelled
    :type timeInForce: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param stopPrice: Used with stop orders
    :type stopPrice: decimal
    :param icebergQty: Used with iceberg orders
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- see the ``create_order`` endpoint for the full
        set of response options

    :raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to BUY and delegate to the generic limit-order helper.
    params['side'] = self.SIDE_BUY
    return self.order_limit(timeInForce=timeInForce, **params)
def order_limit_sell(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit sell order.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param timeInForce: default Good till cancelled
    :type timeInForce: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param stopPrice: Used with stop orders
    :type stopPrice: decimal
    :param icebergQty: Used with iceberg orders
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- see the ``create_order`` endpoint for the full
        set of response options

    :raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to SELL and delegate to the generic limit-order helper.
    params['side'] = self.SIDE_SELL
    return self.order_limit(timeInForce=timeInForce, **params)
def order_market(self, **params):
    """Send in a new market order.

    :param symbol: required
    :type symbol: str
    :param side: required
    :type side: str
    :param quantity: required
    :type quantity: decimal
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- see the ``create_order`` endpoint for the full
        set of response options

    :raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Pin the order type, then delegate to create_order.
    params['type'] = self.ORDER_TYPE_MARKET
    return self.create_order(**params)
def order_market_buy(self, **params):
    """Send in a new market buy order.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- see the ``create_order`` endpoint for the full
        set of response options

    :raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to BUY and delegate to the market-order helper.
    params['side'] = self.SIDE_BUY
    return self.order_market(**params)
def order_market_sell(self, **params):
    """Send in a new market sell order.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- see the ``create_order`` endpoint for the full
        set of response options

    :raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to SELL and delegate to the market-order helper.
    params['side'] = self.SIDE_SELL
    return self.order_market(**params)
def create_test_order(self, **params):
    """Validate a new order (parameters, signature, recvWindow) without
    sending it to the matching engine.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#test-new-order-trade

    :param symbol: required
    :type symbol: str
    :param side: required
    :type side: str
    :param type: required
    :type type: str
    :param timeInForce: required if limit order
    :type timeInForce: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param icebergQty: Used with iceberg orders
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: The number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response (an empty dict on success)

    :raises: BinanceResponseException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    return self._post('order/test', True, data=params)
def get_order(self, **params):
    """Check an order's status. Either orderId or origClientOrderId must be sent.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#query-order-user_data

    :param symbol: required
    :type symbol: str
    :param orderId: The unique order id
    :type orderId: int
    :param origClientOrderId: optional
    :type origClientOrderId: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict describing the order (symbol, orderId,
        price, origQty, executedQty, status, type, side, ...)
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._get('order', True, data=params)
def get_all_orders(self, **params):
    """Get all account orders for a symbol: active, canceled, or filled.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#all-orders-user_data

    :param symbol: required
    :type symbol: str
    :param orderId: The unique order id
    :type orderId: int
    :param limit: Default 500; max 500.
    :type limit: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a list of order dicts
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._get('allOrders', True, data=params)
def cancel_order(self, **params):
    """Cancel an active order. Either orderId or origClientOrderId must be sent.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#cancel-order-trade

    :param symbol: required
    :type symbol: str
    :param orderId: The unique order id
    :type orderId: int
    :param origClientOrderId: optional
    :type origClientOrderId: str
    :param newClientOrderId: Used to uniquely identify this cancel. Automatically generated by default.
    :type newClientOrderId: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- identifiers of the cancelled order
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._delete('order', True, data=params)
def get_open_orders(self, **params):
    """Get all currently open orders, optionally filtered by symbol.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#current-open-orders-user_data

    :param symbol: optional
    :type symbol: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a list of open-order dicts
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._get('openOrders', True, data=params)
# Account Endpoints
def get_account(self, **params):
    """Get current account information.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data

    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict with commission rates, permission flags
        (canTrade/canWithdraw/canDeposit) and a ``balances`` list of
        asset/free/locked entries
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._get('account', True, data=params)
def get_asset_balance(self, asset, **params):
    """Look up the current balance entry for a single asset.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data

    :param asset: required
    :type asset: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: the balance dictionary (asset/free/locked keys) or None if the
        asset is not present in the account
    :raises: BinanceResponseException, BinanceAPIException
    """
    account = self.get_account(**params)
    wanted = asset.lower()
    # Scan the balance list for a case-insensitive asset match.
    return next(
        (entry for entry in account.get('balances', [])
         if entry['asset'].lower() == wanted),
        None)
def get_my_trades(self, **params):
    """Get this account's trades for a specific symbol.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-trade-list-user_data

    :param symbol: required
    :type symbol: str
    :param limit: Default 500; max 500.
    :type limit: int
    :param fromId: TradeId to fetch from. Default gets most recent trades.
    :type fromId: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a list of trade dicts (id, price, qty,
        commission, time, isBuyer/isMaker/isBestMatch flags)
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._get('myTrades', True, data=params)
def get_system_status(self):
    """Get system status detail.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#system-status-system

    :returns: API response -- ``{"status": 0, "msg": "normal"}`` where status
        0 means normal operation and 1 means system maintenance
    :raises: BinanceAPIException
    """
    return self._request_withdraw_api('get', 'systemStatus.html')
def get_account_status(self, **params):
    """Get account status detail.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#account-status-user_data

    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict with ``success``, ``msg`` and ``objs``
    :raises: BinanceWithdrawException when the endpoint reports failure
    """
    status = self._request_withdraw_api('get', 'accountStatus.html', True, data=params)
    # The wapi endpoints signal errors in-band via the "success" flag.
    if not status['success']:
        raise BinanceWithdrawException(status['msg'])
    return status
# Withdraw Endpoints
def withdraw(self, **params):
    """Submit a withdraw request.

    https://www.binance.com/restapipub.html

    Assumptions:

    - You must have Withdraw permissions enabled on your API key
    - You must have withdrawn to the address specified through the website and approved the transaction via email

    :param asset: required
    :type asset: str
    :param address: required
    :type address: str
    :param addressTag: optional - Secondary address identifier for coins like XRP,XMR etc.
    :type addressTag: str
    :param amount: required
    :type amount: decimal
    :param name: optional - Description of the address; defaults to the asset value
    :type name: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict with ``msg``, ``success`` and ``id``
    :raises: BinanceResponseException, BinanceAPIException, BinanceWithdrawException
    """
    # The endpoint requires a withdrawal name; default it to the asset code.
    if 'asset' in params:
        params.setdefault('name', params['asset'])
    res = self._request_withdraw_api('post', 'withdraw.html', True, data=params)
    # Errors are reported in-band via the "success" flag.
    if not res['success']:
        raise BinanceWithdrawException(res['msg'])
    return res
def get_deposit_history(self, **params):
    """Fetch deposit history.

    https://www.binance.com/restapipub.html

    :param asset: optional
    :type asset: str
    :param status: optional - 0: pending, 1: success
    :type status: int
    :param startTime: optional
    :type startTime: long
    :param endTime: optional
    :type endTime: long
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict with a ``depositList`` of deposit entries
        and a ``success`` flag
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._request_withdraw_api('get', 'depositHistory.html', True, data=params)
def get_withdraw_history(self, **params):
    """Fetch withdraw history.

    https://www.binance.com/restapipub.html

    :param asset: optional
    :type asset: str
    :param status: optional - 0: Email Sent, 1: Cancelled, 2: Awaiting Approval,
        3: Rejected, 4: Processing, 5: Failure, 6: Completed
    :type status: int
    :param startTime: optional
    :type startTime: long
    :param endTime: optional
    :type endTime: long
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict with a ``withdrawList`` of withdrawal
        entries and a ``success`` flag
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._request_withdraw_api('get', 'withdrawHistory.html', True, data=params)
def get_deposit_address(self, **params):
    """Fetch a deposit address for a given asset.

    https://www.binance.com/restapipub.html

    :param asset: required
    :type asset: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict with ``address``, ``addressTag``,
        ``asset`` and a ``success`` flag
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._request_withdraw_api('get', 'depositAddress.html', True, data=params)
# User Stream Endpoints
def stream_get_listen_key(self):
    """Start a new user data stream and return its listen key.

    If a stream already exists it should return the same key.
    If the stream becomes invalid a new key is returned.
    Can be used to keep the user stream alive.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#start-user-data-stream-user_stream

    :returns: the listen key string extracted from the API response
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._post('userDataStream', False, data={})['listenKey']
def stream_keepalive(self, listenKey):
    """PING a user data stream to prevent a time out.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#keepalive-user-data-stream-user_stream

    :param listenKey: required
    :type listenKey: str

    :returns: API response (an empty dict on success)
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._put('userDataStream', False, data={'listenKey': listenKey})
def stream_close(self, listenKey):
    """Close out a user data stream.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#close-user-data-stream-user_stream

    :param listenKey: required
    :type listenKey: str

    :returns: API response (an empty dict on success)
    :raises: BinanceResponseException, BinanceAPIException
    """
    return self._delete('userDataStream', False, data={'listenKey': listenKey})
|
from __future__ import absolute_import
from functools import partial
from . import idiokit
from ._selectloop import cancel as selectloop_cancel, sleep as selectloop_sleep
def _cancel(node, _result, _value):
    # Result-listener callback: only the timer node matters; the listener's
    # two callback arguments are ignored.
    selectloop_cancel(node)
def sleep(delay=None):
    """Return an event that succeeds after *delay* seconds.

    When *delay* is None the event never fires on its own, i.e. the caller
    sleeps forever (until the event's result is cancelled).
    """
    event = idiokit.Event()
    if delay is not None:
        node = selectloop_sleep(delay, event.succeed)
        # If the event finishes first (e.g. cancellation), drop the timer.
        event.result().listen(partial(_cancel, node))
    return event
class Timeout(Exception):
    """Raised into a stream by timeout() when the time limit elapses."""
def timeout(timeout, stream=None, throw=Timeout()):
    # Arrange for `throw` to be raised into `stream` after `timeout` seconds;
    # returns the stream (a fresh idiokit.Event when none is given).
    # NOTE(review): the default `throw=Timeout()` is a single shared exception
    # instance created at definition time -- confirm that re-raising the same
    # instance across calls is intended.
    if stream is None:
        stream = idiokit.Event()
    node = selectloop_sleep(timeout, stream.throw, throw)
    # If the stream finishes before the deadline, cancel the pending timer.
    stream.result().listen(partial(_cancel, node))
    return stream
idiokit: idiokit.sleep() called without an argument now sleeps forever (until a signal is received).
from __future__ import absolute_import
from functools import partial
from . import idiokit
from ._selectloop import cancel as selectloop_cancel, sleep as selectloop_sleep
def _cancel(node, _unused_result, _unused_value):
    # Listener callback shape is (result, value); we only need the node.
    selectloop_cancel(node)
def sleep(delay=None):
    """Return an event that fires after *delay* seconds (never, if None)."""
    event = idiokit.Event()
    if delay is None:
        # No deadline: the event only completes via external cancellation.
        return event
    timer = selectloop_sleep(delay, event.succeed)
    event.result().listen(partial(_cancel, timer))
    return event
class Timeout(Exception):
    """Raised into a stream by timeout() when the time limit elapses."""
def timeout(timeout, stream=None, throw=Timeout()):
    # Arrange for `throw` to be raised into `stream` after `timeout` seconds;
    # returns the stream (a fresh idiokit.Event when none is given).
    # NOTE(review): the default `throw=Timeout()` is a single shared exception
    # instance created at definition time -- confirm that re-raising the same
    # instance across calls is intended.
    if stream is None:
        stream = idiokit.Event()
    node = selectloop_sleep(timeout, stream.throw, throw)
    # If the stream finishes before the deadline, cancel the pending timer.
    stream.result().listen(partial(_cancel, node))
    return stream
|
import os
import random
import re
import shutil
import subprocess
import tempfile
import time
from assertions import assert_almost_equal, assert_one
from cassandra import ConsistencyLevel
from cassandra.concurrent import execute_concurrent_with_args
from ccmlib.node import NodeError
from dtest import Tester, debug
from tools import (InterruptBootstrap, KillOnBootstrap, known_failure,
new_node, query_c1c2, since)
class TestBootstrap(Tester):
def __init__(self, *args, **kwargs):
    # Bootstrap tests exercise Thrift-era tooling, so start the RPC server.
    kwargs['cluster_options'] = {'start_rpc': 'true'}
    # Ignore these log patterns:
    self.ignore_log_patterns = [
        # This one occurs when trying to send the migration to a
        # node that hasn't started yet, and when it does, it gets
        # replayed and everything is fine.
        r'Can\'t send migration request: node.*is down',
        # ignore streaming error during bootstrap
        r'Exception encountered during startup',
        r'Streaming error occurred'
    ]
    Tester.__init__(self, *args, **kwargs)
    # Interrupted bootstraps intentionally produce log errors; don't fail on them.
    self.allow_log_errors = True
def simple_bootstrap_test(self):
    # Bootstrap a second node into a loaded single-node cluster while reads
    # are in flight, then verify the data is split roughly evenly.
    cluster = self.cluster
    tokens = cluster.balanced_tokens(2)
    cluster.set_configuration_options(values={'num_tokens': 1})
    debug("[node1, node2] tokens: %r" % (tokens,))
    keys = 10000
    # Create a single node cluster
    cluster.populate(1)
    node1 = cluster.nodelist()[0]
    node1.set_configuration_options(values={'initial_token': tokens[0]})
    cluster.start(wait_other_notice=True)
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
    # record the size before inserting any of our own data
    empty_size = node1.data_size()
    debug("node1 empty size : %s" % float(empty_size))
    insert_statement = session.prepare("INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
    execute_concurrent_with_args(session, insert_statement, [['k%d' % k] for k in range(keys)])
    node1.flush()
    node1.compact()
    initial_size = node1.data_size()
    debug("node1 size before bootstrapping node2: %s" % float(initial_size))
    # Reads inserted data all during the bootstrap process. We shouldn't
    # get any error
    reader = self.go(lambda _: query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE))
    # Bootstraping a new node
    node2 = new_node(cluster)
    node2.set_configuration_options(values={'initial_token': tokens[1]})
    node2.start(wait_for_binary_proto=True)
    node2.compact()
    # The background reader must not have hit any error so far.
    reader.check()
    node1.cleanup()
    debug("node1 size after cleanup: %s" % float(node1.data_size()))
    node1.compact()
    debug("node1 size after compacting: %s" % float(node1.data_size()))
    time.sleep(.5)
    reader.check()
    debug("node2 size after compacting: %s" % float(node2.data_size()))
    size1 = float(node1.data_size())
    size2 = float(node2.data_size())
    # With balanced tokens each node should now hold about half of the data.
    assert_almost_equal(size1, size2, error=0.3)
    assert_almost_equal(float(initial_size - empty_size), 2 * (size1 - float(empty_size)))
def simple_bootstrap_test_nodata(self):
    """
    @jira_ticket CASSANDRA-11010
    Test that bootstrap completes if streaming from nodes with no data
    """
    cluster = self.cluster
    # Two empty nodes -- there is nothing to stream from them.
    cluster.populate(2)
    cluster.start(wait_other_notice=True)
    # Bootstrap a third node and wait for it to serve CQL.
    joining = new_node(cluster)
    joining.start()
    joining.watch_log_for("Starting listening for CQL clients")
    # The node must report its own bootstrap as finished.
    session = self.exclusive_cql_connection(joining)
    result = session.execute("SELECT bootstrapped FROM system.local WHERE key='local'")
    self.assertEqual(result[0][0], 'COMPLETED')
def read_from_bootstrapped_node_test(self):
    """Test bootstrapped node sees existing data, eg. CASSANDRA-6648"""
    cluster = self.cluster
    cluster.populate(3)
    cluster.start()
    # Write some data through node1 and remember what was written.
    node1 = cluster.nodes['node1']
    node1.stress(['write', 'n=10000', '-rate', 'threads=8'])
    session = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1'
    original_rows = list(session.execute("SELECT * FROM %s" % (stress_table,)))
    # Bootstrap a fourth node and read the same table through it exclusively.
    node4 = new_node(cluster)
    node4.start(wait_for_binary_proto=True)
    session = self.patient_exclusive_cql_connection(node4)
    new_rows = list(session.execute("SELECT * FROM %s" % (stress_table,)))
    # assertEqual: assertEquals is a deprecated unittest alias.
    self.assertEqual(original_rows, new_rows)
@since('2.2')
@known_failure(failure_source='systemic',
               jira_url='https://issues.apache.org/jira/browse/CASSANDRA-10912',
               flaky=True)
def resumable_bootstrap_test(self):
    """Test resuming bootstrap after data streaming failure"""
    cluster = self.cluster
    cluster.populate(2).start(wait_other_notice=True)
    node1 = cluster.nodes['node1']
    node1.stress(['write', 'n=100000', '-schema', 'replication(factor=2)'])
    node1.flush()
    # kill node1 in the middle of streaming to let it fail
    t = InterruptBootstrap(node1)
    t.start()
    # start bootstrapping node3 and wait for streaming
    node3 = new_node(cluster)
    # throttle streaming so the interrupt thread reliably wins the race
    node3.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
    # keep timeout low so that test won't hang
    node3.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})
    try:
        node3.start()
    except NodeError:
        pass  # node doesn't start as expected
    t.join()
    # wait for node3 ready to query
    node3.watch_log_for("Starting listening for CQL clients")
    mark = node3.mark_log()
    # check if node3 is still in bootstrap mode
    session = self.exclusive_cql_connection(node3)
    rows = list(session.execute("SELECT bootstrapped FROM system.local WHERE key='local'"))
    assert len(rows) == 1
    assert rows[0][0] == 'IN_PROGRESS', rows[0][0]
    # bring back node1 and invoke nodetool bootstrap to resume bootstrapping
    node1.start(wait_other_notice=True)
    node3.nodetool('bootstrap resume')
    # check if we skipped already retrieved ranges
    node3.watch_log_for("already available. Skipping streaming.")
    node3.watch_log_for("Resume complete", from_mark=mark)
    # the resumed bootstrap must end in the COMPLETED state
    rows = list(session.execute("SELECT bootstrapped FROM system.local WHERE key='local'"))
    assert rows[0][0] == 'COMPLETED', rows[0][0]
@since('2.2')
def bootstrap_with_reset_bootstrap_state_test(self):
    """Test bootstrap with resetting bootstrap progress"""
    cluster = self.cluster
    # throttle streaming so the interrupt thread can kill node1 mid-stream
    cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
    cluster.populate(2).start(wait_other_notice=True)
    node1 = cluster.nodes['node1']
    node1.stress(['write', 'n=100000', '-schema', 'replication(factor=2)'])
    node1.flush()
    # kill node1 in the middle of streaming to let it fail
    t = InterruptBootstrap(node1)
    t.start()
    # start bootstrapping node3 and wait for streaming
    node3 = new_node(cluster)
    try:
        node3.start()
    except NodeError:
        pass  # node doesn't start as expected
    t.join()
    node1.start()
    # restart node3 bootstrap with resetting bootstrap progress
    node3.stop()
    mark = node3.mark_log()
    node3.start(jvm_args=["-Dcassandra.reset_bootstrap_progress=true"])
    # check if we reset bootstrap state
    node3.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark)
    # wait for node3 ready to query
    node3.watch_log_for("Listening for thrift clients...", from_mark=mark)
    # check if 2nd bootstrap succeeded
    session = self.exclusive_cql_connection(node3)
    rows = list(session.execute("SELECT bootstrapped FROM system.local WHERE key='local'"))
    assert len(rows) == 1
    assert rows[0][0] == 'COMPLETED', rows[0][0]
def manual_bootstrap_test(self):
    """Test adding a new node and bootstrapping it manually. No auto_bootstrap.
    This test also verifies that all data are OK after the addition of the new node.
    eg. CASSANDRA-9022
    """
    cluster = self.cluster
    cluster.populate(2).start(wait_other_notice=True)
    (node1, node2) = cluster.nodelist()
    node1.stress(['write', 'n=1000', '-schema', 'replication(factor=2)',
                  '-rate', 'threads=1', '-pop', 'dist=UNIFORM(1..1000)'])
    session = self.patient_exclusive_cql_connection(node2)
    stress_table = 'keyspace1.standard1'
    original_rows = list(session.execute("SELECT * FROM %s" % stress_table))
    # Add a new node with auto_bootstrap disabled, then stream its data
    # explicitly via repair.
    node3 = new_node(cluster, bootstrap=False)
    node3.start(wait_for_binary_proto=True)
    node3.repair()
    node1.cleanup()
    current_rows = list(session.execute("SELECT * FROM %s" % stress_table))
    # assertEqual: assertEquals is a deprecated unittest alias.
    self.assertEqual(original_rows, current_rows)
def local_quorum_bootstrap_test(self):
    """Test that CL local_quorum works while a node is bootstrapping. CASSANDRA-8058"""
    cluster = self.cluster
    # One node per datacenter.
    cluster.populate([1, 1])
    cluster.start()
    node1 = cluster.nodes['node1']
    # NOTE(review): the indentation inside this YAML stress profile appears to
    # have been lost in this copy -- the block scalars under
    # keyspace_definition/table_definition need their original indentation;
    # verify against the upstream dtest before relying on this literal.
    yaml_config = """
# Create the keyspace and table
keyspace: keyspace1
keyspace_definition: |
CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};
table: users
table_definition:
CREATE TABLE users (
username text,
first_name text,
last_name text,
email text,
PRIMARY KEY(username)
) WITH compaction = {'class':'SizeTieredCompactionStrategy'};
insert:
partitions: fixed(1)
batchtype: UNLOGGED
queries:
read:
cql: select * from users where username = ?
fields: samerow
"""
    # Write the profile to a temp file so cassandra-stress can read it.
    stress_config = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    stress_config.write(yaml_config)
    stress_config.close()
    node1.stress(['user', 'profile=' + stress_config.name, 'n=2000000',
                  'ops(insert=1)', '-rate', 'threads=50'])
    # Start bootstrapping a node in dc2 and run LOCAL_QUORUM writes meanwhile.
    node3 = new_node(cluster, data_center='dc2')
    node3.start(no_wait=True)
    time.sleep(3)
    with tempfile.TemporaryFile(mode='w+') as tmpfile:
        node1.stress(['user', 'profile=' + stress_config.name, 'ops(insert=1)',
                      'n=500000', 'cl=LOCAL_QUORUM',
                      '-rate', 'threads=5',
                      '-errors', 'retries=2'],
                     stdout=tmpfile, stderr=subprocess.STDOUT)
        os.unlink(stress_config.name)
        tmpfile.seek(0)
        output = tmpfile.read()
    debug(output)
    # No insert may have failed while the remote-DC node was joining.
    regex = re.compile("Operation.+error inserting key.+Exception")
    failure = regex.search(output)
    self.assertIsNone(failure, "Error during stress while bootstrapping")
def shutdown_wiped_node_cannot_join_test(self):
self._wiped_node_cannot_join_test(gently=True)
def killed_wiped_node_cannot_join_test(self):
self._wiped_node_cannot_join_test(gently=False)
def _wiped_node_cannot_join_test(self, gently):
"""
@jira_ticket CASSANDRA-9765
Test that if we stop a node and wipe its data then the node cannot join
when it is not a seed. Test both a nice shutdown or a forced shutdown, via
the gently parameter.
"""
cluster = self.cluster
cluster.populate(3)
cluster.start(wait_for_binary_proto=True)
stress_table = 'keyspace1.standard1'
# write some data
node1 = cluster.nodelist()[0]
node1.stress(['write', 'n=10000', '-rate', 'threads=8'])
session = self.patient_cql_connection(node1)
original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))
# Add a new node, bootstrap=True ensures that it is not a seed
node4 = new_node(cluster, bootstrap=True)
node4.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node4)
self.assertEquals(original_rows, list(session.execute("SELECT * FROM {}".format(stress_table,))))
# Stop the new node and wipe its data
node4.stop(gently=gently)
self._cleanup(node4)
# Now start it, it should not be allowed to join.
mark = node4.mark_log()
node4.start(no_wait=True)
node4.watch_log_for("A node with address /127.0.0.4 already exists, cancelling join", from_mark=mark)
def decommissioned_wiped_node_can_join_test(self):
    """
    @jira_ticket CASSANDRA-9765
    Test that if we decommission a node and then wipe its data, it can join the cluster.
    """
    cluster = self.cluster
    cluster.populate(3)
    cluster.start(wait_for_binary_proto=True)
    stress_table = 'keyspace1.standard1'
    # write some data
    node1 = cluster.nodelist()[0]
    node1.stress(['write', 'n=10K', '-rate', 'threads=8'])
    session = self.patient_cql_connection(node1)
    original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))
    # Add a new node, bootstrap=True ensures that it is not a seed
    node4 = new_node(cluster, bootstrap=True)
    node4.start(wait_for_binary_proto=True, wait_other_notice=True)
    session = self.patient_cql_connection(node4)
    self.assertEquals(original_rows, list(session.execute("SELECT * FROM {}".format(stress_table,))))
    # Decommission the new node and wipe its data
    node4.decommission()
    # Do NOT pass wait_other_notice=True here: the decommission has already
    # caused the node-DOWN notifications, so none will arrive on stop and
    # waiting for them makes the test time out.
    node4.stop()
    self._cleanup(node4)
    # Now start it, it should be allowed to join
    mark = node4.mark_log()
    node4.start(wait_other_notice=True)
    node4.watch_log_for("JOINING:", from_mark=mark)
def decommissioned_wiped_node_can_gossip_to_single_seed_test(self):
"""
@jira_ticket CASSANDRA-8072
@jira_ticket CASSANDRA-8422
Test that if we decommission a node, kill it and wipe its data, it can join a cluster with a single
seed node.
"""
cluster = self.cluster
cluster.populate(1)
cluster.start(wait_for_binary_proto=True)
# Add a new node, bootstrap=True ensures that it is not a seed
node2 = new_node(cluster, bootstrap=True)
node2.start(wait_for_binary_proto=True, wait_other_notice=True)
# Decommision the new node and kill it
debug("Decommissioning & stopping node2")
node2.decommission()
node2.stop(wait_other_notice=False)
# Wipe its data
for data_dir in node2.data_directories():
debug("Deleting {}".format(data_dir))
shutil.rmtree(data_dir)
commitlog_dir = os.path.join(node2.get_path(), 'commitlogs')
debug("Deleting {}".format(commitlog_dir))
shutil.rmtree(commitlog_dir)
# Now start it, it should be allowed to join
mark = node2.mark_log()
debug("Restarting wiped node2")
node2.start(wait_other_notice=False)
node2.watch_log_for("JOINING:", from_mark=mark)
def failed_bootstrap_wiped_node_can_join_test(self):
"""
@jira_ticket CASSANDRA-9765
Test that if a node fails to bootstrap, it can join the cluster even if the data is wiped.
"""
cluster = self.cluster
cluster.populate(1)
cluster.start(wait_for_binary_proto=True)
stress_table = 'keyspace1.standard1'
# write some data, enough for the bootstrap to fail later on
node1 = cluster.nodelist()[0]
node1.stress(['write', 'n=100000', '-rate', 'threads=8'])
node1.flush()
session = self.patient_cql_connection(node1)
original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))
# Add a new node, bootstrap=True ensures that it is not a seed
node2 = new_node(cluster, bootstrap=True)
node2.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
# kill node2 in the middle of bootstrap
t = KillOnBootstrap(node2)
t.start()
node2.start()
t.join()
self.assertFalse(node2.is_running())
# wipe any data for node2
self._cleanup(node2)
# Now start it again, it should be allowed to join
mark = node2.mark_log()
node2.start(wait_other_notice=True)
node2.watch_log_for("JOINING:", from_mark=mark)
@since('2.1.1')
def simultaneous_bootstrap_test(self):
"""
Attempt to bootstrap two nodes at once, to assert the second bootstrapped node fails, and does not interfere.
Start a one node cluster and run a stress write workload.
Start up a second node, and wait for the first node to detect it has joined the cluster.
While the second node is bootstrapping, start a third node. This should fail.
@jira_ticket CASSANDRA-7069
@jira_ticket CASSANDRA-9484
"""
bootstrap_error = ("Other bootstrapping/leaving/moving nodes detected,"
" cannot bootstrap while cassandra.consistent.rangemovement is true")
self.ignore_log_patterns.append(bootstrap_error)
cluster = self.cluster
cluster.populate(1)
cluster.start(wait_for_binary_proto=True)
node1, = cluster.nodelist()
node1.stress(['write', 'n=500K', '-schema', 'replication(factor=1)',
'-rate', 'threads=10'])
node2 = new_node(cluster)
node2.start(wait_other_notice=True)
node3 = new_node(cluster, remote_debug_port='2003')
process = node3.start()
stdout, stderr = process.communicate()
self.assertIn(bootstrap_error, stderr, msg=stderr)
time.sleep(.5)
self.assertFalse(node3.is_running(), msg="Two nodes bootstrapped simultaneously")
node2.watch_log_for("Starting listening for CQL clients")
session = self.patient_exclusive_cql_connection(node2)
# Repeat the select count(*) query, to help catch
# bugs like 9484, where count(*) fails at higher
# data loads.
for _ in xrange(5):
assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE)
def _cleanup(self, node):
commitlog_dir = os.path.join(node.get_path(), 'commitlogs')
for data_dir in node.data_directories():
debug("Deleting {}".format(data_dir))
shutil.rmtree(data_dir)
shutil.rmtree(commitlog_dir)
Don't wait for other nodes' notice after decommission.
The decommission will already have produced the node-DOWN
notifications, so no further notifications arrive on stop;
waiting for them would make the test time out.
import os
import random
import re
import shutil
import subprocess
import tempfile
import time
from assertions import assert_almost_equal, assert_one
from cassandra import ConsistencyLevel
from cassandra.concurrent import execute_concurrent_with_args
from ccmlib.node import NodeError
from dtest import Tester, debug
from tools import (InterruptBootstrap, KillOnBootstrap, known_failure,
new_node, query_c1c2, since)
class TestBootstrap(Tester):
def __init__(self, *args, **kwargs):
    # Start every cluster with the RPC (Thrift) server enabled.
    kwargs['cluster_options'] = {'start_rpc': 'true'}
    # Ignore these log patterns:
    self.ignore_log_patterns = [
        # This one occurs when trying to send the migration to a
        # node that hasn't started yet, and when it does, it gets
        # replayed and everything is fine.
        r'Can\'t send migration request: node.*is down',
        # ignore streaming error during bootstrap
        r'Exception encountered during startup',
        r'Streaming error occurred'
    ]
    Tester.__init__(self, *args, **kwargs)
    # Bootstrap tests deliberately provoke failures, so log errors
    # alone must not fail the test run.
    self.allow_log_errors = True
def simple_bootstrap_test(self):
    """Bootstrap a second node into a single-node cluster while reads are
    in flight, then verify the data ends up split roughly in half."""
    cluster = self.cluster
    tokens = cluster.balanced_tokens(2)
    cluster.set_configuration_options(values={'num_tokens': 1})
    debug("[node1, node2] tokens: %r" % (tokens,))
    keys = 10000
    # Create a single node cluster
    cluster.populate(1)
    node1 = cluster.nodelist()[0]
    node1.set_configuration_options(values={'initial_token': tokens[0]})
    cluster.start(wait_other_notice=True)
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
    # record the size before inserting any of our own data
    empty_size = node1.data_size()
    debug("node1 empty size : %s" % float(empty_size))
    insert_statement = session.prepare("INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
    execute_concurrent_with_args(session, insert_statement, [['k%d' % k] for k in range(keys)])
    # Flush and compact so data_size() reflects what is actually on disk.
    node1.flush()
    node1.compact()
    initial_size = node1.data_size()
    debug("node1 size before bootstrapping node2: %s" % float(initial_size))
    # Reads inserted data all during the bootstrap process. We shouldn't
    # get any error
    reader = self.go(lambda _: query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE))
    # Bootstraping a new node
    node2 = new_node(cluster)
    node2.set_configuration_options(values={'initial_token': tokens[1]})
    node2.start(wait_for_binary_proto=True)
    node2.compact()
    # reader.check() raises if any background read failed during bootstrap.
    reader.check()
    node1.cleanup()
    debug("node1 size after cleanup: %s" % float(node1.data_size()))
    node1.compact()
    debug("node1 size after compacting: %s" % float(node1.data_size()))
    time.sleep(.5)
    reader.check()
    debug("node2 size after compacting: %s" % float(node2.data_size()))
    size1 = float(node1.data_size())
    size2 = float(node2.data_size())
    # With balanced tokens each node should own about half the data
    # (30% tolerance), and node1 should have shed roughly half of what
    # it held before the bootstrap.
    assert_almost_equal(size1, size2, error=0.3)
    assert_almost_equal(float(initial_size - empty_size), 2 * (size1 - float(empty_size)))
def simple_bootstrap_test_nodata(self):
    """
    @jira_ticket CASSANDRA-11010
    Test that bootstrap completes if streaming from nodes with no data
    """
    cluster = self.cluster
    # Create a two-node cluster
    cluster.populate(2)
    cluster.start(wait_other_notice=True)
    # Bootstraping a new node
    node3 = new_node(cluster)
    node3.start()
    node3.watch_log_for("Starting listening for CQL clients")
    session = self.exclusive_cql_connection(node3)
    # system.local records the bootstrap outcome; it must be COMPLETED
    # even though there was no data to stream.
    rows = session.execute("SELECT bootstrapped FROM system.local WHERE key='local'")
    self.assertEqual(rows[0][0], 'COMPLETED')
def read_from_bootstrapped_node_test(self):
    """Test bootstrapped node sees existing data, eg. CASSANDRA-6648"""
    cluster = self.cluster
    cluster.populate(3)
    cluster.start()
    node1 = cluster.nodes['node1']
    node1.stress(['write', 'n=10000', '-rate', 'threads=8'])
    session = self.patient_cql_connection(node1)
    stress_table = 'keyspace1.standard1'
    # Snapshot the data as seen through an existing node...
    original_rows = list(session.execute("SELECT * FROM %s" % (stress_table,)))
    node4 = new_node(cluster)
    node4.start(wait_for_binary_proto=True)
    # ...then read the same table exclusively through the new node and
    # verify the result sets match.
    session = self.patient_exclusive_cql_connection(node4)
    new_rows = list(session.execute("SELECT * FROM %s" % (stress_table,)))
    self.assertEquals(original_rows, new_rows)
@since('2.2')
@known_failure(failure_source='systemic',
               jira_url='https://issues.apache.org/jira/browse/CASSANDRA-10912',
               flaky=True)
def resumable_bootstrap_test(self):
    """Test resuming bootstrap after data streaming failure"""
    cluster = self.cluster
    cluster.populate(2).start(wait_other_notice=True)
    node1 = cluster.nodes['node1']
    node1.stress(['write', 'n=100000', '-schema', 'replication(factor=2)'])
    node1.flush()
    # kill node1 in the middle of streaming to let it fail
    t = InterruptBootstrap(node1)
    t.start()
    # start bootstrapping node3 and wait for streaming
    node3 = new_node(cluster)
    # Throttle streaming so the interrupt thread has time to fire.
    node3.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
    # keep timeout low so that test won't hang
    node3.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})
    try:
        node3.start()
    except NodeError:
        pass  # node doesn't start as expected
    t.join()
    # wait for node3 ready to query
    node3.watch_log_for("Starting listening for CQL clients")
    mark = node3.mark_log()
    # check if node3 is still in bootstrap mode
    session = self.exclusive_cql_connection(node3)
    rows = list(session.execute("SELECT bootstrapped FROM system.local WHERE key='local'"))
    assert len(rows) == 1
    assert rows[0][0] == 'IN_PROGRESS', rows[0][0]
    # bring back node1 and invoke nodetool bootstrap to resume bootstrapping
    node1.start(wait_other_notice=True)
    node3.nodetool('bootstrap resume')
    # check if we skipped already retrieved ranges
    node3.watch_log_for("already available. Skipping streaming.")
    node3.watch_log_for("Resume complete", from_mark=mark)
    rows = list(session.execute("SELECT bootstrapped FROM system.local WHERE key='local'"))
    assert rows[0][0] == 'COMPLETED', rows[0][0]
@since('2.2')
def bootstrap_with_reset_bootstrap_state_test(self):
    """Test bootstrap with resetting bootstrap progress"""
    cluster = self.cluster
    # Throttle streaming so the interrupt thread can kill node1 mid-stream.
    cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
    cluster.populate(2).start(wait_other_notice=True)
    node1 = cluster.nodes['node1']
    node1.stress(['write', 'n=100000', '-schema', 'replication(factor=2)'])
    node1.flush()
    # kill node1 in the middle of streaming to let it fail
    t = InterruptBootstrap(node1)
    t.start()
    # start bootstrapping node3 and wait for streaming
    node3 = new_node(cluster)
    try:
        node3.start()
    except NodeError:
        pass  # node doesn't start as expected
    t.join()
    node1.start()
    # restart node3 bootstrap with resetting bootstrap progress
    node3.stop()
    mark = node3.mark_log()
    # The JVM flag discards the partial streaming state and restarts
    # the bootstrap from scratch.
    node3.start(jvm_args=["-Dcassandra.reset_bootstrap_progress=true"])
    # check if we reset bootstrap state
    node3.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark)
    # wait for node3 ready to query
    node3.watch_log_for("Listening for thrift clients...", from_mark=mark)
    # check if 2nd bootstrap succeeded
    session = self.exclusive_cql_connection(node3)
    rows = list(session.execute("SELECT bootstrapped FROM system.local WHERE key='local'"))
    assert len(rows) == 1
    assert rows[0][0] == 'COMPLETED', rows[0][0]
def manual_bootstrap_test(self):
    """Test adding a new node and bootstrappig it manually. No auto_bootstrap.
    This test also verify that all data are OK after the addition of the new node.
    eg. CASSANDRA-9022
    """
    cluster = self.cluster
    cluster.populate(2).start(wait_other_notice=True)
    (node1, node2) = cluster.nodelist()
    node1.stress(['write', 'n=1000', '-schema', 'replication(factor=2)',
                  '-rate', 'threads=1', '-pop', 'dist=UNIFORM(1..1000)'])
    session = self.patient_exclusive_cql_connection(node2)
    stress_table = 'keyspace1.standard1'
    original_rows = list(session.execute("SELECT * FROM %s" % stress_table))
    # Add a new node without auto-bootstrap, then stream data onto it
    # manually via repair (the "manual bootstrap").
    node3 = new_node(cluster, bootstrap=False)
    node3.start(wait_for_binary_proto=True)
    node3.repair()
    node1.cleanup()
    # Data must be unchanged after the manual bootstrap + cleanup.
    current_rows = list(session.execute("SELECT * FROM %s" % stress_table))
    self.assertEquals(original_rows, current_rows)
def local_quorum_bootstrap_test(self):
    """Test that CL local_quorum works while a node is bootstrapping. CASSANDRA-8058"""
    cluster = self.cluster
    # One node per data center (dc1, dc2).
    cluster.populate([1, 1])
    cluster.start()
    node1 = cluster.nodes['node1']
    # Stress user profile; NOTE(review): indentation inside this YAML
    # literal appears lost in this copy of the file — verify against the
    # upstream dtest before relying on it.
    yaml_config = """
# Create the keyspace and table
keyspace: keyspace1
keyspace_definition: |
CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1};
table: users
table_definition:
CREATE TABLE users (
username text,
first_name text,
last_name text,
email text,
PRIMARY KEY(username)
) WITH compaction = {'class':'SizeTieredCompactionStrategy'};
insert:
partitions: fixed(1)
batchtype: UNLOGGED
queries:
read:
cql: select * from users where username = ?
fields: samerow
"""
    # delete=False so the stress subprocess can open it; unlinked below.
    stress_config = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    stress_config.write(yaml_config)
    stress_config.close()
    node1.stress(['user', 'profile=' + stress_config.name, 'n=2000000',
                  'ops(insert=1)', '-rate', 'threads=50'])
    # Start a bootstrap in dc2 in the background...
    node3 = new_node(cluster, data_center='dc2')
    node3.start(no_wait=True)
    time.sleep(3)
    # ...and run LOCAL_QUORUM writes while it is still joining.
    with tempfile.TemporaryFile(mode='w+') as tmpfile:
        node1.stress(['user', 'profile=' + stress_config.name, 'ops(insert=1)',
                      'n=500000', 'cl=LOCAL_QUORUM',
                      '-rate', 'threads=5',
                      '-errors', 'retries=2'],
                     stdout=tmpfile, stderr=subprocess.STDOUT)
        os.unlink(stress_config.name)
        tmpfile.seek(0)
        output = tmpfile.read()
    debug(output)
    # Any insert exception in the stress output means LOCAL_QUORUM failed
    # during the bootstrap.
    regex = re.compile("Operation.+error inserting key.+Exception")
    failure = regex.search(output)
    self.assertIsNone(failure, "Error during stress while bootstrapping")
def shutdown_wiped_node_cannot_join_test(self):
    # Gentle (drained) shutdown variant of the wiped-node rejoin check.
    self._wiped_node_cannot_join_test(gently=True)
def killed_wiped_node_cannot_join_test(self):
    # Forced-kill shutdown variant of the wiped-node rejoin check.
    self._wiped_node_cannot_join_test(gently=False)
def _wiped_node_cannot_join_test(self, gently):
    """
    @jira_ticket CASSANDRA-9765
    Test that if we stop a node and wipe its data then the node cannot join
    when it is not a seed. Test both a nice shutdown or a forced shutdown, via
    the gently parameter.
    """
    cluster = self.cluster
    cluster.populate(3)
    cluster.start(wait_for_binary_proto=True)
    stress_table = 'keyspace1.standard1'
    # write some data
    node1 = cluster.nodelist()[0]
    node1.stress(['write', 'n=10000', '-rate', 'threads=8'])
    session = self.patient_cql_connection(node1)
    original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))
    # Add a new node, bootstrap=True ensures that it is not a seed
    node4 = new_node(cluster, bootstrap=True)
    node4.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node4)
    self.assertEquals(original_rows, list(session.execute("SELECT * FROM {}".format(stress_table,))))
    # Stop the new node and wipe its data
    node4.stop(gently=gently)
    self._cleanup(node4)
    # Now start it, it should not be allowed to join.
    mark = node4.mark_log()
    node4.start(no_wait=True)
    # The cluster still remembers node4's address, so the fresh (wiped)
    # node must be rejected rather than silently replacing it.
    node4.watch_log_for("A node with address /127.0.0.4 already exists, cancelling join", from_mark=mark)
def decommissioned_wiped_node_can_join_test(self):
    """
    @jira_ticket CASSANDRA-9765
    Test that if we decommission a node and then wipe its data, it can join the cluster.
    """
    cluster = self.cluster
    cluster.populate(3)
    cluster.start(wait_for_binary_proto=True)
    stress_table = 'keyspace1.standard1'
    # write some data
    node1 = cluster.nodelist()[0]
    node1.stress(['write', 'n=10K', '-rate', 'threads=8'])
    session = self.patient_cql_connection(node1)
    original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))
    # Add a new node, bootstrap=True ensures that it is not a seed
    node4 = new_node(cluster, bootstrap=True)
    node4.start(wait_for_binary_proto=True, wait_other_notice=True)
    session = self.patient_cql_connection(node4)
    self.assertEquals(original_rows, list(session.execute("SELECT * FROM {}".format(stress_table,))))
    # Decommission the new node and wipe its data
    node4.decommission()
    # No wait_other_notice here: decommission already produced the DOWN
    # notifications, so waiting on stop would time out.
    node4.stop()
    self._cleanup(node4)
    # Now start it, it should be allowed to join
    mark = node4.mark_log()
    node4.start(wait_other_notice=True)
    node4.watch_log_for("JOINING:", from_mark=mark)
def decommissioned_wiped_node_can_gossip_to_single_seed_test(self):
    """
    @jira_ticket CASSANDRA-8072
    @jira_ticket CASSANDRA-8422
    Test that if we decommission a node, kill it and wipe its data, it can join a cluster with a single
    seed node.
    """
    cluster = self.cluster
    cluster.populate(1)
    cluster.start(wait_for_binary_proto=True)
    # Add a new node, bootstrap=True ensures that it is not a seed
    node2 = new_node(cluster, bootstrap=True)
    node2.start(wait_for_binary_proto=True, wait_other_notice=True)
    # Decommision the new node and kill it
    debug("Decommissioning & stopping node2")
    node2.decommission()
    node2.stop(wait_other_notice=False)
    # Wipe its data: all data directories plus the commitlog.
    for data_dir in node2.data_directories():
        debug("Deleting {}".format(data_dir))
        shutil.rmtree(data_dir)
    commitlog_dir = os.path.join(node2.get_path(), 'commitlogs')
    debug("Deleting {}".format(commitlog_dir))
    shutil.rmtree(commitlog_dir)
    # Now start it, it should be allowed to join
    mark = node2.mark_log()
    debug("Restarting wiped node2")
    node2.start(wait_other_notice=False)
    node2.watch_log_for("JOINING:", from_mark=mark)
def failed_bootstrap_wiped_node_can_join_test(self):
    """
    @jira_ticket CASSANDRA-9765
    Test that if a node fails to bootstrap, it can join the cluster even if the data is wiped.
    """
    cluster = self.cluster
    cluster.populate(1)
    cluster.start(wait_for_binary_proto=True)
    stress_table = 'keyspace1.standard1'
    # write some data, enough for the bootstrap to fail later on
    node1 = cluster.nodelist()[0]
    node1.stress(['write', 'n=100000', '-rate', 'threads=8'])
    node1.flush()
    session = self.patient_cql_connection(node1)
    original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,)))
    # Add a new node, bootstrap=True ensures that it is not a seed
    node2 = new_node(cluster, bootstrap=True)
    # Throttle streaming so the kill thread fires mid-bootstrap.
    node2.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
    # kill node2 in the middle of bootstrap
    t = KillOnBootstrap(node2)
    t.start()
    node2.start()
    t.join()
    self.assertFalse(node2.is_running())
    # wipe any data for node2
    self._cleanup(node2)
    # Now start it again, it should be allowed to join
    mark = node2.mark_log()
    node2.start(wait_other_notice=True)
    node2.watch_log_for("JOINING:", from_mark=mark)
@since('2.1.1')
def simultaneous_bootstrap_test(self):
    """
    Attempt to bootstrap two nodes at once, to assert the second bootstrapped node fails, and does not interfere.
    Start a one node cluster and run a stress write workload.
    Start up a second node, and wait for the first node to detect it has joined the cluster.
    While the second node is bootstrapping, start a third node. This should fail.
    @jira_ticket CASSANDRA-7069
    @jira_ticket CASSANDRA-9484
    """
    bootstrap_error = ("Other bootstrapping/leaving/moving nodes detected,"
                       " cannot bootstrap while cassandra.consistent.rangemovement is true")
    # The error is expected, so it must not fail the log scan.
    self.ignore_log_patterns.append(bootstrap_error)
    cluster = self.cluster
    cluster.populate(1)
    cluster.start(wait_for_binary_proto=True)
    node1, = cluster.nodelist()
    node1.stress(['write', 'n=500K', '-schema', 'replication(factor=1)',
                  '-rate', 'threads=10'])
    node2 = new_node(cluster)
    node2.start(wait_other_notice=True)
    node3 = new_node(cluster, remote_debug_port='2003')
    process = node3.start()
    stdout, stderr = process.communicate()
    # node3 must refuse to bootstrap while node2 is still joining.
    self.assertIn(bootstrap_error, stderr, msg=stderr)
    time.sleep(.5)
    self.assertFalse(node3.is_running(), msg="Two nodes bootstrapped simultaneously")
    node2.watch_log_for("Starting listening for CQL clients")
    session = self.patient_exclusive_cql_connection(node2)
    # Repeat the select count(*) query, to help catch
    # bugs like 9484, where count(*) fails at higher
    # data loads.
    for _ in xrange(5):
        assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE)
def _cleanup(self, node):
    """Wipe *node*'s on-disk state: every data directory, then the commitlog."""
    for doomed in node.data_directories():
        debug("Deleting {}".format(doomed))
        shutil.rmtree(doomed)
    shutil.rmtree(os.path.join(node.get_path(), 'commitlogs'))
|
from .forms import PostForm, ProjectPostForm, DebatePostForm, WorkhourPostForm, PlanbookPostForm
from .models import Post, DebatePost, ProjectPost, Product
from .constants import *
MAP_MODEL_POST = {
BOARD_ROLE['DEFAULT']:Post,
BOARD_ROLE['PROJECT']:ProjectPost,
BOARD_ROLE['DEBATE']:DebatePost,
BOARD_ROLE['PLANBOOK']:Post,
BOARD_ROLE['ARCHIVING']:Post,
BOARD_ROLE['WORKHOUR']:Post,
BOARD_ROLE['SWIPER']:Post,
BOARD_ROLE['STORE']:Product,
}
MAP_FORM_POST = {
BOARD_ROLE['DEFAULT']:PostForm,
BOARD_ROLE['PROJECT']:ProjectPostForm,
BOARD_ROLE['DEBATE']:DebatePostForm,
BOARD_ROLE['ARCHIVING']:PostForm,
BOARD_ROLE['PLANBOOK']:PlanbookPostForm,
BOARD_ROLE['WORKHOUR']:WorkhourPostForm,
BOARD_ROLE['SWIPER']:PostForm,
BOARD_ROLE['STORE']:PostForm,
}
Update constants_mapping.py
from .forms import PostForm, ProjectPostForm, DebatePostForm, WorkhourPostForm, PlanbookPostForm
from .models import Post, DebatePost, ProjectPost, Product
from .constants import *
# Maps each board role (keys come from BOARD_ROLE in .constants) to the
# model class whose instances back posts on that board.
MAP_MODEL_POST = {
    BOARD_ROLE['DEFAULT']:Post,
    BOARD_ROLE['PROJECT']:ProjectPost,
    BOARD_ROLE['DEBATE']:DebatePost,
    BOARD_ROLE['PLANBOOK']:Post,
    BOARD_ROLE['ARCHIVING']:Post,
    BOARD_ROLE['WORKHOUR']:Post,
    BOARD_ROLE['SWIPER']:Post,
    BOARD_ROLE['STORE']:Product,
}
# Maps each board role to the form class used to create/edit its posts.
# NOTE(review): STORE uses the Product model above but plain PostForm here —
# confirm a dedicated product form isn't required.
MAP_FORM_POST = {
    BOARD_ROLE['DEFAULT']:PostForm,
    BOARD_ROLE['PROJECT']:ProjectPostForm,
    BOARD_ROLE['DEBATE']:DebatePostForm,
    BOARD_ROLE['ARCHIVING']:PostForm,
    BOARD_ROLE['PLANBOOK']:PlanbookPostForm,
    BOARD_ROLE['WORKHOUR']:WorkhourPostForm,
    BOARD_ROLE['SWIPER']:PostForm,
    BOARD_ROLE['STORE']:PostForm,
}
|
# Django settings for exampleSettings project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
import os
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
### stuff for the celery task queue
BROKER_URL = 'django://'
CELERY_ALWAYS_EAGER = True
#CELERY_RESULT_BACKEND='djcelery.backends.database:DatabaseBackend'
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'exampleSettings.sqlite3', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(PROJECT_PATH, 'whoosh_index'),
},
}
#HAYSTACK_SIGNAL_PROCESSOR = 'celery_haystack.signals.CelerySignalProcessor'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
#Sets the url that phantomJS should use for all of its rendering. Must be an instance of the site.
URL = 'http://localhost:8000'
#the path to the phantomjs binary, properly escaped.
PHANTOMJSPATH = "phantomjs"
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = PROJECT_PATH + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = PROJECT_PATH + '/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
'static_precompiler.finders.StaticPrecompilerFinder',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
)
#Let's try and cache the inline javascript so you don't need to generate it
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': PROJECT_PATH+'/django_cache',
},
'javascript': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': PROJECT_PATH+'/coffeecache',
}
}
STATIC_PRECOMPILER_COMPILERS = (
'static_precompiler.compilers.CoffeeScript',
'static_precompiler.compilers.SCSS',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
#if DEBUG ==True:
# NOTE(review): BeautifulMiddleware is disabled outright; the commented-out
# guard above suggests it was previously tied to DEBUG — confirm intent
# before re-enabling.
if False:
    MIDDLEWARE_CLASSES += ('Settings.BeautifulMiddleware.BeautifulMiddleware',)
ROOT_URLCONF = 'Settings.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'Settings.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'haystack',
'searchsettings',
'thumbnailer',
'project',
'multiuploader',
'exampleTheme',
'userProfile',
'captcha',
'filemanager',
'organization',
'avatarBot',
'taggit',
'taggit_autocomplete',
'testcases',
'djangoratings',
'threadedcomments',
'django.contrib.comments',
#So we can inline coffeescript
'static_precompiler',
# 'djcelery',
'kombu.transport.django',
# 'celery_haystack',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
COMMENTS_APP = 'threadedcomments'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# NOTE(review): dead code (guarded by `if False`). If re-enabled it would
# point every logger at a 'console' handler, which is not defined in
# LOGGING['handlers'] above (only 'mail_admins' is) — define the handler first.
if False:
    # make all loggers use the console.
    for logger in LOGGING['loggers']:
        LOGGING['loggers'][logger]['handlers'] = ['console']
MULTI_FILE_DELETE_URL = 'multi_delete'
MULTI_IMAGE_URL = 'multi_image'
MULTI_IMAGES_FOLDER = 'multiuploader_images'
### Point django.contrib.auth at the userProfile app's profile model.
AUTH_PROFILE_MODULE = "userProfile.userProfile"
Celery settings for debugging.
# Django settings for exampleSettings project.
# NOTE(review): DEBUG must be False in any production deployment.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
import os
# Absolute path of the directory containing this settings file; used as the
# base for media, static, cache and search-index paths below.
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
### stuff for the celery task queue
BROKER_URL = 'django://'
# Makes debugging easier (tasks run synchronously). Don't use in production.
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS =True
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'exampleSettings.sqlite3', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '', # Set to empty string for default.
    }
}
# Haystack full-text search backed by a local Whoosh index.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join(PROJECT_PATH, 'whoosh_index'),
    },
}
#HAYSTACK_SIGNAL_PROCESSOR = 'celery_haystack.signals.CelerySignalProcessor'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# Sets the url that phantomJS should use for all of its rendering. Must be an instance of the site.
URL = 'http://localhost:8000'
# The path to the phantomjs binary, properly escaped.
PHANTOMJSPATH = "phantomjs"
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = PROJECT_PATH + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = PROJECT_PATH + '/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'static_precompiler.finders.StaticPrecompilerFinder',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'django.contrib.auth.context_processors.auth',
)
# Let's try and cache the inline javascript so you don't need to generate it.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': PROJECT_PATH+'/django_cache',
    },
    'javascript': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': PROJECT_PATH+'/coffeecache',
    }
}
STATIC_PRECOMPILER_COMPILERS = (
    'static_precompiler.compilers.CoffeeScript',
    'static_precompiler.compilers.SCSS',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
#if DEBUG ==True:
# NOTE(review): dead toggle kept for debugging; was originally conditional
# on DEBUG (see commented line above).
if False:
    MIDDLEWARE_CLASSES += ('Settings.BeautifulMiddleware.BeautifulMiddleware',)
ROOT_URLCONF = 'Settings.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'Settings.wsgi.application'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'haystack',
    'searchsettings',
    'thumbnailer',
    'project',
    'multiuploader',
    'exampleTheme',
    'userProfile',
    'captcha',
    'filemanager',
    'organization',
    'avatarBot',
    'taggit',
    'taggit_autocomplete',
    'testcases',
    'djangoratings',
    'threadedcomments',
    'django.contrib.comments',
    # So we can inline coffeescript
    'static_precompiler',
    # 'djcelery',
    'kombu.transport.django',
    # 'celery_haystack',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
COMMENTS_APP = 'threadedcomments'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
if False:
    # make all loggers use the console.
    # NOTE(review): dead toggle -- no 'console' handler is defined in
    # LOGGING above, so enabling this branch would yield a broken config.
    for logger in LOGGING['loggers']:
        LOGGING['loggers'][logger]['handlers'] = ['console']
# URL names / folder used by the multiuploader app.
MULTI_FILE_DELETE_URL = 'multi_delete'
MULTI_IMAGE_URL = 'multi_image'
MULTI_IMAGES_FOLDER = 'multiuploader_images'
# Point Django at the custom user-profile model
# (AUTH_PROFILE_MODULE; deprecated in newer Django versions -- verify).
AUTH_PROFILE_MODULE = "userProfile.userProfile"
# ----------------------------------------------------------------------
"""LIF Ensemble
Takes an intermediate representation of a LIF ensemble and returns a vertex and
appropriate callbacks to load and prepare the ensemble for simulation on
SpiNNaker. The build method also manages the partitioning of the ensemble into
appropriate sized slices.
"""
import collections
import numpy as np
from rig.machine import Cores, SDRAM
from six import iteritems
import struct
from nengo_spinnaker.builder.builder import InputPort, netlistspec, OutputPort
from nengo_spinnaker.builder.ports import EnsembleInputPort
from nengo_spinnaker.regions.filters import make_filter_regions
from .. import regions
from nengo_spinnaker.netlist import VertexSlice
from nengo_spinnaker import partition_and_cluster as partition
from nengo_spinnaker.utils.application import get_application
from nengo_spinnaker.utils import type_casts as tp
class EnsembleLIF(object):
    """Controller for an ensemble of LIF neurons.

    Builds the memory regions needed to simulate the ensemble on SpiNNaker,
    partitions the neurons into per-core slices and provides the callbacks
    used to load the resulting data onto the machine.
    """
    def __init__(self, ensemble):
        """Create a new LIF ensemble controller.

        Parameters
        ----------
        ensemble : nengo.Ensemble
            The ensemble this controller will build and load.
        """
        self.ensemble = ensemble
        # Constant input accumulated from connections optimised out of the
        # network (filled in elsewhere by the builder).
        self.direct_input = np.zeros(ensemble.size_in)
        # Probes simulated host-side rather than on the machine.
        self.local_probes = list()
    def make_vertices(self, model, n_steps): # TODO remove n_steps
        """Construct the data which can be loaded into the memory of a
        SpiNNaker machine.

        Returns a `netlistspec` containing one vertex per partitioned slice
        of the ensemble plus the load callback.
        """
        # Build encoders, gain and bias regions
        params = model.params[self.ensemble]
        # Combine the encoders with the gain and then convert to S1615 before
        # creating the region.
        encoders_with_gain = params.encoders * params.gain[:, np.newaxis]
        self.encoders_region = regions.MatrixRegion(
            tp.np_to_fix(encoders_with_gain),
            sliced_dimension=regions.MatrixPartitioning.rows
        )
        # Combine the direct input with the bias before converting to S1615 and
        # creating the region.
        bias_with_di = params.bias + np.dot(encoders_with_gain,
                                            self.direct_input)
        assert bias_with_di.ndim == 1
        self.bias_region = regions.MatrixRegion(
            tp.np_to_fix(bias_with_di),
            sliced_dimension=regions.MatrixPartitioning.rows
        )
        # Convert the gains to S1615 before creating the region
        self.gain_region = regions.MatrixRegion(
            tp.np_to_fix(params.gain),
            sliced_dimension=regions.MatrixPartitioning.rows
        )
        # Extract all the filters from the incoming connections
        incoming = model.get_signals_connections_to_object(self)
        self.input_filters, self.input_filter_routing = make_filter_regions(
            incoming[InputPort.standard], model.dt, True,
            model.keyspaces.filter_routing_tag
        )
        self.inhib_filters, self.inhib_filter_routing = make_filter_regions(
            incoming[EnsembleInputPort.global_inhibition], model.dt, True,
            model.keyspaces.filter_routing_tag
        )
        # Modulatory filters are currently always empty ({}).
        self.mod_filters, self.mod_filter_routing = make_filter_regions(
            {}, model.dt, True, model.keyspaces.filter_routing_tag
        )
        # Extract all the decoders for the outgoing connections and build the
        # regions for the decoders and the regions for the output keys.
        outgoing = model.get_signals_connections_from_object(self)
        decoders, output_keys = \
            get_decoders_and_keys(model, outgoing[OutputPort.standard])
        size_out = decoders.shape[1]
        # TODO: Include learnt decoders
        self.pes_region = PESRegion()
        # Decoders are scaled by 1/dt before fixed-point conversion.
        self.decoders_region = regions.MatrixRegion(
            tp.np_to_fix(decoders / model.dt),
            sliced_dimension=regions.MatrixPartitioning.rows
        )
        self.output_keys_region = regions.KeyspacesRegion(
            output_keys, fields=[regions.KeyField({'cluster': 'cluster'})]
        )
        # Create the regions list
        # NOTE(review): the order of this list must match the region layout
        # expected by the "ensemble" executable -- confirm before reordering.
        self.regions = [
            SystemRegion(self.ensemble.size_in,
                         size_out,
                         model.machine_timestep,
                         self.ensemble.neuron_type.tau_ref,
                         self.ensemble.neuron_type.tau_rc,
                         model.dt,
                         False # Base this on whether we have a probe attached
                         ),
            self.bias_region,
            self.encoders_region,
            self.decoders_region,
            self.output_keys_region,
            self.input_filters,
            self.input_filter_routing,
            self.inhib_filters,
            self.inhib_filter_routing,
            self.gain_region,
            self.mod_filters,
            self.mod_filter_routing,
            self.pes_region,
            None,
            None # Will be the spike recording region
        ]
        # Partition the ensemble and get a list of vertices to load to the
        # machine. We can expect to be DTCM or CPU bound, so the SDRAM bound
        # can be quite lax to allow for lots of data probing.
        # TODO: Include other DTCM usage
        # TODO: Include CPU usage constraint
        self.vertices = list()
        sdram_constraint = partition.Constraint(8*2**20) # Max 8MiB
        dtcm_constraint = partition.Constraint(64*2**10, .75) # 75% of 64KiB
        constraints = {
            sdram_constraint: lambda s: regions.utils.sizeof_regions(
                self.regions, s),
            dtcm_constraint: lambda s: regions.utils.sizeof_regions(
                self.regions, s),
        }
        for sl in partition.partition(slice(0, self.ensemble.n_neurons),
                                      constraints):
            resources = {
                Cores: 1,
                SDRAM: regions.utils.sizeof_regions(self.regions, sl),
            }
            vsl = VertexSlice(sl, get_application("ensemble"), resources)
            self.vertices.append(vsl)
        # Return the vertices and callback methods
        return netlistspec(self.vertices, self.load_to_machine)
    def load_to_machine(self, netlist, controller):
        """Load the ensemble data into memory."""
        # For each slice
        for vertex in self.vertices:
            # Layout the slice of SDRAM we have been given
            region_memory = regions.utils.create_app_ptr_and_region_files(
                netlist.vertices_memory[vertex], self.regions, vertex.slice)
            # Write in each region
            for region, mem in zip(self.regions, region_memory):
                if region is None:
                    # Placeholder entry (e.g. the spike recording region).
                    pass
                elif region is self.output_keys_region:
                    # The output keys region additionally needs the cluster.
                    self.output_keys_region.write_subregion_to_file(
                        mem, vertex.slice, cluster=vertex.cluster)
                else:
                    region.write_subregion_to_file(mem, vertex.slice)
    def before_simulation(self, netlist, controller, simulator, n_steps):
        """Load data for a specific number of steps to the machine."""
        # TODO When supported by executables
        raise NotImplementedError
    def after_simulation(self, netlist, controller, simulator, n_steps):
        """Retrieve data from a simulation and ensure."""
        # NOTE(review): placeholder -- the comments below describe intended
        # behaviour and are unreachable until this is implemented.
        raise NotImplementedError
        # If we have probed the spikes then retrieve the spike data and store
        # it in the simulator data.
class SystemRegion(collections.namedtuple(
    "SystemRegion", "n_input_dimensions, n_output_dimensions, "
                    "machine_timestep, t_ref, t_rc, dt, probe_spikes")):
    """Region of memory describing the general parameters of a LIF ensemble."""
    def sizeof(self, vertex_slice=slice(None)):
        """Get the number of bytes necessary to represent this region of
        memory.
        """
        # Always eight 32-bit words, regardless of the vertex slice.
        return 32
    sizeof_padded = sizeof
    def write_subregion_to_file(self, fp, vertex_slice):
        """Write the system region for a specific vertex slice to a file-like
        object.
        """
        neuron_count = vertex_slice.stop - vertex_slice.start
        words = (
            self.n_input_dimensions,
            self.n_output_dimensions,
            neuron_count,
            self.machine_timestep,
            int(self.t_ref // self.dt),            # refractory period in steps
            tp.value_to_fix(self.dt / self.t_rc),  # dt/tau_rc in fixed point
            0x1 if self.probe_spikes else 0x0,
            1,
        )
        fp.write(struct.pack("<8I", *words))
class PESRegion(regions.Region):
    """Region representing parameters for PES learning rules.

    PES is not yet implemented, so the region is a single zero word.
    """
    # TODO Implement PES
    def sizeof(self, *args):
        # One 32-bit word.
        return 4
    def write_subregion_to_file(self, fp, vertex_slice):
        # A single zero word indicates that no PES data is present.
        fp.write(b"\x00\x00\x00\x00")
def get_decoders_and_keys(model, signals_connections):
    """Get a combined decoder matrix and a list of keys to use to transmit
    elements decoded using the decoders.

    Parameters
    ----------
    model : builder model
        Provides `params`, mapping each connection to its built parameters
        (with `decoders` and `transform` attributes).
    signals_connections : dict
        Maps each outgoing signal to its list of connections; each signal
        must have exactly one connection.

    Returns
    -------
    (numpy.ndarray, list)
        The horizontally stacked decoder matrix (an empty ``(1, 0)`` array
        when there are no outgoing connections) and one keyspace per decoded
        dimension.
    """
    decoders = list()
    keys = list()
    # For each signal with a single connection we save the decoder and generate
    # appropriate keys
    for signal, connections in signals_connections.items():
        assert len(connections) == 1
        decoder = model.params[connections[0]].decoders
        transform = model.params[connections[0]].transform
        decoder = np.dot(transform, decoder.T)
        decoders.append(decoder.T)
        for i in range(decoder.shape[0]):
            keys.append(signal.keyspace(index=i))
    # Stack the decoders.  Guard the empty case: np.hstack raises on an empty
    # sequence, which previously crashed for ensembles with no outgoing
    # connections; return a valid 2-D array with zero columns instead.
    if decoders:
        decoders = np.hstack(decoders)
    else:
        decoders = np.array([[]])
    return decoders, keys
# Allow Ensembles with no outgoing connections
"""LIF Ensemble
Takes an intermediate representation of a LIF ensemble and returns a vertex and
appropriate callbacks to load and prepare the ensemble for simulation on
SpiNNaker. The build method also manages the partitioning of the ensemble into
appropriate sized slices.
"""
import collections
import numpy as np
from rig.machine import Cores, SDRAM
from six import iteritems
import struct
from nengo_spinnaker.builder.builder import InputPort, netlistspec, OutputPort
from nengo_spinnaker.builder.ports import EnsembleInputPort
from nengo_spinnaker.regions.filters import make_filter_regions
from .. import regions
from nengo_spinnaker.netlist import VertexSlice
from nengo_spinnaker import partition_and_cluster as partition
from nengo_spinnaker.utils.application import get_application
from nengo_spinnaker.utils import type_casts as tp
class EnsembleLIF(object):
    """Controller for an ensemble of LIF neurons.

    Builds the memory regions needed to simulate the ensemble on SpiNNaker,
    partitions the neurons into per-core slices and provides the callbacks
    used to load the resulting data onto the machine.
    """
    def __init__(self, ensemble):
        """Create a new LIF ensemble controller.

        Parameters
        ----------
        ensemble : nengo.Ensemble
            The ensemble this controller will build and load.
        """
        self.ensemble = ensemble
        # Constant input accumulated from connections optimised out of the
        # network (filled in elsewhere by the builder).
        self.direct_input = np.zeros(ensemble.size_in)
        # Probes simulated host-side rather than on the machine.
        self.local_probes = list()
    def make_vertices(self, model, n_steps): # TODO remove n_steps
        """Construct the data which can be loaded into the memory of a
        SpiNNaker machine.

        Returns a `netlistspec` containing one vertex per partitioned slice
        of the ensemble plus the load callback.
        """
        # Build encoders, gain and bias regions
        params = model.params[self.ensemble]
        # Combine the encoders with the gain and then convert to S1615 before
        # creating the region.
        encoders_with_gain = params.encoders * params.gain[:, np.newaxis]
        self.encoders_region = regions.MatrixRegion(
            tp.np_to_fix(encoders_with_gain),
            sliced_dimension=regions.MatrixPartitioning.rows
        )
        # Combine the direct input with the bias before converting to S1615 and
        # creating the region.
        bias_with_di = params.bias + np.dot(encoders_with_gain,
                                            self.direct_input)
        assert bias_with_di.ndim == 1
        self.bias_region = regions.MatrixRegion(
            tp.np_to_fix(bias_with_di),
            sliced_dimension=regions.MatrixPartitioning.rows
        )
        # Convert the gains to S1615 before creating the region
        self.gain_region = regions.MatrixRegion(
            tp.np_to_fix(params.gain),
            sliced_dimension=regions.MatrixPartitioning.rows
        )
        # Extract all the filters from the incoming connections
        incoming = model.get_signals_connections_to_object(self)
        self.input_filters, self.input_filter_routing = make_filter_regions(
            incoming[InputPort.standard], model.dt, True,
            model.keyspaces.filter_routing_tag
        )
        self.inhib_filters, self.inhib_filter_routing = make_filter_regions(
            incoming[EnsembleInputPort.global_inhibition], model.dt, True,
            model.keyspaces.filter_routing_tag
        )
        # Modulatory filters are currently always empty ({}).
        self.mod_filters, self.mod_filter_routing = make_filter_regions(
            {}, model.dt, True, model.keyspaces.filter_routing_tag
        )
        # Extract all the decoders for the outgoing connections and build the
        # regions for the decoders and the regions for the output keys.
        outgoing = model.get_signals_connections_from_object(self)
        decoders, output_keys = \
            get_decoders_and_keys(model, outgoing[OutputPort.standard])
        size_out = decoders.shape[1]
        # TODO: Include learnt decoders
        self.pes_region = PESRegion()
        # Decoders are scaled by 1/dt before fixed-point conversion.
        self.decoders_region = regions.MatrixRegion(
            tp.np_to_fix(decoders / model.dt),
            sliced_dimension=regions.MatrixPartitioning.rows
        )
        self.output_keys_region = regions.KeyspacesRegion(
            output_keys, fields=[regions.KeyField({'cluster': 'cluster'})]
        )
        # Create the regions list
        # NOTE(review): the order of this list must match the region layout
        # expected by the "ensemble" executable -- confirm before reordering.
        self.regions = [
            SystemRegion(self.ensemble.size_in,
                         size_out,
                         model.machine_timestep,
                         self.ensemble.neuron_type.tau_ref,
                         self.ensemble.neuron_type.tau_rc,
                         model.dt,
                         False # Base this on whether we have a probe attached
                         ),
            self.bias_region,
            self.encoders_region,
            self.decoders_region,
            self.output_keys_region,
            self.input_filters,
            self.input_filter_routing,
            self.inhib_filters,
            self.inhib_filter_routing,
            self.gain_region,
            self.mod_filters,
            self.mod_filter_routing,
            self.pes_region,
            None,
            None # Will be the spike recording region
        ]
        # Partition the ensemble and get a list of vertices to load to the
        # machine. We can expect to be DTCM or CPU bound, so the SDRAM bound
        # can be quite lax to allow for lots of data probing.
        # TODO: Include other DTCM usage
        # TODO: Include CPU usage constraint
        self.vertices = list()
        sdram_constraint = partition.Constraint(8*2**20) # Max 8MiB
        dtcm_constraint = partition.Constraint(64*2**10, .75) # 75% of 64KiB
        constraints = {
            sdram_constraint: lambda s: regions.utils.sizeof_regions(
                self.regions, s),
            dtcm_constraint: lambda s: regions.utils.sizeof_regions(
                self.regions, s),
        }
        for sl in partition.partition(slice(0, self.ensemble.n_neurons),
                                      constraints):
            resources = {
                Cores: 1,
                SDRAM: regions.utils.sizeof_regions(self.regions, sl),
            }
            vsl = VertexSlice(sl, get_application("ensemble"), resources)
            self.vertices.append(vsl)
        # Return the vertices and callback methods
        return netlistspec(self.vertices, self.load_to_machine)
    def load_to_machine(self, netlist, controller):
        """Load the ensemble data into memory."""
        # For each slice
        for vertex in self.vertices:
            # Layout the slice of SDRAM we have been given
            region_memory = regions.utils.create_app_ptr_and_region_files(
                netlist.vertices_memory[vertex], self.regions, vertex.slice)
            # Write in each region
            for region, mem in zip(self.regions, region_memory):
                if region is None:
                    # Placeholder entry (e.g. the spike recording region).
                    pass
                elif region is self.output_keys_region:
                    # The output keys region additionally needs the cluster.
                    self.output_keys_region.write_subregion_to_file(
                        mem, vertex.slice, cluster=vertex.cluster)
                else:
                    region.write_subregion_to_file(mem, vertex.slice)
    def before_simulation(self, netlist, controller, simulator, n_steps):
        """Load data for a specific number of steps to the machine."""
        # TODO When supported by executables
        raise NotImplementedError
    def after_simulation(self, netlist, controller, simulator, n_steps):
        """Retrieve data from a simulation and ensure."""
        # NOTE(review): placeholder -- the comments below describe intended
        # behaviour and are unreachable until this is implemented.
        raise NotImplementedError
        # If we have probed the spikes then retrieve the spike data and store
        # it in the simulator data.
class SystemRegion(collections.namedtuple(
    "SystemRegion", "n_input_dimensions, n_output_dimensions, "
                    "machine_timestep, t_ref, t_rc, dt, probe_spikes")):
    """Region of memory describing the general parameters of a LIF ensemble."""
    def sizeof(self, vertex_slice=slice(None)):
        """Get the number of bytes necessary to represent this region of
        memory.
        """
        # Always eight 32-bit words, regardless of the vertex slice.
        return 32
    sizeof_padded = sizeof
    def write_subregion_to_file(self, fp, vertex_slice):
        """Write the system region for a specific vertex slice to a file-like
        object.
        """
        neuron_count = vertex_slice.stop - vertex_slice.start
        words = (
            self.n_input_dimensions,
            self.n_output_dimensions,
            neuron_count,
            self.machine_timestep,
            int(self.t_ref // self.dt),            # refractory period in steps
            tp.value_to_fix(self.dt / self.t_rc),  # dt/tau_rc in fixed point
            0x1 if self.probe_spikes else 0x0,
            1,
        )
        fp.write(struct.pack("<8I", *words))
class PESRegion(regions.Region):
    """Region representing parameters for PES learning rules.

    PES is not yet implemented, so the region is a single zero word.
    """
    # TODO Implement PES
    def sizeof(self, *args):
        # One 32-bit word.
        return 4
    def write_subregion_to_file(self, fp, vertex_slice):
        # A single zero word indicates that no PES data is present.
        fp.write(b"\x00\x00\x00\x00")
def get_decoders_and_keys(model, signals_connections):
    """Get a combined decoder matrix and a list of keys to use to transmit
    elements decoded using the decoders.

    Parameters
    ----------
    model : builder model
        Provides `params`, mapping each connection to its built parameters
        (with `decoders` and `transform` attributes).
    signals_connections : dict
        Maps each outgoing signal to its list of connections; each signal
        must have exactly one connection.

    Returns
    -------
    (numpy.ndarray, list)
        The horizontally stacked decoder matrix (an empty ``(1, 0)`` array
        when there are no outgoing connections) and one keyspace per decoded
        dimension.
    """
    decoders = list()
    keys = list()
    # For each signal with a single connection we save the decoder and generate
    # appropriate keys.  Plain dict.items() replaces six.iteritems; both run
    # on Python 2 and 3.
    for signal, connections in signals_connections.items():
        assert len(connections) == 1
        decoder = model.params[connections[0]].decoders
        transform = model.params[connections[0]].transform
        decoder = np.dot(transform, decoder.T)
        decoders.append(decoder.T)
        for i in range(decoder.shape[0]):
            keys.append(signal.keyspace(index=i))
    # Stack the decoders; an ensemble with no outgoing connections yields a
    # valid 2-D array with zero columns.
    if decoders:
        decoders = np.hstack(decoders)
    else:
        decoders = np.array([[]])
    return decoders, keys
# ----------------------------------------------------------------------
#!/usr/bin/python
"""
**Fibonacci Sequence** - Enter a number and have the program generate the Fibonacci sequence to that number or to the Nth number.
"""
def fibonnaciSequence(n):
    """Return the first *n* Fibonacci numbers as a comma-separated string.

    Parameters
    ----------
    n : int
        How many terms to generate; must be a positive integer.

    Raises
    ------
    ValueError
        If *n* is not positive (``assert`` was replaced with a real
        exception, since asserts are stripped under ``python -O``).
    """
    if n <= 0:
        raise ValueError("n must be a positive integer")
    sequence = [1]  # The series starts with 1.
    if n > 1:
        sequence.append(1)  # ... and the second term is also 1.
    while len(sequence) < n:
        # Each subsequent term is the sum of the previous two.
        sequence.append(sequence[-1] + sequence[-2])
    # Join the terms as strings, separated by commas.
    return ', '.join(str(term) for term in sequence)
def main():
    """Prompt for a count and print that many Fibonacci numbers."""
    count = int(input('How many numbers do you need? '))
    print(fibonnaciSequence(count))


if __name__ == "__main__":
    main()
# Adding comments to fibonacci.py
#!/usr/bin/python
"""
**Fibonacci Sequence**
Enter a number and have the program generate the Fibonacci sequence
to that number or to the Nth number.
"""
def fibonnaciSequence(n):
    """Return the first *n* Fibonacci numbers as a comma-separated string.

    Parameters
    ----------
    n : int
        How many terms to generate; must be a positive integer.

    Raises
    ------
    ValueError
        If *n* is not positive (``assert`` was replaced with a real
        exception, since asserts are stripped under ``python -O``).
    """
    if n <= 0:
        raise ValueError("n must be a positive integer")
    sequence = [1]  # The series starts with 1.
    if n > 1:
        sequence.append(1)  # ... and the second term is also 1.
    while len(sequence) < n:
        # Each subsequent term is the sum of the previous two.
        sequence.append(sequence[-1] + sequence[-2])
    # Join the terms as strings, separated by commas.
    return ', '.join(str(term) for term in sequence)
def main():
    """Prompt for a count and print that many Fibonacci numbers."""
    count = int(input('How many numbers do you need? '))
    print(fibonnaciSequence(count))


if __name__ == "__main__":
    main()
# ----------------------------------------------------------------------
class ApiObject:
    """Wraps a dict of API data, exposing its keys as attributes."""

    def __init__(self, _type=None, **data):
        self._type = _type
        self.data = data

    def get_or_fail(self, key):
        """Return the wrapped value for *key*; raise KeyError if absent."""
        return self.wrap_api_object(self.data[key])

    def get_or_default(self, key, default=None):
        """Return the wrapped value for *key*, or *default* if absent."""
        return self.wrap_api_object(self.data.get(key, default))

    def __getattr__(self, item):
        # A trailing underscore lets callers reach keys that would collide
        # with Python keywords (e.g. ``from_``).
        if len(item) > 1 and item.endswith("_"):
            item = item[:-1]
        return self.get_or_default(item)

    @staticmethod
    def wrap_api_object(data):
        """Wrap dicts and lists in Api* helpers; return scalars untouched."""
        if type(data) is dict:
            return ApiObject(**data)
        if type(data) is list:
            return ApiObjectList(data)
        return data
class ApiObjectList:
    """Lazily wraps each element of a list of API data."""

    def __init__(self, data_list: list):
        self.data_list = data_list

    def __iter__(self):
        # Yield each element wrapped on demand.
        return (ApiObject.wrap_api_object(item) for item in self.data_list)
class OutApiObject(ApiObject):
    """API object sent *to* the API; supports local-only parameters that
    are consumed by the client rather than transmitted."""

    LOCAL_PARAM_ERROR_CALLBACK = "__error_callback"
    LOCAL_PARAMS = [LOCAL_PARAM_ERROR_CALLBACK]

    def with_error_callback(self, func):
        """Attach *func* to be invoked if the request fails; returns self."""
        self.data[self.LOCAL_PARAM_ERROR_CALLBACK] = func
        return self
class Message(OutApiObject):
    """Fluent builder for an outgoing text message."""

    def to_chat(self, chat=None, message=None, chat_id=None):
        """Target a chat given directly, via *chat*, or via *message*."""
        # Most specific source wins: message -> chat -> chat_id.
        if message is not None:
            chat = message.chat
        if chat is not None:
            chat_id = chat.id
        self.data["chat_id"] = chat_id
        return self

    def reply_to_message(self, message=None, message_id=None):
        """Mark this message as a reply to *message* (or *message_id*)."""
        self.data["reply_to_message_id"] = (
            message.message_id if message is not None else message_id
        )
        return self

    def to_chat_replying(self, message):
        """Send to *message*'s chat as a reply to it."""
        return self.to_chat(message=message).reply_to_message(message)

    @staticmethod
    def create(text, chat_id=None, **kwargs):
        """Create a new message with the given *text*."""
        return Message(_type=Message, text=text, chat_id=chat_id, **kwargs)

    @staticmethod
    def create_reply(message, reply_text):
        """Create a reply to *message* containing *reply_text*."""
        return Message.create(reply_text).to_chat(message=message).reply_to_message(message)
class MessageEntityParser:
    """Extracts entity substrings from a message text.

    Entity offsets and lengths are expressed in UTF-16 code units, so the
    text is kept UTF-16 encoded and sliced at byte positions (2 bytes of
    BOM, then 2 bytes per code unit).
    """

    def __init__(self, message):
        self.text_as_utf16_bytes = message.text.encode("utf-16")

    def get_entity_text(self, entity):
        """Return the text covered by *entity*."""
        begin = 2 + entity.offset * 2
        end = begin + entity.length * 2
        return self.text_as_utf16_bytes[begin:end].decode("utf-16")

    def get_text_after_entity(self, entity):
        """Return the text following *entity* to the end of the message."""
        begin = 2 + (entity.offset + entity.length) * 2
        return self.text_as_utf16_bytes[begin:].decode("utf-16")
# Create Photo object extending Message
class ApiObject:
    """Wraps a dict of API data, exposing its keys as attributes."""

    def __init__(self, _type=None, **data):
        self._type = _type
        self.data = data

    def get_or_fail(self, key):
        """Return the wrapped value for *key*; raise KeyError if absent."""
        return self.wrap_api_object(self.data[key])

    def get_or_default(self, key, default=None):
        """Return the wrapped value for *key*, or *default* if absent."""
        return self.wrap_api_object(self.data.get(key, default))

    def __getattr__(self, item):
        # A trailing underscore lets callers reach keys that would collide
        # with Python keywords (e.g. ``from_``).
        if len(item) > 1 and item.endswith("_"):
            item = item[:-1]
        return self.get_or_default(item)

    @staticmethod
    def wrap_api_object(data):
        """Wrap dicts and lists in Api* helpers; return scalars untouched."""
        if type(data) is dict:
            return ApiObject(**data)
        if type(data) is list:
            return ApiObjectList(data)
        return data
class ApiObjectList:
    """Lazily wraps each element of a list of API data."""

    def __init__(self, data_list: list):
        self.data_list = data_list

    def __iter__(self):
        # Yield each element wrapped on demand.
        return (ApiObject.wrap_api_object(item) for item in self.data_list)
class OutApiObject(ApiObject):
    """API object sent *to* the API; supports local-only parameters that
    are consumed by the client rather than transmitted."""

    LOCAL_PARAM_ERROR_CALLBACK = "__error_callback"
    LOCAL_PARAMS = [LOCAL_PARAM_ERROR_CALLBACK]

    def with_error_callback(self, func):
        """Attach *func* to be invoked if the request fails; returns self."""
        self.data[self.LOCAL_PARAM_ERROR_CALLBACK] = func
        return self
class Message(OutApiObject):
    """Fluent builder for an outgoing text message."""

    def to_chat(self, chat=None, message=None, chat_id=None):
        """Target a chat given directly, via *chat*, or via *message*."""
        # Most specific source wins: message -> chat -> chat_id.
        if message is not None:
            chat = message.chat
        if chat is not None:
            chat_id = chat.id
        self.data["chat_id"] = chat_id
        return self

    def reply_to_message(self, message=None, message_id=None):
        """Mark this message as a reply to *message* (or *message_id*)."""
        self.data["reply_to_message_id"] = (
            message.message_id if message is not None else message_id
        )
        return self

    def to_chat_replying(self, message):
        """Send to *message*'s chat as a reply to it."""
        return self.to_chat(message=message).reply_to_message(message)

    @staticmethod
    def create(text, chat_id=None, **kwargs):
        """Create a new message with the given *text*."""
        return Message(_type=Message, text=text, chat_id=chat_id, **kwargs)

    @staticmethod
    def create_reply(message, reply_text):
        """Create a reply to *message* containing *reply_text*."""
        return Message.create(reply_text).to_chat(message=message).reply_to_message(message)
class Photo(Message):
    """Fluent builder for an outgoing photo message."""

    @staticmethod
    def create_photo(file_id):
        """Create a Photo message for an already-uploaded *file_id*."""
        return Photo(photo=file_id, _type=Photo)
class MessageEntityParser:
    """Extracts entity substrings from a message text.

    Entity offsets and lengths are expressed in UTF-16 code units, so the
    text is kept UTF-16 encoded and sliced at byte positions (2 bytes of
    BOM, then 2 bytes per code unit).
    """

    def __init__(self, message):
        self.text_as_utf16_bytes = message.text.encode("utf-16")

    def get_entity_text(self, entity):
        """Return the text covered by *entity*."""
        begin = 2 + entity.offset * 2
        end = begin + entity.length * 2
        return self.text_as_utf16_bytes[begin:end].decode("utf-16")

    def get_text_after_entity(self, entity):
        """Return the text following *entity* to the end of the message."""
        begin = 2 + (entity.offset + entity.length) * 2
        return self.text_as_utf16_bytes[begin:].decode("utf-16")
# ----------------------------------------------------------------------
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from ChatExchange.chatexchange.client import Client
import HTMLParser
import md5
import ConfigParser
from helpers import environ_or_none
import threading
class GlobalVars:
false_positives = []
whitelisted_users = []
blacklisted_users = []
ignored_posts = []
auto_ignored_posts = []
startup_utc = datetime.utcnow().strftime("%H:%M:%S")
latest_questions = []
api_backoff_time = 0
charcoal_room_id = "11540"
meta_tavern_room_id = "89"
socvr_room_id = "41570"
blockedTime = {"all": 0, charcoal_room_id: 0, meta_tavern_room_id: 0, socvr_room_id: 0}
experimental_reasons = [] # Don't widely report these
non_socvr_reasons = [] # Don't report to SOCVR
non_tavern_reasons = [ # Don't report in the Tavern
"all-caps body",
"all-caps answer",
"repeating characters in body",
"repeating characters in title",
"repeating characters in answer",
"few unique characters in body",
"few unique characters in answer",
"title has only one unique char",
"phone number detected in title",
"offensive body detected",
"no whitespace in body",
"no whitespace in answer",
]
non_tavern_sites = ["stackoverflow.com"]
parser = HTMLParser.HTMLParser()
wrap = Client("stackexchange.com")
wrapm = Client("meta.stackexchange.com")
wrapso = Client("stackoverflow.com")
privileged_users = {
charcoal_room_id: [
"117490", # Normal Human
"66258", # Andy
"31768", # ManishEarth
"103081", # hichris123
"73046", # Undo
"88521", # ProgramFOX
"59776", # Doorknob
"31465", # Seth
"88577", # Santa Claus
"34124", # Andrew Leach
"54229", # apnorton
"20459", # S.L. Barth
"32436", # tchrist
"30477", # Brock Adams
"58529", # ferrybig
"145208", # Robert Longson
"178825", # Ms Yvette
"171800", # JAL
"64978", # PeterJ
"125141", # Jeffrey Bosboom
"54902", # bummi
"135450", # M.A.R.
"145604", # Quill
"60548", # rene
"121401", # michaelpri
"116218", # JamesENL
"82927", # Braiam
"11606", # bwDraco
"19761", # Ilmari Karonen
"108271", # Andrew T.
"171054", # Magisch
"190011", # Petter Friberg
"165661", # Tunaki
"145086", # Wai Ha Lee
"137665", # ByteCommander
"147884", # wythagoras
"186395", # Åna
"193364", # Ashish Ahuja
"163686", # Gothdo
"145827", # angussidney
"244748", # Supreme Leader SnokeDetector (angussidney's sock)
"121520", # ArtOfCode
"244382", # Lt. A. Code (ArtOfCode's sock to test things with)
"137388", # QPaysTaxes
"212311", # Ryan Bemrose
"172397", # Kyll
"224538", # FrankerZ
"61202", # OldSkool
"56166", # Jan Dvorak
"133966", # DavidPostill
"22839", # djsmiley2k
"97389", # Kaz Wolfe
"144962", # DJMcMayhem
"139423", # NobodyNada
"62118", # tripleee
"130558", # Registered User
"128113", # arda
"164318", # Glorfindel
"175347", # Floern
"180274" # Alexander O'Mara
],
meta_tavern_room_id: [
"315433", # Normal Human
"244519", # CRABOLO
"244382", # TGMCians
"194047", # Jan Dvorak
"158100", # rene
"178438", # Manishearth
"237685", # hichris123
"215468", # Undo
"229438", # ProgramFOX
"180276", # Doorknob
"161974", # Lynn Crumbling
"186281", # Andy
"266094", # Unihedro
"245167", # Infinite Recursion
"230261", # Jason C
"213575", # Braiam
"241919", # Andrew T.
"203389", # backwards-Seth
"202832", # Mooseman
"160017", # bwDraco
"201151", # bummi
"188558", # Frank
"229166", # Santa Claus
"159034", # Kevin Brown
"203972", # PeterJ
"188673", # Alexis King
"258672", # AstroCB
"227577", # Sam
"255735", # cybermonkey
"279182", # Ixrec
"271104", # James
"220428", # Qantas 94 Heavy
"153355", # tchrist
"238426", # Ed Cottrell
"166899", # Second Rikudo
"287999", # ASCIIThenANSI
"208518", # JNat
"284141", # michaelpri
"260312", # vaultah
"244062", # SouravGhosh
"152859", # Shadow Wizard
"201314", # apnorton
"280934", # M.A.Ramezani
"200235", # durron597
"148310", # Awesome Poodles / Brock Adams
"168333", # S.L. Barth
"257207", # Unikitty
"244282", # DroidDev
"163250", # Cupcake
"298265", # BoomsPlus
"253560", # josilber
"244254", # misterManSam
"188189", # Robert Longson
"174699", # Ilmari Karonen
"202362", # chmod 666 telkitty
"289717", # Quill
"237813", # bjb568
"311345", # Simon Klaver
"171881", # rekire
"260388", # Pandya
"310756", # Ms Yvette
"262399", # Jeffrey Bosboom
"242209", # JAL
"280883", # ByteCommander
"302251", # kos
"262823", # ArtOfCode
"215067", # Ferrybig
"308386", # Magisch
"285368" # angussidney
],
socvr_room_id: [
"1849664", # Undo
"2581872", # hichris123
"1198729", # Manishearth
"3717023", # Normal Human aka 1999
"2619912", # ProgramFOX
"578411", # rene
"1043380", # gunr2171
"2246344", # Sam
"2756409", # TylerH
"1768232", # durron597
"359284", # Kevin Brown
"258400", # easwee
"3622940", # Unihedron
"3204551", # Deduplicator
"4342498", # NathanOliver
"4639281", # Tiny Giant
"3093387", # josilber
"1652962", # cimmanon
"1677912", # Mogsdad
"656243", # Lynn Crumbling
"3933332", # Rizier123
"2422013", # cybermonkey
"3478852", # Nisse Engström
"2302862", # Siguza
"1324", # Paul Roub
"1743880", # Tunaki
"1663001", # DavidG
"2415822", # JAL
"4174897", # Kyll
"5299236", # Kevin Guan
"4050842", # Thaillie
"1816093", # Drew
"874188", # Triplee
"880772", # approxiblue
"1835379", # Cerbrus
"3956566", # JamesENL
"2357233", # Ms Yvette
"3155639", # AlexanderOMara
"462627", # Praveen Kumar
"4490559", # intboolstring
"1364007", # Wai Ha Lee
"1699210", # bummi
"563532", # Rob
"5389107", # Magisch
"4099593", # bhargav-rao
"1542723", # Ferrybig
"2025923", # Tushar
"5292302", # Petter Friberg
"792066", # Braiam
"5666987", # Ian
"3160466", # ArtOfCode
"5735775", # Ashish Ahuja
"3476191", # Nobody Nada
"2227743", # Eric D
"821878", # Ryan Bemrose
"1413395", # Panta Rei
"4875631", # FrankerZ
"2958086", # Compass
"499214", # JanDvorak
"5647260", # Andrew L.
"559745" # Floern
]
}
    # --- Privilege / identity bookkeeping --------------------------------
    # Computed later from privileged_users; None until then.
    code_privileged_users = None

    # SmokeDetector's own chat user id per room.
    smokeDetector_user_id = {charcoal_room_id: "120914", meta_tavern_room_id: "266345",
                             socvr_room_id: "3735529"}

    # md5(author name) -> display name, for committers who prefer an alias.
    censored_committer_names = {"3f4ed0f38df010ce300dba362fa63a62": "Undo1"}

    # Current git commit info, shown in chat status messages.
    commit = os.popen('git log --pretty=format:"%h" -n 1').read()
    commit_author = os.popen('git log --pretty=format:"%an" -n 1').read()
    if md5.new(commit_author).hexdigest() in censored_committer_names:
        commit_author = censored_committer_names[md5.new(commit_author).hexdigest()]
    commit_with_author = os.popen('git log --pretty=format:"%h (' + commit_author + ': *%s*)" -n 1').read()
    on_master = os.popen("git rev-parse --abbrev-ref HEAD").read().strip() == "master"

    # Chat room handles; populated once the chat clients join their rooms.
    charcoal_hq = None
    tavern_on_the_meta = None
    socvr = None

    s = ""            # current status message
    s_reverted = ""   # status message used after a revert
    specialrooms = []
    apiquota = -1     # remaining SE API quota; -1 until the first API response
    bodyfetcher = None
    se_sites = []
    users_chatting = {meta_tavern_room_id: [], charcoal_room_id: [], socvr_room_id: []}
    why_data = []
    why_data_allspam = []
    notifications = []
    listen_to_these_if_edited = []
    multiple_reporters = []
    api_calls_per_site = {}
    # Serializes bookkeeping of SE API calls across threads.
    api_request_lock = threading.Lock()

    # Read the local config file, falling back to the CI config.
    config = ConfigParser.RawConfigParser()
    if os.path.isfile('config'):
        config.read('config')
    else:
        config.read('config.ci')

    latest_smokedetector_messages = {meta_tavern_room_id: [], charcoal_room_id: [], socvr_room_id: []}

    # environ_or_none defined in helpers.py
    bot_name = environ_or_none("SMOKEDETECTOR_NAME") or "SmokeDetector"
    bot_repository = environ_or_none("SMOKEDETECTOR_REPO") or "//github.com/Charcoal-SE/SmokeDetector"
    chatmessage_prefix = "[{}]({})".format(bot_name, bot_repository)

    site_id_dict = {}
    post_site_id_to_question = {}

    location = config.get("Config", "location")
    print location

    # Metasmoke connection settings; every option below is optional.
    metasmoke_ws = None

    try:
        metasmoke_host = config.get("Config", "metasmoke_host")
        print metasmoke_host
    except ConfigParser.NoOptionError:
        metasmoke_host = None
        print "metasmoke host not found. Set it as metasmoke_host in the config file. See https://github.com/Charcoal-SE/metasmoke."

    try:
        metasmoke_key = config.get("Config", "metasmoke_key")
    except ConfigParser.NoOptionError:
        metasmoke_key = ""
        print "No metasmoke key found, which is okay if both are running on the same host"

    try:
        metasmoke_ws_host = config.get("Config", "metasmoke_ws_host")
    except ConfigParser.NoOptionError:
        metasmoke_ws_host = ""
        print "No metasmoke websocket host found, which is okay if you're anti-websocket"

    try:
        github_username = config.get("Config", "github_username")
        github_password = config.get("Config", "github_password")
    except ConfigParser.NoOptionError:
        github_username = None
        github_password = None
Add Rob as a privileged user in Charcoal HQ, including --autopull permission.
He is already privileged in SOCVR.
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from ChatExchange.chatexchange.client import Client
import HTMLParser
import md5
import ConfigParser
from helpers import environ_or_none
import threading
class GlobalVars:
    """Shared mutable state for SmokeDetector.

    Every attribute is class-level and accessed as ``GlobalVars.<name>`` from
    the other modules; the class is never instantiated.
    """

    # Post/user bookkeeping lists, mutated at runtime.
    false_positives = []
    whitelisted_users = []
    blacklisted_users = []
    ignored_posts = []
    auto_ignored_posts = []
    startup_utc = datetime.utcnow().strftime("%H:%M:%S")  # bot start time (UTC)
    latest_questions = []
    api_backoff_time = 0  # SE API back-off deadline

    # Chat room ids on their respective chat servers.
    charcoal_room_id = "11540"
    meta_tavern_room_id = "89"
    socvr_room_id = "41570"
    blockedTime = {"all": 0, charcoal_room_id: 0, meta_tavern_room_id: 0, socvr_room_id: 0}

    experimental_reasons = []  # Don't widely report these
    non_socvr_reasons = []  # Don't report to SOCVR
    non_tavern_reasons = [  # Don't report in the Tavern
        "all-caps body",
        "all-caps answer",
        "repeating characters in body",
        "repeating characters in title",
        "repeating characters in answer",
        "few unique characters in body",
        "few unique characters in answer",
        "title has only one unique char",
        "phone number detected in title",
        "offensive body detected",
        "no whitespace in body",
        "no whitespace in answer",
    ]
    non_tavern_sites = ["stackoverflow.com"]

    parser = HTMLParser.HTMLParser()
    # One chat client per chat server.
    wrap = Client("stackexchange.com")
    wrapm = Client("meta.stackexchange.com")
    wrapso = Client("stackoverflow.com")

    # Room id -> list of chat user ids allowed to run privileged commands.
    privileged_users = {
        charcoal_room_id: [
            "117490",  # Normal Human
            "66258",  # Andy
            "31768",  # ManishEarth
            "103081",  # hichris123
            "73046",  # Undo
            "88521",  # ProgramFOX
            "59776",  # Doorknob
            "31465",  # Seth
            "88577",  # Santa Claus
            "34124",  # Andrew Leach
            "54229",  # apnorton
            "20459",  # S.L. Barth
            "32436",  # tchrist
            "30477",  # Brock Adams
            "58529",  # ferrybig
            "145208",  # Robert Longson
            "178825",  # Ms Yvette
            "171800",  # JAL
            "64978",  # PeterJ
            "125141",  # Jeffrey Bosboom
            "54902",  # bummi
            "135450",  # M.A.R.
            "145604",  # Quill
            "60548",  # rene
            "121401",  # michaelpri
            "116218",  # JamesENL
            "82927",  # Braiam
            "11606",  # bwDraco
            "19761",  # Ilmari Karonen
            "108271",  # Andrew T.
            "171054",  # Magisch
            "190011",  # Petter Friberg
            "165661",  # Tunaki
            "145086",  # Wai Ha Lee
            "137665",  # ByteCommander
            "147884",  # wythagoras
            "186395",  # Åna
            "193364",  # Ashish Ahuja
            "163686",  # Gothdo
            "145827",  # angussidney
            "244748",  # Supreme Leader SnokeDetector (angussidney's sock)
            "121520",  # ArtOfCode
            "244382",  # Lt. A. Code (ArtOfCode's sock to test things with)
            "137388",  # QPaysTaxes
            "212311",  # Ryan Bemrose
            "172397",  # Kyll
            "224538",  # FrankerZ
            "61202",  # OldSkool
            "56166",  # Jan Dvorak
            "133966",  # DavidPostill
            "22839",  # djsmiley2k
            "97389",  # Kaz Wolfe
            "144962",  # DJMcMayhem
            "139423",  # NobodyNada
            "62118",  # tripleee
            "130558",  # Registered User
            "128113",  # arda
            "164318",  # Glorfindel
            "175347",  # Floern
            "180274",  # Alexander O'Mara
            "158742"  # Rob
        ],
        meta_tavern_room_id: [
            "315433",  # Normal Human
            "244519",  # CRABOLO
            "244382",  # TGMCians
            "194047",  # Jan Dvorak
            "158100",  # rene
            "178438",  # Manishearth
            "237685",  # hichris123
            "215468",  # Undo
            "229438",  # ProgramFOX
            "180276",  # Doorknob
            "161974",  # Lynn Crumbling
            "186281",  # Andy
            "266094",  # Unihedro
            "245167",  # Infinite Recursion
            "230261",  # Jason C
            "213575",  # Braiam
            "241919",  # Andrew T.
            "203389",  # backwards-Seth
            "202832",  # Mooseman
            "160017",  # bwDraco
            "201151",  # bummi
            "188558",  # Frank
            "229166",  # Santa Claus
            "159034",  # Kevin Brown
            "203972",  # PeterJ
            "188673",  # Alexis King
            "258672",  # AstroCB
            "227577",  # Sam
            "255735",  # cybermonkey
            "279182",  # Ixrec
            "271104",  # James
            "220428",  # Qantas 94 Heavy
            "153355",  # tchrist
            "238426",  # Ed Cottrell
            "166899",  # Second Rikudo
            "287999",  # ASCIIThenANSI
            "208518",  # JNat
            "284141",  # michaelpri
            "260312",  # vaultah
            "244062",  # SouravGhosh
            "152859",  # Shadow Wizard
            "201314",  # apnorton
            "280934",  # M.A.Ramezani
            "200235",  # durron597
            "148310",  # Awesome Poodles / Brock Adams
            "168333",  # S.L. Barth
            "257207",  # Unikitty
            "244282",  # DroidDev
            "163250",  # Cupcake
            "298265",  # BoomsPlus
            "253560",  # josilber
            "244254",  # misterManSam
            "188189",  # Robert Longson
            "174699",  # Ilmari Karonen
            "202362",  # chmod 666 telkitty
            "289717",  # Quill
            "237813",  # bjb568
            "311345",  # Simon Klaver
            "171881",  # rekire
            "260388",  # Pandya
            "310756",  # Ms Yvette
            "262399",  # Jeffrey Bosboom
            "242209",  # JAL
            "280883",  # ByteCommander
            "302251",  # kos
            "262823",  # ArtOfCode
            "215067",  # Ferrybig
            "308386",  # Magisch
            "285368"  # angussidney
        ],
        socvr_room_id: [
            "1849664",  # Undo
            "2581872",  # hichris123
            "1198729",  # Manishearth
            "3717023",  # Normal Human aka 1999
            "2619912",  # ProgramFOX
            "578411",  # rene
            "1043380",  # gunr2171
            "2246344",  # Sam
            "2756409",  # TylerH
            "1768232",  # durron597
            "359284",  # Kevin Brown
            "258400",  # easwee
            "3622940",  # Unihedron
            "3204551",  # Deduplicator
            "4342498",  # NathanOliver
            "4639281",  # Tiny Giant
            "3093387",  # josilber
            "1652962",  # cimmanon
            "1677912",  # Mogsdad
            "656243",  # Lynn Crumbling
            "3933332",  # Rizier123
            "2422013",  # cybermonkey
            "3478852",  # Nisse Engström
            "2302862",  # Siguza
            "1324",  # Paul Roub
            "1743880",  # Tunaki
            "1663001",  # DavidG
            "2415822",  # JAL
            "4174897",  # Kyll
            "5299236",  # Kevin Guan
            "4050842",  # Thaillie
            "1816093",  # Drew
            "874188",  # Triplee
            "880772",  # approxiblue
            "1835379",  # Cerbrus
            "3956566",  # JamesENL
            "2357233",  # Ms Yvette
            "3155639",  # AlexanderOMara
            "462627",  # Praveen Kumar
            "4490559",  # intboolstring
            "1364007",  # Wai Ha Lee
            "1699210",  # bummi
            "563532",  # Rob
            "5389107",  # Magisch
            "4099593",  # bhargav-rao
            "1542723",  # Ferrybig
            "2025923",  # Tushar
            "5292302",  # Petter Friberg
            "792066",  # Braiam
            "5666987",  # Ian
            "3160466",  # ArtOfCode
            "5735775",  # Ashish Ahuja
            "3476191",  # Nobody Nada
            "2227743",  # Eric D
            "821878",  # Ryan Bemrose
            "1413395",  # Panta Rei
            "4875631",  # FrankerZ
            "2958086",  # Compass
            "499214",  # JanDvorak
            "5647260",  # Andrew L.
            "559745"  # Floern
        ]
    }

    # --- Privilege / identity bookkeeping --------------------------------
    # Computed later from privileged_users; None until then.
    code_privileged_users = None

    # SmokeDetector's own chat user id per room.
    smokeDetector_user_id = {charcoal_room_id: "120914", meta_tavern_room_id: "266345",
                             socvr_room_id: "3735529"}

    # md5(author name) -> display name, for committers who prefer an alias.
    censored_committer_names = {"3f4ed0f38df010ce300dba362fa63a62": "Undo1"}

    # Current git commit info, shown in chat status messages.
    commit = os.popen('git log --pretty=format:"%h" -n 1').read()
    commit_author = os.popen('git log --pretty=format:"%an" -n 1').read()
    if md5.new(commit_author).hexdigest() in censored_committer_names:
        commit_author = censored_committer_names[md5.new(commit_author).hexdigest()]
    commit_with_author = os.popen('git log --pretty=format:"%h (' + commit_author + ': *%s*)" -n 1').read()
    on_master = os.popen("git rev-parse --abbrev-ref HEAD").read().strip() == "master"

    # Chat room handles; populated once the chat clients join their rooms.
    charcoal_hq = None
    tavern_on_the_meta = None
    socvr = None

    s = ""            # current status message
    s_reverted = ""   # status message used after a revert
    specialrooms = []
    apiquota = -1     # remaining SE API quota; -1 until the first API response
    bodyfetcher = None
    se_sites = []
    users_chatting = {meta_tavern_room_id: [], charcoal_room_id: [], socvr_room_id: []}
    why_data = []
    why_data_allspam = []
    notifications = []
    listen_to_these_if_edited = []
    multiple_reporters = []
    api_calls_per_site = {}
    # Serializes bookkeeping of SE API calls across threads.
    api_request_lock = threading.Lock()

    # Read the local config file, falling back to the CI config.
    config = ConfigParser.RawConfigParser()
    if os.path.isfile('config'):
        config.read('config')
    else:
        config.read('config.ci')

    latest_smokedetector_messages = {meta_tavern_room_id: [], charcoal_room_id: [], socvr_room_id: []}

    # environ_or_none defined in helpers.py
    bot_name = environ_or_none("SMOKEDETECTOR_NAME") or "SmokeDetector"
    bot_repository = environ_or_none("SMOKEDETECTOR_REPO") or "//github.com/Charcoal-SE/SmokeDetector"
    chatmessage_prefix = "[{}]({})".format(bot_name, bot_repository)

    site_id_dict = {}
    post_site_id_to_question = {}

    location = config.get("Config", "location")
    print location

    # Metasmoke connection settings; every option below is optional.
    metasmoke_ws = None

    try:
        metasmoke_host = config.get("Config", "metasmoke_host")
        print metasmoke_host
    except ConfigParser.NoOptionError:
        metasmoke_host = None
        print "metasmoke host not found. Set it as metasmoke_host in the config file. See https://github.com/Charcoal-SE/metasmoke."

    try:
        metasmoke_key = config.get("Config", "metasmoke_key")
    except ConfigParser.NoOptionError:
        metasmoke_key = ""
        print "No metasmoke key found, which is okay if both are running on the same host"

    try:
        metasmoke_ws_host = config.get("Config", "metasmoke_ws_host")
    except ConfigParser.NoOptionError:
        metasmoke_ws_host = ""
        print "No metasmoke websocket host found, which is okay if you're anti-websocket"

    try:
        github_username = config.get("Config", "github_username")
        github_password = config.get("Config", "github_password")
    except ConfigParser.NoOptionError:
        github_username = None
        github_password = None
|
Revert "make atomic_replace use shutil.copy2 instead of os.rename() so it will work across filesystems".
The atomic_replace function was leaving some temporary files behind, so we are reverting to the previous implementation for now; a better implementation will follow soon.
This reverts commit f74a1fa4f0b1a928a85c1316dfceb42637624a2f.
|
from django.test import TestCase
# Create your tests here.
Removed keywords/tests.py; keyword tests belong in the main test folder.
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr, flt, fmt_money, formatdate, getdate
from frappe import msgprint, _
from erpnext.setup.utils import get_company_currency
from erpnext.controllers.accounts_controller import AccountsController
class JournalVoucher(AccountsController):
	"""Journal Voucher controller.

	Validates that debits equal credits, cross-links entry rows against
	invoices / other vouchers, and posts GL entries on submit.
	"""

	def __init__(self, arg1, arg2=None):
		super(JournalVoucher, self).__init__(arg1, arg2)
		self.master_type = {}
		# Lazily-populated caches; -1 means "not fetched yet".
		self.credit_days_for = {}
		self.credit_days_global = -1
		self.is_approving_authority = -1

	def validate(self):
		"""Run all document-level validations before save."""
		if not self.is_opening:
			self.is_opening = 'No'
		self.clearance_date = None

		super(JournalVoucher, self).validate_date_with_fiscal_year()
		self.validate_cheque_info()
		self.validate_entries_for_advance()
		self.validate_debit_and_credit()
		self.validate_against_jv()
		self.validate_against_sales_invoice()
		self.validate_against_purchase_invoice()
		self.set_against_account()
		self.create_remarks()
		self.set_aging_date()
		self.set_print_format_fields()

	def on_submit(self):
		"""Enforce credit checks (for the relevant voucher types) and post GL entries."""
		if self.voucher_type in ['Bank Voucher', 'Contra Voucher', 'Journal Entry']:
			self.check_credit_days()
		self.make_gl_entries()
		self.check_credit_limit()

	def on_cancel(self):
		"""Unlink references to this voucher and reverse its GL entries."""
		from erpnext.accounts.utils import remove_against_link_from_jv
		remove_against_link_from_jv(self.doctype, self.name, "against_jv")
		self.make_gl_entries(1)

	def validate_cheque_info(self):
		"""Bank Vouchers must carry both a reference no and a reference date."""
		if self.voucher_type in ['Bank Voucher']:
			if not self.cheque_no or not self.cheque_date:
				msgprint(_("Reference No & Reference Date is required for {0}").format(self.voucher_type),
					raise_exception=1)

		if self.cheque_date and not self.cheque_no:
			msgprint(_("Reference No is mandatory if you entered Reference Date"), raise_exception=1)

	def validate_entries_for_advance(self):
		"""Warn when a credit to a Customer / debit to a Supplier account is
		neither marked as an advance nor matched against any document."""
		for d in self.get('entries'):
			if not d.is_advance and not d.against_voucher and \
					not d.against_invoice and not d.against_jv:
				master_type = frappe.db.get_value("Account", d.account, "master_type")
				if (master_type == 'Customer' and flt(d.credit) > 0) or \
						(master_type == 'Supplier' and flt(d.debit) > 0):
					msgprint(_("Please check 'Is Advance' against Account {0} if this is an advance entry.").format(d.account))

	def validate_against_jv(self):
		"""Rows linked to another JV must reference an unmatched opposite-side entry there."""
		for d in self.get('entries'):
			if d.against_jv:
				if d.against_jv == self.name:
					frappe.throw(_("You can not enter current voucher in 'Against Journal Voucher' column"))

				against_entries = frappe.db.sql("""select * from `tabJournal Voucher Detail`
					where account = %s and docstatus = 1 and parent = %s
					and ifnull(against_jv, '') = ''""", (d.account, d.against_jv), as_dict=True)

				if not against_entries:
					frappe.throw(_("Journal Voucher {0} does not have account {1} or already matched")
						.format(d.against_jv, d.account))
				else:
					# The linked JV needs at least one unmatched entry on the
					# side opposite to this row.
					dr_or_cr = "debit" if d.credit > 0 else "credit"
					valid = False
					for jvd in against_entries:
						if flt(jvd[dr_or_cr]) > 0:
							valid = True
					if not valid:
						frappe.throw(_("Against Journal Voucher {0} does not have any unmatched {1} entry")
							.format(d.against_jv, dr_or_cr))

	def validate_against_sales_invoice(self):
		"""Rows linked to a Sales Invoice must be credits to its debit_to account."""
		for d in self.get("entries"):
			if d.against_invoice:
				if d.debit > 0:
					frappe.throw(_("Row {0}: Debit entry can not be linked with a Sales Invoice")
						.format(d.idx))
				if frappe.db.get_value("Sales Invoice", d.against_invoice, "debit_to") != d.account:
					frappe.throw(_("Row {0}: Account does not match with \
						Sales Invoice Debit To account").format(d.idx, d.account))

	def validate_against_purchase_invoice(self):
		"""Rows linked to a Purchase Invoice must be debits to its credit_to account."""
		for d in self.get("entries"):
			if d.against_voucher:
				if flt(d.credit) > 0:
					frappe.throw(_("Row {0}: Credit entry can not be linked with a Purchase Invoice")
						.format(d.idx))
				if frappe.db.get_value("Purchase Invoice", d.against_voucher, "credit_to") != d.account:
					frappe.throw(_("Row {0}: Account does not match with \
						Purchase Invoice Credit To account").format(d.idx, d.account))

	def set_against_account(self):
		"""Fill each row's against_account with the accounts on the opposite side."""
		accounts_debited, accounts_credited = [], []
		for d in self.get("entries"):
			# BUG FIX: was `flt(d.debit > 0)` / `flt(d.credit > 0)` — flt of a
			# boolean. Convert the amount first, then compare.
			if flt(d.debit) > 0: accounts_debited.append(d.account)
			if flt(d.credit) > 0: accounts_credited.append(d.account)

		for d in self.get("entries"):
			if flt(d.debit) > 0: d.against_account = ", ".join(list(set(accounts_credited)))
			if flt(d.credit) > 0: d.against_account = ", ".join(list(set(accounts_debited)))

	def validate_debit_and_credit(self):
		"""Total debits must equal total credits; a row cannot carry both."""
		self.total_debit, self.total_credit, self.difference = 0, 0, 0
		for d in self.get("entries"):
			if d.debit and d.credit:
				frappe.throw(_("You cannot credit and debit same account at the same time"))

			self.total_debit = flt(self.total_debit) + flt(d.debit, self.precision("debit", "entries"))
			self.total_credit = flt(self.total_credit) + flt(d.credit, self.precision("credit", "entries"))

		self.difference = flt(self.total_debit, self.precision("total_debit")) - \
			flt(self.total_credit, self.precision("total_credit"))

		if self.difference:
			frappe.throw(_("Total Debit must be equal to Total Credit. The difference is {0}")
				.format(self.difference))

	def create_remarks(self):
		"""Assemble the remark text from reference info and linked documents."""
		r = []
		if self.cheque_no:
			if self.cheque_date:
				r.append(_('Reference #{0} dated {1}').format(self.cheque_no, formatdate(self.cheque_date)))
			else:
				msgprint(_("Please enter Reference date"), raise_exception=1)

		for d in self.get('entries'):
			if d.against_invoice and d.credit:
				currency = frappe.db.get_value("Sales Invoice", d.against_invoice, "currency")
				# BUG FIX: the last placeholder was `{1}` — it repeated the
				# amount instead of showing the invoice name (`{2}`).
				r.append(_("{0} {1} against Invoice {2}").format(currency, fmt_money(flt(d.credit)), d.against_invoice))

			if d.against_voucher and d.debit:
				bill_no = frappe.db.sql("""select bill_no, bill_date, currency
					from `tabPurchase Invoice` where name=%s""", d.against_voucher)
				if bill_no and bill_no[0][0] and bill_no[0][0].lower().strip() \
						not in ['na', 'not applicable', 'none']:
					r.append(_('{0} {1} against Bill {2} dated {3}').format(bill_no[0][2],
						fmt_money(flt(d.debit)), bill_no[0][0],
						bill_no[0][1] and formatdate(bill_no[0][1].strftime('%Y-%m-%d'))))

		if self.user_remark:
			r.append(_("Note: {0}").format(self.user_remark))

		if r:
			self.remark = ("\n").join(r)
		else:
			frappe.msgprint(_("User Remarks is mandatory"), raise_exception=1)

	def set_aging_date(self):
		"""Aging date defaults to posting date; it is mandatory for opening
		entries that touch a Customer/Supplier account."""
		if self.is_opening != 'Yes':
			self.aging_date = self.posting_date
		else:
			# check account type whether supplier or customer
			exists = False
			for d in self.get('entries'):
				account_type = frappe.db.get_value("Account", d.account, "account_type")
				if account_type in ["Supplier", "Customer"]:
					exists = True
					break

			# If customer/supplier account, aging date is mandatory
			if exists and not self.aging_date:
				msgprint(_("Aging Date is mandatory for opening entry"), raise_exception=1)
			else:
				self.aging_date = self.posting_date

	def set_print_format_fields(self):
		"""Derive pay_to_recd_from and the amount-in-words for print formats."""
		for d in self.get('entries'):
			account_type, master_type = frappe.db.get_value("Account", d.account,
				["account_type", "master_type"])

			if master_type in ['Supplier', 'Customer']:
				if not self.pay_to_recd_from:
					# Account names are "<party name> - <abbr>"; strip the abbr.
					self.pay_to_recd_from = frappe.db.get_value(master_type,
						' - '.join(d.account.split(' - ')[:-1]),
						master_type == 'Customer' and 'customer_name' or 'supplier_name')

			if account_type in ['Bank', 'Cash']:
				company_currency = get_company_currency(self.company)
				amt = flt(d.debit) and d.debit or d.credit
				self.total_amount = company_currency + ' ' + cstr(amt)
				from frappe.utils import money_in_words
				self.total_amount_in_words = money_in_words(amt, company_currency)

	def check_credit_days(self):
		"""Block post-dated customer entries beyond the allowed credit days."""
		date_diff = 0
		if self.cheque_date:
			date_diff = (getdate(self.cheque_date) - getdate(self.posting_date)).days

		if date_diff <= 0:
			return

		# Get List of Customer Account
		acc_list = filter(lambda d: frappe.db.get_value("Account", d.account,
			"master_type") == 'Customer', self.get('entries'))

		for d in acc_list:
			credit_days = self.get_credit_days_for(d.account)
			# Check credit days
			if credit_days > 0 and not self.get_authorized_user() and cint(date_diff) > credit_days:
				msgprint(_("Maximum allowed credit is {0} days after posting date").format(credit_days),
					raise_exception=1)

	def get_credit_days_for(self, ac):
		"""Return credit days for an account, falling back to the company default."""
		# `dict.has_key(...)` replaced with the `in` operator (works on
		# Python 2 and 3).
		if ac not in self.credit_days_for:
			self.credit_days_for[ac] = cint(frappe.db.get_value("Account", ac, "credit_days"))

		if not self.credit_days_for[ac]:
			if self.credit_days_global == -1:
				self.credit_days_global = cint(frappe.db.get_value("Company",
					self.company, "credit_days"))

			return self.credit_days_global
		else:
			return self.credit_days_for[ac]

	def get_authorized_user(self):
		"""Return 1 if the logged-in user holds the credit controller role, else 0."""
		if self.is_approving_authority == -1:
			self.is_approving_authority = 0

			# Fetch credit controller role
			approving_authority = frappe.db.get_value("Accounts Settings", None,
				"credit_controller")

			# Check logged-in user is authorized
			if approving_authority in frappe.user.get_roles():
				self.is_approving_authority = 1

		return self.is_approving_authority

	def make_gl_entries(self, cancel=0, adv_adj=0):
		"""Translate voucher rows into GL entries and post (or cancel) them."""
		from erpnext.accounts.general_ledger import make_gl_entries
		gl_map = []
		for d in self.get("entries"):
			if d.debit or d.credit:
				gl_map.append(
					self.get_gl_dict({
						"account": d.account,
						"against": d.against_account,
						"debit": flt(d.debit, self.precision("debit", "entries")),
						"credit": flt(d.credit, self.precision("credit", "entries")),
						"against_voucher_type": ((d.against_voucher and "Purchase Invoice")
							or (d.against_invoice and "Sales Invoice")
							or (d.against_jv and "Journal Voucher")),
						"against_voucher": d.against_voucher or d.against_invoice or d.against_jv,
						"remarks": self.remark,
						"cost_center": d.cost_center
					})
				)
		if gl_map:
			make_gl_entries(gl_map, cancel=cancel, adv_adj=adv_adj)

	def check_credit_limit(self):
		"""Enforce the credit limit for every customer account touched."""
		for d in self.get("entries"):
			master_type, master_name = frappe.db.get_value("Account", d.account,
				["master_type", "master_name"])
			if master_type == "Customer" and master_name:
				super(JournalVoucher, self).check_credit_limit(d.account)

	def get_balance(self):
		"""Balance the voucher by pushing the difference into an empty or new row."""
		if not self.get('entries'):
			msgprint(_("'Entries' cannot be empty"), raise_exception=True)
		else:
			flag, self.total_debit, self.total_credit = 0, 0, 0
			diff = flt(self.difference, self.precision("difference"))

			# If any row without amount, set the diff on that row
			for d in self.get('entries'):
				if not d.credit and not d.debit and diff != 0:
					if diff > 0:
						d.credit = diff
					elif diff < 0:
						d.debit = diff
					flag = 1

			# Set the diff in a new row
			if flag == 0 and diff != 0:
				jd = self.append('entries', {})
				if diff > 0:
					jd.credit = abs(diff)
				elif diff < 0:
					jd.debit = abs(diff)

			self.validate_debit_and_credit()

	def get_outstanding_invoices(self):
		"""Load outstanding invoices as entry rows plus one balancing row."""
		self.set('entries', [])
		total = 0
		for d in self.get_values():
			total += flt(d.outstanding_amount, self.precision("credit", "entries"))
			jd1 = self.append('entries', {})
			jd1.account = d.account

			if self.write_off_based_on == 'Accounts Receivable':
				jd1.credit = flt(d.outstanding_amount, self.precision("credit", "entries"))
				jd1.against_invoice = cstr(d.name)
			elif self.write_off_based_on == 'Accounts Payable':
				jd1.debit = flt(d.outstanding_amount, self.precision("debit", "entries"))
				jd1.against_voucher = cstr(d.name)

		jd2 = self.append('entries', {})
		if self.write_off_based_on == 'Accounts Receivable':
			jd2.debit = total
		elif self.write_off_based_on == 'Accounts Payable':
			jd2.credit = total

		self.validate_debit_and_credit()

	def get_values(self):
		"""Fetch submitted invoices with outstanding amounts eligible for write-off."""
		cond = " and outstanding_amount <= {0}".format(self.write_off_amount) \
			if flt(self.write_off_amount) > 0 else ""

		if self.write_off_based_on == 'Accounts Receivable':
			return frappe.db.sql("""select name, debit_to as account, outstanding_amount
				from `tabSales Invoice` where docstatus = 1 and company = %s
				and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True)
		elif self.write_off_based_on == 'Accounts Payable':
			return frappe.db.sql("""select name, credit_to as account, outstanding_amount
				from `tabPurchase Invoice` where docstatus = 1 and company = %s
				and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True)
@frappe.whitelist()
def get_default_bank_cash_account(company, voucher_type):
	"""Return the company's default bank (for Bank Vouchers) or cash account,
	together with its current balance, as {"account": ..., "balance": ...}.
	Returns None implicitly when the company has no default set."""
	from erpnext.accounts.utils import get_balance_on
	# Conditional expression instead of the error-prone `cond and a or b` idiom.
	fieldname = "default_bank_account" if voucher_type == "Bank Voucher" \
		else "default_cash_account"
	account = frappe.db.get_value("Company", company, fieldname)
	if account:
		return {
			"account": account,
			"balance": get_balance_on(account)
		}
@frappe.whitelist()
def get_payment_entry_from_sales_invoice(sales_invoice):
	"""Build a draft Bank Voucher that receives a Sales Invoice's outstanding amount."""
	from erpnext.accounts.utils import get_balance_on

	invoice = frappe.get_doc("Sales Invoice", sales_invoice)
	voucher = get_payment_entry(invoice)
	voucher.remark = 'Payment received against Sales Invoice {0}. {1}'.format(invoice.name, invoice.remarks)

	customer_row, bank_row = voucher.get("entries")

	# Credit the customer account to clear the receivable.
	customer_row.account = invoice.debit_to
	customer_row.balance = get_balance_on(invoice.debit_to)
	customer_row.credit = invoice.outstanding_amount
	customer_row.against_invoice = invoice.name

	# Debit the bank account.
	bank_row.debit = invoice.outstanding_amount

	return voucher.as_dict()
@frappe.whitelist()
def get_payment_entry_from_purchase_invoice(purchase_invoice):
	"""Build a draft Bank Voucher that pays off a Purchase Invoice's outstanding amount."""
	from erpnext.accounts.utils import get_balance_on

	invoice = frappe.get_doc("Purchase Invoice", purchase_invoice)
	voucher = get_payment_entry(invoice)
	voucher.remark = 'Payment against Purchase Invoice {0}. {1}'.format(invoice.name, invoice.remarks)

	supplier_row, bank_row = voucher.get("entries")

	# Debit the supplier account to clear the payable.
	supplier_row.account = invoice.credit_to
	supplier_row.balance = get_balance_on(invoice.credit_to)
	supplier_row.debit = invoice.outstanding_amount
	supplier_row.against_voucher = invoice.name

	# Credit the bank account.
	bank_row.credit = invoice.outstanding_amount

	return voucher.as_dict()
def get_payment_entry(doc):
	"""Return a new, unsaved Bank Voucher with two entry rows: row 0 for the
	party (left for the caller to fill) and row 1 pre-filled with the
	company's default bank account and its balance."""
	default_bank = get_default_bank_cash_account(doc.company, "Bank Voucher")

	voucher = frappe.new_doc('Journal Voucher')
	voucher.voucher_type = 'Bank Voucher'
	voucher.company = doc.company
	voucher.fiscal_year = doc.fiscal_year

	voucher.append("entries")             # row 0: party side
	bank_row = voucher.append("entries")  # row 1: bank side
	if default_bank:
		bank_row.account = default_bank["account"]
		bank_row.balance = default_bank["balance"]

	return voucher
@frappe.whitelist()
def get_opening_accounts(company):
	"""Return every Balance Sheet ledger account of the company together with
	its current balance, for use in an opening entry."""
	from erpnext.accounts.utils import get_balance_on

	account_names = frappe.db.sql_list("""select name from tabAccount
		where group_or_ledger='Ledger' and report_type='Balance Sheet' and company=%s""", company)

	return [{"account": account_name, "balance": get_balance_on(account_name)}
		for account_name in account_names]
def get_against_purchase_invoice(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field search query: submitted Purchase Invoices with an outstanding
	amount, payable to the account in filters["account"].

	Two-step formatting: %-interpolation first splices the framework-supplied
	column name `searchfield` into the SQL (and re-inserts literal %s
	placeholders), then the tuple is bound as parameters.
	CONSISTENCY FIX: the identifier is now backtick-quoted, matching
	get_against_sales_invoice.
	"""
	return frappe.db.sql("""select name, credit_to, outstanding_amount, bill_no, bill_date
		from `tabPurchase Invoice` where credit_to = %s and docstatus = 1
		and outstanding_amount > 0 and `%s` like %s order by name desc limit %s, %s""" %
		("%s", searchfield, "%s", "%s", "%s"),
		(filters["account"], "%%%s%%" % txt, start, page_len))
def get_against_sales_invoice(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field search query: submitted Sales Invoices with an outstanding
	amount, billed to the account in filters["account"].

	Two-step formatting: %-interpolation first splices the framework-supplied
	column name `searchfield` into the SQL (and re-inserts literal %s
	placeholders), then the tuple is bound as parameters. `searchfield` must
	come from a trusted source — it is interpolated as an identifier.
	"""
	return frappe.db.sql("""select name, debit_to, outstanding_amount
		from `tabSales Invoice` where debit_to = %s and docstatus = 1
		and outstanding_amount > 0 and `%s` like %s order by name desc limit %s, %s""" %
		("%s", searchfield, "%s", "%s", "%s"),
		(filters["account"], "%%%s%%" % txt, start, page_len))
def get_against_jv(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field search query: submitted Journal Vouchers containing a row
	for the account in filters["account"].

	Same two-step %-formatting as the invoice queries: the framework-supplied
	column name `searchfield` is interpolated as an identifier (must be
	trusted); values are bound from the tuple.
	"""
	return frappe.db.sql("""select jv.name, jv.posting_date, jv.user_remark
		from `tabJournal Voucher` jv, `tabJournal Voucher Detail` jv_detail
		where jv_detail.parent = jv.name and jv_detail.account = %s and jv.docstatus = 1
		and jv.%s like %s order by jv.name desc limit %s, %s""" %
		("%s", searchfield, "%s", "%s", "%s"),
		(filters["account"], "%%%s%%" % txt, start, page_len))
@frappe.whitelist()
def get_outstanding(args):
	"""Return the outstanding amount for a document as {"credit": x} or
	{"debit": x}, for pre-filling a matching Journal Voucher row.

	`args` arrives as a string from the client and is parsed below.
	"""
	# FIXME(security): eval() of a client-supplied string is a code-injection
	# risk on this whitelisted endpoint — parse it as JSON instead.
	args = eval(args)
	if args.get("doctype") == "Journal Voucher" and args.get("account"):
		# Net unmatched amount (debits minus credits) of rows in the given JV
		# for this account that are not already linked to any document.
		against_jv_amount = frappe.db.sql("""
			select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
			from `tabJournal Voucher Detail` where parent=%s and account=%s
			and ifnull(against_invoice, '')='' and ifnull(against_voucher, '')=''
			and ifnull(against_jv, '')=''""", (args['docname'], args['account']))

		against_jv_amount = flt(against_jv_amount[0][0]) if against_jv_amount else 0
		# Positive net = debit balance to be matched by a credit, and vice versa.
		if against_jv_amount > 0:
			return {"credit": against_jv_amount}
		else:
			return {"debit": -1* against_jv_amount}

	elif args.get("doctype") == "Sales Invoice":
		return {
			"credit": flt(frappe.db.get_value("Sales Invoice", args["docname"],
				"outstanding_amount"))
		}

	elif args.get("doctype") == "Purchase Invoice":
		return {
			"debit": flt(frappe.db.get_value("Purchase Invoice", args["docname"],
				"outstanding_amount"))
		}
Minor fix in Journal Voucher
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, cstr, flt, fmt_money, formatdate, getdate
from frappe import msgprint, _
from erpnext.setup.utils import get_company_currency
from erpnext.controllers.accounts_controller import AccountsController
class JournalVoucher(AccountsController):
def __init__(self, arg1, arg2=None):
super(JournalVoucher, self).__init__(arg1, arg2)
self.master_type = {}
self.credit_days_for = {}
self.credit_days_global = -1
self.is_approving_authority = -1
def validate(self):
if not self.is_opening:
self.is_opening='No'
self.clearance_date = None
super(JournalVoucher, self).validate_date_with_fiscal_year()
self.validate_cheque_info()
self.validate_entries_for_advance()
self.validate_debit_and_credit()
self.validate_against_jv()
self.validate_against_sales_invoice()
self.validate_against_purchase_invoice()
self.set_against_account()
self.create_remarks()
self.set_aging_date()
self.set_print_format_fields()
def on_submit(self):
if self.voucher_type in ['Bank Voucher', 'Contra Voucher', 'Journal Entry']:
self.check_credit_days()
self.make_gl_entries()
self.check_credit_limit()
def on_cancel(self):
from erpnext.accounts.utils import remove_against_link_from_jv
remove_against_link_from_jv(self.doctype, self.name, "against_jv")
self.make_gl_entries(1)
def validate_cheque_info(self):
if self.voucher_type in ['Bank Voucher']:
if not self.cheque_no or not self.cheque_date:
msgprint(_("Reference No & Reference Date is required for {0}").format(self.voucher_type),
raise_exception=1)
if self.cheque_date and not self.cheque_no:
msgprint(_("Reference No is mandatory if you entered Reference Date"), raise_exception=1)
def validate_entries_for_advance(self):
for d in self.get('entries'):
if not d.is_advance and not d.against_voucher and \
not d.against_invoice and not d.against_jv:
master_type = frappe.db.get_value("Account", d.account, "master_type")
if (master_type == 'Customer' and flt(d.credit) > 0) or \
(master_type == 'Supplier' and flt(d.debit) > 0):
msgprint(_("Please check 'Is Advance' against Account {0} if this is an advance entry.").format(d.account))
def validate_against_jv(self):
for d in self.get('entries'):
if d.against_jv:
if d.against_jv == self.name:
frappe.throw(_("You can not enter current voucher in 'Against Journal Voucher' column"))
against_entries = frappe.db.sql("""select * from `tabJournal Voucher Detail`
where account = %s and docstatus = 1 and parent = %s
and ifnull(against_jv, '') = ''""", (d.account, d.against_jv), as_dict=True)
if not against_entries:
frappe.throw(_("Journal Voucher {0} does not have account {1} or already matched")
.format(d.against_jv, d.account))
else:
dr_or_cr = "debit" if d.credit > 0 else "credit"
valid = False
for jvd in against_entries:
if flt(jvd[dr_or_cr]) > 0:
valid = True
if not valid:
frappe.throw(_("Against Journal Voucher {0} does not have any unmatched {1} entry")
.format(d.against_jv, dr_or_cr))
	def validate_against_sales_invoice(self):
		"""Rows matched against a Sales Invoice must be credit entries posted
		to the invoice's `debit_to` account."""
		for d in self.get("entries"):
			if d.against_invoice:
				if d.debit > 0:
					frappe.throw(_("Row {0}: Debit entry can not be linked with a Sales Invoice")
						.format(d.idx))
				if frappe.db.get_value("Sales Invoice", d.against_invoice, "debit_to") != d.account:
					frappe.throw(_("Row {0}: Account does not match with \
						Sales Invoice Debit To account").format(d.idx, d.account))
	def validate_against_purchase_invoice(self):
		"""Rows matched against a Purchase Invoice must be debit entries posted
		to the invoice's `credit_to` account."""
		for d in self.get("entries"):
			if d.against_voucher:
				if flt(d.credit) > 0:
					frappe.throw(_("Row {0}: Credit entry can not be linked with a Purchase Invoice")
						.format(d.idx))
				if frappe.db.get_value("Purchase Invoice", d.against_voucher, "credit_to") != d.account:
					frappe.throw(_("Row {0}: Account does not match with \
						Purchase Invoice Credit To account").format(d.idx, d.account))
def set_against_account(self):
accounts_debited, accounts_credited = [], []
for d in self.get("entries"):
if flt(d.debit > 0): accounts_debited.append(d.account)
if flt(d.credit) > 0: accounts_credited.append(d.account)
for d in self.get("entries"):
if flt(d.debit > 0): d.against_account = ", ".join(list(set(accounts_credited)))
if flt(d.credit > 0): d.against_account = ", ".join(list(set(accounts_debited)))
	def validate_debit_and_credit(self):
		"""Recompute `total_debit`, `total_credit` and their `difference`.
		A single row may not carry both sides, and the voucher must balance
		exactly (difference of zero)."""
		self.total_debit, self.total_credit, self.difference = 0, 0, 0
		for d in self.get("entries"):
			if d.debit and d.credit:
				frappe.throw(_("You cannot credit and debit same account at the same time"))
			self.total_debit = flt(self.total_debit) + flt(d.debit, self.precision("debit", "entries"))
			self.total_credit = flt(self.total_credit) + flt(d.credit, self.precision("credit", "entries"))
		self.difference = flt(self.total_debit, self.precision("total_debit")) - \
			flt(self.total_credit, self.precision("total_credit"))
		if self.difference:
			frappe.throw(_("Total Debit must be equal to Total Credit. The difference is {0}")
				.format(self.difference))
def create_remarks(self):
r = []
if self.cheque_no:
if self.cheque_date:
r.append(_('Reference #{0} dated {1}').format(self.cheque_no, formatdate(self.cheque_date)))
else:
msgprint(_("Please enter Reference date"), raise_exception=1)
for d in self.get('entries'):
if d.against_invoice and d.credit:
currency = frappe.db.get_value("Sales Invoice", d.against_invoice, "currency")
r.append(_("{0} {1} against Invoice {1}").format(currency, fmt_money(flt(d.credit)), d.against_invoice))
if d.against_voucher and d.debit:
bill_no = frappe.db.sql("""select bill_no, bill_date, currency
from `tabPurchase Invoice` where name=%s""", d.against_voucher)
if bill_no and bill_no[0][0] and bill_no[0][0].lower().strip() \
not in ['na', 'not applicable', 'none']:
r.append(_('{0} {1} against Bill {2} dated {3}').format(bill_no[0][2],
fmt_money(flt(d.debit)), bill_no[0][0],
bill_no[0][1] and formatdate(bill_no[0][1].strftime('%Y-%m-%d'))))
if self.user_remark:
r.append(_("Note: {0}").format(self.user_remark))
if r:
self.remark = ("\n").join(r)
else:
frappe.msgprint(_("User Remarks is mandatory"), raise_exception=1)
def set_aging_date(self):
if self.is_opening != 'Yes':
self.aging_date = self.posting_date
else:
# check account type whether supplier or customer
exists = False
for d in self.get('entries'):
account_type = frappe.db.get_value("Account", d.account, "account_type")
if account_type in ["Supplier", "Customer"]:
exists = True
break
# If customer/supplier account, aging date is mandatory
if exists and not self.aging_date:
msgprint(_("Aging Date is mandatory for opening entry"), raise_exception=1)
else:
self.aging_date = self.posting_date
	def set_print_format_fields(self):
		"""Derive print-only fields: the party name (`pay_to_recd_from`) from
		the first customer/supplier row, and the total amount (plus its
		in-words form) from any bank/cash row."""
		for d in self.get('entries'):
			result = frappe.db.get_value("Account", d.account,
				["account_type", "master_type"])
			if not result:
				continue
			account_type, master_type = result
			if master_type in ['Supplier', 'Customer']:
				if not self.pay_to_recd_from:
					# the master record name is the account name minus the
					# trailing " - <suffix>" segment
					self.pay_to_recd_from = frappe.db.get_value(master_type,
						' - '.join(d.account.split(' - ')[:-1]),
						master_type == 'Customer' and 'customer_name' or 'supplier_name')
			if account_type in ['Bank', 'Cash']:
				company_currency = get_company_currency(self.company)
				# whichever side of the row carries the amount
				amt = flt(d.debit) and d.debit or d.credit
				self.total_amount = company_currency + ' ' + cstr(amt)
				from frappe.utils import money_in_words
				self.total_amount_in_words = money_in_words(amt, company_currency)
	def check_credit_days(self):
		"""For post-dated cheques, ensure the gap between posting date and
		cheque date does not exceed the allowed credit days for any customer
		account. Users holding the credit controller role are exempt."""
		date_diff = 0
		if self.cheque_date:
			date_diff = (getdate(self.cheque_date)-getdate(self.posting_date)).days
		if date_diff <= 0: return
		# Get List of Customer Account
		acc_list = filter(lambda d: frappe.db.get_value("Account", d.account,
			"master_type")=='Customer', self.get('entries'))
		for d in acc_list:
			credit_days = self.get_credit_days_for(d.account)
			# Check credit days
			if credit_days > 0 and not self.get_authorized_user() and cint(date_diff) > credit_days:
				msgprint(_("Maximum allowed credit is {0} days after posting date").format(credit_days),
					raise_exception=1)
def get_credit_days_for(self, ac):
if not self.credit_days_for.has_key(ac):
self.credit_days_for[ac] = cint(frappe.db.get_value("Account", ac, "credit_days"))
if not self.credit_days_for[ac]:
if self.credit_days_global==-1:
self.credit_days_global = cint(frappe.db.get_value("Company",
self.company, "credit_days"))
return self.credit_days_global
else:
return self.credit_days_for[ac]
def get_authorized_user(self):
if self.is_approving_authority==-1:
self.is_approving_authority = 0
# Fetch credit controller role
approving_authority = frappe.db.get_value("Accounts Settings", None,
"credit_controller")
# Check logged-in user is authorized
if approving_authority in frappe.user.get_roles():
self.is_approving_authority = 1
return self.is_approving_authority
	def make_gl_entries(self, cancel=0, adv_adj=0):
		"""Post one GL entry per voucher row with a non-zero amount.

		cancel=1 reverses previously posted entries; adv_adj is forwarded for
		advance-adjustment postings.
		"""
		from erpnext.accounts.general_ledger import make_gl_entries
		gl_map = []
		for d in self.get("entries"):
			if d.debit or d.credit:
				gl_map.append(
					self.get_gl_dict({
						"account": d.account,
						"against": d.against_account,
						"debit": flt(d.debit, self.precision("debit", "entries")),
						"credit": flt(d.credit, self.precision("credit", "entries")),
						# a row can be matched against exactly one of a purchase
						# invoice, a sales invoice, or another JV
						"against_voucher_type": ((d.against_voucher and "Purchase Invoice")
							or (d.against_invoice and "Sales Invoice")
							or (d.against_jv and "Journal Voucher")),
						"against_voucher": d.against_voucher or d.against_invoice or d.against_jv,
						"remarks": self.remark,
						"cost_center": d.cost_center
					})
				)
		if gl_map:
			make_gl_entries(gl_map, cancel=cancel, adv_adj=adv_adj)
	def check_credit_limit(self):
		"""Run the base-class credit-limit check for every row posted to a
		customer account."""
		for d in self.get("entries"):
			master_type, master_name = frappe.db.get_value("Account", d.account,
				["master_type", "master_name"])
			if master_type == "Customer" and master_name:
				super(JournalVoucher, self).check_credit_limit(d.account)
def get_balance(self):
if not self.get('entries'):
msgprint(_("'Entries' cannot be empty"), raise_exception=True)
else:
flag, self.total_debit, self.total_credit = 0, 0, 0
diff = flt(self.difference, self.precision("difference"))
# If any row without amount, set the diff on that row
for d in self.get('entries'):
if not d.credit and not d.debit and diff != 0:
if diff>0:
d.credit = diff
elif diff<0:
d.debit = diff
flag = 1
# Set the diff in a new row
if flag == 0 and diff != 0:
jd = self.append('entries', {})
if diff>0:
jd.credit = abs(diff)
elif diff<0:
jd.debit = abs(diff)
self.validate_debit_and_credit()
	def get_outstanding_invoices(self):
		"""Rebuild `entries` for a write-off voucher: one row per outstanding
		invoice (credit for receivables, debit for payables) plus a single
		balancing row for the grand total on the opposite side."""
		self.set('entries', [])
		total = 0
		for d in self.get_values():
			total += flt(d.outstanding_amount, self.precision("credit", "entries"))
			jd1 = self.append('entries', {})
			jd1.account = d.account
			if self.write_off_based_on == 'Accounts Receivable':
				jd1.credit = flt(d.outstanding_amount, self.precision("credit", "entries"))
				jd1.against_invoice = cstr(d.name)
			elif self.write_off_based_on == 'Accounts Payable':
				jd1.debit = flt(d.outstanding_amount, self.precision("debit", "entries"))
				jd1.against_voucher = cstr(d.name)
		# balancing row for the accumulated total
		jd2 = self.append('entries', {})
		if self.write_off_based_on == 'Accounts Receivable':
			jd2.debit = total
		elif self.write_off_based_on == 'Accounts Payable':
			jd2.credit = total
		self.validate_debit_and_credit()
def get_values(self):
cond = " and outstanding_amount <= {0}".format(self.write_off_amount) \
if flt(self.write_off_amount) > 0 else ""
if self.write_off_based_on == 'Accounts Receivable':
return frappe.db.sql("""select name, debit_to as account, outstanding_amount
from `tabSales Invoice` where docstatus = 1 and company = %s
and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True)
elif self.write_off_based_on == 'Accounts Payable':
return frappe.db.sql("""select name, credit_to as account, outstanding_amount
from `tabPurchase Invoice` where docstatus = 1 and company = %s
and outstanding_amount > 0 %s""" % ('%s', cond), self.company, as_dict=True)
@frappe.whitelist()
def get_default_bank_cash_account(company, voucher_type):
	"""Return the company's default bank (for 'Bank Voucher') or cash account
	together with its current balance; None when no default is configured."""
	from erpnext.accounts.utils import get_balance_on
	fieldname = "default_bank_account" if voucher_type == "Bank Voucher" \
		else "default_cash_account"
	account = frappe.db.get_value("Company", company, fieldname)
	if not account:
		return
	return {
		"account": account,
		"balance": get_balance_on(account)
	}
@frappe.whitelist()
def get_payment_entry_from_sales_invoice(sales_invoice):
	"""Build a bank-payment Journal Voucher (as a dict) for the outstanding
	amount of the given Sales Invoice: credit the customer, debit the bank."""
	from erpnext.accounts.utils import get_balance_on
	si = frappe.get_doc("Sales Invoice", sales_invoice)
	jv = get_payment_entry(si)
	jv.remark = 'Payment received against Sales Invoice {0}. {1}'.format(si.name, si.remarks)
	# credit customer
	jv.get("entries")[0].account = si.debit_to
	jv.get("entries")[0].balance = get_balance_on(si.debit_to)
	jv.get("entries")[0].credit = si.outstanding_amount
	jv.get("entries")[0].against_invoice = si.name
	# debit bank
	jv.get("entries")[1].debit = si.outstanding_amount
	return jv.as_dict()
@frappe.whitelist()
def get_payment_entry_from_purchase_invoice(purchase_invoice):
	"""Build a bank-payment Journal Voucher (as a dict) for the outstanding
	amount of the given Purchase Invoice: debit the supplier, credit the bank."""
	from erpnext.accounts.utils import get_balance_on
	pi = frappe.get_doc("Purchase Invoice", purchase_invoice)
	jv = get_payment_entry(pi)
	jv.remark = 'Payment against Purchase Invoice {0}. {1}'.format(pi.name, pi.remarks)
	# credit supplier
	jv.get("entries")[0].account = pi.credit_to
	jv.get("entries")[0].balance = get_balance_on(pi.credit_to)
	jv.get("entries")[0].debit = pi.outstanding_amount
	jv.get("entries")[0].against_voucher = pi.name
	# credit bank
	jv.get("entries")[1].credit = pi.outstanding_amount
	return jv.as_dict()
def get_payment_entry(doc):
	"""Return an unsaved two-row Bank Voucher JV skeleton for `doc`'s company;
	the second row is pre-filled with the company's default bank account."""
	jv = frappe.new_doc('Journal Voucher')
	jv.voucher_type = 'Bank Voucher'
	jv.company = doc.company
	jv.fiscal_year = doc.fiscal_year
	jv.append("entries")
	bank_row = jv.append("entries")
	bank_account = get_default_bank_cash_account(doc.company, "Bank Voucher")
	if bank_account:
		bank_row.account = bank_account["account"]
		bank_row.balance = bank_account["balance"]
	return jv
@frappe.whitelist()
def get_opening_accounts(company):
	"""get all balance sheet accounts for opening entry"""
	from erpnext.accounts.utils import get_balance_on
	# ledger (leaf) accounts only; P&L accounts never carry opening balances
	accounts = frappe.db.sql_list("""select name from tabAccount
		where group_or_ledger='Ledger' and report_type='Balance Sheet' and company=%s""", company)
	return [{"account": a, "balance": get_balance_on(a)} for a in accounts]
def get_against_purchase_invoice(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field query: outstanding submitted Purchase Invoices for the
	account in `filters`, with `txt` matched against `searchfield`.

	Fix: `searchfield` (a framework-supplied column name) is now
	backtick-quoted, matching the sibling `get_against_sales_invoice` query.
	"""
	return frappe.db.sql("""select name, credit_to, outstanding_amount, bill_no, bill_date
		from `tabPurchase Invoice` where credit_to = %s and docstatus = 1
		and outstanding_amount > 0 and `%s` like %s order by name desc limit %s, %s""" %
		("%s", searchfield, "%s", "%s", "%s"),
		(filters["account"], "%%%s%%" % txt, start, page_len))
def get_against_sales_invoice(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field query: outstanding submitted Sales Invoices for the account
	in `filters`, with `txt` matched against `searchfield`."""
	return frappe.db.sql("""select name, debit_to, outstanding_amount
		from `tabSales Invoice` where debit_to = %s and docstatus = 1
		and outstanding_amount > 0 and `%s` like %s order by name desc limit %s, %s""" %
		("%s", searchfield, "%s", "%s", "%s"),
		(filters["account"], "%%%s%%" % txt, start, page_len))
def get_against_jv(doctype, txt, searchfield, start, page_len, filters):
	"""Link-field query: submitted Journal Vouchers containing a row for the
	account in `filters`, with `txt` matched against `searchfield`."""
	return frappe.db.sql("""select jv.name, jv.posting_date, jv.user_remark
		from `tabJournal Voucher` jv, `tabJournal Voucher Detail` jv_detail
		where jv_detail.parent = jv.name and jv_detail.account = %s and jv.docstatus = 1
		and jv.%s like %s order by jv.name desc limit %s, %s""" %
		("%s", searchfield, "%s", "%s", "%s"),
		(filters["account"], "%%%s%%" % txt, start, page_len))
@frappe.whitelist()
def get_outstanding(args):
	"""Return the outstanding amount for the voucher described by `args` as a
	one-key dict ({"debit": amt} or {"credit": amt}), for pre-filling a
	journal voucher row.

	Fix: `args` arrives from the client as a JSON string; it is now parsed
	with `json.loads` instead of `eval`, which executed arbitrary Python
	expressions from an HTTP-exposed (whitelisted) endpoint.
	"""
	import json
	if not isinstance(args, dict):
		args = json.loads(args)
	if args.get("doctype") == "Journal Voucher" and args.get("account"):
		# net unmatched amount of the JV's rows for this account
		against_jv_amount = frappe.db.sql("""
			select sum(ifnull(debit, 0)) - sum(ifnull(credit, 0))
			from `tabJournal Voucher Detail` where parent=%s and account=%s
			and ifnull(against_invoice, '')='' and ifnull(against_voucher, '')=''
			and ifnull(against_jv, '')=''""", (args['docname'], args['account']))
		against_jv_amount = flt(against_jv_amount[0][0]) if against_jv_amount else 0
		if against_jv_amount > 0:
			return {"credit": against_jv_amount}
		else:
			return {"debit": -1 * against_jv_amount}
	elif args.get("doctype") == "Sales Invoice":
		return {
			"credit": flt(frappe.db.get_value("Sales Invoice", args["docname"],
				"outstanding_amount"))
		}
	elif args.get("doctype") == "Purchase Invoice":
		return {
			"debit": flt(frappe.db.get_value("Purchase Invoice", args["docname"],
				"outstanding_amount"))
		}
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2006 Matthew Good <trac@matt-good.net>
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Matthew Good <trac@matt-good.net>
import locale
import math
import re
import sys
import time
from datetime import tzinfo, timedelta, datetime, date
from trac.core import TracError
from trac.util.text import to_unicode
from trac.util.translation import _, ngettext
# Date/time utilities
# -- conversion
def to_datetime(t, tzinfo=None):
    """Convert ``t`` into a `datetime` object in the ``tzinfo`` timezone.
    If no ``tzinfo`` is given, the local timezone `localtz` will be used.
    ``t`` is converted using the following rules:
    - If ``t`` is already a `datetime` object,
      - if it is timezone-"naive", it is localized to ``tzinfo``
      - if it is already timezone-aware, ``t`` is mapped to the given
        timezone (`datetime.datetime.astimezone`)
    - If ``t`` is None, the current time will be used.
    - If ``t`` is a number, it is interpreted as a timestamp.
    Any other input will trigger a `TypeError`.
    All returned datetime instances are timezone aware and normalized.
    """
    tz = tzinfo or localtz
    if t is None:
        dt = datetime.now(tz)
    elif isinstance(t, datetime):
        if t.tzinfo:
            dt = t.astimezone(tz)
        else:
            dt = tz.localize(t)
    elif isinstance(t, date):
        dt = tz.localize(datetime(t.year, t.month, t.day))
    elif isinstance(t, (int, long, float)):
        if not (_min_ts <= t <= _max_ts):
            # Handle microsecond timestamps for 0.11 compatibility
            t *= 0.000001
        if t < 0 and isinstance(t, float):
            # Work around negative fractional times bug in Python 2.4
            # http://bugs.python.org/issue1646728
            frac, integer = math.modf(t)
            dt = datetime.fromtimestamp(integer - 1, tz) + \
                 timedelta(seconds=frac + 1)
        else:
            dt = datetime.fromtimestamp(t, tz)
    else:
        # Previously the code fell through to a truthiness test with `dt`
        # unbound, so unsupported input types raised NameError instead of the
        # documented TypeError.
        raise TypeError('expecting datetime, int, long, float, or None; '
                        'got %s' % type(t))
    return tz.normalize(dt)
def to_timestamp(dt):
    """Return the corresponding POSIX timestamp (seconds since the epoch);
    0 for a missing datetime."""
    if not dt:
        return 0
    delta = dt - _epoc
    return delta.days * 86400 + delta.seconds
def to_utimestamp(dt):
    """Return a microsecond POSIX timestamp for the given `datetime`."""
    # falsy dt (None) maps to the epoch, mirroring to_timestamp
    if not dt:
        return 0
    diff = dt - _epoc
    # 86400000000 microseconds per day (long literal: this is Python 2 code)
    return (diff.days * 86400000000L + diff.seconds * 1000000
            + diff.microseconds)
def from_utimestamp(ts):
    """Return the `datetime` for the given microsecond POSIX timestamp
    (None or 0 yields the epoch)."""
    offset = timedelta(microseconds=ts or 0)
    return _epoc + offset
# -- formatting
# (seconds per unit, pluralizing label formatter) pairs, largest unit first;
# consumed by pretty_timedelta to pick the coarsest applicable unit
_units = (
    (3600*24*365, lambda r: ngettext('%(num)d year', '%(num)d years', r)),
    (3600*24*30, lambda r: ngettext('%(num)d month', '%(num)d months', r)),
    (3600*24*7, lambda r: ngettext('%(num)d week', '%(num)d weeks', r)),
    (3600*24, lambda r: ngettext('%(num)d day', '%(num)d days', r)),
    (3600, lambda r: ngettext('%(num)d hour', '%(num)d hours', r)),
    (60, lambda r: ngettext('%(num)d minute', '%(num)d minutes', r)))
def pretty_timedelta(time1, time2=None, resolution=None):
    """Calculate time delta between two `datetime` objects.
    (the result is somewhat imprecise, only use for prettyprinting).
    If either `time1` or `time2` is None, the current time will be used
    instead.
    """
    t1 = to_datetime(time1)
    t2 = to_datetime(time2)
    if t1 > t2:
        t1, t2 = t2, t1
    delta = t2 - t1
    age_s = int(delta.days * 86400 + delta.seconds)
    if resolution and age_s < resolution:
        return ''
    # below ~2 minutes, report seconds directly
    if age_s <= 60 * 1.9:
        return ngettext('%(num)i second', '%(num)i seconds', age_s)
    # otherwise pick the coarsest unit whose ratio is at least 1.9
    for unit_seconds, render in _units:
        ratio = float(age_s) / float(unit_seconds)
        if ratio >= 1.9:
            return render(int(round(ratio)))
    return ''
def format_datetime(t=None, format='%x %X', tzinfo=None):
    """Format the `datetime` object `t` into an `unicode` string
    If `t` is None, the current time will be used.
    The formatting will be done using the given `format`, which consist
    of conventional `strftime` keys. In addition the format can be 'iso8601'
    to specify the international date format (compliant with RFC 3339).
    `tzinfo` will default to the local timezone if left to `None`.
    """
    tz = tzinfo or localtz
    t = to_datetime(t, tz)
    normalize_Z = False
    # map the pseudo-formats 'iso8601', 'iso8601date', 'iso8601time' onto
    # concrete strftime patterns
    if format.lower().startswith('iso8601'):
        if 'date' in format:
            format = '%Y-%m-%d'
        elif 'time' in format:
            format = '%H:%M:%S%z'
            normalize_Z = True
        else:
            format = '%Y-%m-%dT%H:%M:%S%z'
            normalize_Z = True
    text = t.strftime(str(format))
    if normalize_Z:
        # RFC 3339 offsets: '+0000' becomes 'Z', others get a colon (+hh:mm)
        text = text.replace('+0000', 'Z')
        if not text.endswith('Z'):
            text = text[:-2] + ":" + text[-2:]
    # strftime yields a locale-encoded byte string; decode it to unicode
    encoding = locale.getpreferredencoding() or sys.getdefaultencoding()
    if sys.platform != 'win32' or sys.version_info[:2] > (2, 3):
        encoding = locale.getlocale(locale.LC_TIME)[1] or encoding
        # Python 2.3 on windows doesn't know about 'XYZ' alias for 'cpXYZ'
    return unicode(text, encoding, 'replace')
def format_date(t=None, format='%x', tzinfo=None):
    """Convenience method for formatting the date part of a `datetime` object.
    See `format_datetime` for more details.
    """
    actual_format = 'iso8601date' if format == 'iso8601' else format
    return format_datetime(t, actual_format, tzinfo=tzinfo)
def format_time(t=None, format='%X', tzinfo=None):
    """Convenience method for formatting the time part of a `datetime` object.
    See `format_datetime` for more details.
    """
    actual_format = 'iso8601time' if format == 'iso8601' else format
    return format_datetime(t, actual_format, tzinfo=tzinfo)
def get_date_format_hint():
    """Present the default format used by `format_date` in a human readable
    form.
    This is a format that will be recognized by `parse_date` when reading a
    date.
    """
    # Format a known date, then substitute its digits with placeholders to
    # recover the locale's field order (e.g. 'MM/DD/YYYY')
    t = datetime(1999, 10, 29, tzinfo=utc)
    tmpl = format_date(t, tzinfo=utc)
    return tmpl.replace('1999', 'YYYY', 1).replace('99', 'YY', 1) \
        .replace('10', 'MM', 1).replace('29', 'DD', 1)
def get_datetime_format_hint():
    """Present the default format used by `format_datetime` in a human readable
    form.
    This is a format that will be recognized by `parse_date` when reading a
    date.
    """
    # Format a known timestamp and substitute its digits with placeholders;
    # '11' is replaced as well — presumably because 12-hour locales render
    # hour 23 as 11 (TODO confirm)
    t = datetime(1999, 10, 29, 23, 59, 58, tzinfo=utc)
    tmpl = format_datetime(t, tzinfo=utc)
    return tmpl.replace('1999', 'YYYY', 1).replace('99', 'YY', 1) \
        .replace('10', 'MM', 1).replace('29', 'DD', 1) \
        .replace('23', 'hh', 1).replace('11', 'hh', 1) \
        .replace('59', 'mm', 1).replace('58', 'ss', 1)
def http_date(t=None):
    """Format `datetime` object `t` as a rfc822 timestamp"""
    # RFC 822 requires English names regardless of locale, hence the
    # hard-coded tables instead of strftime
    dt = to_datetime(t).astimezone(utc)
    weekday = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[dt.weekday()]
    month = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
             'Oct', 'Nov', 'Dec')[dt.month - 1]
    return '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
        weekday, dt.day, month, dt.year, dt.hour, dt.minute, dt.second)
# -- parsing
_ISO_8601_RE = re.compile(r'''
(\d\d\d\d)(?:-?(\d\d)(?:-?(\d\d))?)? # date
(?:T(\d\d)(?::?(\d\d)(?::?(\d\d))?)?)? # time
(Z?(?:([-+])?(\d\d):?(\d\d)?)?)?$ # timezone
''', re.VERBOSE)
def parse_date(text, tzinfo=None, hint='date'):
    """Parse `text` into an aware `datetime` in `tzinfo` (default `localtz`).

    Tries, in order: ISO 8601, a handful of locale-dependent formats, then
    relative expressions handled by `_parse_relative_time`. Raises `TracError`
    when nothing matches or the result cannot round-trip through a timestamp;
    `hint` selects the format suggestion shown in that error.
    """
    tzinfo = tzinfo or localtz
    dt = None
    text = text.strip()
    # normalize ISO time
    match = _ISO_8601_RE.match(text)
    if match:
        try:
            g = match.groups()
            years = g[0]
            months = g[1] or '01'
            days = g[2] or '01'
            hours, minutes, seconds = [x or '00' for x in g[3:6]]
            z, tzsign, tzhours, tzminutes = g[6:10]
            if z:
                # an explicit offset in the string overrides the tzinfo argument
                tz = timedelta(hours=int(tzhours or '0'),
                    minutes=int(tzminutes or '0')).seconds / 60
                if tz == 0:
                    tzinfo = utc
                else:
                    tzinfo = FixedOffset(tzsign == '-' and -tz or tz,
                        '%s%s:%s' %
                        (tzsign, tzhours, tzminutes))
            tm = time.strptime('%s ' * 6 % (years, months, days,
                hours, minutes, seconds),
                '%Y %m %d %H %M %S ')
            dt = tzinfo.localize(datetime(*tm[0:6]))
        except ValueError:
            pass
    if dt is None:
        # fall back to locale-dependent formats
        for format in ['%x %X', '%x, %X', '%X %x', '%X, %x', '%x', '%c',
                '%b %d, %Y']:
            try:
                tm = time.strptime(text, format)
                dt = tzinfo.localize(datetime(*tm[0:6]))
                break
            except ValueError:
                continue
    if dt is None:
        dt = _parse_relative_time(text, tzinfo)
    if dt is None:
        hint = {'datetime': get_datetime_format_hint,
            'date': get_date_format_hint}.get(hint, lambda: hint)()
        raise TracError(_('"%(date)s" is an invalid date, or the date format '
            'is not known. Try "%(hint)s" instead.',
            date=text, hint=hint), _('Invalid Date'))
    # Make sure we can convert it to a timestamp and back - fromtimestamp()
    # may raise ValueError if larger than platform C localtime() or gmtime()
    try:
        to_datetime(to_timestamp(dt), tzinfo)
    except ValueError:
        raise TracError(_('The date "%(date)s" is outside valid range. '
            'Try a date closer to present time.', date=text),
            _('Invalid Date'))
    return dt
# "<number> <unit> [ago]" relative expressions, e.g. "3 days ago" or "2w"
_REL_TIME_RE = re.compile(
    r'(\d+\.?\d*)\s*'
    r'(second|minute|hour|day|week|month|year|[hdwmy])s?\s*'
    r'(?:ago)?$')
# unit name -> timedelta factory; months/years are approximated as 30/365 days
_time_intervals = dict(
    second=lambda v: timedelta(seconds=v),
    minute=lambda v: timedelta(minutes=v),
    hour=lambda v: timedelta(hours=v),
    day=lambda v: timedelta(days=v),
    week=lambda v: timedelta(weeks=v),
    month=lambda v: timedelta(days=30 * v),
    year=lambda v: timedelta(days=365 * v),
    h=lambda v: timedelta(hours=v),
    d=lambda v: timedelta(days=v),
    w=lambda v: timedelta(weeks=v),
    m=lambda v: timedelta(days=30 * v),
    y=lambda v: timedelta(days=365 * v),
    )
# "this <unit>" / "last <unit>" expressions
_TIME_START_RE = re.compile(r'(this|last)\s*'
    r'(second|minute|hour|day|week|month|year)$')
# unit name -> start of the period containing the given "now"
_time_starts = dict(
    second=lambda now: now.replace(microsecond=0),
    minute=lambda now: now.replace(microsecond=0, second=0),
    hour=lambda now: now.replace(microsecond=0, second=0, minute=0),
    day=lambda now: now.replace(microsecond=0, second=0, minute=0, hour=0),
    week=lambda now: now.replace(microsecond=0, second=0, minute=0, hour=0) \
        - timedelta(days=now.weekday()),
    month=lambda now: now.replace(microsecond=0, second=0, minute=0, hour=0,
        day=1),
    year=lambda now: now.replace(microsecond=0, second=0, minute=0, hour=0,
        day=1, month=1),
    )
def _parse_relative_time(text, tzinfo):
    """Parse relative date expressions ('now', 'yesterday', '3 weeks ago',
    'last month', ...) into an aware `datetime`, or return None."""
    now = tzinfo.localize(datetime.now())
    if text == 'now':
        return now
    if text == 'today':
        return now.replace(microsecond=0, second=0, minute=0, hour=0)
    if text == 'yesterday':
        return now.replace(microsecond=0, second=0, minute=0, hour=0) \
            - timedelta(days=1)
    match = _REL_TIME_RE.match(text)
    if match:
        (value, interval) = match.groups()
        return now - _time_intervals[interval](float(value))
    match = _TIME_START_RE.match(text)
    if match:
        (which, start) = match.groups()
        dt = _time_starts[start](now)
        if which == 'last':
            if start == 'month':
                # step back one calendar month, wrapping over year boundaries,
                # rather than subtracting the 30-day approximation
                if dt.month > 1:
                    dt = dt.replace(month=dt.month - 1)
                else:
                    dt = dt.replace(year=dt.year - 1, month=12)
            else:
                dt -= _time_intervals[start](1)
        return dt
    return None
# -- timezone utilities
class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC."""

    def __init__(self, offset, name):
        self._offset = timedelta(minutes=offset)
        self.zone = name

    def __str__(self):
        return self.zone

    def __repr__(self):
        return '<FixedOffset "%s" %s>' % (self.zone, self._offset)

    def utcoffset(self, dt):
        # constant offset, independent of the date
        return self._offset

    def tzname(self, dt):
        return self.zone

    def dst(self, dt):
        # fixed-offset zones never observe daylight saving
        return _zero

    def localize(self, dt, is_dst=False):
        """Attach this zone to a naive datetime (pytz-compatible API)."""
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        """No-op normalization for fixed offsets (pytz-compatible API)."""
        if dt.tzinfo is None:
            raise ValueError('Naive time (no tzinfo set)')
        return dt
# Offsets of the platform's local standard and DST timezones, captured from
# the `time` module at import time (used by LocalTimezone below)
STDOFFSET = timedelta(seconds=-time.timezone)
if time.daylight:
    DSTOFFSET = timedelta(seconds=-time.altzone)
else:
    DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
    """A 'local' time zone implementation"""
    def __str__(self):
        return self.tzname(datetime.now())
    def __repr__(self):
        return '<LocalTimezone "%s" %s "%s" %s>' % (
            time.tzname[False], STDOFFSET,
            time.tzname[True], DSTOFFSET)
    def utcoffset(self, dt):
        if self._isdst(dt):
            return DSTOFFSET
        else:
            return STDOFFSET
    def dst(self, dt):
        if self._isdst(dt):
            return DSTDIFF
        else:
            return _zero
    def tzname(self, dt):
        return time.tzname[self._isdst(dt)]
    def _isdst(self, dt):
        # Ask the platform whether DST is in effect at this local time by
        # round-tripping the broken-down time through mktime/localtime
        tt = (dt.year, dt.month, dt.day,
            dt.hour, dt.minute, dt.second,
            dt.weekday(), 0, -1)
        try:
            stamp = time.mktime(tt)
            tt = time.localtime(stamp)
            return tt.tm_isdst > 0
        except OverflowError:
            # outside mktime's range; assume standard time
            return False
    def localize(self, dt, is_dst=False):
        # pytz-compatible API: attach this zone to a naive datetime
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)
    def normalize(self, dt, is_dst=False):
        # pytz-compatible API: nothing to adjust for this implementation
        if dt.tzinfo is None:
            raise ValueError('Naive time (no tzinfo set)')
        return dt
utc = FixedOffset(0, 'UTC')
utcmin = datetime.min.replace(tzinfo=utc)
utcmax = datetime.max.replace(tzinfo=utc)
# UNIX epoch; reference point for the to_*timestamp/from_utimestamp helpers
_epoc = datetime(1970, 1, 1, tzinfo=utc)
_zero = timedelta(0)
# 32-bit signed timestamp range; values outside it are interpreted as
# microsecond timestamps by to_datetime
_min_ts = -(1 << 31)
_max_ts = (1 << 31) - 1
localtz = LocalTimezone()
# Use a makeshift timezone implementation if pytz is not available.
# This implementation only supports fixed offset time zones.
#
_timezones = [
    FixedOffset(0, 'UTC'),
    FixedOffset(-720, 'GMT -12:00'), FixedOffset(-660, 'GMT -11:00'),
    FixedOffset(-600, 'GMT -10:00'), FixedOffset(-540, 'GMT -9:00'),
    FixedOffset(-480, 'GMT -8:00'), FixedOffset(-420, 'GMT -7:00'),
    FixedOffset(-360, 'GMT -6:00'), FixedOffset(-300, 'GMT -5:00'),
    FixedOffset(-240, 'GMT -4:00'), FixedOffset(-180, 'GMT -3:00'),
    FixedOffset(-120, 'GMT -2:00'), FixedOffset(-60, 'GMT -1:00'),
    FixedOffset(0, 'GMT'), FixedOffset(60, 'GMT +1:00'),
    FixedOffset(120, 'GMT +2:00'), FixedOffset(180, 'GMT +3:00'),
    FixedOffset(240, 'GMT +4:00'), FixedOffset(300, 'GMT +5:00'),
    FixedOffset(360, 'GMT +6:00'), FixedOffset(420, 'GMT +7:00'),
    FixedOffset(480, 'GMT +8:00'), FixedOffset(540, 'GMT +9:00'),
    FixedOffset(600, 'GMT +10:00'), FixedOffset(660, 'GMT +11:00'),
    FixedOffset(720, 'GMT +12:00'), FixedOffset(780, 'GMT +13:00')]
# zone name -> FixedOffset lookup, and the public list of known zone names
_tzmap = dict([(z.zone, z) for z in _timezones])
all_timezones = [z.zone for z in _timezones]
try:
    import pytz
    # utcoffset -> GMT zone lookup, used below to present pytz's 'Etc/*'
    # aliases under the friendlier 'GMT +X:00' names
    _tzoffsetmap = dict([(tz.utcoffset(None), tz) for tz in _timezones
        if tz.zone != 'UTC'])
    def timezone(tzname):
        """Fetch timezone instance by name or raise `KeyError`."""
        tz = get_timezone(tzname)
        if not tz:
            raise KeyError(tzname)
        return tz
    def get_timezone(tzname):
        """Fetch timezone instance by name or return `None`"""
        try:
            # if given unicode parameter, pytz.timezone fails with:
            # "type() argument 1 must be string, not unicode"
            tz = pytz.timezone(to_unicode(tzname).encode('ascii', 'replace'))
        except (KeyError, IOError):
            tz = _tzmap.get(tzname)
        if tz and tzname.startswith('Etc/'):
            tz = _tzoffsetmap.get(tz.utcoffset(None))
        return tz
    _pytz_zones = [tzname for tzname in pytz.common_timezones
        if not tzname.startswith('Etc/') and
        not tzname.startswith('GMT')]
    # insert just the GMT timezones into the pytz zones at the right location
    # the pytz zones already include UTC so skip it
    from bisect import bisect
    _gmt_index = bisect(_pytz_zones, 'GMT')
    all_timezones = _pytz_zones[:_gmt_index] + all_timezones[1:] + \
        _pytz_zones[_gmt_index:]
except ImportError:
    # pytz not installed: fall back to the fixed-offset zones defined above
    pytz = None
    def timezone(tzname):
        """Fetch timezone instance by name or raise `KeyError`"""
        return _tzmap[tzname]
    def get_timezone(tzname):
        """Fetch timezone instance by name or return `None`"""
        return _tzmap.get(tzname)
0.12.5dev: show standardized UTC+-XX:YY name as localtz timezone name (closes #10920)
git-svn-id: 764b04d0c6fbbea0ee05220aef3208173ff13049@11414 af82e41b-90c4-0310-8c96-b1721e28e2e2
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2006 Matthew Good <trac@matt-good.net>
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Matthew Good <trac@matt-good.net>
import locale
import math
import re
import sys
import time
from datetime import tzinfo, timedelta, datetime, date
from trac.core import TracError
from trac.util.text import to_unicode
from trac.util.translation import _, ngettext
# Date/time utilities
# -- conversion
def to_datetime(t, tzinfo=None):
    """Convert ``t`` into a `datetime` object in the ``tzinfo`` timezone.
    If no ``tzinfo`` is given, the local timezone `localtz` will be used.
    ``t`` is converted using the following rules:
    - If ``t`` is already a `datetime` object,
      - if it is timezone-"naive", it is localized to ``tzinfo``
      - if it is already timezone-aware, ``t`` is mapped to the given
        timezone (`datetime.datetime.astimezone`)
    - If ``t`` is None, the current time will be used.
    - If ``t`` is a number, it is interpreted as a timestamp.
    Any other input will trigger a `TypeError`.
    All returned datetime instances are timezone aware and normalized.
    """
    tz = tzinfo or localtz
    if t is None:
        dt = datetime.now(tz)
    elif isinstance(t, datetime):
        if t.tzinfo:
            dt = t.astimezone(tz)
        else:
            dt = tz.localize(t)
    elif isinstance(t, date):
        dt = tz.localize(datetime(t.year, t.month, t.day))
    elif isinstance(t, (int, long, float)):
        if not (_min_ts <= t <= _max_ts):
            # Handle microsecond timestamps for 0.11 compatibility
            t *= 0.000001
        if t < 0 and isinstance(t, float):
            # Work around negative fractional times bug in Python 2.4
            # http://bugs.python.org/issue1646728
            frac, integer = math.modf(t)
            dt = datetime.fromtimestamp(integer - 1, tz) + \
                 timedelta(seconds=frac + 1)
        else:
            dt = datetime.fromtimestamp(t, tz)
    else:
        # Previously the code fell through to a truthiness test with `dt`
        # unbound, so unsupported input types raised NameError instead of the
        # documented TypeError.
        raise TypeError('expecting datetime, int, long, float, or None; '
                        'got %s' % type(t))
    return tz.normalize(dt)
def to_timestamp(dt):
    """Return the corresponding POSIX timestamp (seconds since the epoch);
    0 for a missing datetime."""
    if not dt:
        return 0
    delta = dt - _epoc
    return delta.days * 86400 + delta.seconds
def to_utimestamp(dt):
    """Return a microsecond POSIX timestamp for the given `datetime`."""
    # falsy dt (None) maps to the epoch, mirroring to_timestamp
    if not dt:
        return 0
    diff = dt - _epoc
    # 86400000000 microseconds per day (long literal: this is Python 2 code)
    return (diff.days * 86400000000L + diff.seconds * 1000000
            + diff.microseconds)
def from_utimestamp(ts):
    """Return the `datetime` for the given microsecond POSIX timestamp
    (None or 0 yields the epoch)."""
    offset = timedelta(microseconds=ts or 0)
    return _epoc + offset
# -- formatting
# (seconds per unit, pluralizing label formatter) pairs, largest unit first;
# consumed by pretty_timedelta to pick the coarsest applicable unit
_units = (
    (3600*24*365, lambda r: ngettext('%(num)d year', '%(num)d years', r)),
    (3600*24*30, lambda r: ngettext('%(num)d month', '%(num)d months', r)),
    (3600*24*7, lambda r: ngettext('%(num)d week', '%(num)d weeks', r)),
    (3600*24, lambda r: ngettext('%(num)d day', '%(num)d days', r)),
    (3600, lambda r: ngettext('%(num)d hour', '%(num)d hours', r)),
    (60, lambda r: ngettext('%(num)d minute', '%(num)d minutes', r)))
def pretty_timedelta(time1, time2=None, resolution=None):
    """Calculate time delta between two `datetime` objects.

    (the result is somewhat imprecise, only use for prettyprinting).

    If either `time1` or `time2` is None, the current time will be used
    instead.  If the delta is below `resolution` seconds, an empty string
    is returned.
    """
    start = to_datetime(time1)
    end = to_datetime(time2)
    if start > end:
        start, end = end, start
    delta = end - start
    age_s = int(delta.days * 86400 + delta.seconds)
    if resolution and age_s < resolution:
        return ''
    # Below ~2 minutes, report seconds directly.
    if age_s <= 60 * 1.9:
        return ngettext('%(num)i second', '%(num)i seconds', age_s)
    # Otherwise pick the coarsest unit that fits at least ~1.9 times.
    for unit_seconds, format_units in _units:
        ratio = float(age_s) / float(unit_seconds)
        if ratio >= 1.9:
            return format_units(int(round(ratio)))
    return ''
def format_datetime(t=None, format='%x %X', tzinfo=None):
    """Format the `datetime` object `t` into an `unicode` string

    If `t` is None, the current time will be used.

    The formatting will be done using the given `format`, which consist
    of conventional `strftime` keys. In addition the format can be 'iso8601'
    to specify the international date format (compliant with RFC 3339).

    `tzinfo` will default to the local timezone if left to `None`.
    """
    tz = tzinfo or localtz
    t = to_datetime(t, tz)
    # 'iso8601date' / 'iso8601time' select only one half of the full
    # ISO 8601 representation; the full form needs offset normalization.
    normalize_Z = False
    if format.lower().startswith('iso8601'):
        if 'date' in format:
            format = '%Y-%m-%d'
        elif 'time' in format:
            format = '%H:%M:%S%z'
            normalize_Z = True
        else:
            format = '%Y-%m-%dT%H:%M:%S%z'
            normalize_Z = True
    text = t.strftime(str(format))
    if normalize_Z:
        # RFC 3339: a zero offset is written 'Z', other offsets '+hh:mm'.
        text = text.replace('+0000', 'Z')
        if not text.endswith('Z'):
            text = text[:-2] + ":" + text[-2:]
    # strftime output is bytes in the locale's encoding; decode to unicode.
    encoding = locale.getpreferredencoding() or sys.getdefaultencoding()
    if sys.platform != 'win32' or sys.version_info[:2] > (2, 3):
        encoding = locale.getlocale(locale.LC_TIME)[1] or encoding
        # Python 2.3 on windows doesn't know about 'XYZ' alias for 'cpXYZ'
    return unicode(text, encoding, 'replace')
def format_date(t=None, format='%x', tzinfo=None):
    """Convenience method for formatting the date part of a `datetime` object.

    See `format_datetime` for more details.
    """
    if format == 'iso8601':
        # Restrict the ISO representation to the date half only.
        return format_datetime(t, 'iso8601date', tzinfo=tzinfo)
    return format_datetime(t, format, tzinfo=tzinfo)
def format_time(t=None, format='%X', tzinfo=None):
    """Convenience method for formatting the time part of a `datetime` object.

    See `format_datetime` for more details.
    """
    if format == 'iso8601':
        # Restrict the ISO representation to the time half only.
        return format_datetime(t, 'iso8601time', tzinfo=tzinfo)
    return format_datetime(t, format, tzinfo=tzinfo)
def get_date_format_hint():
    """Present the default format used by `format_date` in a human readable
    form.

    This is a format that will be recognized by `parse_date` when reading a
    date.
    """
    # Format a date with unambiguous component values, then substitute
    # each component (first occurrence only) with its placeholder.
    sample = format_date(datetime(1999, 10, 29, tzinfo=utc), tzinfo=utc)
    for component, placeholder in (('1999', 'YYYY'), ('99', 'YY'),
                                   ('10', 'MM'), ('29', 'DD')):
        sample = sample.replace(component, placeholder, 1)
    return sample
def get_datetime_format_hint():
    """Present the default format used by `format_datetime` in a human readable
    form.

    This is a format that will be recognized by `parse_date` when reading a
    date.
    """
    # Format a timestamp with unambiguous component values, then substitute
    # each component (first occurrence only) with its placeholder.
    sample = format_datetime(datetime(1999, 10, 29, 23, 59, 58, tzinfo=utc),
                             tzinfo=utc)
    for component, placeholder in (('1999', 'YYYY'), ('99', 'YY'),
                                   ('10', 'MM'), ('29', 'DD'),
                                   ('23', 'hh'), ('11', 'hh'),
                                   ('59', 'mm'), ('58', 'ss')):
        sample = sample.replace(component, placeholder, 1)
    return sample
def http_date(t=None):
    """Format `datetime` object `t` as a rfc822 timestamp"""
    # RFC 822 mandates English day/month names and a GMT timestamp,
    # so convert explicitly instead of relying on the locale.
    dt = to_datetime(t).astimezone(utc)
    weekday = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[dt.weekday()]
    month = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
             'Oct', 'Nov', 'Dec')[dt.month - 1]
    return '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
        weekday, dt.day, month, dt.year, dt.hour, dt.minute, dt.second)
# -- parsing
_ISO_8601_RE = re.compile(r'''
(\d\d\d\d)(?:-?(\d\d)(?:-?(\d\d))?)? # date
(?:T(\d\d)(?::?(\d\d)(?::?(\d\d))?)?)? # time
(Z?(?:([-+])?(\d\d):?(\d\d)?)?)?$ # timezone
''', re.VERBOSE)
def parse_date(text, tzinfo=None, hint='date'):
    """Parse `text` into a timezone-aware `datetime`.

    Tries, in order: ISO 8601, a set of locale date/time formats, and
    relative expressions such as 'now' or '3 days ago' (see
    `_parse_relative_time`).  `tzinfo` defaults to the local timezone and
    is applied to naive inputs.  `hint` selects which format to suggest in
    the error message ('date', 'datetime', or a literal hint string).

    :raises TracError: if `text` cannot be parsed, or the result is outside
                       the range supported by the platform's C library.
    """
    tzinfo = tzinfo or localtz
    dt = None
    text = text.strip()
    # normalize ISO time
    match = _ISO_8601_RE.match(text)
    if match:
        try:
            g = match.groups()
            years = g[0]
            months = g[1] or '01'
            days = g[2] or '01'
            hours, minutes, seconds = [x or '00' for x in g[3:6]]
            z, tzsign, tzhours, tzminutes = g[6:10]
            if z:
                # An explicit offset overrides the caller-supplied tzinfo.
                tz = timedelta(hours=int(tzhours or '0'),
                               minutes=int(tzminutes or '0')).seconds / 60
                if tz == 0:
                    tzinfo = utc
                else:
                    tzinfo = FixedOffset(tzsign == '-' and -tz or tz,
                                         '%s%s:%s' %
                                         (tzsign, tzhours, tzminutes))
            # Re-parse through strptime so component ranges are validated.
            tm = time.strptime('%s ' * 6 % (years, months, days,
                                            hours, minutes, seconds),
                               '%Y %m %d %H %M %S ')
            dt = tzinfo.localize(datetime(*tm[0:6]))
        except ValueError:
            pass
    if dt is None:
        # Fall back to the locale's conventional formats.
        for format in ['%x %X', '%x, %X', '%X %x', '%X, %x', '%x', '%c',
                       '%b %d, %Y']:
            try:
                tm = time.strptime(text, format)
                dt = tzinfo.localize(datetime(*tm[0:6]))
                break
            except ValueError:
                continue
    if dt is None:
        dt = _parse_relative_time(text, tzinfo)
    if dt is None:
        hint = {'datetime': get_datetime_format_hint,
                'date': get_date_format_hint}.get(hint, lambda: hint)()
        raise TracError(_('"%(date)s" is an invalid date, or the date format '
                          'is not known. Try "%(hint)s" instead.',
                          date=text, hint=hint), _('Invalid Date'))
    # Make sure we can convert it to a timestamp and back - fromtimestamp()
    # may raise ValueError if larger than platform C localtime() or gmtime()
    try:
        to_datetime(to_timestamp(dt), tzinfo)
    except ValueError:
        raise TracError(_('The date "%(date)s" is outside valid range. '
                          'Try a date closer to present time.', date=text),
                        _('Invalid Date'))
    return dt
# Relative time expressions such as '3 days ago', '2 weeks' or '1.5h'.
_REL_TIME_RE = re.compile(
    r'(\d+\.?\d*)\s*'
    r'(second|minute|hour|day|week|month|year|[hdwmy])s?\s*'
    r'(?:ago)?$')

# Maps a unit name (or its one-letter shorthand) to a timedelta factory.
# Months and years are approximated as 30 and 365 days respectively.
_time_intervals = dict(
    second=lambda v: timedelta(seconds=v),
    minute=lambda v: timedelta(minutes=v),
    hour=lambda v: timedelta(hours=v),
    day=lambda v: timedelta(days=v),
    week=lambda v: timedelta(weeks=v),
    month=lambda v: timedelta(days=30 * v),
    year=lambda v: timedelta(days=365 * v),
    h=lambda v: timedelta(hours=v),
    d=lambda v: timedelta(days=v),
    w=lambda v: timedelta(weeks=v),
    m=lambda v: timedelta(days=30 * v),
    y=lambda v: timedelta(days=365 * v),
)

# 'this week' / 'last month' style expressions.
_TIME_START_RE = re.compile(r'(this|last)\s*'
                            r'(second|minute|hour|day|week|month|year)$')

# Maps a unit name to a function truncating 'now' to the start of that unit.
_time_starts = dict(
    second=lambda now: now.replace(microsecond=0),
    minute=lambda now: now.replace(microsecond=0, second=0),
    hour=lambda now: now.replace(microsecond=0, second=0, minute=0),
    day=lambda now: now.replace(microsecond=0, second=0, minute=0, hour=0),
    week=lambda now: now.replace(microsecond=0, second=0, minute=0, hour=0) \
         - timedelta(days=now.weekday()),
    month=lambda now: now.replace(microsecond=0, second=0, minute=0, hour=0,
                                  day=1),
    year=lambda now: now.replace(microsecond=0, second=0, minute=0, hour=0,
                                 day=1, month=1),
)
def _parse_relative_time(text, tzinfo):
    """Parse relative expressions ('now', '3 days ago', 'last month', ...)
    into an aware `datetime`, or return None if `text` doesn't match.
    """
    now = tzinfo.localize(datetime.now())
    if text == 'now':
        return now
    midnight = now.replace(microsecond=0, second=0, minute=0, hour=0)
    if text == 'today':
        return midnight
    if text == 'yesterday':
        return midnight - timedelta(days=1)
    # '<number> <unit> [ago]'
    rel = _REL_TIME_RE.match(text)
    if rel:
        value, interval = rel.groups()
        return now - _time_intervals[interval](float(value))
    # 'this <unit>' / 'last <unit>'
    start_match = _TIME_START_RE.match(text)
    if start_match:
        which, start = start_match.groups()
        dt = _time_starts[start](now)
        if which == 'last':
            if start == 'month':
                # Step back one calendar month, wrapping over year ends.
                if dt.month > 1:
                    dt = dt.replace(month=dt.month - 1)
                else:
                    dt = dt.replace(year=dt.year - 1, month=12)
            else:
                dt -= _time_intervals[start](1)
        return dt
    return None
# -- timezone utilities

class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC."""

    def __init__(self, offset, name):
        # `offset` is given in minutes east of UTC.
        self._offset = timedelta(minutes=offset)
        self.zone = name

    def __str__(self):
        return self.zone

    def __repr__(self):
        return '<FixedOffset "%s" %s>' % (self.zone, self._offset)

    # -- standard tzinfo protocol

    def utcoffset(self, dt):
        # Constant offset, independent of the date.
        return self._offset

    def tzname(self, dt):
        return self.zone

    def dst(self, dt):
        # Fixed-offset zones never observe daylight saving time.
        return _zero

    # -- pytz-compatible API

    def localize(self, dt, is_dst=False):
        # Attach this timezone to a naive datetime.
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        # Nothing to adjust for a fixed offset; just validate awareness.
        if dt.tzinfo is None:
            raise ValueError('Naive time (no tzinfo set)')
        return dt
# Offsets of the platform's local standard and DST time, captured from the
# C library at import time.
STDOFFSET = timedelta(seconds=-time.timezone)
if time.daylight:
    DSTOFFSET = timedelta(seconds=-time.altzone)
else:
    DSTOFFSET = STDOFFSET

# Extra offset applied while DST is in effect (zero if the zone has none).
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
    """A 'local' time zone implementation"""

    def __str__(self):
        # NOTE(review): `timedelta.seconds` is always non-negative (negative
        # offsets carry days=-1), so the `secs < 0` branch selecting the
        # second tuple appears unreachable and west-of-UTC offsets may render
        # incorrectly -- confirm before relying on this output.
        secs = self.utcoffset(datetime.now()).seconds
        hours, rem = divmod(secs, 3600)
        return 'UTC%+03d:%02d' % (
            (hours, rem / 60),
            (hours + 1, (3600 - rem) / 60))[bool(secs < 0 and rem)]

    def __repr__(self):
        return '<LocalTimezone "%s" %s "%s" %s>' % (
            time.tzname[False], STDOFFSET,
            time.tzname[True], DSTOFFSET)

    def utcoffset(self, dt):
        # DST-aware offset for the given local time.
        if self._isdst(dt):
            return DSTOFFSET
        else:
            return STDOFFSET

    def dst(self, dt):
        if self._isdst(dt):
            return DSTDIFF
        else:
            return _zero

    def tzname(self, dt):
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # Round-trip through mktime/localtime to ask the C library whether
        # DST is in effect at this (naive) local time.
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, -1)
        try:
            stamp = time.mktime(tt)
            tt = time.localtime(stamp)
            return tt.tm_isdst > 0
        except OverflowError:
            # Outside the platform's time_t range: assume no DST.
            return False

    def localize(self, dt, is_dst=False):
        # pytz-compatible API: attach this timezone to a naive datetime.
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        # pytz-compatible API; no adjustment is performed here.
        if dt.tzinfo is None:
            raise ValueError('Naive time (no tzinfo set)')
        return dt
# Canonical UTC instance and the extreme aware datetimes it allows.
utc = FixedOffset(0, 'UTC')
utcmin = datetime.min.replace(tzinfo=utc)
utcmax = datetime.max.replace(tzinfo=utc)
_epoc = datetime(1970, 1, 1, tzinfo=utc)
_zero = timedelta(0)
# 32-bit signed second-timestamp range accepted by `to_datetime`; values
# outside it are interpreted as microsecond timestamps.
_min_ts = -(1 << 31)
_max_ts = (1 << 31) - 1

localtz = LocalTimezone()

# Use a makeshift timezone implementation if pytz is not available.
# This implementation only supports fixed offset time zones.
#
_timezones = [
    FixedOffset(0, 'UTC'),
    FixedOffset(-720, 'GMT -12:00'), FixedOffset(-660, 'GMT -11:00'),
    FixedOffset(-600, 'GMT -10:00'), FixedOffset(-540, 'GMT -9:00'),
    FixedOffset(-480, 'GMT -8:00'), FixedOffset(-420, 'GMT -7:00'),
    FixedOffset(-360, 'GMT -6:00'), FixedOffset(-300, 'GMT -5:00'),
    FixedOffset(-240, 'GMT -4:00'), FixedOffset(-180, 'GMT -3:00'),
    FixedOffset(-120, 'GMT -2:00'), FixedOffset(-60, 'GMT -1:00'),
    FixedOffset(0, 'GMT'), FixedOffset(60, 'GMT +1:00'),
    FixedOffset(120, 'GMT +2:00'), FixedOffset(180, 'GMT +3:00'),
    FixedOffset(240, 'GMT +4:00'), FixedOffset(300, 'GMT +5:00'),
    FixedOffset(360, 'GMT +6:00'), FixedOffset(420, 'GMT +7:00'),
    FixedOffset(480, 'GMT +8:00'), FixedOffset(540, 'GMT +9:00'),
    FixedOffset(600, 'GMT +10:00'), FixedOffset(660, 'GMT +11:00'),
    FixedOffset(720, 'GMT +12:00'), FixedOffset(780, 'GMT +13:00')]
# Lookup table from zone name to instance.
_tzmap = dict([(z.zone, z) for z in _timezones])

all_timezones = [z.zone for z in _timezones]
# Prefer pytz when installed: `timezone`/`get_timezone` then resolve real
# Olson zone names, and `all_timezones` merges pytz's list with our GMT
# offsets.  Without pytz, only the fixed-offset `_tzmap` zones are available.
try:
    import pytz

    # Offset -> fixed-offset zone, used to replace pytz's inverted-sign
    # 'Etc/*' zones with our conventional GMT labels.
    _tzoffsetmap = dict([(tz.utcoffset(None), tz) for tz in _timezones
                         if tz.zone != 'UTC'])

    def timezone(tzname):
        """Fetch timezone instance by name or raise `KeyError`"""
        tz = get_timezone(tzname)
        if not tz:
            raise KeyError(tzname)
        return tz

    def get_timezone(tzname):
        """Fetch timezone instance by name or return `None`"""
        try:
            # if given unicode parameter, pytz.timezone fails with:
            # "type() argument 1 must be string, not unicode"
            tz = pytz.timezone(to_unicode(tzname).encode('ascii', 'replace'))
        except (KeyError, IOError):
            tz = _tzmap.get(tzname)
        if tz and tzname.startswith('Etc/'):
            tz = _tzoffsetmap.get(tz.utcoffset(None))
        return tz

    _pytz_zones = [tzname for tzname in pytz.common_timezones
                   if not tzname.startswith('Etc/') and
                   not tzname.startswith('GMT')]
    # insert just the GMT timezones into the pytz zones at the right location
    # the pytz zones already include UTC so skip it
    from bisect import bisect
    _gmt_index = bisect(_pytz_zones, 'GMT')
    all_timezones = _pytz_zones[:_gmt_index] + all_timezones[1:] + \
                    _pytz_zones[_gmt_index:]

except ImportError:
    pytz = None

    def timezone(tzname):
        """Fetch timezone instance by name or raise `KeyError`"""
        return _tzmap[tzname]

    def get_timezone(tzname):
        """Fetch timezone instance by name or return `None`"""
        return _tzmap.get(tzname)
|
# -*- coding: utf-8 -*-
import datetime
import logging
from odoo import _, api, models, fields, exceptions
from odoo.tools.translate import _
# from ..services.currency_getter import Currency_getter_factory
from ..services.update_service_RU_CBRF import RU_CBRF_getter
_logger = logging.getLogger(__name__)
CURRENCY_DOMAIN = [('name', 'in', ['RUB', 'USD', 'EUR'])]
class Currency(models.Model):
    """res.currency extension: RUB-centric rates pulled from the Central
    Bank of Russia (CBRF) service, with per-month averages and a staleness
    check that notifies the administrator by e-mail.
    """
    _inherit = 'res.currency'

    # Rate expressed as RUB per one unit of this currency (inverse of the
    # stock `rate`, which is currency units per company currency).
    rub_currency_rate = fields.Float(string=u"Курс",
                                     compute='compute_rub_currency',
                                     digits=(12, 4),
                                     )
    # Month selector used by the average-rate computation; values look like
    # '2018-01-01,2018-02-01' (start of month, start of next month).
    rate_month = fields.Selection(string=u"Месяц",
                                  selection='get_rate_month_selection',
                                  compute='compute_rate_month_selection',
                                  )
    # Average RUB rate over the month selected in `rate_month`.
    avg_rate = fields.Float(string=u"Средний курс за месяц",
                            compute='compute_avg_rate',
                            digits=(12, 4),
                            )
    # Start date used when back-filling missing daily rates.
    from_date = fields.Date(string=u'От даты',
                            default=datetime.date(datetime.date.today().year, 1, 1))
    # When set, refetch even dates that already have a rate record.
    force_refresh = fields.Boolean(string=u'Принудительно')

    @property
    def rub_id(self):
        # Russian rouble currency record.
        return self.env.ref('base.RUB')

    @property
    def eur_id(self):
        # Euro currency record.
        return self.env.ref('base.EUR')

    @property
    def usd_id(self):
        # US dollar currency record.
        return self.env.ref('base.USD')

    @api.v8
    def compute_rub(self, from_amount, round=True):
        """Convert `from_amount` of this currency into RUB."""
        return self.compute(from_amount, self.rub_id, round)

    @api.v8
    def compute_eur(self, from_amount, round=True):
        """Convert `from_amount` of this currency into EUR."""
        return self.compute(from_amount, self.eur_id, round)

    @api.v8
    def compute_usd(self, from_amount, round=True):
        """Convert `from_amount` of this currency into USD."""
        return self.compute(from_amount, self.usd_id, round)

    @api.multi
    def compute_rub_currency(self):
        """Compute the inverse (RUB per unit) of the stored rate."""
        for rec in self:
            if rec.rate:
                rec.rub_currency_rate = 1. / rec.rate

    @api.model
    def get_rate_month_selection(self):
        """Return `(value, label)` selection pairs for every month from one
        year ago through the current month.  Each value is the ISO start
        date of the month and of the following month, comma-separated.
        """
        end = datetime.date.today()
        rate_start = datetime.date(end.year - 1, end.month, 1)
        sel_month = datetime.date(rate_start.year, rate_start.month, 1)
        months_sel = []
        while sel_month <= end:
            # Wrap December into January of the next year.
            if sel_month.month == 12:
                next_month = datetime.date(sel_month.year + 1, 1, 1)
            else:
                next_month = datetime.date(sel_month.year, sel_month.month + 1, 1)
            sel_val = '%s,%s' % (fields.Date.to_string(sel_month),
                                 fields.Date.to_string(next_month))
            months_sel.append((sel_val, sel_month.strftime('%Y.%m')))
            sel_month = next_month
        return months_sel

    @api.multi
    def compute_rate_month_selection(self):
        """Default `rate_month` to the latest available month."""
        rate_month = self.get_rate_month_selection()[-1][0]
        for rec in self:
            rec.rate_month = rate_month

    @api.one
    @api.onchange('rate_month')
    def compute_avg_rate(self):
        """Average the daily RUB rates over the month in `rate_month`.

        RUB itself is always 1; currencies with no rates in the month
        get 0.
        """
        if self.name == 'RUB':
            self.avg_rate = 1
        elif self.rate_month:
            start, end = self.rate_month.split(',')
            month_rates = self.env['res.currency.rate'].search(
                [('currency_id.name', '=', self.name),
                 ('name', '>=', start),
                 ('name', '<', end)])
            if month_rates:
                rates = [rate.rate for rate in month_rates]
                avg_rate = sum(rates) / float(len(rates))
                # Invert: stored rates are currency per RUB.
                self.avg_rate = 1. / avg_rate
            else:
                self.avg_rate = 0

    @api.one
    def refrech_empty_date_rates(self):
        """Back-fill missing daily rates from `from_date` through today by
        querying the CBRF service one day at a time.

        NOTE(review): method name keeps its historical misspelling
        ('refrech') because external callers/buttons reference it.
        """
        current_service = 'RU_CBRF'
        if self.name == 'RUB':
            # The base currency has no external rate.
            raise exceptions.Warning(
                'Данная валюта не поддерживается: RUB')
        # factory = Currency_getter_factory()
        # getter = factory.register(current_service)
        getter = RU_CBRF_getter()
        today = datetime.date.today()
        date = fields.Date.from_string(self.from_date)
        if self.force_refresh:
            # Refetch everything, including dates that already have rates.
            rec_dates = set()
        else:
            rec_dates = set(self.env['res.currency.rate'].search(
                [('currency_id', '=', self.id),
                 ('name', '>=', fields.Datetime.to_string(date))]).mapped('name'))
        all_dates = set()
        while date <= today:
            all_dates.add(fields.Datetime.to_string(date))
            date += datetime.timedelta(1)
        # Only fetch dates that have no rate record yet.
        dates = all_dates - rec_dates
        for d in sorted(dates):
            try:
                # CBRF expects dd/mm/YYYY.
                date_req = datetime.datetime.strptime(
                    d[:10], '%Y-%m-%d').strftime('%d/%m/%Y')
                res, log_info = getter.get_updated_currency(
                    [self.name],
                    None,
                    None,
                    date_req=date_req)
                vals = {
                    'currency_id': self.id,
                    'rate': res[self.name],
                    'name': d
                }
                rec = self.env['res.currency.rate'].search([('currency_id', '=', self.id),
                                                            ('name', '=', d)], limit=1)
                if rec:
                    rec.write({'rate': res[self.name]})
                else:
                    self.env['res.currency.rate'].create(vals)
            except Exception as exc:
                # Best effort: log the failure on the update-service record
                # and keep going with the remaining dates.
                _logger.info(repr(exc))
                rec = self.env['currency.rate.update.service'].search(
                    [('service', '=', current_service)],
                    limit=1)
                if rec:
                    error_msg = '\n%s ERROR : %s %s' % (
                        fields.Datetime.to_string(datetime.datetime.today()),
                        repr(exc), rec.note or '')
                    rec.write({'note': error_msg})

    @api.model
    def check_rates(self):
        """Cron entry point: e-mail the administrator for every tracked
        currency that has had no rate records for the last
        `currency_days_with_not_rates` days (default 3).
        """
        default_param = 3
        try:
            currency_days_with_not_rates = int(self.env['ir.config_parameter'].get_param('currency_days_with_not_rates', default_param))
        except ValueError:
            # Malformed config value: fall back to the default.
            currency_days_with_not_rates = default_param
        today = datetime.date.today()
        date = today - datetime.timedelta(currency_days_with_not_rates)
        admin = self.env['res.users'].browse(1)
        Mail = self.env['mail.mail']
        domain = CURRENCY_DOMAIN + [('name', '!=', 'RUB')]
        for cur in self.search(domain):
            recs = self.env['res.currency.rate'].search(
                [('currency_id', '=', cur.id),
                 ('name', '>=', fields.Datetime.to_string(date))])
            if recs:
                # Recent rates exist: nothing to report for this currency.
                continue
            message = u'<div>Валюта {} не обновлялась c {}.</div>'.format(cur.name,
                                                                          date.strftime('%d-%m-%Y'))
            mess = Mail.create({
                'email_to': admin.email,
                'subject': u'Нет обновления валюты {}!'.format(cur.name),
                'body_html': message})
            mess.send()
class Rate(models.Model):
    """res.currency.rate extension: higher-precision rates plus the RUB
    inverse of each stored rate.
    """
    _inherit = "res.currency.rate"

    # Rate expressed as RUB per one unit of the currency.
    rub_currency_rate = fields.Float(string=u"Курс",
                                     compute='compute_rub_currency',
                                     digits=(12, 4),
                                     )
    # Store daily rates with more precision than the stock definition.
    rate = fields.Float(digits=(12, 8))

    @api.multi
    def compute_rub_currency(self):
        """Compute the inverse (RUB per unit) of the stored rate."""
        for record in self:
            if record.rate:
                record.rub_currency_rate = 1. / record.rate
Added additional information to the notification mail sent by the currency rate check
# -*- coding: utf-8 -*-
import datetime
import logging
import socket
import os
from odoo import _, api, models, fields, exceptions
from odoo.tools.translate import _
# from ..services.currency_getter import Currency_getter_factory
from ..services.update_service_RU_CBRF import RU_CBRF_getter
_logger = logging.getLogger(__name__)
CURRENCY_DOMAIN = [('name', 'in', ['RUB', 'USD', 'EUR'])]
class Currency(models.Model):
    """res.currency extension: RUB-centric rates pulled from the Central
    Bank of Russia (CBRF) service, with per-month averages and a staleness
    check that notifies the administrator by e-mail, including diagnostic
    context (cron schedule, host, module path).
    """
    _inherit = 'res.currency'

    # Rate expressed as RUB per one unit of this currency (inverse of the
    # stock `rate`, which is currency units per company currency).
    rub_currency_rate = fields.Float(string=u"Курс",
                                     compute='compute_rub_currency',
                                     digits=(12, 4),
                                     )
    # Month selector used by the average-rate computation; values look like
    # '2018-01-01,2018-02-01' (start of month, start of next month).
    rate_month = fields.Selection(string=u"Месяц",
                                  selection='get_rate_month_selection',
                                  compute='compute_rate_month_selection',
                                  )
    # Average RUB rate over the month selected in `rate_month`.
    avg_rate = fields.Float(string=u"Средний курс за месяц",
                            compute='compute_avg_rate',
                            digits=(12, 4),
                            )
    # Start date used when back-filling missing daily rates.
    from_date = fields.Date(string=u'От даты',
                            default=datetime.date(datetime.date.today().year, 1, 1))
    # When set, refetch even dates that already have a rate record.
    force_refresh = fields.Boolean(string=u'Принудительно')

    @property
    def rub_id(self):
        # Russian rouble currency record.
        return self.env.ref('base.RUB')

    @property
    def eur_id(self):
        # Euro currency record.
        return self.env.ref('base.EUR')

    @property
    def usd_id(self):
        # US dollar currency record.
        return self.env.ref('base.USD')

    @api.v8
    def compute_rub(self, from_amount, round=True):
        """Convert `from_amount` of this currency into RUB."""
        return self.compute(from_amount, self.rub_id, round)

    @api.v8
    def compute_eur(self, from_amount, round=True):
        """Convert `from_amount` of this currency into EUR."""
        return self.compute(from_amount, self.eur_id, round)

    @api.v8
    def compute_usd(self, from_amount, round=True):
        """Convert `from_amount` of this currency into USD."""
        return self.compute(from_amount, self.usd_id, round)

    @api.multi
    def compute_rub_currency(self):
        """Compute the inverse (RUB per unit) of the stored rate."""
        for rec in self:
            if rec.rate:
                rec.rub_currency_rate = 1. / rec.rate

    @api.model
    def get_rate_month_selection(self):
        """Return `(value, label)` selection pairs for every month from one
        year ago through the current month.  Each value is the ISO start
        date of the month and of the following month, comma-separated.
        """
        end = datetime.date.today()
        rate_start = datetime.date(end.year - 1, end.month, 1)
        sel_month = datetime.date(rate_start.year, rate_start.month, 1)
        months_sel = []
        while sel_month <= end:
            # Wrap December into January of the next year.
            if sel_month.month == 12:
                next_month = datetime.date(sel_month.year + 1, 1, 1)
            else:
                next_month = datetime.date(sel_month.year, sel_month.month + 1, 1)
            sel_val = '%s,%s' % (fields.Date.to_string(sel_month),
                                 fields.Date.to_string(next_month))
            months_sel.append((sel_val, sel_month.strftime('%Y.%m')))
            sel_month = next_month
        return months_sel

    @api.multi
    def compute_rate_month_selection(self):
        """Default `rate_month` to the latest available month."""
        rate_month = self.get_rate_month_selection()[-1][0]
        for rec in self:
            rec.rate_month = rate_month

    @api.one
    @api.onchange('rate_month')
    def compute_avg_rate(self):
        """Average the daily RUB rates over the month in `rate_month`.

        RUB itself is always 1; currencies with no rates in the month get 0.
        """
        if self.name == 'RUB':
            self.avg_rate = 1
        elif self.rate_month:
            start, end = self.rate_month.split(',')
            month_rates = self.env['res.currency.rate'].search(
                [('currency_id.name', '=', self.name),
                 ('name', '>=', start),
                 ('name', '<', end)])
            if month_rates:
                rates = [rate.rate for rate in month_rates]
                avg_rate = sum(rates) / float(len(rates))
                # Invert: stored rates are currency per RUB.
                self.avg_rate = 1. / avg_rate
            else:
                self.avg_rate = 0

    @api.one
    def refrech_empty_date_rates(self):
        """Back-fill missing daily rates from `from_date` through today by
        querying the CBRF service one day at a time.

        NOTE(review): method name keeps its historical misspelling
        ('refrech') because external callers/buttons reference it.
        """
        current_service = 'RU_CBRF'
        if self.name == 'RUB':
            # The base currency has no external rate.
            raise exceptions.Warning(
                'Данная валюта не поддерживается: RUB')
        # factory = Currency_getter_factory()
        # getter = factory.register(current_service)
        getter = RU_CBRF_getter()
        today = datetime.date.today()
        date = fields.Date.from_string(self.from_date)
        if self.force_refresh:
            # Refetch everything, including dates that already have rates.
            rec_dates = set()
        else:
            rec_dates = set(self.env['res.currency.rate'].search(
                [('currency_id', '=', self.id),
                 ('name', '>=', fields.Datetime.to_string(date))]).mapped('name'))
        all_dates = set()
        while date <= today:
            all_dates.add(fields.Datetime.to_string(date))
            date += datetime.timedelta(1)
        # Only fetch dates that have no rate record yet.
        dates = all_dates - rec_dates
        for d in sorted(dates):
            try:
                # CBRF expects dd/mm/YYYY.
                date_req = datetime.datetime.strptime(
                    d[:10], '%Y-%m-%d').strftime('%d/%m/%Y')
                res, log_info = getter.get_updated_currency(
                    [self.name],
                    None,
                    None,
                    date_req=date_req)
                vals = {
                    'currency_id': self.id,
                    'rate': res[self.name],
                    'name': d
                }
                rec = self.env['res.currency.rate'].search([('currency_id', '=', self.id),
                                                            ('name', '=', d)], limit=1)
                if rec:
                    rec.write({'rate': res[self.name]})
                else:
                    self.env['res.currency.rate'].create(vals)
            except Exception as exc:
                # Best effort: log the failure on the update-service record
                # and keep going with the remaining dates.
                _logger.info(repr(exc))
                rec = self.env['currency.rate.update.service'].search(
                    [('service', '=', current_service)],
                    limit=1)
                if rec:
                    error_msg = '\n%s ERROR : %s %s' % (
                        fields.Datetime.to_string(datetime.datetime.today()),
                        repr(exc), rec.note or '')
                    rec.write({'note': error_msg})

    @api.model
    def check_rates(self):
        """Cron entry point: e-mail the administrator for every tracked
        currency that has had no rate records for the last
        `currency_days_with_not_rates` days (default 3), including the
        cron schedule, host name and module path for diagnostics.
        """
        default_param = 3
        try:
            currency_days_with_not_rates = int(self.env['ir.config_parameter'].get_param('currency_days_with_not_rates', default_param))
        except ValueError:
            # Malformed config value: fall back to the default.
            currency_days_with_not_rates = default_param
        today = datetime.date.today()
        date = today - datetime.timedelta(currency_days_with_not_rates)
        admin = self.env['res.users'].browse(1)
        Mail = self.env['mail.mail']
        # Diagnostic context is identical for every currency, so compute it
        # once outside the loop.
        cron_currency_update_id = self.env.ref('currency_rate_update.ir_cron_currency_update_every_day')
        # The update cron runs daily, so the previous run is one day before
        # the scheduled next call.
        last_run_date = fields.Datetime.from_string(cron_currency_update_id.nextcall) - datetime.timedelta(days=1)
        hostname = socket.gethostname()
        # BUGFIX: the original called os.abspath(), which does not exist
        # (AttributeError at runtime); the correct call is os.path.abspath().
        work_dir = os.path.abspath(__file__)
        domain = CURRENCY_DOMAIN + [('name', '!=', 'RUB')]
        for cur in self.search(domain):
            _logger.info('Begin update for currency {}'.format(cur.name))
            recs = self.env['res.currency.rate'].search(
                [('currency_id', '=', cur.id),
                 ('name', '>=', fields.Datetime.to_string(date))])
            if recs:
                # Recent rates exist: nothing to report for this currency.
                continue
            message = u'<div>Валюта {} не обновлялась c {}.</div>' \
                      u'<div>Последний запуск обновления: {}</div>' \
                      u'<div>Следующий запуск обновления: {}</div>' \
                      u'<div>Хост: {}</div>' \
                      u'<div>Директория: {}</div>'.format(cur.name, date.strftime('%d-%m-%Y'),
                                                          last_run_date.strftime('%d-%m-%Y'),
                                                          cron_currency_update_id.nextcall,
                                                          hostname, work_dir)
            mess = Mail.create({
                'email_to': admin.email,
                'subject': u'Нет обновления валюты {}!'.format(cur.name),
                'body_html': message})
            mess.send()
            _logger.info('Letter sent about currency {} update.'.format(cur.name))
class Rate(models.Model):
    """res.currency.rate extension: higher-precision rates plus the RUB
    inverse of each stored rate.
    """
    _inherit = "res.currency.rate"

    # Rate expressed as RUB per one unit of the currency.
    rub_currency_rate = fields.Float(string=u"Курс",
                                     compute='compute_rub_currency',
                                     digits=(12, 4),
                                     )
    # Store daily rates with more precision than the stock definition.
    rate = fields.Float(digits=(12, 8))

    @api.multi
    def compute_rub_currency(self):
        """Compute the inverse (RUB per unit) of the stored rate."""
        for record in self:
            if record.rate:
                record.rub_currency_rate = 1. / record.rate
|
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for uvdata object."""
import pytest
import os
import copy
import itertools
import h5py
import numpy as np
from astropy.time import Time
from astropy.coordinates import Angle
from astropy.utils import iers
from pyuvdata import UVData, UVCal
import pyuvdata.utils as uvutils
import pyuvdata.tests as uvtest
from pyuvdata.data import DATA_PATH
# needed for multifile read error test
from pyuvdata.uvdata.tests.test_mwa_corr_fits import filelist as mwa_corr_files
from pyuvdata.uvdata.tests.test_fhd import testfiles as fhd_files
from collections import Counter
@pytest.fixture(scope="function")
def uvdata_props():
    """Expected UVData parameter/property names plus a fresh UVData object."""
    required_parameters = [
        "_data_array",
        "_nsample_array",
        "_flag_array",
        "_Ntimes",
        "_Nbls",
        "_Nblts",
        "_Nfreqs",
        "_Npols",
        "_Nspws",
        "_uvw_array",
        "_time_array",
        "_ant_1_array",
        "_ant_2_array",
        "_lst_array",
        "_baseline_array",
        "_freq_array",
        "_polarization_array",
        "_spw_array",
        "_integration_time",
        "_channel_width",
        "_object_name",
        "_telescope_name",
        "_instrument",
        "_telescope_location",
        "_history",
        "_vis_units",
        "_Nants_data",
        "_Nants_telescope",
        "_antenna_names",
        "_antenna_numbers",
        "_antenna_positions",
        "_phase_type",
    ]
    extra_parameters = [
        "_extra_keywords",
        "_x_orientation",
        "_antenna_diameters",
        "_blt_order",
        "_gst0",
        "_rdate",
        "_earth_omega",
        "_dut1",
        "_timesys",
        "_uvplane_reference_time",
        "_phase_center_ra",
        "_phase_center_dec",
        "_phase_center_epoch",
        "_phase_center_frame",
        "_eq_coeffs",
        "_eq_coeffs_convention",
    ]
    # Property names are exactly the parameter names without the leading
    # underscore, so derive them instead of repeating the lists.
    required_properties = [param[1:] for param in required_parameters]
    extra_properties = [param[1:] for param in extra_parameters]
    other_properties = [
        "telescope_location_lat_lon_alt",
        "telescope_location_lat_lon_alt_degrees",
        "phase_center_ra_degrees",
        "phase_center_dec_degrees",
        "pyuvdata_version_str",
    ]
    uv_object = UVData()

    class DataHolder:
        """Namespace bundling the object with the expected name lists."""

        def __init__(
            self,
            uv_object,
            required_parameters,
            required_properties,
            extra_parameters,
            extra_properties,
            other_properties,
        ):
            self.uv_object = uv_object
            self.required_parameters = required_parameters
            self.required_properties = required_properties
            self.extra_parameters = extra_parameters
            self.extra_properties = extra_properties
            self.other_properties = other_properties

    uvdata_props = DataHolder(
        uv_object,
        required_parameters,
        required_properties,
        extra_parameters,
        extra_properties,
        other_properties,
    )
    # yields the data we need but will continue to the del call after tests
    yield uvdata_props

    # some post-test object cleanup
    del uvdata_props

    return
@pytest.fixture(scope="session")
def hera_uvh5_master():
    """Session-scoped HERA uvh5 dataset, read from disk once per session."""
    testfile = os.path.join(DATA_PATH, "zen.2458661.23480.HH.uvh5")
    uv_object = UVData()
    uv_object.read(testfile)

    yield uv_object

    # cleanup
    del uv_object

    return
@pytest.fixture(scope="function")
def hera_uvh5(hera_uvh5_master):
    """Per-test mutable copy of the session-scoped HERA dataset."""
    uv_object = hera_uvh5_master.copy()

    yield uv_object

    # cleanup
    del uv_object

    return
@pytest.fixture(scope="session")
def paper_uvh5_master():
    """Session-scoped PAPER uvh5 dataset, read from disk once per session."""
    uvh5_file = os.path.join(DATA_PATH, "zen.2456865.60537.xy.uvcRREAA.uvh5")
    uv_object = UVData()
    uv_object.read_uvh5(uvh5_file)

    yield uv_object

    # cleanup
    del uv_object

    return
@pytest.fixture(scope="function")
def paper_uvh5(paper_uvh5_master):
    """Per-test mutable copy of the session-scoped PAPER dataset."""
    uv_object = paper_uvh5_master.copy()

    yield uv_object

    # cleanup
    del uv_object

    return
@pytest.fixture(scope="session")
def bda_test_file_master():
    """Session-scoped simulated BDA-like dataset, read from disk once."""
    testfile = os.path.join(DATA_PATH, "simulated_bda_file.uvh5")
    uv_object = UVData()
    uv_object.read(testfile)

    yield uv_object

    # cleanup
    del uv_object

    return
@pytest.fixture(scope="function")
def bda_test_file(bda_test_file_master):
    """Per-test mutable copy of the session-scoped BDA dataset."""
    uv_object = bda_test_file_master.copy()

    yield uv_object

    # cleanup
    del uv_object

    return
@pytest.fixture(scope="function")
def uvdata_data(casa_uvfits):
    """Bundle the CASA uvfits object together with an independent copy."""
    uv_object = casa_uvfits

    class DataHolder:
        def __init__(self, uv_object):
            self.uv_object = uv_object
            # Independent copy so tests can compare mutated vs. pristine.
            self.uv_object2 = uv_object.copy()

    uvdata_data = DataHolder(uv_object)

    # yields the data we need but will continue to the del call after tests
    yield uvdata_data

    # some post-test object cleanup
    del uvdata_data

    return
@pytest.fixture(scope="function")
def uvdata_baseline():
    """Two bare UVData objects for baseline-number tests: one with a normal
    antenna count and one beyond the 2048-antenna encoding limit.
    """
    uv_object = UVData()
    uv_object.Nants_telescope = 128
    uv_object2 = UVData()
    uv_object2.Nants_telescope = 2049

    class DataHolder:
        def __init__(self, uv_object, uv_object2):
            self.uv_object = uv_object
            self.uv_object2 = uv_object2

    uvdata_baseline = DataHolder(uv_object, uv_object2)

    # yields the data we need but will continue to the del call after tests
    yield uvdata_baseline

    # Post test clean-up
    del uvdata_baseline

    return
@pytest.fixture(scope="session")
def set_uvws_master(hera_uvh5_master):
    """HERA dataset with uvws recomputed from the antenna positions."""
    uv1 = hera_uvh5_master.copy()
    # uvws in the file are wrong. reset them.
    uv1.set_uvws_from_antenna_positions()

    yield uv1

    del uv1

    return
@pytest.fixture
def uv1_2_set_uvws(set_uvws_master):
    """Two independent copies of the corrected-uvw dataset."""
    uv1, uv2 = set_uvws_master.copy(), set_uvws_master.copy()

    yield uv1, uv2

    del uv1, uv2

    return
@pytest.fixture()
def uv_phase_time_split(uv1_2_set_uvws):
    """Phased and raw datasets, each also split into even/odd time halves."""
    uv_phase, uv_raw = uv1_2_set_uvws

    # Put both objects in a canonical baseline-time order, then phase one.
    for uv_obj in (uv_phase, uv_raw):
        uv_obj.reorder_blts(order="time", minor_order="baseline")
    uv_phase.phase(ra=0, dec=0, epoch="J2000", use_ant_pos=True)

    # Interleave the unique times into two disjoint sets.
    times = np.unique(uv_phase.time_array)
    time_set_1, time_set_2 = times[::2], times[1::2]

    uv_phase_1 = uv_phase.select(times=time_set_1, inplace=False)
    uv_phase_2 = uv_phase.select(times=time_set_2, inplace=False)
    uv_raw_1 = uv_raw.select(times=time_set_1, inplace=False)
    uv_raw_2 = uv_raw.select(times=time_set_2, inplace=False)

    yield uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw

    del uv_phase_1, uv_phase_2, uv_raw_1, uv_raw_2, uv_phase, uv_raw
def test_parameter_iter(uvdata_props):
    """Test expected parameters."""
    all_params = [prop for prop in uvdata_props.uv_object]
    for expected in uvdata_props.required_parameters + uvdata_props.extra_parameters:
        assert expected in all_params, (
            "expected attribute " + expected + " not returned in object iterator"
        )
def test_required_parameter_iter(uvdata_props):
    """Test expected required parameters."""
    # at first it's a metadata_only object, so need to modify required_parameters
    required = [prop for prop in uvdata_props.uv_object.required()]
    expected_required = copy.copy(uvdata_props.required_parameters)
    for data_param in ("_data_array", "_nsample_array", "_flag_array"):
        expected_required.remove(data_param)
    for expected in expected_required:
        assert expected in required, (
            "expected attribute " + expected + " not returned in required iterator"
        )

    # once the data-like arrays are set they become required as well
    uvdata_props.uv_object.data_array = 1
    uvdata_props.uv_object.nsample_array = 1
    uvdata_props.uv_object.flag_array = 1
    required = [prop for prop in uvdata_props.uv_object.required()]
    for expected in uvdata_props.required_parameters:
        assert expected in required, (
            "expected attribute " + expected + " not returned in required iterator"
        )
def test_extra_parameter_iter(uvdata_props):
    """Test expected optional parameters."""
    extra = list(uvdata_props.uv_object.extra())
    for expected in uvdata_props.extra_parameters:
        assert expected in extra, (
            "expected attribute " + expected + " not returned in extra iterator"
        )
def test_unexpected_parameters(uvdata_props):
    """Test for extra parameters."""
    expected_parameters = (
        uvdata_props.required_parameters + uvdata_props.extra_parameters
    )
    # Underscore-prefixed instance attributes are UVParameter slots.
    attributes = [name for name in uvdata_props.uv_object.__dict__ if name[0] == "_"]
    for attr in attributes:
        assert attr in expected_parameters, (
            "unexpected parameter " + attr + " found in UVData"
        )
def test_unexpected_attributes(uvdata_props):
    """No undeclared public attributes may exist on the object."""
    expected = (
        uvdata_props.required_properties
        + uvdata_props.extra_properties
        + uvdata_props.other_properties
    )
    public = [
        name for name in uvdata_props.uv_object.__dict__ if not name.startswith("_")
    ]
    for name in public:
        assert name in expected, "unexpected attribute " + name + " found in UVData"
def test_properties(uvdata_props):
    """Test that properties can be get and set properly.

    Each public property is paired positionally with its backing UVParameter;
    setting the property must update the parameter's value.
    """
    prop_names = uvdata_props.required_properties + uvdata_props.extra_properties
    param_names = uvdata_props.required_parameters + uvdata_props.extra_parameters
    for prop_name, param_name in zip(prop_names, param_names):
        rand_num = np.random.rand()
        setattr(uvdata_props.uv_object, prop_name, rand_num)
        this_param = getattr(uvdata_props.uv_object, param_name)
        try:
            assert rand_num == this_param.value
        except AssertionError:
            print(
                "setting {prop_name} to a random number failed".format(
                    prop_name=prop_name
                )
            )
            raise
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_metadata_only_property(uvdata_data):
    """metadata_only flips to True only after all data-like arrays are unset."""
    uv = uvdata_data.uv_object
    uv.data_array = None
    assert uv.metadata_only is False
    # a partially-unset object is inconsistent, so check() must fail
    pytest.raises(ValueError, uv.check)
    uv.flag_array = None
    assert uv.metadata_only is False
    pytest.raises(ValueError, uv.check)
    uv.nsample_array = None
    assert uv.metadata_only is True
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_equality(uvdata_data):
    """Basic equality test: an object must compare equal to itself."""
    uv = uvdata_data.uv_object
    assert uv == uv
@pytest.mark.filterwarnings("ignore:Telescope location derived from obs")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_check(uvdata_data):
    """Test simple check function.

    Starts from a healthy object, then perturbs each counting attribute
    (Nants_data, Nbls, Ntimes) in turn and verifies check() raises a
    ValueError with the expected message. Also exercises the strict
    uvw/antenna-position consistency check, an all-autocorrelation
    dataset, and the auto/cross uvw_array sanity checks.
    """
    assert uvdata_data.uv_object.check()
    # Check variety of special cases
    uvdata_data.uv_object.Nants_data += 1
    with pytest.raises(
        ValueError,
        match=(
            "Nants_data must be equal to the number of unique values in "
            "ant_1_array and ant_2_array"
        ),
    ):
        uvdata_data.uv_object.check()
    uvdata_data.uv_object.Nants_data -= 1
    uvdata_data.uv_object.Nbls += 1
    with pytest.raises(
        ValueError,
        match=(
            "Nbls must be equal to the number of unique baselines in the data_array"
        ),
    ):
        uvdata_data.uv_object.check()
    uvdata_data.uv_object.Nbls -= 1
    uvdata_data.uv_object.Ntimes += 1
    with pytest.raises(
        ValueError,
        match=("Ntimes must be equal to the number of unique times in the time_array"),
    ):
        uvdata_data.uv_object.check()
    uvdata_data.uv_object.Ntimes -= 1
    # the strict check compares uvw_array against antenna positions
    with pytest.raises(
        ValueError,
        match=(
            "The uvw_array does not match the expected values given the antenna "
            "positions."
        ),
    ):
        uvdata_data.uv_object.check(strict_uvw_antpos_check=True)
    # Check case where all data is autocorrelations
    # Currently only test files that have autos are fhd files
    testdir = os.path.join(DATA_PATH, "fhd_vis_data/")
    file_list = [
        testdir + "1061316296_flags.sav",
        testdir + "1061316296_vis_XX.sav",
        testdir + "1061316296_params.sav",
        testdir + "1061316296_layout.sav",
        testdir + "1061316296_settings.txt",
    ]
    uvdata_data.uv_object.read_fhd(file_list)
    # keep only the baselines where ant_1 == ant_2 (the autos)
    uvdata_data.uv_object.select(
        blt_inds=np.where(
            uvdata_data.uv_object.ant_1_array == uvdata_data.uv_object.ant_2_array
        )[0]
    )
    assert uvdata_data.uv_object.check()
    # test auto and cross corr uvw_array
    uvd = UVData()
    uvd.read_uvh5(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5"))
    autos = np.isclose(uvd.ant_1_array - uvd.ant_2_array, 0.0)
    auto_inds = np.where(autos)[0]
    cross_inds = np.where(~autos)[0]
    # make auto have non-zero uvw coords, assert ValueError
    uvd.uvw_array[auto_inds[0], 0] = 0.1
    with pytest.raises(
        ValueError,
        match=("Some auto-correlations have non-zero uvw_array coordinates."),
    ):
        uvd.check()
    # make cross have |uvw| zero, assert ValueError
    # (re-read the file first to undo the auto perturbation above)
    uvd.read_uvh5(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5"))
    uvd.uvw_array[cross_inds[0]][:] = 0.0
    with pytest.raises(
        ValueError,
        match=("Some cross-correlations have near-zero uvw_array magnitudes."),
    ):
        uvd.check()
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_nants_data_telescope_larger(uvdata_data):
    """Nants_telescope may be strictly greater than Nants_data."""
    uv = uvdata_data.uv_object
    uv.Nants_telescope += 1
    # pad the per-antenna metadata for the "new antenna" so check() passes
    uv.antenna_names = np.concatenate((uv.antenna_names, ["dummy_ant"]))
    uv.antenna_numbers = np.concatenate((uv.antenna_numbers, [20]))
    uv.antenna_positions = np.concatenate(
        (uv.antenna_positions, np.zeros((1, 3))), axis=0
    )
    assert uv.check()
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_ant1_array_not_in_antnums(uvdata_data):
    """check() errors when ant_1_array contains antennas not in antenna_numbers."""
    # remove the first antenna from the telescope metadata by hand
    uvobj = uvdata_data.uv_object
    uvobj.antenna_names = uvobj.antenna_names[1:]
    uvobj.antenna_numbers = uvobj.antenna_numbers[1:]
    uvobj.antenna_positions = uvobj.antenna_positions[1:, :]
    uvobj.Nants_telescope = uvobj.antenna_numbers.size
    with pytest.raises(ValueError) as cm:
        uvobj.check()
    assert str(cm.value).startswith(
        "All antennas in ant_1_array must be in antenna_numbers"
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_ant2_array_not_in_antnums(uvdata_data):
    """check() errors when ant_2_array contains antennas not in antenna_numbers."""
    # remove the last antenna from the telescope metadata by hand
    uv = uvdata_data.uv_object
    uv.antenna_names = uv.antenna_names[:-1]
    uv.antenna_numbers = uv.antenna_numbers[:-1]
    uv.antenna_positions = uv.antenna_positions[:-1]
    uv.Nants_telescope = uv.antenna_numbers.size
    with pytest.raises(ValueError) as err:
        uv.check()
    assert str(err.value).startswith(
        "All antennas in ant_2_array must be in antenna_numbers"
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_converttofiletype(uvdata_data):
    """Round-trip through a filetype-specific object; bad filetypes error."""
    uv = uvdata_data.uv_object
    fhd_obj = uv._convert_to_filetype("fhd")
    uv._convert_from_filetype(fhd_obj)
    assert uv == uvdata_data.uv_object2
    with pytest.raises(ValueError) as err:
        uv._convert_to_filetype("foo")
    assert str(err.value).startswith("filetype must be uvfits, miriad, fhd, or uvh5")
def test_baseline_to_antnums(uvdata_baseline):
    """Test baseline to antnum conversion for 256 & larger conventions."""
    assert uvdata_baseline.uv_object.baseline_to_antnums(67585) == (0, 0)
    # an object with too many antennas cannot do the conversion
    with pytest.raises(Exception) as cm:
        uvdata_baseline.uv_object2.baseline_to_antnums(67585)
    assert str(cm.value).startswith(
        "error Nants={Nants}>2048"
        " not supported".format(Nants=uvdata_baseline.uv_object2.Nants_telescope)
    )
    # round-trip some pairs through antnums_to_baseline/baseline_to_antnums
    for pair in [(10, 20), (280, 310)]:
        if max(pair) < 255:
            # small antenna numbers also work with the 256 convention
            bl = uvdata_baseline.uv_object.antnums_to_baseline(
                pair[0], pair[1], attempt256=True
            )
            assert pair == uvdata_baseline.uv_object.baseline_to_antnums(bl)
        bl = uvdata_baseline.uv_object.antnums_to_baseline(
            pair[0], pair[1], attempt256=False
        )
        assert pair == uvdata_baseline.uv_object.baseline_to_antnums(bl)
def test_baseline_to_antnums_vectorized(uvdata_baseline):
    """Test vectorized antnum to baseline conversion."""
    ants_1 = [10, 280]
    ants_2 = [20, 310]
    baselines = uvdata_baseline.uv_object.antnums_to_baseline(ants_1, ants_2)
    assert np.array_equal(baselines, [88085, 641335])
    # converting back (from a plain list) must recover the inputs
    back_1, back_2 = uvdata_baseline.uv_object.baseline_to_antnums(
        baselines.tolist()
    )
    assert np.array_equal(ants_1, back_1)
    assert np.array_equal(ants_2, back_2)
def test_antnums_to_baselines(uvdata_baseline):
    """Test antnums to baseline conversion for 256 & larger conventions."""
    assert uvdata_baseline.uv_object.antnums_to_baseline(0, 0) == 67585
    assert uvdata_baseline.uv_object.antnums_to_baseline(257, 256) == 594177
    assert uvdata_baseline.uv_object.baseline_to_antnums(594177) == (257, 256)
    # Check attempt256
    assert uvdata_baseline.uv_object.antnums_to_baseline(0, 0, attempt256=True) == 257
    assert uvdata_baseline.uv_object.antnums_to_baseline(257, 256) == 594177
    # attempt256 with antennas above 256 should warn but still convert
    uvtest.checkWarnings(
        uvdata_baseline.uv_object.antnums_to_baseline,
        [257, 256],
        {"attempt256": True},
        message="found > 256 antennas",
    )
    # uv_object2 has too many antennas for the conversion to be supported
    pytest.raises(Exception, uvdata_baseline.uv_object2.antnums_to_baseline, 0, 0)
    # check a len-1 array returns as an array
    ant1 = np.array([1])
    ant2 = np.array([2])
    assert isinstance(
        uvdata_baseline.uv_object.antnums_to_baseline(ant1, ant2), np.ndarray
    )
def test_known_telescopes():
    """Test known_telescopes method returns expected results."""
    uv_object = UVData()
    expected_telescopes = ["PAPER", "HERA", "MWA"]
    # compare order-insensitively via sorted copies; [].sort() works in
    # place and returns None, so it cannot be used inline here
    assert sorted(expected_telescopes) == sorted(uv_object.known_telescopes())
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_hera_diameters(paper_uvh5):
    """set_telescope_params fills in known HERA antenna diameters with a warning."""
    uv_in = paper_uvh5
    # relabel the PAPER data as HERA so the known-telescope defaults apply
    uv_in.telescope_name = "HERA"
    uvtest.checkWarnings(
        uv_in.set_telescope_params,
        message="antenna_diameters " "is not set. Using known values for HERA.",
    )
    assert uv_in.telescope_name == "HERA"
    assert uv_in.antenna_diameters is not None
    uv_in.check()
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_generic_read():
    """The generic read method rejects conflicting selections and bad files."""
    uv_in = UVData()
    uvfits_file = os.path.join(DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits")
    uv_in.read(uvfits_file, read_data=False)
    unique_times = np.unique(uv_in.time_array)
    # times and time_range are mutually exclusive
    with pytest.raises(ValueError):
        uv_in.read(
            uvfits_file,
            times=unique_times[0:2],
            time_range=[unique_times[0], unique_times[1]],
        )
    # antenna_nums and antenna_names are mutually exclusive
    with pytest.raises(ValueError):
        uv_in.read(
            uvfits_file,
            antenna_nums=uv_in.antenna_numbers[0],
            antenna_names=uv_in.antenna_names[1],
        )
    # an unrecognizable file argument raises
    with pytest.raises(ValueError):
        uv_in.read("foo")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "phase_kwargs",
    [
        {"ra": 0.0, "dec": 0.0, "epoch": "J2000"},
        {"ra": Angle("5d").rad, "dec": Angle("30d").rad, "phase_frame": "gcrs"},
        {
            "ra": Angle("180d").rad,
            "dec": Angle("90d"),
            "epoch": Time("2010-01-01T00:00:00", format="isot", scale="utc"),
        },
    ],
)
def test_phase_unphase_hera(uv1_2_set_uvws, phase_kwargs):
    """
    Read in drift data, phase to an RA/DEC, unphase and check for object equality.
    """
    uv_phased, uv_raw = uv1_2_set_uvws
    uv_phased.phase(**phase_kwargs)
    uv_phased.unphase_to_drift()
    # a phase/unphase cycle must recover the raw drift object exactly
    assert uv_raw == uv_phased
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_unphase_hera_one_bl(uv1_2_set_uvws):
    """Phase + unphase must round-trip on a single-baseline object."""
    _, uv_raw = uv1_2_set_uvws
    one_bl_raw = uv_raw.select(blt_inds=[0], inplace=False)
    one_bl_phased = one_bl_raw.copy()
    one_bl_phased.phase(Angle("23h").rad, Angle("15d").rad)
    one_bl_phased.unphase_to_drift()
    assert one_bl_raw == one_bl_phased
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_unphase_hera_antpos(uv1_2_set_uvws):
    """Phasing via antenna positions must agree with phasing via uvws.

    Recomputes the uvw_array from the antenna ENU positions, then compares
    phasing with use_ant_pos=True against direct phasing of the corrected
    object, and finally checks that unphasing with antenna positions
    recovers the corrected raw object.
    """
    uv_phase, uv_raw = uv1_2_set_uvws
    # check that they match if you phase & unphase using antenna locations
    # first replace the uvws with the right values
    antenna_enu = uvutils.ENU_from_ECEF(
        (uv_raw.antenna_positions + uv_raw.telescope_location),
        *uv_raw.telescope_location_lat_lon_alt,
    )
    uvw_calc = np.zeros_like(uv_raw.uvw_array)
    unique_times, unique_inds = np.unique(uv_raw.time_array, return_index=True)
    for ind, jd in enumerate(unique_times):
        inds = np.where(uv_raw.time_array == jd)[0]
        for bl_ind in inds:
            # map each baseline's antenna numbers to antenna-table indices
            wh_ant1 = np.where(uv_raw.antenna_numbers == uv_raw.ant_1_array[bl_ind])
            ant1_index = wh_ant1[0][0]
            wh_ant2 = np.where(uv_raw.antenna_numbers == uv_raw.ant_2_array[bl_ind])
            ant2_index = wh_ant2[0][0]
            uvw_calc[bl_ind, :] = (
                antenna_enu[ant2_index, :] - antenna_enu[ant1_index, :]
            )
    uv_raw_new = uv_raw.copy()
    uv_raw_new.uvw_array = uvw_calc
    uv_phase.phase(0.0, 0.0, epoch="J2000", use_ant_pos=True)
    uv_phase2 = uv_raw_new.copy()
    uv_phase2.phase(0.0, 0.0, epoch="J2000")
    # The uvw's only agree to ~1mm. should they be better?
    assert np.allclose(uv_phase2.uvw_array, uv_phase.uvw_array, atol=1e-3)
    # the data array are just multiplied by the w's for phasing, so a difference
    # at the 1e-3 level makes the data array different at that level too.
    # -> change the tolerance on data_array for this test
    uv_phase2._data_array.tols = (0, 1e-3 * np.amax(np.abs(uv_phase2.data_array)))
    assert uv_phase2 == uv_phase
    # check that phase + unphase gets back to raw using antpos
    uv_phase.unphase_to_drift(use_ant_pos=True)
    assert uv_raw_new == uv_phase
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_hera_zenith_timestamp_minimal_changes(uv1_2_set_uvws):
    """Phasing a single timestamp to its own zenith barely changes the uvws."""
    uv_phase, uv_raw = uv1_2_set_uvws
    # check that phasing to zenith with one timestamp has small changes
    # (it won't be identical because of precession/nutation changing the
    # coordinate axes)
    # use gcrs rather than icrs to reduce differences (don't include aberration)
    uv_raw_small = uv_raw.select(times=uv_raw.time_array[0], inplace=False)
    uv_phase_simple_small = uv_raw_small.copy()
    uv_phase_simple_small.phase_to_time(
        time=Time(uv_raw.time_array[0], format="jd"), phase_frame="gcrs"
    )
    # it's unclear to me how close this should be...
    assert np.allclose(
        uv_phase_simple_small.uvw_array, uv_raw_small.uvw_array, atol=1e-1
    )
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_to_time_jd_input(uv1_2_set_uvws):
    """phase_to_time accepts a bare JD float and still round-trips."""
    uv_phased, uv_raw = uv1_2_set_uvws
    uv_phased.phase_to_time(uv_raw.time_array[0])
    uv_phased.unphase_to_drift()
    assert uv_phased == uv_raw
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_to_time_error(uv1_2_set_uvws):
    """phase_to_time raises TypeError when not given an astropy Time."""
    uv_phased, _ = uv1_2_set_uvws
    with pytest.raises(TypeError) as err:
        uv_phased.phase_to_time("foo")
    assert str(err.value).startswith("time must be an astropy.time.Time object")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_unphase_drift_data_error(uv1_2_set_uvws):
    """unphase_to_drift raises ValueError when the data is already drift."""
    uv_phased, _ = uv1_2_set_uvws
    with pytest.raises(ValueError) as err:
        uv_phased.unphase_to_drift()
    assert str(err.value).startswith("The data is already drift scanning;")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "phase_func,phase_kwargs,err_msg",
    [
        (
            "unphase_to_drift",
            {},
            "The phasing type of the data is unknown. Set the phase_type",
        ),
        (
            "phase",
            {"ra": 0, "dec": 0, "epoch": "J2000", "allow_rephase": False},
            "The phasing type of the data is unknown. Set the phase_type",
        ),
        (
            "phase_to_time",
            {"time": 0, "allow_rephase": False},
            "The phasing type of the data is unknown. Set the phase_type",
        ),
    ],
)
def test_unknown_phase_unphase_hera_errors(
    uv1_2_set_uvws, phase_func, phase_kwargs, err_msg
):
    """All phasing operations must error on an unknown phase type."""
    uv_phase, uv_raw = uv1_2_set_uvws
    # Set phase type to unknown on some tests, ignore on others.
    uv_phase._set_unknown_phase_type()
    # if this is phase_to_time, use this index set in the dictionary and
    # assign the value of the time_array associated with that index
    # this is a little hacky, but we cannot access uv_phase.time_array in the
    # parametrize
    if phase_func == "phase_to_time":
        phase_kwargs["time"] = uv_phase.time_array[phase_kwargs["time"]]
    with pytest.raises(ValueError) as cm:
        getattr(uv_phase, phase_func)(**phase_kwargs)
    assert str(cm.value).startswith(err_msg)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "phase_func,phase_kwargs,err_msg",
    [
        (
            "phase",
            {"ra": 0, "dec": 0, "epoch": "J2000", "allow_rephase": False},
            "The data is already phased;",
        ),
        (
            "phase_to_time",
            {"time": 0, "allow_rephase": False},
            "The data is already phased;",
        ),
    ],
)
def test_phase_rephase_hera_errors(uv1_2_set_uvws, phase_func, phase_kwargs, err_msg):
    """Re-phasing an already-phased object must error when rephasing is disallowed."""
    uv_phase, uv_raw = uv1_2_set_uvws
    uv_phase.phase(0.0, 0.0, epoch="J2000")
    # if this is phase_to_time, use this index set in the dictionary and
    # assign the value of the time_array associated with that index
    # this is a little hacky, but we cannot access uv_phase.time_array in the
    # parametrize
    if phase_func == "phase_to_time":
        phase_kwargs["time"] = uv_phase.time_array[int(phase_kwargs["time"])]
    with pytest.raises(ValueError) as cm:
        getattr(uv_phase, phase_func)(**phase_kwargs)
    assert str(cm.value).startswith(err_msg)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_unphase_hera_bad_frame(uv1_2_set_uvws):
    """Phasing to an unsupported frame raises ValueError."""
    uv_phased, _ = uv1_2_set_uvws
    with pytest.raises(ValueError) as err:
        uv_phased.phase(0.0, 0.0, epoch="J2000", phase_frame="cirs")
    assert str(err.value).startswith("phase_frame can only be set to icrs or gcrs.")
def test_phasing():
    """Use MWA files phased to 2 different places to test phasing.

    The two input files contain the same observation phased to different
    centers. Unphasing both (with and without antenna positions) and
    rephasing between centers should agree within empirically determined
    tolerances documented in the phasing memo (docs/references).
    """
    file1 = os.path.join(DATA_PATH, "1133866760.uvfits")
    file2 = os.path.join(DATA_PATH, "1133866760_rephase.uvfits")
    uvd1 = UVData()
    uvd2 = UVData()
    uvd1.read_uvfits(file1)
    uvd2.read_uvfits(file2)
    # unphase both datasets, with and without antenna positions
    uvd1_drift = uvd1.copy()
    uvd1_drift.unphase_to_drift(phase_frame="gcrs")
    uvd1_drift_antpos = uvd1.copy()
    uvd1_drift_antpos.unphase_to_drift(phase_frame="gcrs", use_ant_pos=True)
    uvd2_drift = uvd2.copy()
    uvd2_drift.unphase_to_drift(phase_frame="gcrs")
    uvd2_drift_antpos = uvd2.copy()
    uvd2_drift_antpos.unphase_to_drift(phase_frame="gcrs", use_ant_pos=True)
    # the tolerances here are empirical -- based on what was seen in the
    # external phasing test. See the phasing memo in docs/references for
    # details.
    assert np.allclose(uvd1_drift.uvw_array, uvd2_drift.uvw_array, atol=2e-2)
    assert np.allclose(uvd1_drift_antpos.uvw_array, uvd2_drift_antpos.uvw_array)
    # rephase the second dataset to the first dataset's phase center
    uvd2_rephase = uvd2.copy()
    uvd2_rephase.phase(
        uvd1.phase_center_ra,
        uvd1.phase_center_dec,
        uvd1.phase_center_epoch,
        orig_phase_frame="gcrs",
        phase_frame="gcrs",
    )
    uvd2_rephase_antpos = uvd2.copy()
    uvd2_rephase_antpos.phase(
        uvd1.phase_center_ra,
        uvd1.phase_center_dec,
        uvd1.phase_center_epoch,
        orig_phase_frame="gcrs",
        phase_frame="gcrs",
        use_ant_pos=True,
    )
    # the tolerances here are empirical -- based on what was seen in the
    # external phasing test. See the phasing memo in docs/references for
    # details.
    assert np.allclose(uvd1.uvw_array, uvd2_rephase.uvw_array, atol=2e-2)
    assert np.allclose(uvd1.uvw_array, uvd2_rephase_antpos.uvw_array, atol=5e-3)
    # rephase the drift objects to the original pointing and verify that they
    # match
    uvd1_drift.phase(
        uvd1.phase_center_ra,
        uvd1.phase_center_dec,
        uvd1.phase_center_epoch,
        phase_frame="gcrs",
    )
    uvd1_drift_antpos.phase(
        uvd1.phase_center_ra,
        uvd1.phase_center_dec,
        uvd1.phase_center_epoch,
        phase_frame="gcrs",
        use_ant_pos=True,
    )
    # the tolerances here are empirical -- caused by one unphase/phase cycle.
    # the antpos-based phasing differences are based on what was seen in the
    # external phasing test. See the phasing memo in docs/references for
    # details.
    assert np.allclose(uvd1.uvw_array, uvd1_drift.uvw_array, atol=1e-4)
    assert np.allclose(uvd1.uvw_array, uvd1_drift_antpos.uvw_array, atol=5e-3)
    uvd2_drift.phase(
        uvd2.phase_center_ra,
        uvd2.phase_center_dec,
        uvd2.phase_center_epoch,
        phase_frame="gcrs",
    )
    uvd2_drift_antpos.phase(
        uvd2.phase_center_ra,
        uvd2.phase_center_dec,
        uvd2.phase_center_epoch,
        phase_frame="gcrs",
        use_ant_pos=True,
    )
    # the tolerances here are empirical -- caused by one unphase/phase cycle.
    # the antpos-based phasing differences are based on what was seen in the
    # external phasing test. See the phasing memo in docs/references for
    # details.
    assert np.allclose(uvd2.uvw_array, uvd2_drift.uvw_array, atol=1e-4)
    assert np.allclose(uvd2.uvw_array, uvd2_drift_antpos.uvw_array, atol=2e-2)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_set_phase_unknown(casa_uvfits):
    """_set_unknown_phase_type relaxes the phase-center requirements."""
    uv = casa_uvfits
    uv._set_unknown_phase_type()
    assert uv.phase_type == "unknown"
    # the phase-center parameters stop being required for an unknown type
    for param in ("_phase_center_epoch", "_phase_center_ra", "_phase_center_dec"):
        assert not getattr(uv, param).required
    assert uv.check()
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_blts(paper_uvh5):
    """select(blt_inds=...) keeps exactly the requested baseline-times.

    Checks data downselection, history updating, higher-dimension index
    arrays, metadata-only objects, inplace selection and out-of-bounds
    index errors.
    """
    uv_object = paper_uvh5
    old_history = uv_object.history
    # fmt: off
    blt_inds = np.array([172, 182, 132, 227, 144, 44, 16, 104, 385, 134, 326, 140, 116,
                         218, 178, 391, 111, 276, 274, 308, 38, 64, 317, 76, 239, 246,
                         34, 39, 83, 184, 208, 60, 374, 295, 118, 337, 261, 21, 375,
                         396, 355, 187, 95, 122, 186, 113, 260, 264, 156, 13, 228, 291,
                         302, 72, 137, 216, 299, 341, 207, 256, 223, 250, 268, 147, 73,
                         32, 142, 383, 221, 203, 258, 286, 324, 265, 170, 236, 8, 275,
                         304, 117, 29, 167, 15, 388, 171, 82, 322, 248, 160, 85, 66,
                         46, 272, 328, 323, 152, 200, 119, 359, 23, 363, 56, 219, 257,
                         11, 307, 336, 289, 136, 98, 37, 163, 158, 80, 125, 40, 298,
                         75, 320, 74, 57, 346, 121, 129, 332, 238, 93, 18, 330, 339,
                         381, 234, 176, 22, 379, 199, 266, 100, 90, 292, 205, 58, 222,
                         350, 109, 273, 191, 368, 88, 101, 65, 155, 2, 296, 306, 398,
                         369, 378, 254, 67, 249, 102, 348, 392, 20, 28, 169, 262, 269,
                         287, 86, 300, 143, 177, 42, 290, 284, 123, 189, 175, 97, 340,
                         242, 342, 331, 282, 235, 344, 63, 115, 78, 30, 226, 157, 133,
                         71, 35, 212, 333])
    # fmt: on
    # select() sorts indices internally, so sort here for the comparison
    selected_data = uv_object.data_array[np.sort(blt_inds), :, :, :]
    uv_object2 = uv_object.copy()
    uv_object2.select(blt_inds=blt_inds)
    assert len(blt_inds) == uv_object2.Nblts
    # verify that histories are different
    assert not uvutils._check_histories(old_history, uv_object2.history)
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific baseline-times using pyuvdata.",
        uv_object2.history,
    )
    assert np.all(selected_data == uv_object2.data_array)
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(blt_inds=blt_inds[np.newaxis, :])
    assert len(blt_inds) == uv_object2.Nblts
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific baseline-times using pyuvdata.",
        uv_object2.history,
    )
    assert np.all(selected_data == uv_object2.data_array)
    # check that just doing the metadata works properly
    uv_object3 = uv_object.copy()
    uv_object3.data_array = None
    uv_object3.flag_array = None
    uv_object3.nsample_array = None
    assert uv_object3.metadata_only is True
    uv_object4 = uv_object3.select(blt_inds=blt_inds, inplace=False)
    # metadata parameters must match the full selection; data-like ones stay None
    for param in uv_object4:
        param_name = getattr(uv_object4, param).name
        if param_name not in ["data_array", "flag_array", "nsample_array"]:
            assert getattr(uv_object4, param) == getattr(uv_object2, param)
        else:
            assert getattr(uv_object4, param_name) is None
    # also check with inplace=True
    uv_object3.select(blt_inds=blt_inds)
    assert uv_object3 == uv_object4
    # check for errors associated with out of bounds indices
    pytest.raises(ValueError, uv_object.select, blt_inds=np.arange(-10, -5))
    pytest.raises(
        ValueError,
        uv_object.select,
        blt_inds=np.arange(uv_object.Nblts + 1, uv_object.Nblts + 10),
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_antennas(casa_uvfits):
    """select by antenna_nums/antenna_names keeps exactly the requested antennas.

    Checks data downselection, history updating, higher-dimension index
    arrays, antenna_names-based selection, metadata downselection with
    keep_all_metadata=False, and the error cases (missing antennas, bad
    names, conflicting arguments).
    """
    uv_object = casa_uvfits
    old_history = uv_object.history
    unique_ants = np.unique(
        uv_object.ant_1_array.tolist() + uv_object.ant_2_array.tolist()
    )
    ants_to_keep = np.array([0, 19, 11, 24, 3, 23, 1, 20, 21])
    # a baseline-time survives only if both its antennas are kept
    blts_select = [
        (a1 in ants_to_keep) & (a2 in ants_to_keep)
        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)
    ]
    Nblts_selected = np.sum(blts_select)
    uv_object2 = uv_object.copy()
    uv_object2.select(antenna_nums=ants_to_keep)
    assert len(ants_to_keep) == uv_object2.Nants_data
    assert Nblts_selected == uv_object2.Nblts
    for ant in ants_to_keep:
        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in ants_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific antennas using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(antenna_nums=ants_to_keep[np.newaxis, :])
    assert len(ants_to_keep) == uv_object2.Nants_data
    assert Nblts_selected == uv_object2.Nblts
    for ant in ants_to_keep:
        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in ants_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific antennas using pyuvdata.",
        uv_object2.history,
    )
    # now test using antenna_names to specify antennas to keep
    uv_object3 = uv_object.copy()
    ants_to_keep = np.array(sorted(ants_to_keep))
    ant_names = []
    for a in ants_to_keep:
        ind = np.where(uv_object3.antenna_numbers == a)[0][0]
        ant_names.append(uv_object3.antenna_names[ind])
    uv_object3.select(antenna_names=ant_names)
    assert uv_object2 == uv_object3
    # check that it also works with higher dimension array
    uv_object3 = uv_object.copy()
    ants_to_keep = np.array(sorted(ants_to_keep))
    ant_names = []
    for a in ants_to_keep:
        ind = np.where(uv_object3.antenna_numbers == a)[0][0]
        ant_names.append(uv_object3.antenna_names[ind])
    uv_object3.select(antenna_names=[ant_names])
    assert uv_object2 == uv_object3
    # test removing metadata associated with antennas that are no longer present
    # also add (different) antenna_diameters to test downselection
    # NOTE: dtype=float (the deprecated np.float alias was removed in numpy 1.24)
    uv_object.antenna_diameters = 1.0 * np.ones(
        (uv_object.Nants_telescope,), dtype=float
    )
    # give each antenna a distinct diameter so downselection is meaningful
    for i in range(uv_object.Nants_telescope):
        uv_object.antenna_diameters[i] += i
    uv_object4 = uv_object.copy()
    uv_object4.select(antenna_nums=ants_to_keep, keep_all_metadata=False)
    assert uv_object4.Nants_telescope == 9
    assert set(uv_object4.antenna_numbers) == set(ants_to_keep)
    for a in ants_to_keep:
        idx1 = uv_object.antenna_numbers.tolist().index(a)
        idx2 = uv_object4.antenna_numbers.tolist().index(a)
        assert uv_object.antenna_names[idx1] == uv_object4.antenna_names[idx2]
        assert np.allclose(
            uv_object.antenna_positions[idx1, :], uv_object4.antenna_positions[idx2, :]
        )
        # compare diameters with == (the original `assert a, b` form used the
        # second value as an assertion message and never compared anything)
        assert uv_object.antenna_diameters[idx1] == uv_object4.antenna_diameters[idx2]
    # remove antenna_diameters from object
    uv_object.antenna_diameters = None
    # check for errors associated with antennas not included in data, bad names
    # or providing numbers and names
    pytest.raises(
        ValueError, uv_object.select, antenna_nums=np.max(unique_ants) + np.arange(1, 3)
    )
    pytest.raises(ValueError, uv_object.select, antenna_names="test1")
    pytest.raises(
        ValueError, uv_object.select, antenna_nums=ants_to_keep, antenna_names=ant_names
    )
def sort_bl(p):
    """Sort a tuple that starts with a pair of antennas, and may have stuff after."""
    if p[0] > p[1]:
        # swap the leading antenna pair, keep any trailing elements
        return (p[1], p[0]) + p[2:]
    return p
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_bls(casa_uvfits):
    """select(bls=...) keeps exactly the requested baselines.

    Exercises antenna-pair tuples, baseline numbers, 3-tuples with a
    polarization, numpy-integer inputs, a single bare pair, and the full
    set of input-validation errors.
    """
    uv_object = casa_uvfits
    old_history = uv_object.history
    first_ants = [6, 2, 7, 2, 21, 27, 8]
    second_ants = [0, 20, 8, 1, 2, 3, 22]
    new_unique_ants = np.unique(first_ants + second_ants)
    ant_pairs_to_keep = list(zip(first_ants, second_ants))
    # canonicalize pair ordering so membership tests are order-insensitive
    sorted_pairs_to_keep = [sort_bl(p) for p in ant_pairs_to_keep]
    blts_select = [
        sort_bl((a1, a2)) in sorted_pairs_to_keep
        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)
    ]
    Nblts_selected = np.sum(blts_select)
    uv_object2 = uv_object.copy()
    uv_object2.select(bls=ant_pairs_to_keep)
    sorted_pairs_object2 = [
        sort_bl(p) for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)
    ]
    assert len(new_unique_ants) == uv_object2.Nants_data
    assert Nblts_selected == uv_object2.Nblts
    for ant in new_unique_ants:
        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in new_unique_ants
    for pair in sorted_pairs_to_keep:
        assert pair in sorted_pairs_object2
    for pair in sorted_pairs_object2:
        assert pair in sorted_pairs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific baselines using pyuvdata.",
        uv_object2.history,
    )
    # check using baseline number parameter
    uv_object3 = uv_object.copy()
    bls_nums_to_keep = [
        uv_object.antnums_to_baseline(ant1, ant2) for ant1, ant2 in sorted_pairs_to_keep
    ]
    uv_object3.select(bls=bls_nums_to_keep)
    sorted_pairs_object3 = [
        sort_bl(p) for p in zip(uv_object3.ant_1_array, uv_object3.ant_2_array)
    ]
    assert len(new_unique_ants) == uv_object3.Nants_data
    assert Nblts_selected == uv_object3.Nblts
    for ant in new_unique_ants:
        assert ant in uv_object3.ant_1_array or ant in uv_object3.ant_2_array
    for ant in np.unique(
        uv_object3.ant_1_array.tolist() + uv_object3.ant_2_array.tolist()
    ):
        assert ant in new_unique_ants
    for pair in sorted_pairs_to_keep:
        assert pair in sorted_pairs_object3
    for pair in sorted_pairs_object3:
        assert pair in sorted_pairs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific baselines using pyuvdata.",
        uv_object3.history,
    )
    # check select with polarizations
    first_ants = [6, 2, 7, 2, 21, 27, 8]
    second_ants = [0, 20, 8, 1, 2, 3, 22]
    pols = ["RR", "RR", "RR", "RR", "RR", "RR", "RR"]
    new_unique_ants = np.unique(first_ants + second_ants)
    bls_to_keep = list(zip(first_ants, second_ants, pols))
    sorted_bls_to_keep = [sort_bl(p) for p in bls_to_keep]
    blts_select = [
        sort_bl((a1, a2, "RR")) in sorted_bls_to_keep
        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)
    ]
    Nblts_selected = np.sum(blts_select)
    uv_object2 = uv_object.copy()
    uv_object2.select(bls=bls_to_keep)
    sorted_pairs_object2 = [
        sort_bl(p) + ("RR",)
        for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)
    ]
    assert len(new_unique_ants) == uv_object2.Nants_data
    assert Nblts_selected == uv_object2.Nblts
    for ant in new_unique_ants:
        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in new_unique_ants
    for bl in sorted_bls_to_keep:
        assert bl in sorted_pairs_object2
    for bl in sorted_pairs_object2:
        assert bl in sorted_bls_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to "
        "specific baselines, polarizations using pyuvdata.",
        uv_object2.history,
    )
    # check that you can use numpy integers without errors:
    first_ants = list(map(np.int32, [6, 2, 7, 2, 21, 27, 8]))
    second_ants = list(map(np.int32, [0, 20, 8, 1, 2, 3, 22]))
    ant_pairs_to_keep = list(zip(first_ants, second_ants))
    uv_object2 = uv_object.select(bls=ant_pairs_to_keep, inplace=False)
    sorted_pairs_object2 = [
        sort_bl(p) for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)
    ]
    assert len(new_unique_ants) == uv_object2.Nants_data
    assert Nblts_selected == uv_object2.Nblts
    for ant in new_unique_ants:
        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in new_unique_ants
    for pair in sorted_pairs_to_keep:
        assert pair in sorted_pairs_object2
    for pair in sorted_pairs_object2:
        assert pair in sorted_pairs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific baselines using pyuvdata.",
        uv_object2.history,
    )
    # check that you can specify a single pair without errors
    uv_object2.select(bls=(0, 6))
    sorted_pairs_object2 = [
        sort_bl(p) for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)
    ]
    assert list(set(sorted_pairs_object2)) == [(0, 6)]
    # check for errors associated with antenna pairs not included in data and bad inputs
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=list(zip(first_ants, second_ants)) + [0, 6])
    assert str(cm.value).startswith("bls must be a list of tuples of antenna numbers")
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=[(uv_object.antenna_names[0], uv_object.antenna_names[1])])
    assert str(cm.value).startswith("bls must be a list of tuples of antenna numbers")
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=(5, 1))
    assert str(cm.value).startswith(
        "Antenna number 5 is not present in the " "ant_1_array or ant_2_array"
    )
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=(0, 5))
    assert str(cm.value).startswith(
        "Antenna number 5 is not present in the " "ant_1_array or ant_2_array"
    )
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=(27, 27))
    assert str(cm.value).startswith("Antenna pair (27, 27) does not have any data")
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=(6, 0, "RR"), polarizations="RR")
    assert str(cm.value).startswith(
        "Cannot provide length-3 tuples and also " "specify polarizations."
    )
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=(6, 0, 8))
    assert str(cm.value).startswith(
        "The third element in each bl must be a " "polarization string"
    )
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=[])
    assert str(cm.value).startswith("bls must be a list of tuples of antenna numbers")
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=[100])
    assert str(cm.value).startswith("Baseline number 100 is not present in the")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_times(casa_uvfits):
    """Test selecting a subset of times, including a 2D times input."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    unique_times = np.unique(uv_object.time_array)
    # an arbitrary, non-contiguous subset of the unique times
    times_to_keep = unique_times[[0, 3, 5, 6, 7, 10, 14]]
    # independently count the baseline-times that should survive the select
    Nblts_selected = np.sum([t in times_to_keep for t in uv_object.time_array])
    uv_object2 = uv_object.copy()
    uv_object2.select(times=times_to_keep)
    assert len(times_to_keep) == uv_object2.Ntimes
    assert Nblts_selected == uv_object2.Nblts
    # selection must be exact in both directions: every requested time is
    # present and no unrequested time remains
    for t in times_to_keep:
        assert t in uv_object2.time_array
    for t in np.unique(uv_object2.time_array):
        assert t in times_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific times using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(times=times_to_keep[np.newaxis, :])
    assert len(times_to_keep) == uv_object2.Ntimes
    assert Nblts_selected == uv_object2.Nblts
    for t in times_to_keep:
        assert t in uv_object2.time_array
    for t in np.unique(uv_object2.time_array):
        assert t in times_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific times using pyuvdata.",
        uv_object2.history,
    )
    # check for errors associated with times not included in data
    pytest.raises(
        ValueError,
        uv_object.select,
        times=[np.min(unique_times) - uv_object.integration_time[0]],
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_time_range(casa_uvfits):
    """Test selecting with a [start, stop] time_range."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    unique_times = np.unique(uv_object.time_array)
    mean_time = np.mean(unique_times)
    # keep roughly the first half of the observation
    time_range = [np.min(unique_times), mean_time]
    # the unique times that fall inside the (inclusive) range
    times_to_keep = unique_times[
        np.nonzero((unique_times <= time_range[1]) & (unique_times >= time_range[0]))
    ]
    # independently count the baseline-times inside the range
    Nblts_selected = np.nonzero(
        (uv_object.time_array <= time_range[1])
        & (uv_object.time_array >= time_range[0])
    )[0].size
    uv_object2 = uv_object.copy()
    uv_object2.select(time_range=time_range)
    assert times_to_keep.size == uv_object2.Ntimes
    assert Nblts_selected == uv_object2.Nblts
    # exact two-way check of the surviving times
    for t in times_to_keep:
        assert t in uv_object2.time_array
    for t in np.unique(uv_object2.time_array):
        assert t in times_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific times using pyuvdata.",
        uv_object2.history,
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_time_range_no_data(casa_uvfits):
    """A time_range that precedes all data must raise ValueError."""
    uv_obj = casa_uvfits
    earliest = np.min(np.unique(uv_obj.time_array))
    step = uv_obj.integration_time[0]
    # build a window that ends before the first time in the data
    bad_window = [earliest - step * 2, earliest - step]
    with pytest.raises(ValueError) as err_info:
        uv_obj.select(time_range=bad_window)
    assert str(err_info.value).startswith("No elements in time range")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_time_and_time_range(casa_uvfits):
    """Passing both times and time_range to select must raise ValueError."""
    uv_obj = casa_uvfits
    distinct_times = np.unique(uv_obj.time_array)
    midpoint = np.mean(distinct_times)
    # any valid window plus any valid time list — the combination is the error
    window = [np.min(distinct_times), midpoint]
    picked_times = distinct_times[[0, 3, 5, 6, 7, 10, 14]]
    with pytest.raises(ValueError) as err_info:
        uv_obj.select(time_range=window, times=picked_times)
    assert str(err_info.value).startswith(
        'Only one of "times" and "time_range" can be set'
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_time_range_one_elem(casa_uvfits):
    """A scalar time_range (not a length-2 pair) must raise ValueError."""
    uv_obj = casa_uvfits
    distinct_times = np.unique(uv_obj.time_array)
    midpoint = np.mean(distinct_times)
    window = [np.min(distinct_times), midpoint]
    # pass only the start of the window, i.e. a scalar instead of a pair
    with pytest.raises(ValueError) as err_info:
        uv_obj.select(time_range=window[0])
    assert str(err_info.value).startswith("time_range must be length 2")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_frequencies_uvfits(casa_uvfits, tmp_path):
    """Test frequency selection and the uvfits-write limits on the result."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    # a contiguous run of 10 channels; freq_array is (Nspws, Nfreqs)
    freqs_to_keep = uv_object.freq_array[0, np.arange(12, 22)]
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep)
    assert len(freqs_to_keep) == uv_object2.Nfreqs
    # exact two-way check of the surviving frequencies
    for f in freqs_to_keep:
        assert f in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in freqs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep[np.newaxis, :])
    assert len(freqs_to_keep) == uv_object2.Nfreqs
    for f in freqs_to_keep:
        assert f in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in freqs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check that selecting one frequency works
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep[0])
    assert 1 == uv_object2.Nfreqs
    assert freqs_to_keep[0] in uv_object2.freq_array
    for f in uv_object2.freq_array:
        assert f in [freqs_to_keep[0]]
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check for errors associated with frequencies not included in data
    pytest.raises(
        ValueError,
        uv_object.select,
        frequencies=[np.max(uv_object.freq_array) + uv_object.channel_width],
    )
    # check for warnings and errors associated with unevenly spaced or
    # non-contiguous frequencies
    uv_object2 = uv_object.copy()
    uvtest.checkWarnings(
        uv_object2.select,
        [],
        {"frequencies": uv_object2.freq_array[0, [0, 5, 6]]},
        message=[
            "Selected frequencies are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=2,
    )
    write_file_uvfits = str(tmp_path / "select_test.uvfits")
    # unevenly spaced frequencies cannot be written to uvfits
    pytest.raises(ValueError, uv_object2.write_uvfits, write_file_uvfits)
    uv_object2 = uv_object.copy()
    uvtest.checkWarnings(
        uv_object2.select,
        [],
        {"frequencies": uv_object2.freq_array[0, [0, 2, 4]]},
        message=[
            "Selected frequencies are not contiguous",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=2,
    )
    # non-contiguous frequencies cannot be written to uvfits either
    pytest.raises(ValueError, uv_object2.write_uvfits, write_file_uvfits)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_frequencies_miriad(casa_uvfits, tmp_path):
    """Test frequency selection and the miriad-write limits on the result."""
    # miriad support is an optional compiled extension
    pytest.importorskip("pyuvdata._miriad")
    uv_object = casa_uvfits
    old_history = uv_object.history
    # a contiguous run of 10 channels; freq_array is (Nspws, Nfreqs)
    freqs_to_keep = uv_object.freq_array[0, np.arange(12, 22)]
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep)
    assert len(freqs_to_keep) == uv_object2.Nfreqs
    # exact two-way check of the surviving frequencies
    for f in freqs_to_keep:
        assert f in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in freqs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep[np.newaxis, :])
    assert len(freqs_to_keep) == uv_object2.Nfreqs
    for f in freqs_to_keep:
        assert f in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in freqs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check that selecting one frequency works
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep[0])
    assert 1 == uv_object2.Nfreqs
    assert freqs_to_keep[0] in uv_object2.freq_array
    for f in uv_object2.freq_array:
        assert f in [freqs_to_keep[0]]
    assert uvutils._check_histories(
        old_history + " Downselected to specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check for errors associated with frequencies not included in data
    pytest.raises(
        ValueError,
        uv_object.select,
        frequencies=[np.max(uv_object.freq_array) + uv_object.channel_width],
    )
    # check for warnings and errors associated with unevenly spaced or
    # non-contiguous frequencies
    uv_object2 = uv_object.copy()
    uvtest.checkWarnings(
        uv_object2.select,
        [],
        {"frequencies": uv_object2.freq_array[0, [0, 5, 6]]},
        message=[
            "Selected frequencies are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=2,
    )
    # NOTE(review): the path has a .uvfits extension but is written with
    # write_miriad — the extension is cosmetic here, but confirm it is intended
    write_file_miriad = str(tmp_path / "select_test.uvfits")
    # unevenly spaced frequencies cannot be written to miriad
    pytest.raises(ValueError, uv_object2.write_miriad, write_file_miriad)
    uv_object2 = uv_object.copy()
    uvtest.checkWarnings(
        uv_object2.select,
        [],
        {"frequencies": uv_object2.freq_array[0, [0, 2, 4]]},
        message=[
            "Selected frequencies are not contiguous",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=2,
    )
    # non-contiguous frequencies cannot be written to miriad either
    pytest.raises(ValueError, uv_object2.write_miriad, write_file_miriad)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_freq_chans(casa_uvfits):
    """Test selecting by channel index, alone and combined with frequencies."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    chans_to_keep = np.arange(12, 22)
    uv_object2 = uv_object.copy()
    uv_object2.select(freq_chans=chans_to_keep)
    assert len(chans_to_keep) == uv_object2.Nfreqs
    # exact two-way check: each kept channel's frequency survives and
    # nothing else does
    for chan in chans_to_keep:
        assert uv_object.freq_array[0, chan] in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in uv_object.freq_array[0, chans_to_keep]
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(freq_chans=chans_to_keep[np.newaxis, :])
    assert len(chans_to_keep) == uv_object2.Nfreqs
    for chan in chans_to_keep:
        assert uv_object.freq_array[0, chan] in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in uv_object.freq_array[0, chans_to_keep]
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # Test selecting both channels and frequencies
    freqs_to_keep = uv_object.freq_array[0, np.arange(20, 30)]  # Overlaps with chans
    # the two selections should be unioned: channels 12-29 survive
    all_chans_to_keep = np.arange(12, 30)
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep, freq_chans=chans_to_keep)
    assert len(all_chans_to_keep) == uv_object2.Nfreqs
    for chan in all_chans_to_keep:
        assert uv_object.freq_array[0, chan] in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in uv_object.freq_array[0, all_chans_to_keep]
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_polarizations(casa_uvfits, tmp_path):
    """Test selecting polarizations, including error and warning paths."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    # AIPS-convention polarization numbers (-1 = RR, -2 = LL)
    pols_to_keep = [-1, -2]
    uv_object2 = uv_object.copy()
    uv_object2.select(polarizations=pols_to_keep)
    assert len(pols_to_keep) == uv_object2.Npols
    # exact two-way check of the surviving polarizations
    for p in pols_to_keep:
        assert p in uv_object2.polarization_array
    for p in np.unique(uv_object2.polarization_array):
        assert p in pols_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific polarizations using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(polarizations=[pols_to_keep])
    assert len(pols_to_keep) == uv_object2.Npols
    for p in pols_to_keep:
        assert p in uv_object2.polarization_array
    for p in np.unique(uv_object2.polarization_array):
        assert p in pols_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific polarizations using pyuvdata.",
        uv_object2.history,
    )
    # check for errors associated with polarizations not included in data
    pytest.raises(ValueError, uv_object2.select, polarizations=[-3, -4])
    # check for warnings and errors associated with unevenly spaced polarizations
    uvtest.checkWarnings(
        uv_object.select,
        [],
        {"polarizations": uv_object.polarization_array[[0, 1, 3]]},
        message=[
            "Selected polarization values are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=2,
    )
    write_file_uvfits = str(tmp_path / "select_test.uvfits")
    # unevenly spaced polarizations cannot be written to uvfits
    pytest.raises(ValueError, uv_object.write_uvfits, write_file_uvfits)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select(casa_uvfits):
    """Test selecting along every supported axis simultaneously.

    The expected number of surviving baseline-times is computed independently
    by intersecting the per-axis keep masks, then compared against the result
    of a single combined select call.
    """
    # now test selecting along all axes at once
    uv_object = casa_uvfits
    old_history = uv_object.history
    # fmt: off
    blt_inds = np.array([1057, 461, 1090, 354, 528, 654, 882, 775, 369, 906, 748,
                         875, 296, 773, 554, 395, 1003, 476, 762, 976, 1285, 874,
                         717, 383, 1281, 924, 264, 1163, 297, 857, 1258, 1000, 180,
                         1303, 1139, 393, 42, 135, 789, 713, 527, 1218, 576, 100,
                         1311, 4, 653, 724, 591, 889, 36, 1033, 113, 479, 322,
                         118, 898, 1263, 477, 96, 935, 238, 195, 531, 124, 198,
                         992, 1131, 305, 154, 961, 6, 1175, 76, 663, 82, 637,
                         288, 1152, 845, 1290, 379, 1225, 1240, 733, 1172, 937, 1325,
                         817, 416, 261, 1316, 957, 723, 215, 237, 270, 1309, 208,
                         17, 1028, 895, 574, 166, 784, 834, 732, 1022, 1068, 1207,
                         356, 474, 313, 137, 172, 181, 925, 201, 190, 1277, 1044,
                         1242, 702, 567, 557, 1032, 1352, 504, 545, 422, 179, 780,
                         280, 890, 774, 884])
    # fmt: on
    ants_to_keep = np.array([11, 6, 20, 26, 2, 27, 7, 14])
    ant_pairs_to_keep = [(2, 11), (20, 26), (6, 7), (3, 27), (14, 6)]
    sorted_pairs_to_keep = [sort_bl(p) for p in ant_pairs_to_keep]
    freqs_to_keep = uv_object.freq_array[0, np.arange(31, 39)]
    unique_times = np.unique(uv_object.time_array)
    times_to_keep = unique_times[[0, 2, 6, 8, 10, 13, 14]]
    pols_to_keep = [-1, -3]
    # Independently count blts that should be selected
    blts_blt_select = [i in blt_inds for i in np.arange(uv_object.Nblts)]
    blts_ant_select = [
        (a1 in ants_to_keep) & (a2 in ants_to_keep)
        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)
    ]
    blts_pair_select = [
        sort_bl((a1, a2)) in sorted_pairs_to_keep
        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)
    ]
    blts_time_select = [t in times_to_keep for t in uv_object.time_array]
    # a blt survives only if it passes all four per-axis criteria
    Nblts_select = np.sum(
        [
            bi & (ai & pi) & ti
            for (bi, ai, pi, ti) in zip(
                blts_blt_select, blts_ant_select, blts_pair_select, blts_time_select
            )
        ]
    )
    uv_object2 = uv_object.copy()
    uv_object2.select(
        blt_inds=blt_inds,
        antenna_nums=ants_to_keep,
        bls=ant_pairs_to_keep,
        frequencies=freqs_to_keep,
        times=times_to_keep,
        polarizations=pols_to_keep,
    )
    assert Nblts_select == uv_object2.Nblts
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in ants_to_keep
    assert len(freqs_to_keep) == uv_object2.Nfreqs
    for f in freqs_to_keep:
        assert f in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in freqs_to_keep
    for t in np.unique(uv_object2.time_array):
        assert t in times_to_keep
    assert len(pols_to_keep) == uv_object2.Npols
    for p in pols_to_keep:
        assert p in uv_object2.polarization_array
    for p in np.unique(uv_object2.polarization_array):
        assert p in pols_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to "
        "specific baseline-times, antennas, "
        "baselines, times, frequencies, "
        "polarizations using pyuvdata.",
        uv_object2.history,
    )
    # test that a ValueError is raised if the selection eliminates all blts
    pytest.raises(ValueError, uv_object.select, times=unique_times[0], antenna_nums=1)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_not_inplace(casa_uvfits):
    """Splitting via inplace=False and re-adding must reproduce the object."""
    full = casa_uvfits
    initial_history = full.history
    # split the band in half without touching the original object
    lower = full.select(freq_chans=np.arange(32), inplace=False)
    upper = full.select(freq_chans=np.arange(32, 64), inplace=False)
    lower += upper
    expected_history = (
        initial_history + " Downselected to specific frequencies using "
        "pyuvdata. Combined data along frequency axis using pyuvdata."
    )
    assert uvutils._check_histories(expected_history, lower.history)
    # apart from the history, the recombined object equals the original
    lower.history = initial_history
    assert lower == full
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("metadata_only", [True, False])
def test_conjugate_bls(casa_uvfits, metadata_only):
    """Test conjugate_bls conventions, index-array mode, and error paths.

    Runs with and without data arrays loaded; when data are present the
    cross-hand polarizations must also swap (columns 2 and 3) under
    conjugation.
    """
    testfile = os.path.join(DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits")
    if not metadata_only:
        uv1 = casa_uvfits
    else:
        uv1 = UVData()
        uv1.read_uvfits(testfile, read_data=False)
    if metadata_only:
        assert uv1.metadata_only
    # file comes in with ant1<ant2
    assert np.min(uv1.ant_2_array - uv1.ant_1_array) >= 0
    # check everything swapped & conjugated when go to ant2<ant1
    uv2 = uv1.copy()
    uv2.conjugate_bls(convention="ant2<ant1")
    assert np.min(uv2.ant_1_array - uv2.ant_2_array) >= 0
    assert np.allclose(uv1.ant_1_array, uv2.ant_2_array)
    assert np.allclose(uv1.ant_2_array, uv2.ant_1_array)
    # uvw vectors flip sign under conjugation
    assert np.allclose(
        uv1.uvw_array,
        -1 * uv2.uvw_array,
        rtol=uv1._uvw_array.tols[0],
        atol=uv1._uvw_array.tols[1],
    )
    if not metadata_only:
        # complicated because of the polarization swaps
        # polarization_array = [-1 -2 -3 -4]
        assert np.allclose(
            uv1.data_array[:, :, :, :2],
            np.conj(uv2.data_array[:, :, :, :2]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        # cross-hands (columns 2 and 3) swap with each other when conjugated
        assert np.allclose(
            uv1.data_array[:, :, :, 2],
            np.conj(uv2.data_array[:, :, :, 3]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[:, :, :, 3],
            np.conj(uv2.data_array[:, :, :, 2]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
    # check everything returned to original values with original convention
    uv2.conjugate_bls(convention="ant1<ant2")
    assert uv1 == uv2
    # conjugate a particular set of blts
    blts_to_conjugate = np.arange(uv2.Nblts // 2)
    blts_not_conjugated = np.arange(uv2.Nblts // 2, uv2.Nblts)
    uv2.conjugate_bls(convention=blts_to_conjugate)
    # conjugated half: antennas swapped; untouched half: antennas unchanged
    assert np.allclose(
        uv1.ant_1_array[blts_to_conjugate], uv2.ant_2_array[blts_to_conjugate]
    )
    assert np.allclose(
        uv1.ant_2_array[blts_to_conjugate], uv2.ant_1_array[blts_to_conjugate]
    )
    assert np.allclose(
        uv1.ant_1_array[blts_not_conjugated], uv2.ant_1_array[blts_not_conjugated]
    )
    assert np.allclose(
        uv1.ant_2_array[blts_not_conjugated], uv2.ant_2_array[blts_not_conjugated]
    )
    assert np.allclose(
        uv1.uvw_array[blts_to_conjugate],
        -1 * uv2.uvw_array[blts_to_conjugate],
        rtol=uv1._uvw_array.tols[0],
        atol=uv1._uvw_array.tols[1],
    )
    assert np.allclose(
        uv1.uvw_array[blts_not_conjugated],
        uv2.uvw_array[blts_not_conjugated],
        rtol=uv1._uvw_array.tols[0],
        atol=uv1._uvw_array.tols[1],
    )
    if not metadata_only:
        # complicated because of the polarization swaps
        # polarization_array = [-1 -2 -3 -4]
        assert np.allclose(
            uv1.data_array[blts_to_conjugate, :, :, :2],
            np.conj(uv2.data_array[blts_to_conjugate, :, :, :2]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[blts_not_conjugated, :, :, :2],
            uv2.data_array[blts_not_conjugated, :, :, :2],
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[blts_to_conjugate, :, :, 2],
            np.conj(uv2.data_array[blts_to_conjugate, :, :, 3]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[blts_not_conjugated, :, :, 2],
            uv2.data_array[blts_not_conjugated, :, :, 2],
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[blts_to_conjugate, :, :, 3],
            np.conj(uv2.data_array[blts_to_conjugate, :, :, 2]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[blts_not_conjugated, :, :, 3],
            uv2.data_array[blts_not_conjugated, :, :, 3],
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
    # check uv half plane conventions
    uv2.conjugate_bls(convention="u<0", use_enu=False)
    assert np.max(uv2.uvw_array[:, 0]) <= 0
    uv2.conjugate_bls(convention="u>0", use_enu=False)
    assert np.min(uv2.uvw_array[:, 0]) >= 0
    uv2.conjugate_bls(convention="v<0", use_enu=False)
    assert np.max(uv2.uvw_array[:, 1]) <= 0
    uv2.conjugate_bls(convention="v>0", use_enu=False)
    assert np.min(uv2.uvw_array[:, 1]) >= 0
    # unphase to drift to test using ENU positions
    uv2.unphase_to_drift(use_ant_pos=True)
    uv2.conjugate_bls(convention="u<0")
    assert np.max(uv2.uvw_array[:, 0]) <= 0
    uv2.conjugate_bls(convention="u>0")
    assert np.min(uv2.uvw_array[:, 0]) >= 0
    uv2.conjugate_bls(convention="v<0")
    assert np.max(uv2.uvw_array[:, 1]) <= 0
    uv2.conjugate_bls(convention="v>0")
    assert np.min(uv2.uvw_array[:, 1]) >= 0
    # test errors
    with pytest.raises(ValueError) as cm:
        uv2.conjugate_bls(convention="foo")
    assert str(cm.value).startswith("convention must be one of")
    with pytest.raises(ValueError) as cm:
        uv2.conjugate_bls(convention=np.arange(5) - 1)
    assert str(cm.value).startswith("If convention is an index array")
    with pytest.raises(ValueError) as cm:
        uv2.conjugate_bls(convention=[uv2.Nblts])
    assert str(cm.value).startswith("If convention is an index array")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_reorder_pols(casa_uvfits):
    """Test reorder_pols: index-array order, AIPS/CASA conventions, errors.

    Verifies that an explicit index order matches a manual reorder of all
    per-polarization arrays, that the named "AIPS" and "CASA" orderings
    produce the expected polarization numbers, and that invalid ``order``
    values raise ValueError.
    """
    # Test function to fix polarization order
    uv1 = casa_uvfits
    uv2 = uv1.copy()
    uv3 = uv1.copy()
    # reorder uv2 manually
    order = [1, 3, 2, 0]
    uv2.polarization_array = uv2.polarization_array[order]
    uv2.data_array = uv2.data_array[:, :, :, order]
    uv2.nsample_array = uv2.nsample_array[:, :, :, order]
    uv2.flag_array = uv2.flag_array[:, :, :, order]
    uv1.reorder_pols(order=order)
    assert uv1 == uv2
    # Restore original order
    uv1 = uv3.copy()
    uv2.reorder_pols()
    assert uv1 == uv2
    uv1.reorder_pols(order="AIPS")
    # check that we have aips ordering
    aips_pols = np.array([-1, -2, -3, -4]).astype(int)
    assert np.all(uv1.polarization_array == aips_pols)
    uv2 = uv1.copy()
    uv2.reorder_pols(order="CASA")
    # check that we have casa ordering
    casa_pols = np.array([-1, -3, -4, -2]).astype(int)
    assert np.all(uv2.polarization_array == casa_pols)
    order = np.array([0, 2, 3, 1])
    assert np.all(uv2.data_array == uv1.data_array[:, :, :, order])
    assert np.all(uv2.flag_array == uv1.flag_array[:, :, :, order])
    uv2.reorder_pols(order="AIPS")
    # check that we have aips ordering again
    assert uv1 == uv2
    # check error on unknown order
    # (fixed: previously a dict was passed POSITIONALLY as the first argument,
    # so the intended "unknown order string" path was never exercised)
    pytest.raises(ValueError, uv2.reorder_pols, order="foo")
    # check error if order is an array of the wrong length
    with pytest.raises(ValueError) as cm:
        uv2.reorder_pols(order=[3, 2, 1])
    assert str(cm.value).startswith("If order is an index array, it must")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_reorder_blts(casa_uvfits):
    """Test reorder_blts major/minor orderings, index-array mode, and errors.

    The default (time, baseline) sort is checked in detail against the
    unsorted object; the remaining combinations are checked via blt_order
    and monotonicity of the relevant array.
    """
    uv1 = casa_uvfits
    # test default reordering in detail
    uv2 = uv1.copy()
    uv2.reorder_blts()
    assert uv2.blt_order == ("time", "baseline")
    assert np.min(np.diff(uv2.time_array)) >= 0
    for this_time in np.unique(uv2.time_array):
        bls_2 = uv2.baseline_array[np.where(uv2.time_array == this_time)]
        bls_1 = uv1.baseline_array[np.where(uv2.time_array == this_time)]
        assert bls_1.shape == bls_2.shape
        assert np.min(np.diff(bls_2)) >= 0
        # map sorted baselines back to their unsorted positions and confirm
        # uvw and data rows moved together with the baselines
        bl_inds = [np.where(bls_1 == bl)[0][0] for bl in bls_2]
        assert np.allclose(bls_1[bl_inds], bls_2)
        uvw_1 = uv1.uvw_array[np.where(uv2.time_array == this_time)[0], :]
        uvw_2 = uv2.uvw_array[np.where(uv2.time_array == this_time)[0], :]
        assert uvw_1.shape == uvw_2.shape
        assert np.allclose(uvw_1[bl_inds, :], uvw_2)
        data_1 = uv1.data_array[np.where(uv2.time_array == this_time)[0], :, :, :]
        data_2 = uv2.data_array[np.where(uv2.time_array == this_time)[0], :, :, :]
        assert data_1.shape == data_2.shape
        assert np.allclose(data_1[bl_inds, :, :, :], data_2)
    # check that ordering by time, ant1 is identical to time, baseline
    uv3 = uv1.copy()
    uv3.reorder_blts(order="time", minor_order="ant1")
    assert uv3.blt_order == ("time", "ant1")
    assert np.min(np.diff(uv3.time_array)) >= 0
    uv3.blt_order = uv2.blt_order
    assert uv2 == uv3
    uv3.reorder_blts(order="time", minor_order="ant2")
    assert uv3.blt_order == ("time", "ant2")
    assert np.min(np.diff(uv3.time_array)) >= 0
    # check that loopback works
    uv3.reorder_blts()
    assert uv2 == uv3
    # sort with a specified index array
    new_order = np.lexsort((uv3.baseline_array, uv3.time_array))
    uv3.reorder_blts(order=new_order)
    # an explicit index array leaves blt_order unset
    assert uv3.blt_order is None
    assert np.min(np.diff(uv3.time_array)) >= 0
    uv3.blt_order = ("time", "baseline")
    assert uv2 == uv3
    # test sensible defaulting if minor order = major order
    uv3.reorder_blts(order="time", minor_order="time")
    assert uv2 == uv3
    # test all combinations of major, minor order
    uv3.reorder_blts(order="baseline")
    assert uv3.blt_order == ("baseline", "time")
    assert np.min(np.diff(uv3.baseline_array)) >= 0
    uv3.reorder_blts(order="ant1")
    assert uv3.blt_order == ("ant1", "ant2")
    assert np.min(np.diff(uv3.ant_1_array)) >= 0
    uv3.reorder_blts(order="ant1", minor_order="time")
    assert uv3.blt_order == ("ant1", "time")
    assert np.min(np.diff(uv3.ant_1_array)) >= 0
    uv3.reorder_blts(order="ant1", minor_order="baseline")
    assert uv3.blt_order == ("ant1", "baseline")
    assert np.min(np.diff(uv3.ant_1_array)) >= 0
    uv3.reorder_blts(order="ant2")
    assert uv3.blt_order == ("ant2", "ant1")
    assert np.min(np.diff(uv3.ant_2_array)) >= 0
    uv3.reorder_blts(order="ant2", minor_order="time")
    assert uv3.blt_order == ("ant2", "time")
    assert np.min(np.diff(uv3.ant_2_array)) >= 0
    uv3.reorder_blts(order="ant2", minor_order="baseline")
    assert uv3.blt_order == ("ant2", "baseline")
    assert np.min(np.diff(uv3.ant_2_array)) >= 0
    uv3.reorder_blts(order="bda")
    assert uv3.blt_order == ("bda",)
    assert np.min(np.diff(uv3.integration_time)) >= 0
    assert np.min(np.diff(uv3.baseline_array)) >= 0
    # test doing conjugation along with a reorder
    # the file is already conjugated this way, so should be equal
    uv3.reorder_blts(order="time", conj_convention="ant1<ant2")
    assert uv2 == uv3
    # test errors
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order="foo")
    assert str(cm.value).startswith("order must be one of")
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order=np.arange(5))
    assert str(cm.value).startswith("If order is an index array, it must")
    with pytest.raises(ValueError) as cm:
        # fixed: np.float was removed in NumPy 1.24 — use the builtin float
        # (a float index array must still be rejected)
        uv3.reorder_blts(order=np.arange(5, dtype=float))
    assert str(cm.value).startswith("If order is an index array, it must")
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order=np.arange(uv3.Nblts), minor_order="time")
    assert str(cm.value).startswith(
        "Minor order cannot be set if order is an index array"
    )
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order="bda", minor_order="time")
    assert str(cm.value).startswith("minor_order cannot be specified if order is")
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order="baseline", minor_order="ant1")
    assert str(cm.value).startswith("minor_order conflicts with order")
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order="time", minor_order="foo")
    assert str(cm.value).startswith("minor_order can only be one of")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_sum_vis(casa_uvfits):
    """Exercise sum_vis/diff_vis round trips, in-place mode, and error paths."""
    full = casa_uvfits
    half = full.copy()
    half.data_array = full.data_array / 2
    # summing the halved object with itself recovers the original data
    total = half.sum_vis(half)
    assert np.array_equal(total.data_array, full.data_array)
    assert uvutils._check_histories(
        half.history + " Visibilities summed using pyuvdata.", total.history
    )
    # differencing full - half leaves half
    difference = full.diff_vis(half)
    assert np.array_equal(difference.data_array, half.data_array)
    assert uvutils._check_histories(
        full.history + " Visibilities differenced using pyuvdata.",
        difference.history,
    )
    # in-place differencing: total - half leaves half
    total.diff_vis(half, inplace=True)
    assert np.array_equal(total.data_array, half.data_array)
    # a non-UVData operand is rejected
    with pytest.raises(ValueError) as err_info:
        full.sum_vis("foo")
    assert str(err_info.value).startswith("Only UVData (or subclass) objects can be")
    # mismatched metadata between the operands is rejected
    full.instrument = "foo"
    with pytest.raises(ValueError) as err_info:
        full.sum_vis(half, inplace=True)
    assert str(err_info.value).startswith("UVParameter instrument does not match")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_add(casa_uvfits):
uv_full = casa_uvfits
# Add frequencies
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=np.arange(0, 32))
uv2.select(freq_chans=np.arange(32, 64))
uv1 += uv2
# Check history is correct, before replacing and doing a full object check
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific frequencies using pyuvdata. "
"Combined data along frequency axis "
"using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add frequencies - out of order
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=np.arange(0, 32))
uv2.select(freq_chans=np.arange(32, 64))
uv2 += uv1
uv2.history = uv_full.history
assert uv2 == uv_full
# Add polarizations
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[2:4])
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific polarizations using pyuvdata. "
"Combined data along polarization axis "
"using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add polarizations - out of order
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[2:4])
uv2 += uv1
uv2.history = uv_full.history
assert uv2 == uv_full
# Add times
uv1 = uv_full.copy()
uv2 = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(times=times[0 : len(times) // 2])
uv2.select(times=times[len(times) // 2 :])
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific times using pyuvdata. "
"Combined data along baseline-time axis "
"using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add baselines
uv1 = uv_full.copy()
uv2 = uv_full.copy()
ant_list = list(range(15)) # Roughly half the antennas in the data
# All blts where ant_1 is in list
ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ant_list]
ind2 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] not in ant_list]
uv1.select(blt_inds=ind1)
uv2.select(blt_inds=ind2)
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific baseline-times using pyuvdata. "
"Combined data along baseline-time axis "
"using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add baselines - out of order
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv3 = uv_full.copy()
ants = uv_full.get_ants()
ants1 = ants[0:6]
ants2 = ants[6:12]
ants3 = ants[12:]
# All blts where ant_1 is in list
ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ants1]
ind2 = [i for i in range(uv2.Nblts) if uv2.ant_1_array[i] in ants2]
ind3 = [i for i in range(uv3.Nblts) if uv3.ant_1_array[i] in ants3]
uv1.select(blt_inds=ind1)
uv2.select(blt_inds=ind2)
uv3.select(blt_inds=ind3)
uv3.data_array = uv3.data_array[-1::-1, :, :, :]
uv3.nsample_array = uv3.nsample_array[-1::-1, :, :, :]
uv3.flag_array = uv3.flag_array[-1::-1, :, :, :]
uv3.uvw_array = uv3.uvw_array[-1::-1, :]
uv3.time_array = uv3.time_array[-1::-1]
uv3.lst_array = uv3.lst_array[-1::-1]
uv3.ant_1_array = uv3.ant_1_array[-1::-1]
uv3.ant_2_array = uv3.ant_2_array[-1::-1]
uv3.baseline_array = uv3.baseline_array[-1::-1]
uv1 += uv3
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific baseline-times using pyuvdata. "
"Combined data along baseline-time axis "
"using pyuvdata. Combined data along "
"baseline-time axis using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add multiple axes
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv_ref = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(
times=times[0 : len(times) // 2], polarizations=uv1.polarization_array[0:2]
)
uv2.select(
times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4]
)
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific times, polarizations using "
"pyuvdata. Combined data along "
"baseline-time, polarization axis "
"using pyuvdata.",
uv1.history,
)
blt_ind1 = np.array(
[
ind
for ind in range(uv_full.Nblts)
if uv_full.time_array[ind] in times[0 : len(times) // 2]
]
)
blt_ind2 = np.array(
[
ind
for ind in range(uv_full.Nblts)
if uv_full.time_array[ind] in times[len(times) // 2 :]
]
)
# Zero out missing data in reference object
uv_ref.data_array[blt_ind1, :, :, 2:] = 0.0
uv_ref.nsample_array[blt_ind1, :, :, 2:] = 0.0
uv_ref.flag_array[blt_ind1, :, :, 2:] = True
uv_ref.data_array[blt_ind2, :, :, 0:2] = 0.0
uv_ref.nsample_array[blt_ind2, :, :, 0:2] = 0.0
uv_ref.flag_array[blt_ind2, :, :, 0:2] = True
uv1.history = uv_full.history
assert uv1 == uv_ref
# Another combo
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv_ref = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32))
uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64))
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific times, frequencies using "
"pyuvdata. Combined data along "
"baseline-time, frequency axis using "
"pyuvdata.",
uv1.history,
)
blt_ind1 = np.array(
[
ind
for ind in range(uv_full.Nblts)
if uv_full.time_array[ind] in times[0 : len(times) // 2]
]
)
blt_ind2 = np.array(
[
ind
for ind in range(uv_full.Nblts)
if uv_full.time_array[ind] in times[len(times) // 2 :]
]
)
# Zero out missing data in reference object
uv_ref.data_array[blt_ind1, :, 32:, :] = 0.0
uv_ref.nsample_array[blt_ind1, :, 32:, :] = 0.0
uv_ref.flag_array[blt_ind1, :, 32:, :] = True
uv_ref.data_array[blt_ind2, :, 0:32, :] = 0.0
uv_ref.nsample_array[blt_ind2, :, 0:32, :] = 0.0
uv_ref.flag_array[blt_ind2, :, 0:32, :] = True
uv1.history = uv_full.history
assert uv1 == uv_ref
# Add without inplace
uv1 = uv_full.copy()
uv2 = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(times=times[0 : len(times) // 2])
uv2.select(times=times[len(times) // 2 :])
uv1 = uv1 + uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific times using pyuvdata. "
"Combined data along baseline-time "
"axis using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Check warnings
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=np.arange(0, 32))
uv2.select(freq_chans=np.arange(33, 64))
uvtest.checkWarnings(
uv1.__add__,
func_args=[uv2],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined frequencies are not evenly spaced",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=[0])
uv2.select(freq_chans=[3])
uvtest.checkWarnings(
uv1.__iadd__,
func_args=[uv2],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined frequencies are not contiguous",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=[0])
uv2.select(freq_chans=[1])
uv2.freq_array += uv2._channel_width.tols[1] / 2.0
uvtest.checkWarnings(
uv1.__iadd__,
func_args=[uv2],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=3,
)
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[3])
uvtest.checkWarnings(
uv1.__iadd__,
func_args=[uv2],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined polarizations are not evenly spaced",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
# Combining histories
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[2:4])
uv2.history += " testing the history. AIPS WTSCAL = 1.0"
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific polarizations using pyuvdata. "
"Combined data along polarization "
"axis using pyuvdata. testing the history.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# test add of autocorr-only and crosscorr-only objects
uv_full = UVData()
uv_full.read_uvh5(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5"))
bls = uv_full.get_antpairs()
autos = [bl for bl in bls if bl[0] == bl[1]]
cross = sorted(set(bls) - set(autos))
uv_auto = uv_full.select(bls=autos, inplace=False)
uv_cross = uv_full.select(bls=cross, inplace=False)
uv1 = uv_auto + uv_cross
assert uv1.Nbls == uv_auto.Nbls + uv_cross.Nbls
uv2 = uv_cross + uv_auto
assert uv2.Nbls == uv_auto.Nbls + uv_cross.Nbls
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_add_drift(casa_uvfits):
uv_full = casa_uvfits
uv_full.unphase_to_drift()
# Add frequencies
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=np.arange(0, 32))
uv2.select(freq_chans=np.arange(32, 64))
uv1 += uv2
# Check history is correct, before replacing and doing a full object check
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific frequencies using pyuvdata. "
"Combined data along frequency "
"axis using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add polarizations
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[2:4])
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific polarizations using pyuvdata. "
"Combined data along polarization "
"axis using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add times
uv1 = uv_full.copy()
uv2 = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(times=times[0 : len(times) // 2])
uv2.select(times=times[len(times) // 2 :])
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific times using pyuvdata. "
"Combined data along baseline-time "
"axis using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add baselines
uv1 = uv_full.copy()
uv2 = uv_full.copy()
ant_list = list(range(15)) # Roughly half the antennas in the data
# All blts where ant_1 is in list
ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ant_list]
ind2 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] not in ant_list]
uv1.select(blt_inds=ind1)
uv2.select(blt_inds=ind2)
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific baseline-times using pyuvdata. "
"Combined data along baseline-time "
"axis using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add multiple axes
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv_ref = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(
times=times[0 : len(times) // 2], polarizations=uv1.polarization_array[0:2]
)
uv2.select(
times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4]
)
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific times, polarizations using "
"pyuvdata. Combined data along "
"baseline-time, polarization "
"axis using pyuvdata.",
uv1.history,
)
blt_ind1 = np.array(
[
ind
for ind in range(uv_full.Nblts)
if uv_full.time_array[ind] in times[0 : len(times) // 2]
]
)
blt_ind2 = np.array(
[
ind
for ind in range(uv_full.Nblts)
if uv_full.time_array[ind] in times[len(times) // 2 :]
]
)
# Zero out missing data in reference object
uv_ref.data_array[blt_ind1, :, :, 2:] = 0.0
uv_ref.nsample_array[blt_ind1, :, :, 2:] = 0.0
uv_ref.flag_array[blt_ind1, :, :, 2:] = True
uv_ref.data_array[blt_ind2, :, :, 0:2] = 0.0
uv_ref.nsample_array[blt_ind2, :, :, 0:2] = 0.0
uv_ref.flag_array[blt_ind2, :, :, 0:2] = True
uv1.history = uv_full.history
assert uv1 == uv_ref
# Another combo
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv_ref = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32))
uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64))
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific times, frequencies using "
"pyuvdata. Combined data along "
"baseline-time, frequency "
"axis using pyuvdata.",
uv1.history,
)
blt_ind1 = np.array(
[
ind
for ind in range(uv_full.Nblts)
if uv_full.time_array[ind] in times[0 : len(times) // 2]
]
)
blt_ind2 = np.array(
[
ind
for ind in range(uv_full.Nblts)
if uv_full.time_array[ind] in times[len(times) // 2 :]
]
)
# Zero out missing data in reference object
uv_ref.data_array[blt_ind1, :, 32:, :] = 0.0
uv_ref.nsample_array[blt_ind1, :, 32:, :] = 0.0
uv_ref.flag_array[blt_ind1, :, 32:, :] = True
uv_ref.data_array[blt_ind2, :, 0:32, :] = 0.0
uv_ref.nsample_array[blt_ind2, :, 0:32, :] = 0.0
uv_ref.flag_array[blt_ind2, :, 0:32, :] = True
uv1.history = uv_full.history
assert uv1 == uv_ref
# Add without inplace
uv1 = uv_full.copy()
uv2 = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(times=times[0 : len(times) // 2])
uv2.select(times=times[len(times) // 2 :])
uv1 = uv1 + uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific times using pyuvdata. "
"Combined data along baseline-time "
"axis using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Check warnings
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=np.arange(0, 32))
uv2.select(freq_chans=np.arange(33, 64))
uvtest.checkWarnings(
uv1.__add__,
func_args=[uv2],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined frequencies are not evenly spaced",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=[0])
uv2.select(freq_chans=[3])
uvtest.checkWarnings(
uv1.__iadd__,
func_args=[uv2],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined frequencies are not contiguous",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[3])
uvtest.checkWarnings(
uv1.__iadd__,
func_args=[uv2],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined polarizations are not evenly spaced",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
# Combining histories
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[2:4])
uv2.history += " testing the history. AIPS WTSCAL = 1.0"
uv1 += uv2
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific polarizations using pyuvdata. "
"Combined data along polarization "
"axis using pyuvdata. testing the history.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_break_add(casa_uvfits):
# Test failure modes of add function
uv_full = casa_uvfits
# Wrong class
uv1 = uv_full.copy()
uv1.select(freq_chans=np.arange(0, 32))
pytest.raises(ValueError, uv1.__iadd__, np.zeros(5))
# One phased, one not
uv2 = uv_full.copy()
uv2.unphase_to_drift()
pytest.raises(ValueError, uv1.__iadd__, uv2)
# Different units
uv2 = uv_full.copy()
uv2.select(freq_chans=np.arange(32, 64))
uv2.vis_units = "Jy"
pytest.raises(ValueError, uv1.__iadd__, uv2)
# Overlapping data
uv2 = uv_full.copy()
pytest.raises(ValueError, uv1.__iadd__, uv2)
# Different integration_time
uv2 = uv_full.copy()
uv2.select(freq_chans=np.arange(32, 64))
uv2.integration_time *= 2
pytest.raises(ValueError, uv1.__iadd__, uv2)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
"test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_error_drift_and_rephase(casa_uvfits, test_func, extra_kwargs):
uv_full = casa_uvfits
with pytest.raises(ValueError) as cm:
getattr(uv_full, test_func)(
uv_full, phase_center_radec=(0, 45), unphase_to_drift=True, **extra_kwargs
)
assert str(cm.value).startswith(
"phase_center_radec cannot be set if " "unphase_to_drift is True."
)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
"test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_this_phased_unphase_to_drift(uv_phase_time_split, test_func, extra_kwargs):
(uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw) = uv_phase_time_split
func_kwargs = {
"unphase_to_drift": True,
"inplace": False,
}
func_kwargs.update(extra_kwargs)
uv_out = uvtest.checkWarnings(
getattr(uv_phase_1, test_func),
func_args=[uv_raw_2],
func_kwargs=func_kwargs,
message=["Unphasing this UVData object to drift"],
)
# the histories will be different here
# but everything else should match.
uv_out.history = copy.deepcopy(uv_raw.history)
# ensure baseline time order is the same
# because fast_concat will not order for us
uv_out.reorder_blts(order="time", minor_order="baseline")
assert uv_out.phase_type == "drift"
assert uv_out == uv_raw
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
"test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_other_phased_unphase_to_drift(
uv_phase_time_split, test_func, extra_kwargs
):
(uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw) = uv_phase_time_split
func_kwargs = {
"unphase_to_drift": True,
"inplace": False,
}
func_kwargs.update(extra_kwargs)
uv_out = uvtest.checkWarnings(
getattr(uv_raw_1, test_func),
func_args=[uv_phase_2],
func_kwargs=func_kwargs,
message=["Unphasing other UVData object to drift"],
)
# the histories will be different here
# but everything else should match.
uv_out.history = copy.deepcopy(uv_raw.history)
# ensure baseline time order is the same
# because fast_concat will not order for us
uv_out.reorder_blts(order="time", minor_order="baseline")
assert uv_out.phase_type == "drift"
assert uv_out == uv_raw
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
"test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_this_rephase_new_phase_center(
uv_phase_time_split, test_func, extra_kwargs
):
(uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw) = uv_phase_time_split
phase_center_radec = (Angle("0d").rad, Angle("-30d").rad)
# phase each half to different spots
uv_raw_1.phase(
ra=0, dec=0, use_ant_pos=True,
)
uv_raw_2.phase(
ra=phase_center_radec[0], dec=phase_center_radec[1], use_ant_pos=True
)
# phase original to phase_center_radec
uv_raw.phase(ra=phase_center_radec[0], dec=phase_center_radec[1], use_ant_pos=True)
func_kwargs = {
"inplace": False,
"phase_center_radec": phase_center_radec,
"use_ant_pos": True,
}
func_kwargs.update(extra_kwargs)
uv_out = uvtest.checkWarnings(
getattr(uv_raw_1, test_func),
func_args=[uv_raw_2],
func_kwargs=func_kwargs,
message=["Phasing this UVData object to phase_center_radec"],
)
# the histories will be different here
# but everything else should match.
uv_out.history = copy.deepcopy(uv_raw.history)
# ensure baseline time order is the same
# because fast_concat will not order for us
uv_out.reorder_blts(order="time", minor_order="baseline")
assert (uv_out.phase_center_ra, uv_out.phase_center_dec) == phase_center_radec
assert uv_out == uv_raw
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
"test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_other_rephase_new_phase_center(
uv_phase_time_split, test_func, extra_kwargs
):
(uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw) = uv_phase_time_split
phase_center_radec = (Angle("0d").rad, Angle("-30d").rad)
# phase each half to different spots
uv_raw_1.phase(
ra=phase_center_radec[0], dec=phase_center_radec[1], use_ant_pos=True,
)
uv_raw_2.phase(
ra=0, dec=0, use_ant_pos=True,
)
# phase original to phase_center_radec
uv_raw.phase(
ra=phase_center_radec[0], dec=phase_center_radec[1], use_ant_pos=True,
)
func_kwargs = {
"inplace": False,
"phase_center_radec": phase_center_radec,
"use_ant_pos": True,
}
func_kwargs.update(extra_kwargs)
uv_out = uvtest.checkWarnings(
getattr(uv_raw_1, test_func),
func_args=[uv_raw_2],
func_kwargs=func_kwargs,
message=["Phasing other UVData object to phase_center_radec"],
)
# the histories will be different here
# but everything else should match.
uv_out.history = copy.deepcopy(uv_raw.history)
# ensure baseline time order is the same
# because fast_concat will not order for us
uv_out.reorder_blts(order="time", minor_order="baseline")
assert uv_out.phase_type == "phased"
assert (uv_out.phase_center_ra, uv_out.phase_center_dec) == phase_center_radec
assert uv_out == uv_raw
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
"test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_error_too_long_phase_center(uv_phase_time_split, test_func, extra_kwargs):
(uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw) = uv_phase_time_split
phase_center_radec = (Angle("0d").rad, Angle("-30d").rad, 7)
func_kwargs = {
"inplace": False,
"phase_center_radec": phase_center_radec,
}
func_kwargs.update(extra_kwargs)
with pytest.raises(ValueError) as cm:
getattr(uv_phase_1, test_func)(uv_phase_2, **func_kwargs)
assert str(cm.value).startswith("phase_center_radec should have length 2.")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_fast_concat(casa_uvfits):
uv_full = casa_uvfits
# Add frequencies
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=np.arange(0, 32))
uv2.select(freq_chans=np.arange(32, 64))
uv1.fast_concat(uv2, "freq", inplace=True)
# Check history is correct, before replacing and doing a full object check
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific frequencies using pyuvdata. "
"Combined data along frequency axis "
"using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add frequencies - out of order
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=np.arange(0, 32))
uv2.select(freq_chans=np.arange(32, 64))
uvtest.checkWarnings(
uv2.fast_concat,
func_args=[uv1, "freq"],
func_kwargs={"inplace": True},
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined frequencies are not evenly spaced",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
assert uv2.Nfreqs == uv_full.Nfreqs
assert uv2._freq_array != uv_full._freq_array
assert uv2._data_array != uv_full._data_array
# reorder frequencies and test that they are equal
index_array = np.argsort(uv2.freq_array[0, :])
uv2.freq_array = uv2.freq_array[:, index_array]
uv2.data_array = uv2.data_array[:, :, index_array, :]
uv2.nsample_array = uv2.nsample_array[:, :, index_array, :]
uv2.flag_array = uv2.flag_array[:, :, index_array, :]
uv2.history = uv_full.history
assert uv2._freq_array == uv_full._freq_array
assert uv2 == uv_full
# Add polarizations
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[2:4])
uv1.fast_concat(uv2, "polarization", inplace=True)
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific polarizations using pyuvdata. "
"Combined data along polarization axis "
"using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add polarizations - out of order
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[2:4])
uvtest.checkWarnings(
uv2.fast_concat,
func_args=[uv1, "polarization"],
func_kwargs={"inplace": True},
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined polarizations are not evenly spaced",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
assert uv2._polarization_array != uv_full._polarization_array
assert uv2._data_array != uv_full._data_array
# reorder pols
uv2.reorder_pols()
uv2.history = uv_full.history
assert uv2 == uv_full
# Add times
uv1 = uv_full.copy()
uv2 = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(times=times[0 : len(times) // 2])
uv2.select(times=times[len(times) // 2 :])
uv1.fast_concat(uv2, "blt", inplace=True)
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific times using pyuvdata. "
"Combined data along baseline-time axis "
"using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Add baselines
uv1 = uv_full.copy()
uv2 = uv_full.copy()
# divide in half to keep in order
ind1 = np.arange(uv1.Nblts // 2)
ind2 = np.arange(uv1.Nblts // 2, uv1.Nblts)
uv1.select(blt_inds=ind1)
uv2.select(blt_inds=ind2)
uv1.fast_concat(uv2, "blt", inplace=True)
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific baseline-times using pyuvdata. "
"Combined data along baseline-time axis "
"using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1, uv_full
# Add baselines out of order
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(blt_inds=ind1)
uv2.select(blt_inds=ind2)
uv2.fast_concat(uv1, "blt", inplace=True)
# test freq & pol arrays equal
assert uv2._freq_array == uv_full._freq_array
assert uv2._polarization_array == uv_full._polarization_array
# test Nblt length arrays not equal but same shape
assert uv2._ant_1_array != uv_full._ant_1_array
assert uv2.ant_1_array.shape == uv_full.ant_1_array.shape
assert uv2._ant_2_array != uv_full._ant_2_array
assert uv2.ant_2_array.shape == uv_full.ant_2_array.shape
assert uv2._uvw_array != uv_full._uvw_array
assert uv2.uvw_array.shape == uv_full.uvw_array.shape
assert uv2._time_array != uv_full._time_array
assert uv2.time_array.shape == uv_full.time_array.shape
assert uv2._baseline_array != uv_full._baseline_array
assert uv2.baseline_array.shape == uv_full.baseline_array.shape
assert uv2._data_array != uv_full._data_array
assert uv2.data_array.shape == uv_full.data_array.shape
# reorder blts to enable comparison
uv2.reorder_blts()
assert uv2.blt_order == ("time", "baseline")
uv2.blt_order = None
uv2.history = uv_full.history
assert uv2 == uv_full
# add baselines such that Nants_data needs to change
uv1 = uv_full.copy()
uv2 = uv_full.copy()
ant_list = list(range(15)) # Roughly half the antennas in the data
# All blts where ant_1 is in list
ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ant_list]
ind2 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] not in ant_list]
uv1.select(blt_inds=ind1)
uv2.select(blt_inds=ind2)
uv2.fast_concat(uv1, "blt", inplace=True)
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific baseline-times using pyuvdata. "
"Combined data along baseline-time "
"axis using pyuvdata.",
uv2.history,
)
# test freq & pol arrays equal
assert uv2._freq_array == uv_full._freq_array
assert uv2._polarization_array == uv_full._polarization_array
# test Nblt length arrays not equal but same shape
assert uv2._ant_1_array != uv_full._ant_1_array
assert uv2.ant_1_array.shape == uv_full.ant_1_array.shape
assert uv2._ant_2_array != uv_full._ant_2_array
assert uv2.ant_2_array.shape == uv_full.ant_2_array.shape
assert uv2._uvw_array != uv_full._uvw_array
assert uv2.uvw_array.shape == uv_full.uvw_array.shape
assert uv2._time_array != uv_full._time_array
assert uv2.time_array.shape == uv_full.time_array.shape
assert uv2._baseline_array != uv_full._baseline_array
assert uv2.baseline_array.shape == uv_full.baseline_array.shape
assert uv2._data_array != uv_full._data_array
assert uv2.data_array.shape == uv_full.data_array.shape
# reorder blts to enable comparison
uv2.reorder_blts()
assert uv2.blt_order == ("time", "baseline")
uv2.blt_order = None
uv2.history = uv_full.history
assert uv2 == uv_full
# Add multiple axes
uv1 = uv_full.copy()
uv2 = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(
times=times[0 : len(times) // 2], polarizations=uv1.polarization_array[0:2]
)
uv2.select(
times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4]
)
pytest.raises(ValueError, uv1.fast_concat, uv2, "blt", inplace=True)
# Another combo
uv1 = uv_full.copy()
uv2 = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32))
uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64))
pytest.raises(ValueError, uv1.fast_concat, uv2, "blt", inplace=True)
# Add without inplace
uv1 = uv_full.copy()
uv2 = uv_full.copy()
times = np.unique(uv_full.time_array)
uv1.select(times=times[0 : len(times) // 2])
uv2.select(times=times[len(times) // 2 :])
uv1 = uv1.fast_concat(uv2, "blt", inplace=False)
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific times using pyuvdata. "
"Combined data along baseline-time "
"axis using pyuvdata.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# Check warnings
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=np.arange(0, 32))
uv2.select(freq_chans=np.arange(33, 64))
uvtest.checkWarnings(
uv1.fast_concat,
func_args=[uv2, "freq"],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined frequencies are not evenly spaced",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=[0])
uv2.select(freq_chans=[3])
uvtest.checkWarnings(
uv1.fast_concat,
func_args=[uv2, "freq"],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined frequencies are not contiguous",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(freq_chans=[0])
uv2.select(freq_chans=[1])
uv2.freq_array += uv2._channel_width.tols[1] / 2.0
uvtest.checkWarnings(
uv1.fast_concat,
func_args=[uv2, "freq"],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=3,
)
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[3])
uvtest.checkWarnings(
uv1.fast_concat,
func_args=[uv2, "polarization"],
message=[
"The uvw_array does not match the expected values given the antenna "
"positions.",
"The uvw_array does not match the expected values given the antenna "
"positions.",
"Combined polarizations are not evenly spaced",
"The uvw_array does not match the expected values given the antenna "
"positions.",
],
nwarnings=4,
)
# Combining histories
uv1 = uv_full.copy()
uv2 = uv_full.copy()
uv1.select(polarizations=uv1.polarization_array[0:2])
uv2.select(polarizations=uv2.polarization_array[2:4])
uv2.history += " testing the history. AIPS WTSCAL = 1.0"
uv1.fast_concat(uv2, "polarization", inplace=True)
assert uvutils._check_histories(
uv_full.history + " Downselected to "
"specific polarizations using pyuvdata. "
"Combined data along polarization "
"axis using pyuvdata. testing the history.",
uv1.history,
)
uv1.history = uv_full.history
assert uv1 == uv_full
# test add of autocorr-only and crosscorr-only objects
uv_full = UVData()
uv_full.read_uvh5(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5"))
bls = uv_full.get_antpairs()
autos = [bl for bl in bls if bl[0] == bl[1]]
cross = sorted(set(bls) - set(autos))
uv_auto = uv_full.select(bls=autos, inplace=False)
uv_cross = uv_full.select(bls=cross, inplace=False)
uv1 = uv_auto.fast_concat(uv_cross, "blt")
assert uv1.Nbls == uv_auto.Nbls + uv_cross.Nbls
uv2 = uv_cross.fast_concat(uv_auto, "blt")
assert uv2.Nbls == uv_auto.Nbls + uv_cross.Nbls
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_fast_concat_errors(casa_uvfits):
    """fast_concat rejects an unknown axis name and a non-UVData object."""
    first = casa_uvfits.copy()
    second = casa_uvfits.copy()
    first.select(freq_chans=np.arange(0, 32))
    second.select(freq_chans=np.arange(32, 64))

    # Unknown concatenation axis.
    with pytest.raises(ValueError):
        first.fast_concat(second, "foo", inplace=True)

    # Wrong object type: a UVCal is not a UVData.
    with pytest.raises(ValueError):
        first.fast_concat(UVCal(), "freq", inplace=True)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds(casa_uvfits):
    """Check _key2inds interprets every supported key form.

    _key2inds returns (blt indices, conjugated blt indices,
    (pol indices, conjugated pol indices)).  Covered key forms:
    (ant1, ant2, pol) with pol as number or string, conjugated antpairs,
    antpair-only, baseline-number-only, pol-only (number or string),
    invalid keys (KeyError), and autocorrelations.
    """
    # Test function to interpret key as antpair, pol
    uv = casa_uvfits
    # Get an antpair/pol combo
    ant1 = uv.ant_1_array[0]
    ant2 = uv.ant_2_array[0]
    pol = uv.polarization_array[0]
    bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]
    ind1, ind2, indp = uv._key2inds((ant1, ant2, pol))

    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal([0], indp[0])
    # Any of these inputs can also be a tuple of a tuple, so need to be checked twice.
    ind1, ind2, indp = uv._key2inds(((ant1, ant2, pol),))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal([0], indp[0])

    # Combo with pol as string
    ind1, ind2, indp = uv._key2inds((ant1, ant2, uvutils.polnum2str(pol)))
    assert np.array_equal([0], indp[0])
    ind1, ind2, indp = uv._key2inds(((ant1, ant2, uvutils.polnum2str(pol)),))
    assert np.array_equal([0], indp[0])

    # Check conjugation: a reversed antpair fills the second (conjugated) slots.
    ind1, ind2, indp = uv._key2inds((ant2, ant1, pol))
    assert np.array_equal(bltind, ind2)
    assert np.array_equal(np.array([]), ind1)
    assert np.array_equal([0], indp[1])
    # Conjugation with pol as string
    ind1, ind2, indp = uv._key2inds((ant2, ant1, uvutils.polnum2str(pol)))
    assert np.array_equal(bltind, ind2)
    assert np.array_equal(np.array([]), ind1)
    assert np.array_equal([0], indp[1])
    assert np.array_equal([], indp[0])

    # Antpair only: all pols are selected.
    ind1, ind2, indp = uv._key2inds((ant1, ant2))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.arange(uv.Npols), indp[0])
    ind1, ind2, indp = uv._key2inds(((ant1, ant2)))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.arange(uv.Npols), indp[0])

    # Baseline number only: all pols are selected.
    ind1, ind2, indp = uv._key2inds(uv.antnums_to_baseline(ant1, ant2))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.arange(uv.Npols), indp[0])
    ind1, ind2, indp = uv._key2inds((uv.antnums_to_baseline(ant1, ant2),))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.arange(uv.Npols), indp[0])

    # Pol number only: all blts are selected.
    ind1, ind2, indp = uv._key2inds(pol)
    assert np.array_equal(np.arange(uv.Nblts), ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.array([0]), indp[0])
    ind1, ind2, indp = uv._key2inds((pol))
    assert np.array_equal(np.arange(uv.Nblts), ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.array([0]), indp[0])

    # Pol string only: all blts are selected.
    ind1, ind2, indp = uv._key2inds("LL")
    assert np.array_equal(np.arange(uv.Nblts), ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.array([1]), indp[0])
    ind1, ind2, indp = uv._key2inds(("LL"))
    assert np.array_equal(np.arange(uv.Nblts), ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.array([1]), indp[0])

    # Test invalid keys
    pytest.raises(KeyError, uv._key2inds, "I")  # pol str not in data
    pytest.raises(KeyError, uv._key2inds, -8)  # pol num not in data
    pytest.raises(KeyError, uv._key2inds, 6)  # bl num not in data
    pytest.raises(KeyError, uv._key2inds, (1, 1))  # ant pair not in data
    pytest.raises(KeyError, uv._key2inds, (1, 1, "rr"))  # ant pair not in data
    pytest.raises(KeyError, uv._key2inds, (0, 1, "xx"))  # pol not in data

    # Test autos are handled correctly
    uv.ant_2_array[0] = uv.ant_1_array[0]
    ind1, ind2, indp = uv._key2inds((ant1, ant1, pol))
    assert np.array_equal(ind1, [0])
    assert np.array_equal(ind2, [])
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols(casa_uvfits):
    """Keying on a conjugated antpair returns all pols in conjugated order."""
    uv = casa_uvfits
    a1, a2 = uv.ant_1_array[0], uv.ant_2_array[0]
    expected_blts = np.nonzero((uv.ant_1_array == a1) & (uv.ant_2_array == a2))[0]

    ind1, ind2, indp = uv._key2inds((a2, a1))

    # Data pols are 'rr', 'll', 'rl', 'lr'; conjugating swaps the cross
    # pols, so the conjugated pol order is [0, 1, 3, 2].
    assert np.array_equal(np.array([]), ind1)
    assert np.array_equal(expected_blts, ind2)
    assert np.array_equal(np.array([]), indp[0])
    assert np.array_equal([0, 1, 3, 2], indp[1])
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols_fringe(casa_uvfits):
    """Antpair lookup when one occurrence of the baseline is conjugated."""
    uv = casa_uvfits
    uv.select(polarizations=["rl"])
    a1, a2 = uv.ant_1_array[0], uv.ant_2_array[0]
    # Flip one instance of this baseline so both orderings appear in the data.
    uv.ant_1_array[0], uv.ant_2_array[0] = a2, a1
    expected_blts = np.nonzero((uv.ant_1_array == a1) & (uv.ant_2_array == a2))[0]

    ind1, ind2, indp = uv._key2inds((a1, a2))

    assert np.array_equal(expected_blts, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.array([0]), indp[0])
    assert np.array_equal(np.array([]), indp[1])
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols_bl_fringe(casa_uvfits):
    """Baseline-number lookup when one occurrence of the baseline is conjugated."""
    uv = casa_uvfits
    uv.select(polarizations=["rl"])
    a1, a2 = uv.ant_1_array[0], uv.ant_2_array[0]
    # Flip one instance of this baseline in both the antenna and baseline arrays.
    uv.ant_1_array[0], uv.ant_2_array[0] = a2, a1
    uv.baseline_array[0] = uvutils.antnums_to_baseline(a2, a1, uv.Nants_telescope)
    bl = uvutils.antnums_to_baseline(a1, a2, uv.Nants_telescope)
    expected_blts = np.nonzero((uv.ant_1_array == a1) & (uv.ant_2_array == a2))[0]

    ind1, ind2, indp = uv._key2inds(bl)

    assert np.array_equal(expected_blts, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.array([0]), indp[0])
    assert np.array_equal(np.array([]), indp[1])
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols_missing_data(casa_uvfits):
    """A conjugated antpair key raises when the conjugate pol is not in the data."""
    uv = casa_uvfits
    uv.select(polarizations=["rl"])
    with pytest.raises(KeyError):
        uv._key2inds((uv.ant_2_array[0], uv.ant_1_array[0]))
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols_bls(casa_uvfits):
    """A conjugated baseline-number key returns all pols in conjugated order."""
    uv = casa_uvfits
    a1, a2 = uv.ant_1_array[0], uv.ant_2_array[0]
    conj_bl = uvutils.antnums_to_baseline(a2, a1, uv.Nants_telescope)
    expected_blts = np.nonzero((uv.ant_1_array == a1) & (uv.ant_2_array == a2))[0]

    ind1, ind2, indp = uv._key2inds(conj_bl)

    # Data pols are 'rr', 'll', 'rl', 'lr'; conjugating swaps the cross
    # pols, so the conjugated pol order is [0, 1, 3, 2].
    assert np.array_equal(np.array([]), ind1)
    assert np.array_equal(expected_blts, ind2)
    assert np.array_equal(np.array([]), indp[0])
    assert np.array_equal([0, 1, 3, 2], indp[1])
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols_missing_data_bls(casa_uvfits):
    """A conjugated baseline key raises when the conjugate pol is not in the data."""
    uv = casa_uvfits
    uv.select(polarizations=["rl"])
    conj_bl = uvutils.antnums_to_baseline(
        uv.ant_2_array[0], uv.ant_1_array[0], uv.Nants_telescope
    )
    with pytest.raises(KeyError):
        uv._key2inds(conj_bl)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_smart_slicing(casa_uvfits):
    """Check _smart_slicing over all index-regularity combinations.

    Exercises regular (evenly spaced) vs irregular blt indices, regular vs
    irregular pol indices, empty vs non-empty conjugated index sets, the
    force_copy flag, single-element selections, the squeeze modes, and —
    by mutating uv.data_array in place after slicing — whether a view or
    a copy was returned in each case.
    """
    # Test function to slice data
    uv = casa_uvfits
    # ind1 reg, ind2 empty, pol reg
    ind1 = 10 * np.arange(9)
    ind2 = []
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    assert not d.flags.writeable
    # Ensure a view was returned
    uv.data_array[ind1[1], 0, 0, indp[0]] = 5.43
    assert d[1, 0, 0] == uv.data_array[ind1[1], 0, 0, indp[0]]

    # force copy
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []), force_copy=True)
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    assert d.flags.writeable
    # Ensure a copy was returned
    uv.data_array[ind1[1], 0, 0, indp[0]] = 4.3
    assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]

    # ind1 reg, ind2 empty, pol not reg
    ind1 = 10 * np.arange(9)
    ind2 = []
    indp = [0, 1, 3]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    assert not d.flags.writeable
    # Ensure a copy was returned
    uv.data_array[ind1[1], 0, 0, indp[0]] = 1.2
    assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]

    # ind1 not reg, ind2 empty, pol reg
    ind1 = [0, 4, 5]
    ind2 = []
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    assert not d.flags.writeable
    # Ensure a copy was returned
    uv.data_array[ind1[1], 0, 0, indp[0]] = 8.2
    assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]

    # ind1 not reg, ind2 empty, pol not reg
    ind1 = [0, 4, 5]
    ind2 = []
    indp = [0, 1, 3]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    assert not d.flags.writeable
    # Ensure a copy was returned
    uv.data_array[ind1[1], 0, 0, indp[0]] = 3.4
    assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]

    # ind1 empty, ind2 reg, pol reg
    # Note conjugation test ensures the result is a copy, not a view.
    ind1 = []
    ind2 = 10 * np.arange(9)
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))
    dcheck = uv.data_array[ind2, :, :, :]
    dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))
    assert np.all(d == dcheck)

    # ind1 empty, ind2 reg, pol not reg
    ind1 = []
    ind2 = 10 * np.arange(9)
    indp = [0, 1, 3]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))
    dcheck = uv.data_array[ind2, :, :, :]
    dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))
    assert np.all(d == dcheck)

    # ind1 empty, ind2 not reg, pol reg
    ind1 = []
    ind2 = [1, 4, 5, 10]
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))
    dcheck = uv.data_array[ind2, :, :, :]
    dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))
    assert np.all(d == dcheck)

    # ind1 empty, ind2 not reg, pol not reg
    ind1 = []
    ind2 = [1, 4, 5, 10]
    indp = [0, 1, 3]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))
    dcheck = uv.data_array[ind2, :, :, :]
    dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))
    assert np.all(d == dcheck)

    # ind1, ind2 not empty, pol reg
    ind1 = np.arange(20)
    ind2 = np.arange(30, 40)
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, indp))
    dcheck = np.append(
        uv.data_array[ind1, :, :, :], np.conj(uv.data_array[ind2, :, :, :]), axis=0
    )
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)

    # ind1, ind2 not empty, pol not reg
    ind1 = np.arange(20)
    ind2 = np.arange(30, 40)
    indp = [0, 1, 3]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, indp))
    dcheck = np.append(
        uv.data_array[ind1, :, :, :], np.conj(uv.data_array[ind2, :, :, :]), axis=0
    )
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)

    # test single element
    ind1 = [45]
    ind2 = []
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp], axis=1)
    assert np.all(d == dcheck)

    # test single element
    ind1 = []
    ind2 = [45]
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))
    assert np.all(d == np.conj(dcheck))

    # Full squeeze
    ind1 = [45]
    ind2 = []
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []), squeeze="full")
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)

    # Test invalid squeeze
    pytest.raises(
        ValueError,
        uv._smart_slicing,
        uv.data_array,
        ind1,
        ind2,
        (indp, []),
        squeeze="notasqueeze",
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_data(casa_uvfits):
    """get_data returns the right data slice for every supported key form."""
    uv = casa_uvfits
    a1, a2 = uv.ant_1_array[0], uv.ant_2_array[0]
    pol = uv.polarization_array[0]
    blts = np.nonzero((uv.ant_1_array == a1) & (uv.ant_2_array == a2))[0]
    expected = np.squeeze(uv.data_array[blts, :, :, 0])

    # All equivalent key spellings return the same slice.
    assert np.all(expected == uv.get_data(a1, a2, pol))
    assert np.all(expected == uv.get_data(a1, a2, uvutils.polnum2str(pol)))
    assert np.all(expected == uv.get_data((a1, a2, pol)))

    # More than one key is rejected.
    with pytest.raises(ValueError) as cm:
        uv.get_data((a1, a2, pol), (a1, a2, pol))
    assert str(cm.value).startswith("no more than 3 key values can be passed")

    # A conjugated antpair yields conjugated data.
    assert np.all(expected == np.conj(uv.get_data(a2, a1, pol)))

    # Cross pols conjugate into each other.
    conj_cross = uv.get_data(a2, a1, uv.polarization_array[2])
    cross = uv.get_data(a1, a2, uv.polarization_array[3])
    assert np.all(conj_cross == np.conj(cross))

    # Antpair-only key returns all pols.
    assert np.all(np.squeeze(uv.data_array[blts, :, :, :]) == uv.get_data(a1, a2))

    # Pol-only key returns all baseline-times.
    assert np.all(np.squeeze(uv.data_array[:, :, :, 0]) == uv.get_data(pol))
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_flags(casa_uvfits):
    """Test get_flags for easy access to the flag array.

    Checks all key forms (antpair+pol as number or string, tuple key,
    antpair only, pol only), that passing multiple keys raises, and that
    a conjugated antpair returns identical boolean flags.
    """
    uv = casa_uvfits
    # Get an antpair/pol combo
    ant1 = uv.ant_1_array[0]
    ant2 = uv.ant_2_array[0]
    pol = uv.polarization_array[0]
    bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]
    dcheck = np.squeeze(uv.flag_array[bltind, :, :, 0])
    d = uv.get_flags(ant1, ant2, pol)
    assert np.all(dcheck == d)
    d = uv.get_flags(ant1, ant2, uvutils.polnum2str(pol))
    assert np.all(dcheck == d)
    d = uv.get_flags((ant1, ant2, pol))
    assert np.all(dcheck == d)
    with pytest.raises(ValueError) as cm:
        uv.get_flags((ant1, ant2, pol), (ant1, ant2, pol))
    assert str(cm.value).startswith("no more than 3 key values can be passed")
    # Check conjugation: flags are boolean, so conjugation is a no-op.
    d = uv.get_flags(ant2, ant1, pol)
    assert np.all(dcheck == d)
    # np.bool was deprecated in numpy 1.20 and removed in 1.24;
    # np.bool_ is the correct boolean scalar type.
    assert d.dtype == np.bool_
    # Antpair only
    dcheck = np.squeeze(uv.flag_array[bltind, :, :, :])
    d = uv.get_flags(ant1, ant2)
    assert np.all(dcheck == d)
    # Pol number only
    dcheck = np.squeeze(uv.flag_array[:, :, :, 0])
    d = uv.get_flags(pol)
    assert np.all(dcheck == d)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_nsamples(casa_uvfits):
    """get_nsamples returns the right slice for every supported key form."""
    uv = casa_uvfits
    a1, a2 = uv.ant_1_array[0], uv.ant_2_array[0]
    pol = uv.polarization_array[0]
    blts = np.nonzero((uv.ant_1_array == a1) & (uv.ant_2_array == a2))[0]
    expected = np.squeeze(uv.nsample_array[blts, :, :, 0])

    # All equivalent key spellings return the same slice.
    assert np.all(expected == uv.get_nsamples(a1, a2, pol))
    assert np.all(expected == uv.get_nsamples(a1, a2, uvutils.polnum2str(pol)))
    assert np.all(expected == uv.get_nsamples((a1, a2, pol)))

    # More than one key is rejected.
    with pytest.raises(ValueError) as cm:
        uv.get_nsamples((a1, a2, pol), (a1, a2, pol))
    assert str(cm.value).startswith("no more than 3 key values can be passed")

    # nsamples are real, so a conjugated antpair returns identical values.
    assert np.all(expected == uv.get_nsamples(a2, a1, pol))

    # Antpair-only key returns all pols.
    assert np.all(
        np.squeeze(uv.nsample_array[blts, :, :, :]) == uv.get_nsamples(a1, a2)
    )

    # Pol-only key returns all baseline-times.
    assert np.all(np.squeeze(uv.nsample_array[:, :, :, 0]) == uv.get_nsamples(pol))
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpair2ind(paper_uvh5):
    """antpair2ind returns integer blt indices for a baseline."""
    uv = paper_uvh5
    indices = uv.antpair2ind(0, 1, ordered=False)
    # Expected positions in this file: every 21st blt from 1 through 379.
    np.testing.assert_array_equal(indices, np.arange(1, 380, 21))
    assert np.issubdtype(indices.dtype, np.integer)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpair2ind_conj(paper_uvh5):
    """An unordered lookup of the conjugated pair gives the same indices."""
    uv = paper_uvh5
    forward = uv.antpair2ind(0, 1, ordered=False)
    # Pass the pair as a single tuple key, conjugated.
    backward = uv.antpair2ind((1, 0), ordered=False)
    np.testing.assert_array_equal(forward, backward)
    assert np.issubdtype(backward.dtype, np.integer)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpair2ind_ordered(paper_uvh5):
    """ordered=True only matches the baseline orientation present in the data."""
    uv = paper_uvh5
    unordered = uv.antpair2ind(0, 1, ordered=False)
    # The conjugated orientation is not in the data: nothing matches.
    assert uv.antpair2ind(1, 0, ordered=True).size == 0
    # The orientation actually in the data matches the unordered result.
    ordered_inds = uv.antpair2ind(0, 1, ordered=True)
    np.testing.assert_array_equal(unordered, ordered_inds)
    assert np.issubdtype(ordered_inds.dtype, np.integer)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpair2ind_autos(paper_uvh5):
    """Autocorrelation lookup is identical with and without ordering."""
    uv = paper_uvh5
    with_order = uv.antpair2ind(0, 0, ordered=True)
    without_order = uv.antpair2ind(0, 0, ordered=False)
    np.testing.assert_array_equal(with_order, without_order)
    assert np.issubdtype(with_order.dtype, np.integer)
    assert np.issubdtype(without_order.dtype, np.integer)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpair2ind_exceptions(paper_uvh5):
    """Invalid arguments to antpair2ind raise descriptive ValueErrors."""
    uv = paper_uvh5
    # A lone integer is not an antpair.
    with pytest.raises(ValueError, match="antpair2ind must be fed an antpair tuple"):
        uv.antpair2ind(1)
    # Non-integer antenna values are rejected.
    with pytest.raises(ValueError, match="antpair2ind must be fed an antpair tuple"):
        uv.antpair2ind("bar", "foo")
    # ordered must be boolean.
    with pytest.raises(ValueError, match="ordered must be a boolean"):
        uv.antpair2ind(0, 1, "foo")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_times(casa_uvfits):
    """get_times returns times matching get_data's baseline-time selection."""
    uv = casa_uvfits
    a1, a2 = uv.ant_1_array[0], uv.ant_2_array[0]
    pol = uv.polarization_array[0]
    blts = np.nonzero((uv.ant_1_array == a1) & (uv.ant_2_array == a2))[0]
    expected = uv.time_array[blts]

    # The pol part of the key (number or string) should not affect times.
    assert np.all(expected == uv.get_times(a1, a2, pol))
    assert np.all(expected == uv.get_times(a1, a2, uvutils.polnum2str(pol)))
    assert np.all(expected == uv.get_times((a1, a2, pol)))

    # More than one key is rejected.
    with pytest.raises(ValueError) as cm:
        uv.get_times((a1, a2, pol), (a1, a2, pol))
    assert str(cm.value).startswith("no more than 3 key values can be passed")

    # Conjugated antpair and antpair-only keys give the same times.
    assert np.all(expected == uv.get_times(a2, a1, pol))
    assert np.all(expected == uv.get_times(a1, a2))

    # Pol-only key returns the full time array.
    assert np.all(uv.get_times(pol) == uv.time_array)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpairpol_iter(casa_uvfits):
    """antpairpol_iter yields correct data for every baseline/pol combo."""
    uv = casa_uvfits
    pol_to_axis = {
        uvutils.polnum2str(uv.polarization_array[i]): i for i in range(uv.Npols)
    }
    seen_keys = []
    seen_pols = set()
    seen_bls = set()
    for key, data in uv.antpairpol_iter():
        seen_keys += key
        bl_num = uv.antnums_to_baseline(key[0], key[1])
        bl_rows = np.where(uv.baseline_array == bl_num)[0]
        seen_bls.add(bl_num)
        seen_pols.add(key[2])
        # Each yielded array must match a direct slice of data_array.
        expected = np.squeeze(uv.data_array[bl_rows, :, :, pol_to_axis[key[2]]])
        assert np.all(expected == data)
    # Every baseline and every pol must have been visited.
    assert len(seen_bls) == len(uv.get_baseline_nums())
    assert len(seen_pols) == uv.Npols
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_ants(casa_uvfits):
    """get_ants returns exactly the antennas appearing in the data."""
    uv = casa_uvfits
    # Containment in both directions is set equality against the union of
    # the two antenna arrays.
    assert set(uv.get_ants()) == set(uv.ant_1_array) | set(uv.ant_2_array)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_enu_antpos():
    """Spot-check get_ENU_antpos with/without centering and data-ant picking."""
    uvd = UVData()
    uvd.read_uvh5(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5"))

    # No centering, all telescope antennas.
    antpos, ants = uvd.get_ENU_antpos(center=False, pick_data_ants=False)
    assert len(ants) == 113
    assert np.isclose(antpos[0, 0], 19.340211050751535)
    assert ants[0] == 0

    # Defaults must match center=False, pick_data_ants=False.
    antpos_default, ants = uvd.get_ENU_antpos()
    assert np.all(antpos == antpos_default)

    # Centered positions.
    antpos, ants = uvd.get_ENU_antpos(center=True, pick_data_ants=False)
    assert np.isclose(antpos[0, 0], 22.472442651767714)

    # Centered, restricted to antennas that have data.
    antpos, ants = uvd.get_ENU_antpos(center=True, pick_data_ants=True)
    assert ants[0] == 9
    assert np.isclose(antpos[0, 0], -0.0026981323386223721)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_telescope_loc_xyz_check(paper_uvh5, tmp_path):
    """A file with a corrupted telescope location reads only with checks off."""
    uv = paper_uvh5
    # Corrupt the location by treating it as lat/lon/alt and converting again.
    uv.telescope_location = uvutils.XYZ_from_LatLonAlt(*uv.telescope_location)
    uv.set_lsts_from_time_array()  # fix LST values
    fname = str(tmp_path / "test.uvh5")
    uv.write_uvh5(fname, run_check=False, check_extra=False, clobber=True)

    # Reading with checks disabled succeeds (passing is implicit).
    uv.read(fname, run_check=False)

    # Reading with the default checks enabled must fail.
    with pytest.raises(ValueError):
        uv.read(fname)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_pols(casa_uvfits):
    """get_pols returns the data polarizations as strings."""
    assert sorted(casa_uvfits.get_pols()) == sorted(["rr", "ll", "lr", "rl"])
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_pols_x_orientation(paper_uvh5):
    """x_orientation maps the physical pol onto east/north feed labels."""
    uv_in = paper_uvh5
    for orientation, expected in [("east", ["en"]), ("north", ["ne"])]:
        uv_in.x_orientation = orientation
        assert uv_in.get_pols() == expected
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_feedpols(casa_uvfits):
    """get_feedpols returns unique feed labels; pseudo-Stokes data raise."""
    uv = casa_uvfits
    assert sorted(uv.get_feedpols()) == sorted(["r", "l"])
    # Pseudo-Stokes visibilities have no feed polarization.
    uv.polarization_array[0] = 1  # pseudo-Stokes I
    with pytest.raises(ValueError):
        uv.get_feedpols()
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_parse_ants(casa_uvfits):
    """Check parse_ants over the full ant_str mini-language.

    Covers 'all'/'auto'/'cross' keywords, pseudo-Stokes tokens (pI..pV),
    invalid strings, single antennas, antenna lists, single and multiple
    baselines with/without per-feed polarizations, parenthesized groups,
    '-' prefixed removals, print_toggle, and behavior on a file that
    contains autocorrelations.  parse_ants returns (antpair list or None,
    polarization list or None); None means 'no restriction'.
    """
    # Test function to get correct antenna pairs and polarizations
    uv = casa_uvfits

    # All baselines
    ant_str = "all"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    assert isinstance(ant_pairs_nums, type(None))
    assert isinstance(polarizations, type(None))

    # Auto correlations
    ant_str = "auto"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    assert Counter(ant_pairs_nums) == Counter([])
    assert isinstance(polarizations, type(None))

    # Cross correlations
    ant_str = "cross"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    assert Counter(uv.get_antpairs()) == Counter(ant_pairs_nums)
    assert isinstance(polarizations, type(None))

    # pseudo-Stokes params
    ant_str = "pI,pq,pU,pv"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    pols_expected = [4, 3, 2, 1]
    assert isinstance(ant_pairs_nums, type(None))
    assert Counter(polarizations) == Counter(pols_expected)

    # Unparsible string
    ant_str = "none"
    pytest.raises(ValueError, uv.parse_ants, ant_str)

    # Single antenna number
    ant_str = "0"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    # fmt: off
    ant_pairs_expected = [(0, 1), (0, 2), (0, 3), (0, 6), (0, 7), (0, 8),
                          (0, 11), (0, 14), (0, 18), (0, 19), (0, 20),
                          (0, 21), (0, 22), (0, 23), (0, 24), (0, 26),
                          (0, 27)]
    # fmt: on
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))

    # Single antenna number not in the data
    ant_str = "10"
    ant_pairs_nums, polarizations = uvtest.checkWarnings(
        uv.parse_ants, [ant_str], {}, nwarnings=1, message="Warning: Antenna"
    )
    assert isinstance(ant_pairs_nums, type(None))
    assert isinstance(polarizations, type(None))

    # Single antenna number with polarization, both not in the data
    ant_str = "10x"
    ant_pairs_nums, polarizations = uvtest.checkWarnings(
        uv.parse_ants,
        [ant_str],
        {},
        nwarnings=2,
        message=["Warning: Antenna", "Warning: Polarization"],
    )
    assert isinstance(ant_pairs_nums, type(None))
    assert isinstance(polarizations, type(None))

    # Multiple antenna numbers as list
    ant_str = "22,26"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    # fmt: off
    ant_pairs_expected = [(0, 22), (0, 26), (1, 22), (1, 26), (2, 22), (2, 26),
                          (3, 22), (3, 26), (6, 22), (6, 26), (7, 22),
                          (7, 26), (8, 22), (8, 26), (11, 22), (11, 26),
                          (14, 22), (14, 26), (18, 22), (18, 26),
                          (19, 22), (19, 26), (20, 22), (20, 26),
                          (21, 22), (21, 26), (22, 23), (22, 24),
                          (22, 26), (22, 27), (23, 26), (24, 26),
                          (26, 27)]
    # fmt: on
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))

    # Single baseline
    ant_str = "1_3"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 3)]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))

    # Single baseline with polarization
    ant_str = "1l_3r"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 3)]
    pols_expected = [-4]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)

    # Single baseline with single polarization in first entry
    ant_str = "1l_3,2x_3"
    ant_pairs_nums, polarizations = uvtest.checkWarnings(
        uv.parse_ants, [ant_str], {}, nwarnings=1, message="Warning: Polarization"
    )
    ant_pairs_expected = [(1, 3), (2, 3)]
    pols_expected = [-2, -4]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)

    # Single baseline with single polarization in last entry
    ant_str = "1_3l,2_3x"
    ant_pairs_nums, polarizations = uvtest.checkWarnings(
        uv.parse_ants, [ant_str], {}, nwarnings=1, message="Warning: Polarization"
    )
    ant_pairs_expected = [(1, 3), (2, 3)]
    pols_expected = [-2, -3]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)

    # Multiple baselines as list
    ant_str = "1_2,1_3,1_11"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 2), (1, 3), (1, 11)]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))

    # Multiples baselines with polarizations as list
    ant_str = "1r_2l,1l_3l,1r_11r"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 2), (1, 3), (1, 11)]
    pols_expected = [-1, -2, -3]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)

    # Specific baselines with parenthesis
    ant_str = "(1,3)_11"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 11), (3, 11)]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))

    # Specific baselines with parenthesis
    ant_str = "1_(3,11)"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 3), (1, 11)]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))

    # Antenna numbers with polarizations
    ant_str = "(1l,2r)_(3l,6r)"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 3), (1, 6), (2, 3), (2, 6)]
    pols_expected = [-1, -2, -3, -4]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)

    # Antenna numbers with - for avoidance
    ant_str = "1_(-3,11)"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 11)]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))

    # Remove specific antenna number
    ant_str = "1,-3"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [
        (0, 1),
        (1, 2),
        (1, 6),
        (1, 7),
        (1, 8),
        (1, 11),
        (1, 14),
        (1, 18),
        (1, 19),
        (1, 20),
        (1, 21),
        (1, 22),
        (1, 23),
        (1, 24),
        (1, 26),
        (1, 27),
    ]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))

    # Remove specific baseline (same expected antenna pairs as above example)
    ant_str = "1,-1_3"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))

    # Antenna numbers with polarizations and - for avoidance
    ant_str = "1l_(-3r,11l)"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 11)]
    pols_expected = [-2]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)

    # Antenna numbers and pseudo-Stokes parameters
    ant_str = "(1l,2r)_(3l,6r),pI,pq"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 3), (1, 6), (2, 3), (2, 6)]
    pols_expected = [2, 1, -1, -2, -3, -4]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)

    # Multiple baselines with multiple polarizations, one pol to be removed
    ant_str = "1l_2,1l_3,-1l_3r"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 2), (1, 3)]
    pols_expected = [-2]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)

    # Multiple baselines with multiple polarizations, one pol (not in data)
    # to be removed
    ant_str = "1l_2,1l_3,-1x_3y"
    ant_pairs_nums, polarizations = uvtest.checkWarnings(
        uv.parse_ants, [ant_str], {}, nwarnings=1, message="Warning: Polarization"
    )
    ant_pairs_expected = [(1, 2), (1, 3)]
    pols_expected = [-2, -4]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)

    # Test print toggle on single baseline with polarization
    ant_str = "1l_2l"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str, print_toggle=True)
    ant_pairs_expected = [(1, 2)]
    pols_expected = [-2]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)

    # Test ant_str='auto' on file with auto correlations
    uv = UVData()
    testfile = os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5")
    uv.read(testfile)

    ant_str = "auto"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_nums = [
        9,
        10,
        20,
        22,
        31,
        43,
        53,
        64,
        65,
        72,
        80,
        81,
        88,
        89,
        96,
        97,
        104,
        105,
        112,
    ]

    ant_pairs_autos = [(ant_i, ant_i) for ant_i in ant_nums]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_autos)
    assert isinstance(polarizations, type(None))

    # Test cross correlation extraction on data with auto + cross
    ant_str = "cross"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_cross = list(itertools.combinations(ant_nums, 2))
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_cross)
    assert isinstance(polarizations, type(None))

    # Remove only polarization of single baseline
    ant_str = "all,-9x_10x"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = ant_pairs_autos + ant_pairs_cross
    ant_pairs_expected.remove((9, 10))
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))

    # Test appending all to beginning of strings that start with -
    ant_str = "-9"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = ant_pairs_autos + ant_pairs_cross
    for ant_i in ant_nums:
        ant_pairs_expected.remove((9, ant_i))
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_with_ant_str(casa_uvfits):
    """Exercise ``select`` with the full range of ``ant_str`` syntax.

    Covers 'all'/'cross'/'auto' keywords, single antennas, baselines with and
    without polarization suffixes, parenthesized antenna lists, '-' avoidance
    syntax, and pseudo-Stokes parameters, checking the resulting antenna pairs
    and polarizations against hand-computed expectations.
    """
    # Test select function with ant_str argument
    uv = casa_uvfits
    inplace = False
    # All baselines
    ant_str = "all"
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(uv.get_antpairs())
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Cross correlations
    ant_str = "cross"
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(uv.get_antpairs())
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # All baselines in data are cross correlations
    # Single antenna number
    ant_str = "0"
    ant_pairs = [
        (0, 1),
        (0, 2),
        (0, 3),
        (0, 6),
        (0, 7),
        (0, 8),
        (0, 11),
        (0, 14),
        (0, 18),
        (0, 19),
        (0, 20),
        (0, 21),
        (0, 22),
        (0, 23),
        (0, 24),
        (0, 26),
        (0, 27),
    ]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Single antenna number not present in data
    ant_str = "10"
    uv2 = uvtest.checkWarnings(
        uv.select,
        [],
        {"ant_str": ant_str, "inplace": inplace},
        nwarnings=2,
        message=[
            "Warning: Antenna",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
    )
    # Multiple antenna numbers as list
    ant_str = "22,26"
    ant_pairs = [
        (0, 22),
        (0, 26),
        (1, 22),
        (1, 26),
        (2, 22),
        (2, 26),
        (3, 22),
        (3, 26),
        (6, 22),
        (6, 26),
        (7, 22),
        (7, 26),
        (8, 22),
        (8, 26),
        (11, 22),
        (11, 26),
        (14, 22),
        (14, 26),
        (18, 22),
        (18, 26),
        (19, 22),
        (19, 26),
        (20, 22),
        (20, 26),
        (21, 22),
        (21, 26),
        (22, 23),
        (22, 24),
        (22, 26),
        (22, 27),
        (23, 26),
        (24, 26),
        (26, 27),
    ]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Single baseline
    ant_str = "1_3"
    ant_pairs = [(1, 3)]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Single baseline with polarization
    ant_str = "1l_3r"
    ant_pairs = [(1, 3)]
    pols = ["lr"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Single baseline with single polarization in first entry
    ant_str = "1l_3,2x_3"
    # x,y pols not present in data
    uv2 = uvtest.checkWarnings(
        uv.select,
        [],
        {"ant_str": ant_str, "inplace": inplace},
        nwarnings=2,
        message=[
            "Warning: Polarization",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
    )
    # with polarizations in data
    ant_str = "1l_3,2_3"
    ant_pairs = [(1, 3), (2, 3)]
    pols = ["ll", "lr"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Single baseline with single polarization in last entry
    ant_str = "1_3l,2_3x"
    # x,y pols not present in data
    uv2 = uvtest.checkWarnings(
        uv.select,
        [],
        {"ant_str": ant_str, "inplace": inplace},
        nwarnings=2,
        message=[
            "Warning: Polarization",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
    )
    # with polarizations in data
    ant_str = "1_3l,2_3"
    ant_pairs = [(1, 3), (2, 3)]
    pols = ["ll", "rl"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Multiple baselines as list
    ant_str = "1_2,1_3,1_10"
    # Antenna number 10 not in data
    uv2 = uvtest.checkWarnings(
        uv.select,
        [],
        {"ant_str": ant_str, "inplace": inplace},
        nwarnings=2,
        message=[
            "Warning: Antenna",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
    )
    ant_pairs = [(1, 2), (1, 3)]
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Multiples baselines with polarizations as list
    ant_str = "1r_2l,1l_3l,1r_11r"
    ant_pairs = [(1, 2), (1, 3), (1, 11)]
    pols = ["rr", "ll", "rl"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Specific baselines with parenthesis
    ant_str = "(1,3)_11"
    ant_pairs = [(1, 11), (3, 11)]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Specific baselines with parenthesis
    ant_str = "1_(3,11)"
    ant_pairs = [(1, 3), (1, 11)]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Antenna numbers with polarizations
    ant_str = "(1l,2r)_(3l,6r)"
    ant_pairs = [(1, 3), (1, 6), (2, 3), (2, 6)]
    pols = ["rr", "ll", "rl", "lr"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Antenna numbers with - for avoidance
    ant_str = "1_(-3,11)"
    ant_pairs = [(1, 11)]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    ant_str = "(-1,3)_11"
    ant_pairs = [(3, 11)]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Remove specific antenna number
    ant_str = "1,-3"
    ant_pairs = [
        (0, 1),
        (1, 2),
        (1, 6),
        (1, 7),
        (1, 8),
        (1, 11),
        (1, 14),
        (1, 18),
        (1, 19),
        (1, 20),
        (1, 21),
        (1, 22),
        (1, 23),
        (1, 24),
        (1, 26),
        (1, 27),
    ]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Remove specific baseline
    ant_str = "1,-1_3"
    ant_pairs = [
        (0, 1),
        (1, 2),
        (1, 6),
        (1, 7),
        (1, 8),
        (1, 11),
        (1, 14),
        (1, 18),
        (1, 19),
        (1, 20),
        (1, 21),
        (1, 22),
        (1, 23),
        (1, 24),
        (1, 26),
        (1, 27),
    ]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Antenna numbers with polarizations and - for avoidance
    ant_str = "1l_(-3r,11l)"
    ant_pairs = [(1, 11)]
    pols = ["ll"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Test pseudo-Stokes params with select
    ant_str = "pi,pQ"
    pols = ["pQ", "pI"]
    # overwrite the polarization_array so pseudo-Stokes values are present
    uv.polarization_array = np.array([4, 3, 2, 1])
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(uv.get_antpairs())
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Test ant_str = 'auto' on file with auto correlations
    uv = UVData()
    testfile = os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5")
    uv.read(testfile)
    ant_str = "auto"
    ant_nums = [
        9,
        10,
        20,
        22,
        31,
        43,
        53,
        64,
        65,
        72,
        80,
        81,
        88,
        89,
        96,
        97,
        104,
        105,
        112,
    ]
    ant_pairs_autos = [(ant_i, ant_i) for ant_i in ant_nums]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs_autos)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Test cross correlation extraction on data with auto + cross
    ant_str = "cross"
    ant_pairs_cross = list(itertools.combinations(ant_nums, 2))
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs_cross)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Remove only polarization of single baseline
    ant_str = "all,-9x_10x"
    ant_pairs = ant_pairs_autos + ant_pairs_cross
    ant_pairs.remove((9, 10))
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Test appending all to beginning of strings that start with -
    ant_str = "-9"
    ant_pairs = ant_pairs_autos + ant_pairs_cross
    for ant_i in ant_nums:
        ant_pairs.remove((9, ant_i))
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "kwargs,message",
    [
        (
            {"ant_str": "", "antenna_nums": []},
            "Cannot provide ant_str with antenna_nums, antenna_names, bls, or "
            "polarizations.",
        ),
        (
            {"ant_str": "", "antenna_names": []},
            "Cannot provide ant_str with antenna_nums, antenna_names, bls, or "
            "polarizations.",
        ),
        (
            {"ant_str": "", "bls": []},
            "Cannot provide ant_str with antenna_nums, antenna_names, bls, or "
            "polarizations.",
        ),
        (
            {"ant_str": "", "polarizations": []},
            "Cannot provide ant_str with antenna_nums, antenna_names, bls, or "
            "polarizations.",
        ),
        ({"ant_str": "auto"}, "There is no data matching ant_str=auto in this object."),
        (
            {"ant_str": "pI,pq,pU,pv"},
            "Polarization 4 is not present in the polarization_array",
        ),
        ({"ant_str": "none"}, "Unparsible argument none"),
    ],
)
def test_select_with_ant_str_errors(casa_uvfits, kwargs, message):
    """Check that invalid ``ant_str`` usage in ``select`` raises a ValueError."""
    uv = casa_uvfits
    with pytest.raises(ValueError, match=message):
        uv.select(**kwargs)
def test_set_uvws_from_antenna_pos():
    """Test ``set_uvws_from_antenna_positions`` errors and results on phased data.

    Verifies that the method refuses phased data unless told to unphase, rejects
    invalid phase-frame names, and that the recomputed uvws are close to the
    uvws stored in the file.
    """
    # Test set_uvws_from_antenna_positions function with phased data
    uv_object = UVData()
    testfile = os.path.join(DATA_PATH, "1133866760.uvfits")
    uv_object.read_uvfits(testfile)
    orig_uvw_array = np.copy(uv_object.uvw_array)
    # phased data without allow_phasing should raise
    with pytest.raises(ValueError) as cm:
        uv_object.set_uvws_from_antenna_positions()
    assert str(cm.value).startswith("UVW calculation requires unphased data.")
    # bad orig_phase_frame
    with pytest.raises(ValueError) as cm:
        uvtest.checkWarnings(
            uv_object.set_uvws_from_antenna_positions,
            [True, "xyz"],
            message="Data will be unphased",
        )
    assert str(cm.value).startswith("Invalid parameter orig_phase_frame.")
    # bad output_phase_frame
    with pytest.raises(ValueError) as cm:
        uvtest.checkWarnings(
            uv_object.set_uvws_from_antenna_positions,
            [True, "gcrs", "xyz"],
            message="Data will be unphased",
        )
    assert str(cm.value).startswith("Invalid parameter output_phase_frame.")
    # valid call: unphase, recompute uvws, rephase through gcrs
    uvtest.checkWarnings(
        uv_object.set_uvws_from_antenna_positions,
        [True, "gcrs", "gcrs"],
        message="Data will be unphased",
    )
    # recomputed uvws should agree with the file values to within 2 meters
    max_diff = np.amax(np.absolute(np.subtract(orig_uvw_array, uv_object.uvw_array)))
    assert np.isclose(max_diff, 0.0, atol=2)
def test_get_antenna_redundancies():
    """Test ``get_redundancies(use_antpos=True)`` against the utils-level helper.

    Checks that conjugate_bls changes the baseline numbers, that all group
    members exist in the data after conjugation, and that the redundancy info
    agrees with ``uvutils.get_antenna_redundancies`` computed from ENU antenna
    positions.
    """
    uv0 = UVData()
    uv0.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    old_bl_array = np.copy(uv0.baseline_array)
    red_gps, centers, lengths = uv0.get_redundancies(
        use_antpos=True, include_autos=False, conjugate_bls=True
    )
    # new and old baseline Numbers are not the same (different conjugation)
    assert not np.allclose(uv0.baseline_array, old_bl_array)
    # assert all baselines are in the data (because it's conjugated to match)
    for i, gp in enumerate(red_gps):
        for bl in gp:
            assert bl in uv0.baseline_array
    # conjugate data differently
    uv0.conjugate_bls(convention="ant1<ant2")
    new_red_gps, new_centers, new_lengths, conjs = uv0.get_redundancies(
        use_antpos=True, include_autos=False, include_conjugates=True
    )
    # include_conjugates with use_antpos returns no conjugate indices
    assert conjs is None
    apos, anums = uv0.get_ENU_antpos()
    new_red_gps, new_centers, new_lengths = uvutils.get_antenna_redundancies(
        anums, apos, include_autos=False
    )
    # all redundancy info is the same
    assert red_gps == new_red_gps
    assert np.allclose(centers, new_centers)
    assert np.allclose(lengths, new_lengths)
@pytest.mark.parametrize("method", ("select", "average"))
@pytest.mark.parametrize("reconjugate", (True, False))
@pytest.mark.parametrize("flagging_level", ("none", "some", "all"))
def test_redundancy_contract_expand(method, reconjugate, flagging_level):
    """Round-trip compress_by_redundancy / inflate_by_redundancy.

    Assigns a constant value per redundant group so inflation can be checked
    exactly, then verifies nsample/flag handling for both compression methods
    at three flagging levels, and that inflate -> compress -> inflate is stable.
    """
    # Test that a UVData object can be reduced to one baseline from each redundant group
    # and restored to its original form.
    uv0 = UVData()
    uv0.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    # Fails at lower precision because some baselines fall into multiple
    # redundant groups
    tol = 0.02
    # Assign identical data to each redundant group:
    red_gps, centers, lengths = uv0.get_redundancies(
        tol=tol, use_antpos=True, conjugate_bls=True
    )
    for gp_ind, gp in enumerate(red_gps):
        for bl in gp:
            inds = np.where(bl == uv0.baseline_array)
            uv0.data_array[inds] *= 0
            uv0.data_array[inds] += complex(gp_ind)
    index_bls = [gp[0] for gp in red_gps]
    if flagging_level == "none":
        assert np.all(~uv0.flag_array)
    elif flagging_level == "some":
        # flag all the index baselines in a redundant group
        for bl in index_bls:
            bl_locs = np.where(uv0.baseline_array == bl)
            uv0.flag_array[bl_locs, :, :, :] = True
    elif flagging_level == "all":
        uv0.flag_array[:] = True
        uv0.check()
        assert np.all(uv0.flag_array)
    if reconjugate:
        uv0.conjugate_bls()
    uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)
    if method == "average":
        gp_bl_use = []
        nbls_group = []
        for gp in red_gps:
            bls_init = [bl for bl in gp if bl in uv0.baseline_array]
            nbls_group.append(len(bls_init))
            bl_use = [bl for bl in gp if bl in uv2.baseline_array]
            if len(bl_use) == 0:
                # not all possible baselines were present in uv0
                gp_bl_use.append(None)
            else:
                assert len(bl_use) == 1
                gp_bl_use.append(bl_use[0])
        for gp_ind, bl in enumerate(gp_bl_use):
            if bl is None:
                continue
            if flagging_level == "none" or flagging_level == "all":
                assert np.all(uv2.get_nsamples(bl) == nbls_group[gp_ind])
            else:
                assert np.all(uv2.get_nsamples(bl) == max((nbls_group[gp_ind] - 1), 1))
        if flagging_level == "all":
            assert np.all(uv2.flag_array)
        else:
            for gp_ind, bl in enumerate(gp_bl_use):
                if nbls_group[gp_ind] > 1:
                    assert np.all(~uv2.get_flags(bl))
    else:
        assert np.all(uv2.nsample_array == 1)
        if flagging_level == "some" or flagging_level == "all":
            assert np.all(uv2.flag_array)
        else:
            assert np.all(~uv2.flag_array)
    # Compare in-place to separated compression.
    uv3 = uv0.copy()
    uv3.compress_by_redundancy(method=method, tol=tol)
    assert uv2 == uv3
    # check inflating gets back to the original
    with pytest.warns(
        UserWarning, match="Missing some redundant groups. Filling in available data."
    ):
        uv2.inflate_by_redundancy(tol=tol)
    # Confirm that we get the same result looping inflate -> compress -> inflate.
    uv3 = uv2.compress_by_redundancy(method=method, tol=tol, inplace=False)
    with pytest.warns(
        UserWarning, match="Missing some redundant groups. Filling in available data."
    ):
        uv3.inflate_by_redundancy(tol=tol)
    if method == "average":
        # with average, the nsample_array goes up by the number of baselines
        # averaged together.
        assert not np.allclose(uv3.nsample_array, uv2.nsample_array)
        # reset it to test other parameters
        uv3.nsample_array = uv2.nsample_array
    uv3.history = uv2.history
    assert uv2 == uv3
    uv2.history = uv0.history
    # Inflation changes the baseline ordering into the order of the redundant groups.
    # reorder bls for comparison
    uv0.reorder_blts(conj_convention="u>0")
    uv2.reorder_blts(conj_convention="u>0")
    uv2._uvw_array.tols = [0, tol]
    if method == "average":
        # with average, the nsample_array goes up by the number of baselines
        # averaged together.
        assert not np.allclose(uv2.nsample_array, uv0.nsample_array)
        # reset it to test other parameters
        uv2.nsample_array = uv0.nsample_array
    if flagging_level == "some":
        if method == "select":
            # inflated array will be entirely flagged
            assert np.all(uv2.flag_array)
            assert not np.allclose(uv0.flag_array, uv2.flag_array)
            uv2.flag_array = uv0.flag_array
        else:
            # flag arrays will not match -- inflated array will mostly be unflagged
            # it will only be flagged if only one in group
            assert not np.allclose(uv0.flag_array, uv2.flag_array)
            uv2.flag_array = uv0.flag_array
    assert uv2 == uv0
@pytest.mark.parametrize("method", ("select", "average"))
@pytest.mark.parametrize("flagging_level", ("none", "some", "all"))
def test_redundancy_contract_expand_variable_data(method, flagging_level):
    """Compress/inflate when group members carry different data.

    ``uv1`` holds the per-group constant on every baseline while ``uv0`` holds
    it only on the index baseline; after compress + inflate the result is
    compared against ``uv1``, with method-dependent expectations for the
    averaged data and nsamples.
    """
    # Test that a UVData object can be reduced to one baseline from each redundant group
    # and restored to its original form.
    uv0 = UVData()
    uv0.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    # Fails at lower precision because some baselines fall into multiple
    # redundant groups
    tol = 0.02
    # Assign identical data to each redundant group in comparison object
    # Assign data to the index baseline and zeros elsewhere in the one to compress
    red_gps, centers, lengths = uv0.get_redundancies(
        tol=tol, use_antpos=True, conjugate_bls=True
    )
    index_bls = [gp[0] for gp in red_gps]
    uv0.data_array *= 0
    uv1 = uv0.copy()
    for gp_ind, gp in enumerate(red_gps):
        for bl in gp:
            inds = np.where(bl == uv0.baseline_array)
            uv1.data_array[inds] += complex(gp_ind)
            if bl in index_bls:
                uv0.data_array[inds] += complex(gp_ind)
    if flagging_level == "none":
        assert np.all(~uv0.flag_array)
    elif flagging_level == "some":
        # flag all the non index baselines in a redundant group
        uv0.flag_array[:, :, :, :] = True
        for bl in index_bls:
            bl_locs = np.where(uv0.baseline_array == bl)
            uv0.flag_array[bl_locs, :, :, :] = False
    elif flagging_level == "all":
        uv0.flag_array[:] = True
        uv0.check()
        assert np.all(uv0.flag_array)
    uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)
    # inflate to get back to the original size
    with pytest.warns(
        UserWarning, match="Missing some redundant groups. Filling in available data."
    ):
        uv2.inflate_by_redundancy(tol=tol)
    uv2.history = uv1.history
    # Inflation changes the baseline ordering into the order of the redundant groups.
    # reorder bls for comparison
    uv1.reorder_blts(conj_convention="u>0")
    uv2.reorder_blts(conj_convention="u>0")
    uv2._uvw_array.tols = [0, tol]
    if method == "select":
        if flagging_level == "all":
            assert uv2._flag_array != uv1._flag_array
            uv2.flag_array = uv1.flag_array
        assert uv2 == uv1
    else:
        if flagging_level == "some":
            for gp in red_gps:
                bls_init = [bl for bl in gp if bl in uv1.baseline_array]
                for bl in bls_init:
                    assert np.all(uv2.get_data(bl) == uv1.get_data(bl))
                    assert np.all(uv2.get_nsamples(bl) == uv1.get_nsamples(bl))
        else:
            # averaging dilutes the index-baseline value across the group
            assert uv2.data_array.min() < uv1.data_array.min()
            assert np.all(uv2.data_array <= uv1.data_array)
            for gp in red_gps:
                bls_init = [bl for bl in gp if bl in uv1.baseline_array]
                for bl in bls_init:
                    assert np.all(
                        uv2.get_data(bl) == (uv1.get_data(bl) / len(bls_init))
                    )
                    assert np.all(uv2.get_nsamples(bl) == len(bls_init))
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("method", ("select", "average"))
def test_redundancy_contract_expand_nblts_not_nbls_times_ntimes(method, casa_uvfits):
    """Compress/inflate by redundancy on an irregular time/baseline grid.

    Uses an object where ``Nblts != Nbls * Ntimes``, compresses it, inflates
    it again and checks the result equals the original restricted to the
    baseline-times that survive compression.
    """
    uv0 = casa_uvfits
    # check that Nblts != Nbls * Ntimes
    assert uv0.Nblts != uv0.Nbls * uv0.Ntimes
    tol = 1.0
    # Assign identical data to each redundant group:
    red_gps, centers, lengths = uv0.get_redundancies(
        tol=tol, use_antpos=True, conjugate_bls=True
    )
    for i, gp in enumerate(red_gps):
        for bl in gp:
            inds = np.where(bl == uv0.baseline_array)
            uv0.data_array[inds, ...] *= 0
            uv0.data_array[inds, ...] += complex(i)
    if method == "average":
        with pytest.warns(
            UserWarning,
            match="Index baseline in the redundant group does not have all the "
            "times, compressed object will be missing those times.",
        ):
            uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)
    else:
        uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)
    # check inflating gets back to the original
    # BUG FIX: this previously passed ``func_args={tol: tol}`` — a dict used as
    # the positional-argument sequence, which only worked because iterating a
    # dict yields its keys. Pass tol as an explicit keyword argument instead.
    uvtest.checkWarnings(
        uv2.inflate_by_redundancy,
        func_kwargs={"tol": tol},
        nwarnings=3,
        message=[
            "Missing some redundant groups. Filling in available data.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
    )
    uv2.history = uv0.history
    # Inflation changes the baseline ordering into the order of the redundant groups.
    # reorder bls for comparison
    uv0.reorder_blts()
    uv2.reorder_blts()
    uv2._uvw_array.tols = [0, tol]
    blt_inds = []
    missing_inds = []
    for bl, t in zip(uv0.baseline_array, uv0.time_array):
        if (bl, t) in zip(uv2.baseline_array, uv2.time_array):
            this_ind = np.where((uv2.baseline_array == bl) & (uv2.time_array == t))[0]
            blt_inds.append(this_ind[0])
        else:
            # this is missing because of the compress_by_redundancy step
            missing_inds.append(
                np.where((uv0.baseline_array == bl) & (uv0.time_array == t))[0]
            )
    uv3 = uv2.select(blt_inds=blt_inds, inplace=False)
    orig_inds_keep = list(np.arange(uv0.Nblts))
    for ind in missing_inds:
        # each `ind` is a length-1 index array; list.remove matches it via ==
        orig_inds_keep.remove(ind)
    uv1 = uv0.select(blt_inds=orig_inds_keep, inplace=False)
    if method == "average":
        # the nsample array in the original object varies, so they
        # don't come out the same
        assert not np.allclose(uv3.nsample_array, uv1.nsample_array)
        uv3.nsample_array = uv1.nsample_array
    assert uv3 == uv1
def test_compress_redundancy_variable_inttime():
    """Averaging over a group with unequal integration times warns but proceeds.

    Halves the integration time on non-index baselines in ``uv0``, checks one
    warning is raised per (multi-baseline group, time), and that the averaged
    result matches the object with uniform integration times.
    """
    uv0 = UVData()
    uv0.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    tol = 0.05
    ntimes_in = uv0.Ntimes
    # Assign identical data to each redundant group:
    red_gps, centers, lengths = uv0.get_redundancies(
        tol=tol, use_antpos=True, conjugate_bls=True
    )
    index_bls = [gp[0] for gp in red_gps]
    uv0.data_array *= 0
    # set different int time for index baseline in object to compress
    uv1 = uv0.copy()
    ave_int_time = np.average(uv0.integration_time)
    nbls_group = np.zeros(len(red_gps))
    for gp_ind, gp in enumerate(red_gps):
        for bl in gp:
            inds = np.where(bl == uv0.baseline_array)
            if inds[0].size > 0:
                nbls_group[gp_ind] += 1
                uv1.data_array[inds] += complex(gp_ind)
                uv0.data_array[inds] += complex(gp_ind)
                if bl not in index_bls:
                    uv0.integration_time[inds] = ave_int_time / 2
    assert uv0._integration_time != uv1._integration_time
    # one warning per multi-baseline group per time
    with pytest.warns(
        UserWarning,
        match="Integrations times are not identical in a redundant "
        "group. Averaging anyway but this may cause unexpected "
        "behavior.",
    ) as warn_record:
        uv0.compress_by_redundancy(method="average", tol=tol)
    assert len(warn_record) == np.sum(nbls_group > 1) * ntimes_in
    uv1.compress_by_redundancy(method="average", tol=tol)
    assert uv0 == uv1
@pytest.mark.parametrize("method", ("select", "average"))
def test_compress_redundancy_metadata_only(method):
    """Compressing a metadata-only copy matches compressing and then dropping data."""
    tol = 0.05
    uv_full = UVData()
    testfile = os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    uv_full.read_uvfits(testfile)
    groups, _, _ = uv_full.get_redundancies(
        tol=tol, use_antpos=True, conjugate_bls=True
    )
    # mark each redundant group with a distinct constant value
    for gp_ind, group in enumerate(groups):
        for bl in group:
            locs = np.where(uv_full.baseline_array == bl)
            uv_full.data_array[locs] *= 0
            uv_full.data_array[locs] += complex(gp_ind)
    uv_meta = uv_full.copy(metadata_only=True)
    uv_meta.compress_by_redundancy(method=method, tol=tol, inplace=True)
    # compress the full object, then strip its data-like arrays for comparison
    uv_full.compress_by_redundancy(method=method, tol=tol)
    for attr in ("data_array", "flag_array", "nsample_array"):
        setattr(uv_full, attr, None)
    assert uv_full == uv_meta
def test_compress_redundancy_wrong_method():
    """An unrecognized ``method`` value should raise a ValueError."""
    uvd = UVData()
    testfile = os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    uvd.read_uvfits(testfile)
    with pytest.raises(ValueError, match="method must be one of"):
        uvd.compress_by_redundancy(method="foo", tol=0.05, inplace=True)
@pytest.mark.parametrize("method", ("select", "average"))
def test_redundancy_missing_groups(method, tmp_path):
    # Check that if I try to inflate a compressed UVData that is missing
    # redundant groups, it will raise the right warnings and fill only what
    # data are available.
    uv0 = UVData()
    uv0.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    tol = 0.02
    num_select = 19
    uv0.compress_by_redundancy(method=method, tol=tol)
    fname = str(tmp_path / "temp_hera19_missingreds.uvfits")
    bls = np.unique(uv0.baseline_array)[:num_select]  # first 19 baseline groups
    uv0.select(bls=[uv0.baseline_to_antnums(bl) for bl in bls])
    uv0.write_uvfits(fname)
    uv1 = UVData()
    uv1.read_uvfits(fname)
    assert uv0 == uv1  # Check that writing compressed files causes no issues.
    with pytest.warns(
        UserWarning, match="Missing some redundant groups. Filling in available data."
    ):
        uv1.inflate_by_redundancy(tol=tol)
    # re-compressing recovers exactly the groups that were kept
    uv2 = uv1.compress_by_redundancy(method=method, tol=tol, inplace=False)
    assert np.unique(uv2.baseline_array).size == num_select
def test_quick_redundant_vs_redundant_test_array():
    """Verify the quick redundancy calc returns the same groups as a known array."""
    uv = UVData()
    uv.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    uv.select(times=uv.time_array[0])
    uv.unphase_to_drift()
    uv.conjugate_bls(convention="u>0", use_enu=True)
    tol = 0.05
    # a quick and dirty redundancy calculation
    unique_bls, baseline_inds = np.unique(uv.baseline_array, return_index=True)
    uvw_vectors = np.take(uv.uvw_array, baseline_inds, axis=0)
    # pairwise uvw separations between all unique baselines
    uvw_diffs = np.expand_dims(uvw_vectors, axis=0) - np.expand_dims(
        uvw_vectors, axis=1
    )
    uvw_diffs = np.linalg.norm(uvw_diffs, axis=2)
    # row i holds the baseline numbers within tol of baseline i; 0 -> masked
    reds = np.where(uvw_diffs < tol, unique_bls, 0)
    reds = np.ma.masked_where(reds == 0, reds)
    groups = []
    for bl in reds:
        grp = []
        grp.extend(bl.compressed())
        # NOTE(review): this tests the *full* `reds` array rather than the
        # current row `bl` — it looks like `bl.compressed()` may have been
        # intended; confirm before relying on this branch.
        for other_bls in reds:
            if set(reds.compressed()).issubset(other_bls.compressed()):
                grp.extend(other_bls.compressed())
        grp = np.unique(grp).tolist()
        groups.append(grp)
    # pad groups to equal length with -1 so np.unique can deduplicate rows
    pad = len(max(groups, key=len))
    groups = np.array([i + [-1] * (pad - len(i)) for i in groups])
    groups = np.unique(groups, axis=0)
    groups = [[bl for bl in grp if bl != -1] for grp in groups]
    groups.sort(key=len)
    redundant_groups, centers, lengths, conj_inds = uv.get_redundancies(
        tol=tol, include_conjugates=True
    )
    redundant_groups.sort(key=len)
    assert groups == redundant_groups
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_redundancy_finder_when_nblts_not_nbls_times_ntimes(casa_uvfits):
    """Test the redundancy finder functions when Nblts != Nbls * Ntimes."""
    tol = 1  # meter
    uv = casa_uvfits
    uv.conjugate_bls(convention="u>0", use_enu=True)
    # check that Nblts != Nbls * Ntimes
    assert uv.Nblts != uv.Nbls * uv.Ntimes
    # a quick and dirty redundancy calculation
    unique_bls, baseline_inds = np.unique(uv.baseline_array, return_index=True)
    uvw_vectors = np.take(uv.uvw_array, baseline_inds, axis=0)
    # pairwise uvw separations between all unique baselines
    uvw_diffs = np.expand_dims(uvw_vectors, axis=0) - np.expand_dims(
        uvw_vectors, axis=1
    )
    uvw_diffs = np.linalg.norm(uvw_diffs, axis=2)
    # row i holds the baseline numbers within tol of baseline i; 0 -> masked
    reds = np.where(uvw_diffs < tol, unique_bls, 0)
    reds = np.ma.masked_where(reds == 0, reds)
    groups = []
    for bl in reds:
        grp = []
        grp.extend(bl.compressed())
        # NOTE(review): this tests the *full* `reds` array rather than the
        # current row `bl` — it looks like `bl.compressed()` may have been
        # intended; confirm before relying on this branch.
        for other_bls in reds:
            if set(reds.compressed()).issubset(other_bls.compressed()):
                grp.extend(other_bls.compressed())
        grp = np.unique(grp).tolist()
        groups.append(grp)
    # pad groups to equal length with -1 so np.unique can deduplicate rows
    pad = len(max(groups, key=len))
    groups = np.array([i + [-1] * (pad - len(i)) for i in groups])
    groups = np.unique(groups, axis=0)
    groups = [[bl for bl in grp if bl != -1] for grp in groups]
    groups.sort(key=len)
    redundant_groups, centers, lengths, conj_inds = uv.get_redundancies(
        tol=tol, include_conjugates=True
    )
    redundant_groups.sort(key=len)
    assert groups == redundant_groups
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_overlapping_data_add(casa_uvfits, tmp_path):
    """Test combining four overlapping sub-selections back into the full object.

    Splits the data along both the polarization and baseline-time axes,
    recombines with ``+``/``+=``, checks the error path for adding in the
    wrong order, and round-trips the pieces through uvfits files.
    """
    # read in test data
    uv = casa_uvfits
    # slice into four objects
    blts1 = np.arange(500)
    blts2 = np.arange(500, 1360)
    uv1 = uv.select(polarizations=[-1, -2], blt_inds=blts1, inplace=False)
    uv2 = uv.select(polarizations=[-3, -4], blt_inds=blts1, inplace=False)
    uv3 = uv.select(polarizations=[-1, -2], blt_inds=blts2, inplace=False)
    uv4 = uv.select(polarizations=[-3, -4], blt_inds=blts2, inplace=False)
    # combine and check for equality
    uvfull = uv1 + uv2
    uvfull += uv3
    uvfull += uv4
    extra_history = (
        "Downselected to specific baseline-times, polarizations using pyuvdata. "
        "Combined data along polarization axis using pyuvdata. Combined data along "
        "baseline-time axis using pyuvdata. Overwrote invalid data using pyuvdata."
    )
    assert uvutils._check_histories(uvfull.history, uv.history + extra_history)
    uvfull.history = uv.history  # make histories match
    assert uv == uvfull
    # check combination not-in-place
    uvfull = uv1 + uv2
    uvfull += uv3
    uvfull = uvfull + uv4
    uvfull.history = uv.history  # make histories match
    assert uv == uvfull
    # test raising error for adding objects incorrectly (i.e., having the object
    # with data to be overwritten come second)
    uvfull = uv1 + uv2
    uvfull += uv3
    pytest.raises(ValueError, uv4.__iadd__, uvfull)
    # BUG FIX: this previously called ``uv4.__add__`` with ``uv4`` as the
    # "other" argument and ``uvfull`` spilling into the next positional
    # parameter, so the intended uv4 + uvfull failure was never exercised.
    pytest.raises(ValueError, uv4.__add__, uvfull)
    # write individual objects out, and make sure that we can read in the list
    uv1_out = str(tmp_path / "uv1.uvfits")
    uv1.write_uvfits(uv1_out)
    uv2_out = str(tmp_path / "uv2.uvfits")
    uv2.write_uvfits(uv2_out)
    uv3_out = str(tmp_path / "uv3.uvfits")
    uv3.write_uvfits(uv3_out)
    uv4_out = str(tmp_path / "uv4.uvfits")
    uv4.write_uvfits(uv4_out)
    uvfull = UVData()
    uvfull.read(np.array([uv1_out, uv2_out, uv3_out, uv4_out]))
    assert uvutils._check_histories(uvfull.history, uv.history + extra_history)
    uvfull.history = uv.history  # make histories match
    assert uvfull == uv
    # clean up after ourselves
    os.remove(uv1_out)
    os.remove(uv2_out)
    os.remove(uv3_out)
    os.remove(uv4_out)
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_lsts_from_time_with_only_unique(paper_uvh5):
    """Check `set_lsts_from_time_array` matches LSTs computed for the full array."""
    uv_obj = paper_uvh5
    location = uv_obj.telescope_location_lat_lon_alt_degrees
    # reference: LSTs computed directly for every element of time_array
    expected_lsts = uvutils.get_lst_for_time(uv_obj.time_array, *location)
    # the method internally works on unique times only; result must be identical
    uv_obj.set_lsts_from_time_array()
    assert np.array_equal(expected_lsts, uv_obj.lst_array)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_lsts_from_time_with_only_unique_background(paper_uvh5):
    """Check background `set_lsts_from_time_array` matches directly computed LSTs."""
    uv_obj = paper_uvh5
    location = uv_obj.telescope_location_lat_lon_alt_degrees
    # reference: LSTs computed directly for every element of time_array
    expected_lsts = uvutils.get_lst_for_time(uv_obj.time_array, *location)
    # background=True returns a handle to the worker; wait for it to finish
    proc = uv_obj.set_lsts_from_time_array(background=True)
    proc.join()
    assert np.array_equal(expected_lsts, uv_obj.lst_array)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_copy(casa_uvfits):
    """Verify full and metadata-only copies compare equal as expected."""
    uv_orig = casa_uvfits
    # a full copy is equal to the original
    assert uv_orig.copy() == uv_orig
    meta_copy = uv_orig.copy(metadata_only=True)
    assert meta_copy.metadata_only
    # once the data-like attributes are cleared, the original matches the
    # metadata-only copy
    for param_name in uv_orig._data_params:
        setattr(uv_orig, param_name, None)
    assert meta_copy == uv_orig
    # copying the now metadata-only original still round-trips
    assert uv_orig.copy() == uv_orig
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time(hera_uvh5):
    """Test that ``upsample_in_time`` halving the integration time doubles the
    data arrays while preserving data and nsample values."""
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time to half the smallest in the data
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    assert np.allclose(uv_object.integration_time, max_integration_time)
    # we should double the size of the data arrays
    assert uv_object.data_array.size == 2 * init_data_size
    # output data should be the same
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_with_flags(hera_uvh5):
    """Test the upsample_in_time method with flags.

    A flagged input sample should propagate its flag to every output sample
    derived from it, while data and nsamples are filled as normal.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time (factor-of-2 upsample)
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    # flag the first (0, 1) sample, then upsample
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[0], 0, 0, 0] = True
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    # data and nsamples should be changed as normal, but flagged
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0])
    # both output samples split from the flagged input carry the flag
    out_flags = uv_object.get_flags(0, 1)
    assert np.all(out_flags[:2, 0, 0])
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_noninteger_resampling(hera_uvh5):
    """Test the upsample_in_time method with a non-integer resampling factor.

    A target of 0.75x the input integration time implies a factor of 4/3,
    which gets rounded up to an integer factor of 2 — so the resulting
    integration times are half the original, not the requested target.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    max_integration_time = np.amin(uv_object.integration_time) * 0.75
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    # the realized integration time is target * 0.5 / 0.75 == original / 2
    assert np.allclose(uv_object.integration_time, max_integration_time * 0.5 / 0.75)
    # we should double the size of the data arrays
    assert uv_object.data_array.size == 2 * init_data_size
    # output data values should be unchanged (upsampling repeats samples)
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_errors(hera_uvh5):
    """Test errors and warnings raised by upsample_in_time.

    An excessively short target integration time raises a ValueError; a
    target longer than every existing integration time only warns and
    leaves the object unchanged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # test using a too-small integration time
    max_integration_time = 1e-3 * np.amin(uv_object.integration_time)
    with pytest.raises(ValueError) as cm:
        uv_object.upsample_in_time(max_integration_time)
    assert str(cm.value).startswith("Decreasing the integration time by more than")
    # catch a warning for doing no work; keep a copy to confirm the no-op
    uv_object2 = uv_object.copy()
    max_integration_time = 2 * np.amax(uv_object.integration_time)
    uvtest.checkWarnings(
        uv_object.upsample_in_time,
        [max_integration_time],
        message="All values in integration_time array are already shorter",
    )
    # the object must be untouched by the no-op call
    assert uv_object == uv_object2
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_summing_correlator_mode(hera_uvh5):
    """Test the upsample_in_time method with summing correlator mode.

    In summing correlator mode the data are treated as sums rather than
    averages, so splitting one sample into two halves the data values.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time (factor-of-2 upsample)
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    uv_object.upsample_in_time(
        max_integration_time, blt_order="baseline", summing_correlator_mode=True
    )
    assert np.allclose(uv_object.integration_time, max_integration_time)
    # we should double the size of the data arrays
    assert uv_object.data_array.size == 2 * init_data_size
    # output data should be the half the input
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[0, 0, 0] / 2, out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_summing_correlator_mode_with_flags(hera_uvh5):
    """Test the upsample_in_time method with summing correlator mode and flags.

    Data are halved as usual for summing mode; the flag on the input sample
    propagates to both output samples split from it.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # flag the first (0, 1) sample, then upsample by a factor of 2
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[0], 0, 0, 0] = True
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    uv_object.upsample_in_time(
        max_integration_time, blt_order="baseline", summing_correlator_mode=True
    )
    # data and nsamples should be changed as normal, but flagged
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[0, 0, 0] / 2, out_wf[0, 0, 0])
    out_flags = uv_object.get_flags(0, 1)
    assert np.all(out_flags[:2, 0, 0])
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_summing_correlator_mode_nonint_resampling(hera_uvh5):
    """Test the upsample_in_time method with summing correlator mode
    and non-integer resampling.

    The 4/3 factor implied by a 0.75x target is rounded up to an integer
    factor of 2; in summing mode the split samples carry half the data.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # try again with a non-integer resampling factor
    # change the target integration time
    max_integration_time = np.amin(uv_object.integration_time) * 0.75
    uv_object.upsample_in_time(
        max_integration_time, blt_order="baseline", summing_correlator_mode=True
    )
    # realized integration time is target * 0.5 / 0.75 == original / 2
    assert np.allclose(uv_object.integration_time, max_integration_time * 0.5 / 0.75)
    # we should double the size of the data arrays
    assert uv_object.data_array.size == 2 * init_data_size
    # output data should be half the input
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[0, 0, 0] / 2, out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_partial_upsample_in_time(hera_uvh5):
    """Test the upsample_in_time method with non-uniform upsampling.

    Baseline (0, 1) already has the target integration time and must be
    left untouched; other baselines (e.g. (0, 2)) get upsampled.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # change a whole baseline's integration time so only the rest need upsampling
    bl_inds = uv_object.antpair2ind(0, 1)
    uv_object.integration_time[bl_inds] = uv_object.integration_time[0] / 2.0
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_wf_01 = uv_object.get_data(0, 1)
    init_wf_02 = uv_object.get_data(0, 2)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns_01 = uv_object.get_nsamples(0, 1)
    init_ns_02 = uv_object.get_nsamples(0, 2)
    # change the target integration time to the shortest present (i.e. (0, 1)'s)
    max_integration_time = np.amin(uv_object.integration_time)
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    assert np.allclose(uv_object.integration_time, max_integration_time)
    # (0, 1) is untouched; (0, 2) doubles in length with values repeated
    out_wf_01 = uv_object.get_data(0, 1)
    out_wf_02 = uv_object.get_data(0, 2)
    assert np.all(init_wf_01 == out_wf_01)
    assert np.isclose(init_wf_02[0, 0, 0], out_wf_02[0, 0, 0])
    assert init_wf_02.size * 2 == out_wf_02.size
    # this should be true because there are no flags
    out_ns_01 = uv_object.get_nsamples(0, 1)
    out_ns_02 = uv_object.get_nsamples(0, 2)
    assert np.allclose(out_ns_01, init_ns_01)
    assert np.isclose(init_ns_02[0, 0, 0], out_ns_02[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_drift(hera_uvh5):
    """Test the upsample_in_time method on drift mode data.

    With ``allow_drift=True`` the data are upsampled without phasing,
    so values should still be (approximately) repeated.
    """
    uv_object = hera_uvh5
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time (factor-of-2 upsample)
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    uv_object.upsample_in_time(
        max_integration_time, blt_order="baseline", allow_drift=True
    )
    assert np.allclose(uv_object.integration_time, max_integration_time)
    # we should double the size of the data arrays
    assert uv_object.data_array.size == 2 * init_data_size
    # output data should be the same
    out_wf = uv_object.get_data(0, 1)
    # we need a "large" tolerance given the "large" data
    new_tol = 1e-2 * np.amax(np.abs(uv_object.data_array))
    assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0], atol=new_tol)
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_drift_no_phasing(hera_uvh5):
    """Test the upsample_in_time method on drift mode data without phasing.

    With ``allow_drift=False`` on drift data, the method phases internally,
    so outputs differ slightly from the inputs — hence the loose tolerance.
    """
    uv_object = hera_uvh5
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time (factor-of-2 upsample)
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    # upsample with allow_drift=False
    uv_object.upsample_in_time(
        max_integration_time, blt_order="baseline", allow_drift=False
    )
    assert np.allclose(uv_object.integration_time, max_integration_time)
    # we should double the size of the data arrays
    assert uv_object.data_array.size == 2 * init_data_size
    # output data should be similar, but somewhat different because of the phasing
    out_wf = uv_object.get_data(0, 1)
    # we need a "large" tolerance given the "large" data
    new_tol = 1e-2 * np.amax(np.abs(uv_object.data_array))
    assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0], atol=new_tol)
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time(hera_uvh5):
    """Test the downsample_in_time method.

    A factor-of-2 downsample halves the arrays and averages the data.
    Also checks that the ``min_int_time`` and ``n_times_to_avg`` code paths
    agree (apart from history) and that no masked arrays leak out.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline", minor_order="time"
    )
    # Should have half the size of the data array and all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    assert uv_object.data_array.size * 2 == init_data_size
    # output data should be the average
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", minor_order="time"
    )
    # histories are different when n_times_to_avg is set vs min_int_time
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
    # internal masked-array bookkeeping must not leak into the outputs
    assert not isinstance(uv_object.data_array, np.ma.MaskedArray)
    assert not isinstance(uv_object.nsample_array, np.ma.MaskedArray)
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_partial_flags(hera_uvh5):
    """Test the downsample_in_time method with partial flagging.

    With one of the two inputs in an averaging window flagged, the output
    takes the unflagged value, nsamples halves, and the output is unflagged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # add flags and try again. With one of the 2 inputs flagged, the data should
    # just be the unflagged value and nsample should be half the unflagged one
    # and the output should not be flagged.
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[0], 0, 0, 0] = True
    # copy after flagging so both downsampling paths see the same input
    uv_object2 = uv_object.copy()
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline", minor_order="time"
    )
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[1, 0, 0], out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that there are still no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", minor_order="time"
    )
    # histories are different when n_times_to_avg is set vs min_int_time
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_totally_flagged(hera_uvh5):
    """Test the downsample_in_time method with totally flagged integrations.

    When every input sample in an averaging window is flagged, the output
    data/nsamples match the unflagged average but the output sample is
    flagged.  Also checks that the ``min_int_time`` and ``n_times_to_avg``
    code paths agree (apart from history).
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # flag both inputs to the first averaging window. When all the input
    # points are flagged, data and nsample should have the same results as
    # no flags but the output should be flagged
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[:2], 0, 0, 0] = True
    # copy only after flagging so both downsampling paths see the same input
    # (a pre-flagging copy here was dead code — it was always overwritten)
    uv_object2 = uv_object.copy()
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline", minor_order="time"
    )
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that the new sample is flagged
    out_flag = uv_object.get_flags(0, 1)
    assert out_flag[0, 0, 0]
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", minor_order="time"
    )
    # histories are different when n_times_to_avg is set vs min_int_time
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_uneven_samples(hera_uvh5):
    """Test the downsample_in_time method with uneven downsampling.

    With a 3x target and ``keep_ragged=False`` the leftover integrations
    that do not fill a complete averaging window are dropped, so every
    remaining integration carries exactly the target integration time.
    Also checks that the ``min_int_time`` and ``n_times_to_avg`` code
    paths agree (apart from history).
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # use a downsample factor that doesn't go evenly into the
    # number of samples
    min_integration_time = original_int_time * 3.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        keep_ragged=False,
    )
    # make sure integration time is correct: all integration times should be
    # the target one because the ragged leftovers were dropped
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    # as usual, the new data should be the average of the input data (3 points now)
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.mean(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=3, blt_order="baseline", minor_order="time", keep_ragged=False
    )
    # histories are different when n_times_to_avg is set vs min_int_time
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_uneven_samples_keep_ragged(hera_uvh5):
    """Test downsample_in_time with uneven downsampling and keep_ragged=True."""
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # test again with a downsample factor that doesn't go evenly into the
    # number of samples
    min_integration_time = original_int_time * 3.0
    # downsample with keep_ragged=True so leftover (ragged) samples survive
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        keep_ragged=True,
    )
    # as usual, the new data should be the average of the input data
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.mean(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=3, blt_order="baseline", minor_order="time", keep_ragged=True
    )
    # histories are different when n_times_to_avg is set vs min_int_time
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode(hera_uvh5):
    """Test the downsample_in_time method with summing correlator mode.

    In summing correlator mode the output data are the sum of the inputs
    rather than the average; nsamples is still averaged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        summing_correlator_mode=True,
    )
    # Should have half the size of the data array and all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    assert uv_object.data_array.size * 2 == init_data_size
    # output data should be the sum
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]), out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode_partial_flags(hera_uvh5):
    """Test the downsample_in_time method with summing correlator mode and
    partial flags.

    With one of the two inputs flagged, the output takes the unflagged
    value, nsamples halves, and the output sample stays unflagged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # add flags and try again. With one of the 2 inputs flagged, the data should
    # just be the unflagged value and nsample should be half the unflagged one
    # and the output should not be flagged.
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[0], 0, 0, 0] = True
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        summing_correlator_mode=True,
    )
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[1, 0, 0], out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that there are still no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode_totally_flagged(hera_uvh5):
    """Test the downsample_in_time method with summing correlator mode and
    totally flagged integrations.

    With every input in the averaging window flagged, data/nsamples come
    out as in the unflagged case, but the output sample is flagged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # add more flags and try again. When all the input points are flagged,
    # data and nsample should have the same results as no flags but the output
    # should be flagged
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[:2], 0, 0, 0] = True
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        summing_correlator_mode=True,
    )
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]), out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that the new sample is flagged
    out_flag = uv_object.get_flags(0, 1)
    assert out_flag[0, 0, 0]
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode_uneven_samples(hera_uvh5):
    """Test the downsample_in_time method with summing correlator mode and
    uneven samples.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # test again with a downsample factor that doesn't go evenly into the
    # number of samples
    min_integration_time = original_int_time * 3.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        keep_ragged=False,
        summing_correlator_mode=True,
    )
    # Output integration times should be either the original integration
    # time or the target (3x original) one.
    assert np.all(
        np.logical_or(
            np.isclose(uv_object.integration_time, original_int_time),
            np.isclose(uv_object.integration_time, min_integration_time),
        )
    )
    # in summing mode the new data should be the sum of the input data
    # (3 points now); nsamples is still the mean
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.sum(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(np.mean(init_ns[0:3, 0, 0]), out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode_uneven_samples_drop_ragged(
    hera_uvh5,
):
    """Test the downsample_in_time method with summing correlator mode and
    uneven samples, dropping ragged ones.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # test again with keep_ragged=False
    min_integration_time = original_int_time * 3.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        keep_ragged=False,
        summing_correlator_mode=True,
    )
    # make sure integration time is correct
    # in this case, all integration times should be the target one
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    # in summing mode the new data should be the sum of the input data;
    # nsamples is still the mean
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.sum(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(np.mean(init_ns[0:3, 0, 0]), out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_partial_downsample_in_time(hera_uvh5):
    """Test the downsample_in_time method without uniform downsampling.

    Baseline (0, 1) already has the target integration time and must be
    left untouched; other baselines (e.g. (0, 2)) get averaged down.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # change a whole baseline's integration time so only the rest need averaging
    bl_inds = uv_object.antpair2ind(0, 1)
    uv_object.integration_time[bl_inds] = uv_object.integration_time[0] * 2.0
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_wf_01 = uv_object.get_data(0, 1)
    init_wf_02 = uv_object.get_data(0, 2)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns_01 = uv_object.get_nsamples(0, 1)
    init_ns_02 = uv_object.get_nsamples(0, 2)
    # change the target integration time to the longest present (i.e. (0, 1)'s)
    min_integration_time = np.amax(uv_object.integration_time)
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline"
    )
    # Should have all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    # (0, 1) is untouched; (0, 2) is averaged pairwise
    out_wf_01 = uv_object.get_data(0, 1)
    out_wf_02 = uv_object.get_data(0, 2)
    assert np.all(init_wf_01 == out_wf_01)
    assert np.isclose(
        (init_wf_02[0, 0, 0] + init_wf_02[1, 0, 0]) / 2.0, out_wf_02[0, 0, 0]
    )
    # this should be true because there are no flags
    out_ns_01 = uv_object.get_nsamples(0, 1)
    out_ns_02 = uv_object.get_nsamples(0, 2)
    assert np.allclose(out_ns_01, init_ns_01)
    assert np.isclose(
        (init_ns_02[0, 0, 0] + init_ns_02[1, 0, 0]) / 2.0, out_ns_02[0, 0, 0]
    )
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_drift(hera_uvh5):
    """Test the downsample_in_time method on drift mode data.

    With allow_drift=True the data are averaged without phasing, so the output
    is exactly the mean of the combined input samples. Also checks that the
    min_int_time and n_times_to_avg code paths give identical results.
    """
    uv_object = hera_uvh5
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline", allow_drift=True
    )
    # Should have half the size of the data array and all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    assert uv_object.data_array.size * 2 == init_data_size
    # output data should be the average (exact, since no rephasing happened)
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", allow_drift=True
    )
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_drift_no_phasing(hera_uvh5):
    """Test the downsample_in_time method on drift mode data without phasing.

    With allow_drift=False on drift data, the object is phased before
    averaging, so the output only approximately matches the naive mean of the
    inputs (hence the loosened tolerance below).
    """
    uv_object = hera_uvh5
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # try again with allow_drift=False
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline", allow_drift=False,
    )
    # Should have half the size of the data array and all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    assert uv_object.data_array.size * 2 == init_data_size
    # output data should be similar to the average, but somewhat different
    # because of the phasing
    out_wf = uv_object.get_data(0, 1)
    new_tol = 5e-2 * np.amax(np.abs(uv_object.data_array))
    assert np.isclose(
        (init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0], atol=new_tol
    )
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # Compare doing it with n_times_to_avg (also phases, since allow_drift
    # defaults to False) — results should match the min_int_time path exactly
    uv_object2.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", minor_order="time"
    )
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_nsample_precision(hera_uvh5):
    """Test the downsample_in_time method with a half-precision nsample_array.

    Verifies that downsampling preserves the (float16) dtype of the
    nsample_array and still computes the correct values when one of the two
    averaged samples is flagged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # add flags and try again. With one of the 2 inputs flagged, the data should
    # just be the unflagged value and nsample should be half the unflagged one
    # and the output should not be flagged.
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[0], 0, 0, 0] = True
    uv_object2 = uv_object.copy()
    # change precision of nsample array
    uv_object.nsample_array = uv_object.nsample_array.astype(np.float16)
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline", minor_order="time"
    )
    # flagged first input dropped: output equals the second (unflagged) input
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[1, 0, 0], out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # make sure nsamples has the right dtype
    assert uv_object.nsample_array.dtype.type is np.float16
    # check that there are still no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # Compare doing it with n_times_to_avg
    uv_object2.nsample_array = uv_object2.nsample_array.astype(np.float16)
    uv_object2.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", minor_order="time"
    )
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_errors(hera_uvh5):
    """Test various errors and warnings are raised by downsample_in_time.

    Covers: neither/both of min_int_time and n_times_to_avg set, a
    single-time object, a too-large target integration time, a no-op target,
    a non-integer n_times_to_avg, and the time-gap warning.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # raise an error if neither min_int_time nor n_times_to_avg is set
    with pytest.raises(
        ValueError, match="Either min_int_time or n_times_to_avg must be set."
    ):
        uv_object.downsample_in_time()
    # raise an error if both min_int_time and n_times_to_avg are set
    with pytest.raises(
        ValueError, match="Only one of min_int_time or n_times_to_avg can be set."
    ):
        uv_object.downsample_in_time(
            min_int_time=2 * np.amin(uv_object.integration_time), n_times_to_avg=2
        )
    # raise an error if only one time
    uv_object2 = uv_object.copy()
    uv_object2.select(times=uv_object2.time_array[0])
    with pytest.raises(
        ValueError, match="Only one time in this object, cannot downsample."
    ):
        uv_object2.downsample_in_time(n_times_to_avg=2)
    # raise an error for a too-large integration time
    max_integration_time = 1e3 * np.amax(uv_object.integration_time)
    with pytest.raises(
        ValueError, match="Increasing the integration time by more than"
    ):
        uv_object.downsample_in_time(min_int_time=max_integration_time)
    # catch a warning for doing no work (target shorter than existing times)
    uv_object2 = uv_object.copy()
    max_integration_time = 0.5 * np.amin(uv_object.integration_time)
    with pytest.warns(
        UserWarning, match="All values in the integration_time array are already longer"
    ):
        uv_object.downsample_in_time(min_int_time=max_integration_time)
    # the no-op call must leave the object unchanged
    assert uv_object == uv_object2
    del uv_object2
    # raise an error if n_times_to_avg is not an integer
    with pytest.raises(ValueError, match="n_times_to_avg must be an integer."):
        uv_object.downsample_in_time(n_times_to_avg=2.5)
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # make a gap in the times to check a warning about that
    inds01 = uv_object.antpair2ind(0, 1)
    initial_int_time = uv_object.integration_time[inds01[0]]
    # time array is in jd, integration time is in sec
    uv_object.time_array[inds01[-1]] += initial_int_time / (24 * 3600)
    uv_object.Ntimes += 1
    min_integration_time = 2 * np.amin(uv_object.integration_time)
    times_01 = uv_object.get_times(0, 1)
    assert np.unique(np.diff(times_01)).size > 1
    with pytest.warns(UserWarning, match=("There is a gap in the times of baseline")):
        uv_object.downsample_in_time(min_int_time=min_integration_time)
    # Should have half the size of the data array and all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    assert uv_object.data_array.size * 2 == init_data_size
    # output data should be the average
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_int_time_mismatch_warning(hera_uvh5):
    """Test warning in downsample_in_time about mismatch between integration
    times and the time between integrations.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the integration times to catch a warning about integration times
    # not matching the time delta between integrations
    uv_object.integration_time *= 0.5
    min_integration_time = 2 * np.amin(uv_object.integration_time)
    with pytest.warns(
        UserWarning, match="The time difference between integrations is not the same"
    ) as record:
        uv_object.downsample_in_time(min_int_time=min_integration_time)
    # 11 warnings expected for this file — presumably one per affected
    # baseline plus extras; TODO confirm the exact breakdown against the fixture
    assert len(record) == 11
    # Should have half the size of the data array and all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    assert uv_object.data_array.size * 2 == init_data_size
    # output data should be the average
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_varying_integration_time(hera_uvh5):
    """Test downsample_in_time handling of file with integration time changing
    within a baseline.

    The last two integrations of baseline (0, 1) are made double-length (with
    the time_array adjusted to stay consistent), so after downsampling the
    first samples are pairwise-averaged while the last two pass through
    unchanged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # test handling (& warnings) with varying integration time in a baseline
    # First, change both integration time & time array to match
    inds01 = uv_object.antpair2ind(0, 1)
    initial_int_time = uv_object.integration_time[inds01[0]]
    # time array is in jd, integration time is in sec
    uv_object.time_array[inds01[-2]] += (initial_int_time / 2) / (24 * 3600)
    uv_object.time_array[inds01[-1]] += (3 * initial_int_time / 2) / (24 * 3600)
    uv_object.integration_time[inds01[-2:]] += initial_int_time
    uv_object.Ntimes = np.unique(uv_object.time_array).size
    min_integration_time = 2 * np.amin(uv_object.integration_time)
    # check that there are no warnings about inconsistencies between
    # integration_time & time_array (only the expected uvw warning appears)
    with pytest.warns(
        UserWarning, match="The uvw_array does not match the expected values",
    ) as record:
        uv_object.downsample_in_time(min_int_time=min_integration_time)
    assert len(record) == 1
    # Should have all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    out_wf = uv_object.get_data(0, 1)
    n_times_in = init_wf.shape[0]
    n_times_out = out_wf.shape[0]
    assert n_times_out == (n_times_in - 2) / 2 + 2
    # output data should be the average for the first set
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # last 2 time samples should be identical to initial ones
    assert np.isclose(init_wf[-1, 0, 0], out_wf[-1, 0, 0])
    assert np.isclose(init_wf[-2, 0, 0], out_wf[-2, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    assert np.isclose(init_ns[-1, 0, 0], out_ns[-1, 0, 0])
    # fixed index typo: compare the second-to-last OUTPUT sample (was
    # out_ns[2, 0, 0]), mirroring the data-array check above
    assert np.isclose(init_ns[-2, 0, 0], out_ns[-2, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_varying_int_time_partial_flags(hera_uvh5):
    """Test downsample_in_time handling of file with integration time changing
    within a baseline and partial flagging.

    Downsampling in two stages (x4 then x8) should give the same result as
    downsampling straight to x8.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # downselect to the first 14 times
    uv_object.select(times=np.unique(uv_object.time_array)[:14])
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # change last 2 integrations to be twice as long
    # (so 12 normal length, 2 double length)
    # change integration time & time array to match
    inds01 = uv_object.antpair2ind(0, 1)
    initial_int_time = uv_object.integration_time[inds01[0]]
    # time array is in jd, integration time is in sec
    uv_object.time_array[inds01[-2]] += (initial_int_time / 2) / (24 * 3600)
    uv_object.time_array[inds01[-1]] += (3 * initial_int_time / 2) / (24 * 3600)
    uv_object.integration_time[inds01[-2:]] += initial_int_time
    uv_object.Ntimes = np.unique(uv_object.time_array).size
    # add a flag on last time
    uv_object.flag_array[inds01[-1], :, :, :] = True
    # add a flag on third to last time
    uv_object.flag_array[inds01[-3], :, :, :] = True
    uv_object2 = uv_object.copy()
    # first stage: downsample to 4x the initial integration time
    with pytest.warns(
        UserWarning, match="The uvw_array does not match the expected values",
    ) as record:
        uv_object.downsample_in_time(min_int_time=4 * initial_int_time)
    assert len(record) == 1
    # second stage: no warnings expected this time
    with pytest.warns(None) as record:
        uv_object.downsample_in_time(min_int_time=8 * initial_int_time)
    assert len(record) == 0
    # single-stage downsample straight to 8x for comparison
    with pytest.warns(
        UserWarning, match="The uvw_array does not match the expected values",
    ) as record:
        uv_object2.downsample_in_time(min_int_time=8 * initial_int_time)
    assert len(record) == 1
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_varying_integration_time_warning(hera_uvh5):
    """Test downsample_in_time handling of file with integration time changing
    within a baseline, but without adjusting the time_array so there is a mismatch.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # Next, change just integration time, so time array doesn't match
    inds01 = uv_object.antpair2ind(0, 1)
    initial_int_time = uv_object.integration_time[inds01[0]]
    uv_object.integration_time[inds01[-2:]] += initial_int_time
    min_integration_time = 2 * np.amin(uv_object.integration_time)
    # the deliberate mismatch must trigger this warning
    with pytest.warns(
        UserWarning, match="The time difference between integrations is different than"
    ):
        uv_object.downsample_in_time(min_int_time=min_integration_time)
    # Should have all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    # output data should be the average
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:Data will be unphased and rephased")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_downsample_in_time(hera_uvh5):
    """Test that an upsample followed by a downsample is a round trip.

    Upsampling by 2x and then downsampling back to the original integration
    time should recover the original object (up to history). Also checks that
    repeated no-op calls warn and leave the object unchanged.
    """
    uv_object = hera_uvh5
    # set uvws from antenna positions so they'll agree later.
    # the fact that this is required is a bit concerning, it means that
    # our calculated uvws from the antenna positions do not match what's in the file
    uv_object.set_uvws_from_antenna_positions()
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    assert np.amax(uv_object.integration_time) <= max_integration_time
    new_Nblts = uv_object.Nblts
    # check that calling upsample again with the same max_integration_time
    # gives warning and does nothing
    uvtest.checkWarnings(
        uv_object.upsample_in_time,
        func_args=[max_integration_time],
        func_kwargs={"blt_order": "baseline"},
        message="All values in the integration_time array are " "already longer",
    )
    assert uv_object.Nblts == new_Nblts
    # check that calling upsample again with the almost the same max_integration_time
    # gives warning and does nothing
    small_number = 0.9 * uv_object._integration_time.tols[1]
    uvtest.checkWarnings(
        uv_object.upsample_in_time,
        func_args=[max_integration_time - small_number],
        func_kwargs={"blt_order": "baseline"},
        message="All values in the integration_time array are " "already longer",
    )
    assert uv_object.Nblts == new_Nblts
    # downsample back to the original integration time
    uv_object.downsample_in_time(
        min_int_time=np.amin(uv_object2.integration_time), blt_order="baseline"
    )
    # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we
    # do in testing if the iers url is down. See conftest.py for more info.
    if iers.conf.auto_max_age is None:
        uv_object._lst_array.tols = (0, 1e-4)
    # make sure that history is correct
    assert (
        "Upsampled data to 0.939524 second integration time using pyuvdata."
        in uv_object.history
    )
    assert (
        "Downsampled data to 1.879048 second integration time using pyuvdata."
        in uv_object.history
    )
    # overwrite history and check for equality
    uv_object.history = uv_object2.history
    assert uv_object == uv_object2
    # check that calling downsample again with the same min_integration_time
    # gives warning and does nothing
    with pytest.warns(
        UserWarning, match="All values in the integration_time array are already longer"
    ):
        uv_object.downsample_in_time(
            min_int_time=np.amin(uv_object2.integration_time), blt_order="baseline"
        )
    assert uv_object.Nblts == uv_object2.Nblts
    # check that calling upsample again with the almost the same min_integration_time
    # gives warning and does nothing
    with pytest.warns(
        UserWarning, match="All values in the integration_time array are already longer"
    ):
        uv_object.upsample_in_time(
            np.amin(uv_object2.integration_time) + small_number, blt_order="baseline"
        )
    assert uv_object.Nblts == uv_object2.Nblts
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:Data will be unphased and rephased")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_downsample_in_time_odd_resample(hera_uvh5):
    """Test that the upsample/downsample round trip works with an odd (3x)
    resampling factor.
    """
    uv_object = hera_uvh5
    # set uvws from antenna positions so they'll agree later.
    # the fact that this is required is a bit concerning, it means that
    # our calculated uvws from the antenna positions do not match what's in the file
    uv_object.set_uvws_from_antenna_positions()
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()
    # try again with a resampling factor of 3 (test odd numbers)
    max_integration_time = np.amin(uv_object.integration_time) / 3.0
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    assert np.amax(uv_object.integration_time) <= max_integration_time
    uv_object.downsample_in_time(
        np.amin(uv_object2.integration_time), blt_order="baseline"
    )
    # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we
    # do in testing if the iers url is down. See conftest.py for more info.
    if iers.conf.auto_max_age is None:
        uv_object._lst_array.tols = (0, 1e-4)
    # make sure that history is correct
    assert (
        "Upsampled data to 0.626349 second integration time using pyuvdata."
        in uv_object.history
    )
    assert (
        "Downsampled data to 1.879048 second integration time using pyuvdata."
        in uv_object.history
    )
    # overwrite history and check for equality
    uv_object.history = uv_object2.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_downsample_in_time_metadata_only(hera_uvh5):
    """Test the upsample/downsample round trip works with metadata-only objects
    (data_array, flag_array and nsample_array set to None).
    """
    uv_object = hera_uvh5
    # drop the data arrays to make this a metadata-only object
    uv_object.data_array = None
    uv_object.flag_array = None
    uv_object.nsample_array = None
    # set uvws from antenna positions so they'll agree later.
    # the fact that this is required is a bit concerning, it means that
    # our calculated uvws from the antenna positions do not match what's in the file
    uv_object.set_uvws_from_antenna_positions()
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    assert np.amax(uv_object.integration_time) <= max_integration_time
    uv_object.downsample_in_time(
        np.amin(uv_object2.integration_time), blt_order="baseline"
    )
    # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we
    # do in testing if the iers url is down. See conftest.py for more info.
    if iers.conf.auto_max_age is None:
        uv_object._lst_array.tols = (0, 1e-4)
    # make sure that history is correct
    assert (
        "Upsampled data to 0.939524 second integration time using pyuvdata."
        in uv_object.history
    )
    assert (
        "Downsampled data to 1.879048 second integration time using pyuvdata."
        in uv_object.history
    )
    # overwrite history and check for equality
    uv_object.history = uv_object2.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time(bda_test_file):
    """Test the resample_in_time method"""
    # Note this file has slight variations in the delta t between integrations
    # that causes our gap test to issue a warning, but the variations are small
    # We aren't worried about them, so we filter those warnings
    uv_object = bda_test_file

    # initial integration times per baseline: 2s, 4s, 8s, 16s respectively
    baselines = [(1, 136), (1, 137), (1, 138), (136, 137)]
    before = {bl: uv_object.get_data(bl) for bl in baselines}

    uv_object.resample_in_time(8)
    # everything should now be at the 8 second target
    assert np.all(np.isclose(uv_object.integration_time, 8))

    after = {bl: uv_object.get_data(bl) for bl in baselines}

    # array sizes: 2s -> 8s shrinks 4x, 4s -> 8s shrinks 2x,
    # 8s is unchanged, 16s -> 8s doubles
    assert after[(1, 136)].size * 4 == before[(1, 136)].size
    assert after[(1, 137)].size * 2 == before[(1, 137)].size
    assert after[(1, 138)].size == before[(1, 138)].size
    assert after[(136, 137)].size / 2 == before[(136, 137)].size

    # downsampled baselines hold the mean of the combined samples; the
    # unchanged and upsampled baselines keep their first sample value
    assert np.isclose(np.mean(before[(1, 136)][0:4, 0, 0]), after[(1, 136)][0, 0, 0])
    assert np.isclose(np.mean(before[(1, 137)][0:2, 0, 0]), after[(1, 137)][0, 0, 0])
    assert np.isclose(before[(1, 138)][0, 0, 0], after[(1, 138)][0, 0, 0])
    assert np.isclose(before[(136, 137)][0, 0, 0], after[(136, 137)][0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_downsample_only(bda_test_file):
    """Test resample_in_time with downsampling only"""
    # Note this file has slight variations in the delta t between integrations
    # that causes our gap test to issue a warning, but the variations are small
    # We aren't worried about them, so we filter those warnings
    uv_object = bda_test_file

    # initial integration times per baseline: 2s, 4s, 8s, 16s respectively
    baselines = [(1, 136), (1, 137), (1, 138), (136, 137)]
    before = {bl: uv_object.get_data(bl) for bl in baselines}

    # resample with only_downsample set
    uv_object.resample_in_time(8, only_downsample=True)

    # shorter integrations are brought up to 8s; the 16s baseline is left
    # alone because upsampling is disabled
    at_target = np.isclose(uv_object.integration_time, 8)
    untouched = np.isclose(uv_object.integration_time, 16)
    assert np.all(np.logical_or(at_target, untouched))

    after = {bl: uv_object.get_data(bl) for bl in baselines}

    # array sizes: 2s -> 8s shrinks 4x, 4s -> 8s shrinks 2x,
    # 8s and 16s are unchanged
    assert after[(1, 136)].size * 4 == before[(1, 136)].size
    assert after[(1, 137)].size * 2 == before[(1, 137)].size
    assert after[(1, 138)].size == before[(1, 138)].size
    assert after[(136, 137)].size == before[(136, 137)].size

    # downsampled baselines hold the mean of the combined samples; the
    # untouched baselines keep their original values
    assert np.isclose(np.mean(before[(1, 136)][0:4, 0, 0]), after[(1, 136)][0, 0, 0])
    assert np.isclose(np.mean(before[(1, 137)][0:2, 0, 0]), after[(1, 137)][0, 0, 0])
    assert np.isclose(before[(1, 138)][0, 0, 0], after[(1, 138)][0, 0, 0])
    assert np.isclose(before[(136, 137)][0, 0, 0], after[(136, 137)][0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_only_upsample(bda_test_file):
    """Test resample_in_time with only upsampling"""
    # Note this file has slight variations in the delta t between integrations
    # that causes our gap test to issue a warning, but the variations are small
    # We aren't worried about them, so we filter those warnings
    uv_object = bda_test_file

    # initial integration times per baseline: 2s, 4s, 8s, 16s respectively
    baselines = [(1, 136), (1, 137), (1, 138), (136, 137)]
    before = {bl: uv_object.get_data(bl) for bl in baselines}

    # resample with only_upsample set
    uv_object.resample_in_time(8, only_upsample=True)

    # integration times at or below 8s are untouched; only the 16s baseline
    # is split down to 8s
    int_time = uv_object.integration_time
    acceptable = (
        np.isclose(int_time, 2.0)
        | np.isclose(int_time, 4.0)
        | np.isclose(int_time, 8.0)
    )
    assert np.all(acceptable)

    after = {bl: uv_object.get_data(bl) for bl in baselines}

    # only the 16s baseline changes size (doubled by the upsampling)
    assert after[(1, 136)].size == before[(1, 136)].size
    assert after[(1, 137)].size == before[(1, 137)].size
    assert after[(1, 138)].size == before[(1, 138)].size
    assert after[(136, 137)].size / 2 == before[(136, 137)].size

    # first sample of every baseline is preserved
    for bl in baselines:
        assert np.isclose(before[bl][0, 0, 0], after[bl][0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_partial_flags(bda_test_file):
    """Test resample_in_time with partial flags"""
    # Note this file has slight variations in the delta t between integrations
    # that causes our gap test to issue a warning, but the variations are small
    # We aren't worried about them, so we filter those warnings
    uv = bda_test_file
    # restrict to a single baseline for simplicity
    uv.select(bls=[(1, 136)])
    # flag the first time sample
    uv.flag_array[0, :, :, :] = True
    uv2 = uv.copy()

    # downsampling in two stages (to 4s, then to 8s) ...
    for target_time in (4.0, 8.0):
        uv.resample_in_time(target_time, only_downsample=True)
    # ... must match downsampling straight to 8s in one stage
    uv2.resample_in_time(8.0, only_downsample=True)

    assert uv.history != uv2.history
    uv2.history = uv.history
    assert uv == uv2
    return
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_downsample_in_time_mwa():
    """
    Test resample in time works with numerical weirdnesses.

    In particular, when min_int_time is not quite an integer multiple of
    integration_time. This test broke with a prior bug (see issue 773).
    """
    filename = os.path.join(DATA_PATH, "mwa_integration_time.uvh5")
    uv = UVData()
    uv.read(filename)
    uv.phase_to_time(np.mean(uv.time_array))
    uv_object2 = uv.copy()
    # all data within 5 milliseconds of 2 second integrations
    assert np.allclose(uv.integration_time, 2, atol=5e-3)
    min_int_time = 4.0
    uv.resample_in_time(min_int_time, only_downsample=True, keep_ragged=False)
    # despite the slightly-short input integrations, the downsample must
    # reach (within tolerance) the requested integration time
    assert np.all(uv.integration_time > (min_int_time - 5e-3))
    # Now do the human expected thing:
    init_data = uv_object2.get_data((61, 58))
    uv_object2.downsample_in_time(n_times_to_avg=2, keep_ragged=False)
    assert uv_object2.Ntimes == 5
    out_data = uv_object2.get_data((61, 58))
    assert np.isclose(np.mean(init_data[0:2, 0, 0]), out_data[0, 0, 0])
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_warning():
    """Resampling to a target of 3 s with keep_ragged=False warns that no
    resampling will be done and leaves the object untouched.
    """
    uv = UVData()
    uv.read(os.path.join(DATA_PATH, "mwa_integration_time.uvh5"))
    original = uv.copy()
    with pytest.warns(
        UserWarning, match="No resampling will be done because target time"
    ):
        uv.resample_in_time(3, keep_ragged=False)
    # object must be unchanged after the no-op call
    assert original == uv
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average(uvdata_data):
    """Test averaging in frequency."""
    # NOTE: use builtin float; the np.float alias is deprecated and was
    # removed in numpy 1.24, where it raises AttributeError.
    eq_coeffs = np.tile(
        np.arange(uvdata_data.uv_object.Nfreqs, dtype=float),
        (uvdata_data.uv_object.Nants_telescope, 1),
    )
    uvdata_data.uv_object.eq_coeffs = eq_coeffs
    uvdata_data.uv_object.check()
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # averaging warns because eq_coeffs differ across the combined channels
    uvtest.checkWarnings(
        uvdata_data.uv_object.frequency_average,
        [2],
        message="eq_coeffs vary by frequency",
    )
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    # averaged frequencies are the pairwise means of the originals
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    # eq_coeffs are averaged the same way
    expected_coeffs = eq_coeffs.reshape(
        uvdata_data.uv_object2.Nants_telescope,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.eq_coeffs - expected_coeffs)) == 0
    # no flagging, so the following is true
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # averaging must not leave masked arrays on the object
    assert not isinstance(uvdata_data.uv_object.data_array, np.ma.MaskedArray)
    assert not isinstance(uvdata_data.uv_object.nsample_array, np.ma.MaskedArray)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_uneven(uvdata_data):
    """Test averaging in frequency with a number that is not a factor of Nfreqs."""
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    with pytest.warns(
        UserWarning,
        match="Nfreqs does not divide by `n_chan_to_avg` evenly. The final 1 "
        "frequencies will be excluded, to control which frequencies to exclude, "
        "use a select to control.",
    ):
        uvdata_data.uv_object.frequency_average(7)
    assert uvdata_data.uv_object2.Nfreqs % 7 != 0
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs // 7)
    # trailing channels that don't fill a complete group of 7 are dropped,
    # so only the first (Nfreqs // 7) * 7 channels contribute
    expected_freqs = uvdata_data.uv_object2.freq_array[
        :, np.arange((uvdata_data.uv_object2.Nfreqs // 7) * 7)
    ]
    expected_freqs = expected_freqs.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs // 7), 7
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    # no flagging, so the following is true
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    expected_data = expected_data[
        :, :, 0 : ((uvdata_data.uv_object2.Nfreqs // 7) * 7), :
    ]
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs // 7),
        7,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_flagging(uvdata_data):
    """Test averaging in frequency with flagging all samples averaged."""
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # apply some flagging for testing: both channels of the first averaging
    # group, for the first baseline-time of (0, 1)
    inds01 = uvdata_data.uv_object.antpair2ind(0, 1)
    uvdata_data.uv_object.flag_array[inds01[0], :, 0:2, :] = True
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols * 2
    )
    uvdata_data.uv_object.frequency_average(2)
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    # both input channels were flagged, so the averaged channel stays flagged
    # (all Npols of it) on that baseline-time only
    assert np.sum(uvdata_data.uv_object.flag_array[inds01[0], :, 0, :]) == 4
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols
    )
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array[inds01[1:], :, 0, :])[0].size == 0
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_flagging_partial(uvdata_data):
    """Test averaging in frequency with flagging only one sample averaged."""
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # apply some flagging for testing: only one of the two channels in the
    # first averaging group
    inds01 = uvdata_data.uv_object.antpair2ind(0, 1)
    uvdata_data.uv_object.flag_array[inds01[0], :, 0, :] = True
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols
    )
    uvdata_data.uv_object.frequency_average(2)
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    # with one of two inputs flagged, the average is just the unflagged input
    expected_data[0, :, 0, :] = uvdata_data.uv_object2.data_array[inds01[0], :, 1, :]
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_flagging_full_and_partial(uvdata_data):
    """
    Test averaging in frequency with flagging all of one and only one of
    another sample averaged.
    """
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # apply some flagging for testing: channels 0-2, so the first averaging
    # group is fully flagged and the second is half flagged
    inds01 = uvdata_data.uv_object.antpair2ind(0, 1)
    uvdata_data.uv_object.flag_array[inds01[0], :, 0:3, :] = True
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols * 3
    )
    uvdata_data.uv_object.frequency_average(2)
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    # half-flagged group: only the unflagged channel (index 3) contributes
    expected_data[0, :, 1, :] = uvdata_data.uv_object2.data_array[inds01[0], :, 3, :]
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    # only the fully-flagged group remains flagged after averaging
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_flagging_partial_twostage(uvdata_data):
    """
    Test averaging in frequency in two stages with flagging only one sample averaged.
    """
    uv = uvdata_data.uv_object
    # start from a clean slate: nothing flagged yet
    assert np.nonzero(uv.flag_array)[0].size == 0
    # flag one channel on the first baseline-time of (0, 1)
    first_blt = uv.antpair2ind(0, 1)[0]
    uv.flag_array[first_blt, :, 0, :] = True
    assert np.nonzero(uv.flag_array)[0].size == uv.Npols
    uv_onestage = uv.copy()
    # averaging by 2 twice must be identical to averaging by 4 once
    uv.frequency_average(2)
    uv.frequency_average(2)
    uv_onestage.frequency_average(4)
    assert uv == uv_onestage
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_summing_corr_mode(uvdata_data):
    """Test averaging in frequency."""
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # summing_correlator_mode sums channels instead of averaging them
    uvdata_data.uv_object.frequency_average(2, summing_correlator_mode=True)
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    # frequencies are still averaged even in summing mode
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    # no flagging, so the following is true
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    # note: sum, not mean, over the averaged axis
    expected_data = expected_data.reshape(reshape_tuple).sum(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    assert not isinstance(uvdata_data.uv_object.data_array, np.ma.MaskedArray)
    assert not isinstance(uvdata_data.uv_object.nsample_array, np.ma.MaskedArray)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_propagate_flags(uvdata_data):
    """
    Test averaging in frequency with flagging all of one and only one of
    another sample averaged, and propagating flags. Data should be identical,
    but flags should be slightly different compared to other test of the same
    name.
    """
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # apply some flagging for testing: first group fully flagged, second
    # group half flagged
    inds01 = uvdata_data.uv_object.antpair2ind(0, 1)
    uvdata_data.uv_object.flag_array[inds01[0], :, 0:3, :] = True
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols * 3
    )
    # propagate_flags flags an output channel if ANY input channel was flagged
    uvdata_data.uv_object.frequency_average(2, propagate_flags=True)
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    # half-flagged group: data still comes from the unflagged channel only
    expected_data[0, :, 1, :] = uvdata_data.uv_object2.data_array[inds01[0], :, 3, :]
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    # Twice as many flags should exist compared to test of previous name.
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == 2 * uvdata_data.uv_object.Npols
    )
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_nsample_precision(uvdata_data):
    """Test averaging in frequency with a half-precision nsample_array."""
    # NOTE: use builtin float; the np.float alias is deprecated and was
    # removed in numpy 1.24, where it raises AttributeError.
    eq_coeffs = np.tile(
        np.arange(uvdata_data.uv_object.Nfreqs, dtype=float),
        (uvdata_data.uv_object.Nants_telescope, 1),
    )
    uvdata_data.uv_object.eq_coeffs = eq_coeffs
    uvdata_data.uv_object.check()
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # change precision of the nsample array
    uvdata_data.uv_object.nsample_array = uvdata_data.uv_object.nsample_array.astype(
        np.float16
    )
    # averaging warns because eq_coeffs differ across the combined channels
    uvtest.checkWarnings(
        uvdata_data.uv_object.frequency_average,
        [2],
        message="eq_coeffs vary by frequency",
    )
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_coeffs = eq_coeffs.reshape(
        uvdata_data.uv_object2.Nants_telescope,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.eq_coeffs - expected_coeffs)) == 0
    # no flagging, so the following is true
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    assert not isinstance(uvdata_data.uv_object.data_array, np.ma.MaskedArray)
    assert not isinstance(uvdata_data.uv_object.nsample_array, np.ma.MaskedArray)
    # make sure we still have a half-precision nsample_array
    assert uvdata_data.uv_object.nsample_array.dtype.type is np.float16
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_remove_eq_coeffs_divide(uvdata_data):
    """Test using the remove_eq_coeffs method with divide convention."""
    # give eq_coeffs to the object
    # NOTE: use builtin float; the np.float alias is deprecated and was
    # removed in numpy 1.24, where it raises AttributeError.
    eq_coeffs = np.empty(
        (uvdata_data.uv_object.Nants_telescope, uvdata_data.uv_object.Nfreqs),
        dtype=float,
    )
    for i, ant in enumerate(uvdata_data.uv_object.antenna_numbers):
        eq_coeffs[i, :] = ant + 1
    uvdata_data.uv_object.eq_coeffs = eq_coeffs
    uvdata_data.uv_object.eq_coeffs_convention = "divide"
    uvdata_data.uv_object.remove_eq_coeffs()
    # make sure the right coefficients were removed: under the divide
    # convention the data are divided by the per-antenna coefficient product
    for key in uvdata_data.uv_object.get_antpairs():
        eq1 = key[0] + 1
        eq2 = key[1] + 1
        blt_inds = uvdata_data.uv_object.antpair2ind(key)
        norm_data = uvdata_data.uv_object.data_array[blt_inds, 0, :, :]
        unnorm_data = uvdata_data.uv_object2.data_array[blt_inds, 0, :, :]
        assert np.allclose(norm_data, unnorm_data / (eq1 * eq2))
    return
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_remove_eq_coeffs_multiply(uvdata_data):
    """Test using the remove_eq_coeffs method with multiply convention."""
    # give eq_coeffs to the object
    # NOTE: use builtin float; the np.float alias is deprecated and was
    # removed in numpy 1.24, where it raises AttributeError.
    eq_coeffs = np.empty(
        (uvdata_data.uv_object.Nants_telescope, uvdata_data.uv_object.Nfreqs),
        dtype=float,
    )
    for i, ant in enumerate(uvdata_data.uv_object.antenna_numbers):
        eq_coeffs[i, :] = ant + 1
    uvdata_data.uv_object.eq_coeffs = eq_coeffs
    uvdata_data.uv_object.eq_coeffs_convention = "multiply"
    uvdata_data.uv_object.remove_eq_coeffs()
    # make sure the right coefficients were removed: under the multiply
    # convention the data are multiplied by the per-antenna coefficient product
    for key in uvdata_data.uv_object.get_antpairs():
        eq1 = key[0] + 1
        eq2 = key[1] + 1
        blt_inds = uvdata_data.uv_object.antpair2ind(key)
        norm_data = uvdata_data.uv_object.data_array[blt_inds, 0, :, :]
        unnorm_data = uvdata_data.uv_object2.data_array[blt_inds, 0, :, :]
        assert np.allclose(norm_data, unnorm_data * (eq1 * eq2))
    return
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_remove_eq_coeffs_errors(uvdata_data):
    """Test errors raised by remove_eq_coeffs method."""
    uv = uvdata_data.uv_object
    # raise error when eq_coeffs are not defined
    with pytest.raises(ValueError) as err_info:
        uv.remove_eq_coeffs()
    assert str(err_info.value).startswith("The eq_coeffs attribute must be defined")
    # raise error when eq_coeffs are defined but not eq_coeffs_convention
    uv.eq_coeffs = np.ones((uv.Nants_telescope, uv.Nfreqs))
    with pytest.raises(ValueError) as err_info:
        uv.remove_eq_coeffs()
    assert str(err_info.value).startswith(
        "The eq_coeffs_convention attribute must be defined"
    )
    # raise error when convention is not a valid choice
    uv.eq_coeffs_convention = "foo"
    with pytest.raises(ValueError) as err_info:
        uv.remove_eq_coeffs()
    assert str(err_info.value).startswith("Got unknown convention foo. Must be one of")
    return
# each parametrize entry: (UVData reader method name, a list of more than one
# file to pass it) -- multi-file reads must go through UVData.read instead
@pytest.mark.parametrize(
    "read_func,filelist",
    [
        ("read_miriad", [os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA")] * 2),
        (
            "read_mwa_corr_fits",
            [[mwa_corr_files[0:2], [mwa_corr_files[0], mwa_corr_files[2]]]],
        ),
        ("read_uvh5", [os.path.join(DATA_PATH, "zen.2458661.23480.HH.uvh5")] * 2),
        (
            "read_uvfits",
            [os.path.join(DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits")] * 2,
        ),
        (
            "read_ms",
            [
                os.path.join(DATA_PATH, "multi_1.ms"),
                os.path.join(DATA_PATH, "multi_2.ms"),
            ],
        ),
        (
            "read_fhd",
            [
                list(np.array(fhd_files)[[0, 1, 2, 4, 6, 7]]),
                list(np.array(fhd_files)[[0, 2, 3, 5, 6, 7]]),
            ],
        ),
    ],
)
def test_multifile_read_errors(read_func, filelist):
    """Each file-type-specific reader must reject lists of multiple files."""
    uv = UVData()
    with pytest.raises(ValueError) as cm:
        getattr(uv, read_func)(filelist)
    assert str(cm.value).startswith(
        "Reading multiple files from class specific read functions is no "
        "longer supported."
    )
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_multifile_read_check(hera_uvh5, tmp_path):
    """Test setting skip_bad_files=True when reading in files"""
    uvTrue = hera_uvh5
    uvh5_file = os.path.join(DATA_PATH, "zen.2458661.23480.HH.uvh5")
    # Create a test file and remove header info to 'corrupt' it
    testfile = str(tmp_path / "zen.2458661.23480.HH.uvh5")
    uvTrue.write_uvh5(testfile)
    with h5py.File(testfile, "r+") as h5f:
        del h5f["Header/ant_1_array"]
    uv = UVData()
    # Test that the expected error arises
    with pytest.raises(KeyError) as cm:
        uv.read(testfile, skip_bad_files=False)
    assert "Unable to open object (object 'ant_1_array' doesn't exist)" in str(cm.value)
    # Test when the corrupted file is at the beginning, skip_bad_files=False
    fileList = [testfile, uvh5_file]
    with pytest.raises(KeyError) as cm:
        with pytest.warns(UserWarning, match="Failed to read"):
            uv.read(fileList, skip_bad_files=False)
    assert "Unable to open object (object 'ant_1_array' doesn't exist)" in str(cm.value)
    assert uv != uvTrue
    # Test when the corrupted file is at the beginning, skip_bad_files=True
    fileList = [testfile, uvh5_file]
    with pytest.warns(UserWarning, match="Failed to read") as record:
        uv.read(fileList, skip_bad_files=True)
    # exactly one read-failure warning plus the uvw mismatch warning
    assert len(record) == 2
    assert str(record[1].message).startswith("Failed to read")
    assert str(record[0].message).startswith(
        "The uvw_array does not match the expected values given the antenna positions."
    )
    assert uv == uvTrue
    # Test when the corrupted file is at the end of a list
    fileList = [uvh5_file, testfile]
    with pytest.warns(UserWarning, match="Failed to read") as cm:
        uv.read(fileList, skip_bad_files=True)
    # Check that the uncorrupted file was still read in
    assert uv == uvTrue
    os.remove(testfile)
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("err_type", ["KeyError", "ValueError"])
def test_multifile_read_check_long_list(hera_uvh5, tmp_path, err_type):
    """
    Test KeyError catching by setting skip_bad_files=True when
    reading in files for a list of length >2
    """
    # Create mini files for testing
    uv = hera_uvh5
    fileList = []
    for i in range(0, 4):
        uv2 = uv.select(
            times=np.unique(uv.time_array)[i * 5 : i * 5 + 4], inplace=False
        )
        fname = str(tmp_path / f"minifile_{i}.uvh5")
        fileList.append(fname)
        uv2.write_uvh5(fname)
    # corrupt the last file: drop a header key (KeyError) or make the
    # metadata internally inconsistent (ValueError)
    if err_type == "KeyError":
        with h5py.File(fileList[-1], "r+") as h5f:
            del h5f["Header/ant_1_array"]
    elif err_type == "ValueError":
        with h5py.File(fileList[-1], "r+") as h5f:
            h5f["Header/antenna_numbers"][3] = 85
            h5f["Header/ant_1_array"][2] = 1024
    # Test with corrupted file as last file in list, skip_bad_files=True
    uvTest = UVData()
    uvtest.checkWarnings(
        uvTest.read,
        func_args=[fileList[0:4]],
        func_kwargs={"skip_bad_files": True},
        nwarnings=10,
        message=(
            [
                "The uvw_array does not match the expected values given the "
                "antenna positions."
            ]
            * 9
            + ["Failed to read"]
        ),
    )
    uvTrue = UVData()
    uvTrue.read(fileList[0:3], skip_bad_files=True)
    assert uvTest == uvTrue
    # Repeat above test, but with corrupted file as first file in list
    os.remove(fileList[3])
    uv2 = uv.select(times=np.unique(uv.time_array)[15:19], inplace=False)
    fname = str(tmp_path / f"minifile_{3}.uvh5")
    uv2.write_uvh5(fname)
    if err_type == "KeyError":
        with h5py.File(fileList[0], "r+") as h5f:
            del h5f["Header/ant_1_array"]
    elif err_type == "ValueError":
        with h5py.File(fileList[0], "r+") as h5f:
            h5f["Header/antenna_numbers"][3] = 85
            h5f["Header/ant_1_array"][2] = 1024
    uvTest = UVData()
    with pytest.warns(UserWarning) as cm:
        uvTest.read(fileList[0:4], skip_bad_files=True)
    uvTrue = UVData()
    uvTrue.read(fileList[1:4], skip_bad_files=True)
    # (removed a leftover debug `print(cm)` here; only the count matters)
    assert len(cm) == 1
    assert uvTest == uvTrue
    # Repeat above test, but with corrupted file in the middle of the list
    os.remove(fileList[0])
    uv2 = uv.select(times=np.unique(uv.time_array)[0:4], inplace=False)
    fname = str(tmp_path / f"minifile_{0}.uvh5")
    uv2.write_uvh5(fname)
    if err_type == "KeyError":
        with h5py.File(fileList[1], "r+") as h5f:
            del h5f["Header/ant_1_array"]
    elif err_type == "ValueError":
        with h5py.File(fileList[1], "r+") as h5f:
            h5f["Header/antenna_numbers"][3] = 85
            h5f["Header/ant_1_array"][2] = 1024
    uvTest = UVData()
    with pytest.warns(UserWarning) as cm:
        uvTest.read(fileList[0:4], skip_bad_files=True)
    uvTrue = UVData()
    uvTrue.read([fileList[0], fileList[2], fileList[3]], skip_bad_files=True)
    assert len(cm) == 1
    assert uvTest == uvTrue
    # Test with corrupted file in middle of list, but with skip_bad_files=False
    uvTest = UVData()
    if err_type == "KeyError":
        with pytest.raises(KeyError, match="Unable to open object"):
            with pytest.warns(UserWarning, match="Failed to read"):
                uvTest.read(fileList[0:4], skip_bad_files=False)
    elif err_type == "ValueError":
        with pytest.raises(ValueError, match="Nants_data must be equal to"):
            with pytest.warns(UserWarning, match="Failed to read"):
                uvTest.read(fileList[0:4], skip_bad_files=False)
    uvTrue = UVData()
    uvTrue.read([fileList[0], fileList[2], fileList[3]], skip_bad_files=False)
    assert uvTest != uvTrue
    os.remove(fileList[0])
    os.remove(fileList[1])
    os.remove(fileList[2])
    os.remove(fileList[3])
    return
def test_deprecation_warnings_set_phased():
    """
    Test the deprecation warnings in set_phased et al.
    """
    uv = UVData()
    # first call set_phased
    with pytest.warns(DeprecationWarning, match="`set_phased` is deprecated"):
        uv.set_phased()
    # phased objects require the phase-center parameters
    assert uv.phase_type == "phased"
    assert uv._phase_center_epoch.required is True
    assert uv._phase_center_ra.required is True
    assert uv._phase_center_dec.required is True
    # now call set_drift
    with pytest.warns(DeprecationWarning, match="`set_drift` is deprecated"):
        uv.set_drift()
    # drift objects do not require the phase-center parameters
    assert uv.phase_type == "drift"
    assert uv._phase_center_epoch.required is False
    assert uv._phase_center_ra.required is False
    assert uv._phase_center_dec.required is False
    # now call set_unknown_phase_type
    with pytest.warns(
        DeprecationWarning, match="`set_unknown_phase_type` is deprecated"
    ):
        uv.set_unknown_phase_type()
    assert uv.phase_type == "unknown"
    assert uv._phase_center_epoch.required is False
    assert uv._phase_center_ra.required is False
    assert uv._phase_center_dec.required is False
    return
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not in known_telescopes.")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_read_background_lsts():
    """Test reading a file with the lst calc in the background."""
    testfile = os.path.join(DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits")
    uvd_foreground = UVData()
    uvd_background = UVData()
    # computing LSTs in a background thread must not change the result
    uvd_foreground.read(testfile, background_lsts=False)
    uvd_background.read(testfile, background_lsts=True)
    assert uvd_foreground == uvd_background
# Adjust for new warnings format
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for uvdata object."""
import pytest
import os
import copy
import itertools
import h5py
import numpy as np
from astropy.time import Time
from astropy.coordinates import Angle
from astropy.utils import iers
from pyuvdata import UVData, UVCal
import pyuvdata.utils as uvutils
import pyuvdata.tests as uvtest
from pyuvdata.data import DATA_PATH
# needed for multifile read error test
from pyuvdata.uvdata.tests.test_mwa_corr_fits import filelist as mwa_corr_files
from pyuvdata.uvdata.tests.test_fhd import testfiles as fhd_files
from collections import Counter
@pytest.fixture(scope="function")
def uvdata_props():
    """Fixture bundling a fresh UVData object with the expected lists of its
    required/extra parameters and properties, for the attribute-iteration tests."""
    # underscore-prefixed UVParameter names that must always be present
    required_parameters = [
        "_data_array",
        "_nsample_array",
        "_flag_array",
        "_Ntimes",
        "_Nbls",
        "_Nblts",
        "_Nfreqs",
        "_Npols",
        "_Nspws",
        "_uvw_array",
        "_time_array",
        "_ant_1_array",
        "_ant_2_array",
        "_lst_array",
        "_baseline_array",
        "_freq_array",
        "_polarization_array",
        "_spw_array",
        "_integration_time",
        "_channel_width",
        "_object_name",
        "_telescope_name",
        "_instrument",
        "_telescope_location",
        "_history",
        "_vis_units",
        "_Nants_data",
        "_Nants_telescope",
        "_antenna_names",
        "_antenna_numbers",
        "_antenna_positions",
        "_phase_type",
    ]
    # public property names mirroring the required parameters
    required_properties = [
        "data_array",
        "nsample_array",
        "flag_array",
        "Ntimes",
        "Nbls",
        "Nblts",
        "Nfreqs",
        "Npols",
        "Nspws",
        "uvw_array",
        "time_array",
        "ant_1_array",
        "ant_2_array",
        "lst_array",
        "baseline_array",
        "freq_array",
        "polarization_array",
        "spw_array",
        "integration_time",
        "channel_width",
        "object_name",
        "telescope_name",
        "instrument",
        "telescope_location",
        "history",
        "vis_units",
        "Nants_data",
        "Nants_telescope",
        "antenna_names",
        "antenna_numbers",
        "antenna_positions",
        "phase_type",
    ]
    # optional UVParameter names
    extra_parameters = [
        "_extra_keywords",
        "_x_orientation",
        "_antenna_diameters",
        "_blt_order",
        "_gst0",
        "_rdate",
        "_earth_omega",
        "_dut1",
        "_timesys",
        "_uvplane_reference_time",
        "_phase_center_ra",
        "_phase_center_dec",
        "_phase_center_epoch",
        "_phase_center_frame",
        "_eq_coeffs",
        "_eq_coeffs_convention",
    ]
    # public property names mirroring the extra parameters
    extra_properties = [
        "extra_keywords",
        "x_orientation",
        "antenna_diameters",
        "blt_order",
        "gst0",
        "rdate",
        "earth_omega",
        "dut1",
        "timesys",
        "uvplane_reference_time",
        "phase_center_ra",
        "phase_center_dec",
        "phase_center_epoch",
        "phase_center_frame",
        "eq_coeffs",
        "eq_coeffs_convention",
    ]
    # derived/convenience properties that are not UVParameters
    other_properties = [
        "telescope_location_lat_lon_alt",
        "telescope_location_lat_lon_alt_degrees",
        "phase_center_ra_degrees",
        "phase_center_dec_degrees",
        "pyuvdata_version_str",
    ]
    uv_object = UVData()

    class DataHolder:
        # simple container bundling the object and the expected name lists
        def __init__(
            self,
            uv_object,
            required_parameters,
            required_properties,
            extra_parameters,
            extra_properties,
            other_properties,
        ):
            self.uv_object = uv_object
            self.required_parameters = required_parameters
            self.required_properties = required_properties
            self.extra_parameters = extra_parameters
            self.extra_properties = extra_properties
            self.other_properties = other_properties

    uvdata_props = DataHolder(
        uv_object,
        required_parameters,
        required_properties,
        extra_parameters,
        extra_properties,
        other_properties,
    )
    # yields the data we need but will continue to the del call after tests
    yield uvdata_props
    # some post-test object cleanup
    del uvdata_props
    return
@pytest.fixture(scope="session")
def hera_uvh5_master():
    """Session-scoped HERA uvh5 object, read from disk exactly once."""
    testfile = os.path.join(DATA_PATH, "zen.2458661.23480.HH.uvh5")
    uv_object = UVData()
    uv_object.read(testfile)
    yield uv_object
    # cleanup
    del uv_object
    return
@pytest.fixture(scope="function")
def hera_uvh5(hera_uvh5_master):
    """Per-test copy of the session-scoped HERA uvh5 object."""
    uv_object = hera_uvh5_master.copy()
    yield uv_object
    # cleanup
    del uv_object
    return
@pytest.fixture(scope="session")
def paper_uvh5_master():
    """Session-scoped PAPER uvh5 object, read from disk exactly once."""
    # read in the PAPER test file
    uv_object = UVData()
    uvh5_file = os.path.join(DATA_PATH, "zen.2456865.60537.xy.uvcRREAA.uvh5")
    uv_object.read_uvh5(uvh5_file)
    yield uv_object
    # cleanup
    del uv_object
    return
@pytest.fixture(scope="function")
def paper_uvh5(paper_uvh5_master):
    """Per-test copy of the session-scoped PAPER uvh5 object."""
    uv_object = paper_uvh5_master.copy()
    yield uv_object
    # cleanup
    del uv_object
    return
@pytest.fixture(scope="session")
def bda_test_file_master():
    """Session-scoped simulated BDA-like object, read from disk exactly once."""
    # read in test file for BDA-like data
    uv_object = UVData()
    testfile = os.path.join(DATA_PATH, "simulated_bda_file.uvh5")
    uv_object.read(testfile)
    yield uv_object
    # cleanup
    del uv_object
    return
@pytest.fixture(scope="function")
def bda_test_file(bda_test_file_master):
    """Per-test copy of the session-scoped BDA-like object."""
    # read in test file for BDA-like data
    uv_object = bda_test_file_master.copy()
    yield uv_object
    # cleanup
    del uv_object
    return
@pytest.fixture(scope="function")
def uvdata_data(casa_uvfits):
    """Fixture pairing a mutable CASA uvfits object with a pristine copy
    (uv_object2) to compare against after in-place operations."""
    uv_object = casa_uvfits

    class DataHolder:
        # holds the object under test and an untouched reference copy
        def __init__(self, uv_object):
            self.uv_object = uv_object
            self.uv_object2 = uv_object.copy()

    uvdata_data = DataHolder(uv_object)
    # yields the data we need but will continue to the del call after tests
    yield uvdata_data
    # some post-test object cleanup
    del uvdata_data
    return
@pytest.fixture(scope="function")
def uvdata_baseline():
    """Fixture with two bare UVData objects differing only in Nants_telescope,
    for exercising baseline-number encoding below/above the 2048-antenna limit."""
    uv_object = UVData()
    uv_object.Nants_telescope = 128
    uv_object2 = UVData()
    # 2049 exceeds the standard baseline-number antenna limit
    uv_object2.Nants_telescope = 2049

    class DataHolder:
        # simple container for the two objects
        def __init__(self, uv_object, uv_object2):
            self.uv_object = uv_object
            self.uv_object2 = uv_object2

    uvdata_baseline = DataHolder(uv_object, uv_object2)
    # yields the data we need but will continue to the del call after tests
    yield uvdata_baseline
    # Post test clean-up
    del uvdata_baseline
    return
@pytest.fixture(scope="session")
def set_uvws_master(hera_uvh5_master):
    """Session-scoped HERA object with uvws recomputed from antenna positions."""
    uv1 = hera_uvh5_master.copy()
    # uvws in the file are wrong. reset them.
    uv1.set_uvws_from_antenna_positions()
    yield uv1
    del uv1
    return
@pytest.fixture
def uv1_2_set_uvws(set_uvws_master):
    """Two independent per-test copies of the corrected-uvw HERA object."""
    uv1 = set_uvws_master.copy()
    uv2 = set_uvws_master.copy()
    yield uv1, uv2
    del uv1, uv2
    return
@pytest.fixture()
def uv_phase_time_split(uv1_2_set_uvws):
    """Fixture yielding phased and drift (raw) objects, each whole and split
    into alternating-time halves, for add/combine tests."""
    uv_phase, uv_raw = uv1_2_set_uvws
    # put both objects in a deterministic blt order before splitting
    uv_phase.reorder_blts(order="time", minor_order="baseline")
    uv_raw.reorder_blts(order="time", minor_order="baseline")
    uv_phase.phase(ra=0, dec=0, epoch="J2000", use_ant_pos=True)
    times = np.unique(uv_phase.time_array)
    # interleave: even-indexed times in set 1, odd-indexed in set 2
    time_set_1, time_set_2 = times[::2], times[1::2]
    uv_phase_1 = uv_phase.select(times=time_set_1, inplace=False)
    uv_phase_2 = uv_phase.select(times=time_set_2, inplace=False)
    uv_raw_1 = uv_raw.select(times=time_set_1, inplace=False)
    uv_raw_2 = uv_raw.select(times=time_set_2, inplace=False)
    yield uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw
    del uv_phase_1, uv_phase_2, uv_raw_1, uv_raw_2, uv_phase, uv_raw
def test_parameter_iter(uvdata_props):
    """Test expected parameters."""
    # iterating a UVData object yields all of its parameters
    all_params = [prop for prop in uvdata_props.uv_object]
    expected = uvdata_props.required_parameters + uvdata_props.extra_parameters
    for name in expected:
        assert name in all_params, (
            "expected attribute " + name + " not returned in object iterator"
        )
def test_required_parameter_iter(uvdata_props):
    """Test expected required parameters."""
    uv_obj = uvdata_props.uv_object
    # at first it's a metadata_only object, so the data-like parameters
    # are not expected to be required
    required = list(uv_obj.required())
    data_like = ("_data_array", "_nsample_array", "_flag_array")
    expected_required = [
        p for p in uvdata_props.required_parameters if p not in data_like
    ]
    for a in expected_required:
        assert a in required, (
            "expected attribute " + a + " not returned in required iterator"
        )

    # once the data-like attributes are populated every parameter is required
    uv_obj.data_array = 1
    uv_obj.nsample_array = 1
    uv_obj.flag_array = 1
    required = list(uv_obj.required())
    for a in uvdata_props.required_parameters:
        assert a in required, (
            "expected attribute " + a + " not returned in required iterator"
        )
def test_extra_parameter_iter(uvdata_props):
    """Test expected optional parameters."""
    extra = list(uvdata_props.uv_object.extra())
    for a in uvdata_props.extra_parameters:
        assert a in extra, "expected attribute " + a + " not returned in extra iterator"
def test_unexpected_parameters(uvdata_props):
    """Test for extra parameters."""
    expected_parameters = (
        uvdata_props.required_parameters + uvdata_props.extra_parameters
    )
    # parameters are the underscore-prefixed attributes
    underscored = [a for a in uvdata_props.uv_object.__dict__ if a.startswith("_")]
    for a in underscored:
        assert a in expected_parameters, (
            "unexpected parameter " + a + " found in UVData"
        )
def test_unexpected_attributes(uvdata_props):
    """Test for extra attributes."""
    expected_attributes = (
        uvdata_props.required_properties
        + uvdata_props.extra_properties
        + uvdata_props.other_properties
    )
    # attributes are the non-underscore-prefixed names
    plain = [a for a in uvdata_props.uv_object.__dict__ if not a.startswith("_")]
    for a in plain:
        assert a in expected_attributes, (
            "unexpected attribute " + a + " found in UVData"
        )
def test_properties(uvdata_props):
    """Test that properties can be get and set properly."""
    prop_names = uvdata_props.required_properties + uvdata_props.extra_properties
    param_names = uvdata_props.required_parameters + uvdata_props.extra_parameters
    # each property name maps positionally onto its backing parameter
    for prop_name, param_name in zip(prop_names, param_names):
        rand_num = np.random.rand()
        setattr(uvdata_props.uv_object, prop_name, rand_num)
        this_param = getattr(uvdata_props.uv_object, param_name)
        try:
            assert rand_num == this_param.value
        except AssertionError:
            print(
                "setting {prop_name} to a random number failed".format(
                    prop_name=prop_name
                )
            )
            raise
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_metadata_only_property(uvdata_data):
    """metadata_only only becomes True once all three data arrays are None."""
    uv_obj = uvdata_data.uv_object
    uv_obj.data_array = None
    assert uv_obj.metadata_only is False
    with pytest.raises(ValueError):
        uv_obj.check()
    uv_obj.flag_array = None
    assert uv_obj.metadata_only is False
    with pytest.raises(ValueError):
        uv_obj.check()
    uv_obj.nsample_array = None
    assert uv_obj.metadata_only is True
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_equality(uvdata_data):
    """Basic equality test."""
    uv_obj = uvdata_data.uv_object
    # an object must compare equal to itself
    assert uv_obj == uv_obj
@pytest.mark.filterwarnings("ignore:Telescope location derived from obs")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_check(uvdata_data):
    """Test simple check function."""
    assert uvdata_data.uv_object.check()
    # Check variety of special cases
    # Each case below perturbs one attribute, verifies check() raises with the
    # expected message, then restores the attribute so the next case starts
    # from a valid object.
    uvdata_data.uv_object.Nants_data += 1
    with pytest.raises(
        ValueError,
        match=(
            "Nants_data must be equal to the number of unique values in "
            "ant_1_array and ant_2_array"
        ),
    ):
        uvdata_data.uv_object.check()
    uvdata_data.uv_object.Nants_data -= 1
    uvdata_data.uv_object.Nbls += 1
    with pytest.raises(
        ValueError,
        match=(
            "Nbls must be equal to the number of unique baselines in the data_array"
        ),
    ):
        uvdata_data.uv_object.check()
    uvdata_data.uv_object.Nbls -= 1
    uvdata_data.uv_object.Ntimes += 1
    with pytest.raises(
        ValueError,
        match=("Ntimes must be equal to the number of unique times in the time_array"),
    ):
        uvdata_data.uv_object.check()
    uvdata_data.uv_object.Ntimes -= 1
    # strict uvw-vs-antenna-position checking should fail on this file
    with pytest.raises(
        ValueError,
        match=(
            "The uvw_array does not match the expected values given the antenna "
            "positions."
        ),
    ):
        uvdata_data.uv_object.check(strict_uvw_antpos_check=True)
    # Check case where all data is autocorrelations
    # Currently only test files that have autos are fhd files
    testdir = os.path.join(DATA_PATH, "fhd_vis_data/")
    file_list = [
        testdir + "1061316296_flags.sav",
        testdir + "1061316296_vis_XX.sav",
        testdir + "1061316296_params.sav",
        testdir + "1061316296_layout.sav",
        testdir + "1061316296_settings.txt",
    ]
    uvdata_data.uv_object.read_fhd(file_list)
    # keep only the auto-correlation baselines (ant_1 == ant_2)
    uvdata_data.uv_object.select(
        blt_inds=np.where(
            uvdata_data.uv_object.ant_1_array == uvdata_data.uv_object.ant_2_array
        )[0]
    )
    assert uvdata_data.uv_object.check()
    # test auto and cross corr uvw_array
    uvd = UVData()
    uvd.read_uvh5(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5"))
    autos = np.isclose(uvd.ant_1_array - uvd.ant_2_array, 0.0)
    auto_inds = np.where(autos)[0]
    cross_inds = np.where(~autos)[0]
    # make auto have non-zero uvw coords, assert ValueError
    uvd.uvw_array[auto_inds[0], 0] = 0.1
    with pytest.raises(
        ValueError,
        match=("Some auto-correlations have non-zero uvw_array coordinates."),
    ):
        uvd.check()
    # make cross have |uvw| zero, assert ValueError
    # (re-read the file first to undo the auto perturbation above)
    uvd.read_uvh5(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5"))
    uvd.uvw_array[cross_inds[0]][:] = 0.0
    with pytest.raises(
        ValueError,
        match=("Some cross-correlations have near-zero uvw_array magnitudes."),
    ):
        uvd.check()
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_nants_data_telescope_larger(uvdata_data):
    """Nants_telescope may be strictly greater than Nants_data."""
    uvobj = uvdata_data.uv_object
    uvobj.Nants_telescope += 1
    # pad the per-antenna metadata for the "new antenna" so check() passes
    uvobj.antenna_names = np.concatenate((uvobj.antenna_names, ["dummy_ant"]))
    uvobj.antenna_numbers = np.concatenate((uvobj.antenna_numbers, [20]))
    uvobj.antenna_positions = np.concatenate(
        (uvobj.antenna_positions, np.zeros((1, 3))), axis=0
    )
    assert uvobj.check()
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_ant1_array_not_in_antnums(uvdata_data):
    """check() errors if ant_1_array has antennas missing from antenna_numbers."""
    uvobj = uvdata_data.uv_object
    # drop the first antenna from the per-antenna metadata by hand
    uvobj.antenna_names = uvobj.antenna_names[1:]
    uvobj.antenna_numbers = uvobj.antenna_numbers[1:]
    uvobj.antenna_positions = uvobj.antenna_positions[1:, :]
    uvobj.Nants_telescope = uvobj.antenna_numbers.size
    with pytest.raises(ValueError) as err:
        uvobj.check()
    assert str(err.value).startswith(
        "All antennas in ant_1_array must be in antenna_numbers"
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_ant2_array_not_in_antnums(uvdata_data):
    """check() errors if ant_2_array has antennas missing from antenna_numbers."""
    uv_object = uvdata_data.uv_object
    # drop the last antenna from the per-antenna metadata by hand
    uv_object.antenna_names = uv_object.antenna_names[:-1]
    uv_object.antenna_numbers = uv_object.antenna_numbers[:-1]
    uv_object.antenna_positions = uv_object.antenna_positions[:-1]
    uv_object.Nants_telescope = uv_object.antenna_numbers.size
    with pytest.raises(ValueError) as err:
        uv_object.check()
    assert str(err.value).startswith(
        "All antennas in ant_2_array must be in antenna_numbers"
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_converttofiletype(uvdata_data):
    """Round-trip through a filetype object; unknown filetypes must error."""
    fhd_obj = uvdata_data.uv_object._convert_to_filetype("fhd")
    uvdata_data.uv_object._convert_from_filetype(fhd_obj)
    assert uvdata_data.uv_object == uvdata_data.uv_object2
    with pytest.raises(ValueError) as err:
        uvdata_data.uv_object._convert_to_filetype("foo")
    assert str(err.value).startswith("filetype must be uvfits, miriad, fhd, or uvh5")
def test_baseline_to_antnums(uvdata_baseline):
    """Test baseline to antnum conversion for 256 & larger conventions."""
    # 67585 encodes baseline (0, 0) in the >256-antenna convention
    assert uvdata_baseline.uv_object.baseline_to_antnums(67585) == (0, 0)
    # uv_object2 has Nants_telescope > 2048, which is not supported
    with pytest.raises(Exception) as cm:
        uvdata_baseline.uv_object2.baseline_to_antnums(67585)
    assert str(cm.value).startswith(
        "error Nants={Nants}>2048"
        " not supported".format(Nants=uvdata_baseline.uv_object2.Nants_telescope)
    )
    ant_pairs = [(10, 20), (280, 310)]
    for pair in ant_pairs:
        if np.max(np.array(pair)) < 255:
            # small antenna numbers can round-trip through the 256 convention
            bl = uvdata_baseline.uv_object.antnums_to_baseline(
                pair[0], pair[1], attempt256=True
            )
            ant_pair_out = uvdata_baseline.uv_object.baseline_to_antnums(bl)
            assert pair == ant_pair_out
        # every pair must round-trip through the default (2048) convention
        bl = uvdata_baseline.uv_object.antnums_to_baseline(
            pair[0], pair[1], attempt256=False
        )
        ant_pair_out = uvdata_baseline.uv_object.baseline_to_antnums(bl)
        assert pair == ant_pair_out
def test_baseline_to_antnums_vectorized(uvdata_baseline):
    """Test vectorized antnum to baseline conversion."""
    ants_a = [10, 280]
    ants_b = [20, 310]
    baselines = uvdata_baseline.uv_object.antnums_to_baseline(ants_a, ants_b)
    assert np.array_equal(baselines, [88085, 641335])
    # converting back (from a plain list) must recover both antenna arrays
    back_a, back_b = uvdata_baseline.uv_object.baseline_to_antnums(
        baselines.tolist()
    )
    assert np.array_equal(ants_a, back_a)
    assert np.array_equal(ants_b, back_b)
def test_antnums_to_baselines(uvdata_baseline):
    """Test antums to baseline conversion for 256 & larger conventions."""
    assert uvdata_baseline.uv_object.antnums_to_baseline(0, 0) == 67585
    assert uvdata_baseline.uv_object.antnums_to_baseline(257, 256) == 594177
    assert uvdata_baseline.uv_object.baseline_to_antnums(594177) == (257, 256)
    # Check attempt256
    assert uvdata_baseline.uv_object.antnums_to_baseline(0, 0, attempt256=True) == 257
    assert uvdata_baseline.uv_object.antnums_to_baseline(257, 256) == 594177
    # antenna numbers > 256 with attempt256=True should warn and fall back
    uvtest.checkWarnings(
        uvdata_baseline.uv_object.antnums_to_baseline,
        [257, 256],
        {"attempt256": True},
        message="found > 256 antennas",
    )
    # uv_object2 has Nants_telescope > 2048, which is unsupported
    pytest.raises(Exception, uvdata_baseline.uv_object2.antnums_to_baseline, 0, 0)
    # check a len-1 array returns as an array
    ant1 = np.array([1])
    ant2 = np.array([2])
    assert isinstance(
        uvdata_baseline.uv_object.antnums_to_baseline(ant1, ant2), np.ndarray
    )
def test_known_telescopes():
    """Test known_telescopes method returns expected results.

    Uses the builtin ``sorted()`` for an order-insensitive comparison:
    unlike ``list.sort()`` (which mutates in place and returns None, the
    reason the old code detoured through ``np.sort(...).tolist()``),
    ``sorted()`` returns a new list and works directly on lists of strings.
    """
    uv_object = UVData()
    expected_telescopes = ["PAPER", "HERA", "MWA"]
    assert sorted(expected_telescopes) == sorted(uv_object.known_telescopes())
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_hera_diameters(paper_uvh5):
    """Setting telescope_name to HERA should fill in known antenna diameters."""
    uv_in = paper_uvh5
    uv_in.telescope_name = "HERA"
    # set_telescope_params should warn that it is filling in the diameters
    uvtest.checkWarnings(
        uv_in.set_telescope_params,
        message="antenna_diameters " "is not set. Using known values for HERA.",
    )
    assert uv_in.telescope_name == "HERA"
    assert uv_in.antenna_diameters is not None
    uv_in.check()
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_generic_read():
    """Generic read() should reject mutually exclusive kwargs and bad files."""
    uv_in = UVData()
    uvfits_file = os.path.join(DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits")
    uv_in.read(uvfits_file, read_data=False)
    unique_times = np.unique(uv_in.time_array)

    # times and time_range may not both be given
    with pytest.raises(ValueError):
        uv_in.read(
            uvfits_file,
            times=unique_times[0:2],
            time_range=[unique_times[0], unique_times[1]],
        )

    # antenna_nums and antenna_names may not both be given
    with pytest.raises(ValueError):
        uv_in.read(
            uvfits_file,
            antenna_nums=uv_in.antenna_numbers[0],
            antenna_names=uv_in.antenna_names[1],
        )

    # an unrecognized file should error
    with pytest.raises(ValueError):
        uv_in.read("foo")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "phase_kwargs",
    [
        # simple ICRS phase center at the origin
        {"ra": 0.0, "dec": 0.0, "epoch": "J2000"},
        # GCRS frame with Angle inputs
        {"ra": Angle("5d").rad, "dec": Angle("30d").rad, "phase_frame": "gcrs"},
        # Time-object epoch, mixed float/Angle coordinates
        {
            "ra": Angle("180d").rad,
            "dec": Angle("90d"),
            "epoch": Time("2010-01-01T00:00:00", format="isot", scale="utc"),
        },
    ],
)
def test_phase_unphase_hera(uv1_2_set_uvws, phase_kwargs):
    """
    Read in drift data, phase to an RA/DEC, unphase and check for object equality.
    """
    uv1, uv_raw = uv1_2_set_uvws
    uv1.phase(**phase_kwargs)
    uv1.unphase_to_drift()
    # check that phase + unphase gets back to raw
    assert uv_raw == uv1
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_unphase_hera_one_bl(uv1_2_set_uvws):
    """Phase + unphase should round-trip even with a single baseline."""
    _, uv_raw = uv1_2_set_uvws
    single_bl_raw = uv_raw.select(blt_inds=[0], inplace=False)
    single_bl_phased = single_bl_raw.copy()
    single_bl_phased.phase(Angle("23h").rad, Angle("15d").rad)
    single_bl_phased.unphase_to_drift()
    assert single_bl_raw == single_bl_phased
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_unphase_hera_antpos(uv1_2_set_uvws):
    """Antenna-position phasing should agree with uvw-array phasing."""
    uv_phase, uv_raw = uv1_2_set_uvws
    # check that they match if you phase & unphase using antenna locations
    # first replace the uvws with the right values
    antenna_enu = uvutils.ENU_from_ECEF(
        (uv_raw.antenna_positions + uv_raw.telescope_location),
        *uv_raw.telescope_location_lat_lon_alt,
    )
    uvw_calc = np.zeros_like(uv_raw.uvw_array)
    unique_times, unique_inds = np.unique(uv_raw.time_array, return_index=True)
    for ind, jd in enumerate(unique_times):
        inds = np.where(uv_raw.time_array == jd)[0]
        for bl_ind in inds:
            # locate each antenna's row in the ENU position table
            wh_ant1 = np.where(uv_raw.antenna_numbers == uv_raw.ant_1_array[bl_ind])
            ant1_index = wh_ant1[0][0]
            wh_ant2 = np.where(uv_raw.antenna_numbers == uv_raw.ant_2_array[bl_ind])
            ant2_index = wh_ant2[0][0]
            # drift-scan uvw is the ENU baseline vector (ant2 - ant1)
            uvw_calc[bl_ind, :] = (
                antenna_enu[ant2_index, :] - antenna_enu[ant1_index, :]
            )
    uv_raw_new = uv_raw.copy()
    uv_raw_new.uvw_array = uvw_calc
    uv_phase.phase(0.0, 0.0, epoch="J2000", use_ant_pos=True)
    uv_phase2 = uv_raw_new.copy()
    uv_phase2.phase(0.0, 0.0, epoch="J2000")
    # The uvw's only agree to ~1mm. should they be better?
    assert np.allclose(uv_phase2.uvw_array, uv_phase.uvw_array, atol=1e-3)
    # the data array are just multiplied by the w's for phasing, so a difference
    # at the 1e-3 level makes the data array different at that level too.
    # -> change the tolerance on data_array for this test
    uv_phase2._data_array.tols = (0, 1e-3 * np.amax(np.abs(uv_phase2.data_array)))
    assert uv_phase2 == uv_phase
    # check that phase + unphase gets back to raw using antpos
    uv_phase.unphase_to_drift(use_ant_pos=True)
    assert uv_raw_new == uv_phase
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_hera_zenith_timestamp_minimal_changes(uv1_2_set_uvws):
    """Phasing to zenith at a single timestamp should barely change the uvws."""
    uv_phase, uv_raw = uv1_2_set_uvws
    # check that phasing to zenith with one timestamp has small changes
    # (it won't be identical because of precession/nutation changing the
    # coordinate axes)
    # use gcrs rather than icrs to reduce differences (don't include abberation)
    uv_raw_small = uv_raw.select(times=uv_raw.time_array[0], inplace=False)
    uv_phase_simple_small = uv_raw_small.copy()
    uv_phase_simple_small.phase_to_time(
        time=Time(uv_raw.time_array[0], format="jd"), phase_frame="gcrs"
    )
    # it's unclear to me how close this should be...
    assert np.allclose(
        uv_phase_simple_small.uvw_array, uv_raw_small.uvw_array, atol=1e-1
    )
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_to_time_jd_input(uv1_2_set_uvws):
    """A bare JD float should be accepted by phase_to_time and round-trip."""
    phased, raw = uv1_2_set_uvws
    phased.phase_to_time(raw.time_array[0])
    phased.unphase_to_drift()
    assert phased == raw
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_to_time_error(uv1_2_set_uvws):
    """phase_to_time should reject arguments that are not Time objects."""
    uv_phase, _ = uv1_2_set_uvws
    with pytest.raises(TypeError) as err:
        uv_phase.phase_to_time("foo")
    assert str(err.value).startswith("time must be an astropy.time.Time object")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_unphase_drift_data_error(uv1_2_set_uvws):
    """unphase_to_drift on already-drift data should raise a ValueError."""
    uv_phase, uv_raw = uv1_2_set_uvws
    # check error when unphasing data that is already drift scanning
    with pytest.raises(ValueError) as cm:
        uv_phase.unphase_to_drift()
    assert str(cm.value).startswith("The data is already drift scanning;")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "phase_func,phase_kwargs,err_msg",
    [
        (
            "unphase_to_drift",
            {},
            "The phasing type of the data is unknown. Set the phase_type",
        ),
        (
            "phase",
            {"ra": 0, "dec": 0, "epoch": "J2000", "allow_rephase": False},
            "The phasing type of the data is unknown. Set the phase_type",
        ),
        (
            "phase_to_time",
            {"time": 0, "allow_rephase": False},
            "The phasing type of the data is unknown. Set the phase_type",
        ),
    ],
)
def test_unknown_phase_unphase_hera_errors(
    uv1_2_set_uvws, phase_func, phase_kwargs, err_msg
):
    """Phasing operations should error when the phase type is unknown."""
    uv_phase, uv_raw = uv1_2_set_uvws
    # Set phase type to unknown on some tests, ignore on others.
    uv_phase._set_unknown_phase_type()
    # if this is phase_to_time, use this index set in the dictionary and
    # assign the value of the time_array associated with that index
    # this is a little hacky, but we cannot access uv_phase.time_array in the
    # parametrize
    if phase_func == "phase_to_time":
        phase_kwargs["time"] = uv_phase.time_array[phase_kwargs["time"]]
    with pytest.raises(ValueError) as cm:
        getattr(uv_phase, phase_func)(**phase_kwargs)
    assert str(cm.value).startswith(err_msg)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "phase_func,phase_kwargs,err_msg",
    [
        (
            "phase",
            {"ra": 0, "dec": 0, "epoch": "J2000", "allow_rephase": False},
            "The data is already phased;",
        ),
        (
            "phase_to_time",
            {"time": 0, "allow_rephase": False},
            "The data is already phased;",
        ),
    ],
)
def test_phase_rephase_hera_errors(uv1_2_set_uvws, phase_func, phase_kwargs, err_msg):
    """Re-phasing already-phased data must error when allow_rephase is False."""
    uv_phase, uv_raw = uv1_2_set_uvws
    uv_phase.phase(0.0, 0.0, epoch="J2000")
    # if this is phase_to_time, use this index set in the dictionary and
    # assign the value of the time_array associated with that index
    # this is a little hacky, but we cannot access uv_phase.time_array in the
    # parametrize
    if phase_func == "phase_to_time":
        phase_kwargs["time"] = uv_phase.time_array[int(phase_kwargs["time"])]
    with pytest.raises(ValueError) as cm:
        getattr(uv_phase, phase_func)(**phase_kwargs)
    assert str(cm.value).startswith(err_msg)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_phase_unphase_hera_bad_frame(uv1_2_set_uvws):
    """Phasing to an unsupported frame should raise a ValueError."""
    uv_phase, _ = uv1_2_set_uvws
    with pytest.raises(ValueError) as err:
        uv_phase.phase(0.0, 0.0, epoch="J2000", phase_frame="cirs")
    assert str(err.value).startswith("phase_frame can only be set to icrs or gcrs.")
def test_phasing():
    """Use MWA files phased to 2 different places to test phasing."""
    file1 = os.path.join(DATA_PATH, "1133866760.uvfits")
    file2 = os.path.join(DATA_PATH, "1133866760_rephase.uvfits")
    uvd1 = UVData()
    uvd2 = UVData()
    uvd1.read_uvfits(file1)
    uvd2.read_uvfits(file2)

    # unphase both datasets, both with and without antenna positions
    uvd1_drift = uvd1.copy()
    uvd1_drift.unphase_to_drift(phase_frame="gcrs")
    uvd1_drift_antpos = uvd1.copy()
    uvd1_drift_antpos.unphase_to_drift(phase_frame="gcrs", use_ant_pos=True)
    uvd2_drift = uvd2.copy()
    uvd2_drift.unphase_to_drift(phase_frame="gcrs")
    uvd2_drift_antpos = uvd2.copy()
    uvd2_drift_antpos.unphase_to_drift(phase_frame="gcrs", use_ant_pos=True)
    # the tolerances here are empirical -- based on what was seen in the
    # external phasing test. See the phasing memo in docs/references for
    # details.
    assert np.allclose(uvd1_drift.uvw_array, uvd2_drift.uvw_array, atol=2e-2)
    assert np.allclose(uvd1_drift_antpos.uvw_array, uvd2_drift_antpos.uvw_array)

    # rephase the second file onto the first file's phase center
    uvd2_rephase = uvd2.copy()
    uvd2_rephase.phase(
        uvd1.phase_center_ra,
        uvd1.phase_center_dec,
        uvd1.phase_center_epoch,
        orig_phase_frame="gcrs",
        phase_frame="gcrs",
    )
    uvd2_rephase_antpos = uvd2.copy()
    uvd2_rephase_antpos.phase(
        uvd1.phase_center_ra,
        uvd1.phase_center_dec,
        uvd1.phase_center_epoch,
        orig_phase_frame="gcrs",
        phase_frame="gcrs",
        use_ant_pos=True,
    )
    # the tolerances here are empirical -- based on what was seen in the
    # external phasing test. See the phasing memo in docs/references for
    # details.
    assert np.allclose(uvd1.uvw_array, uvd2_rephase.uvw_array, atol=2e-2)
    assert np.allclose(uvd1.uvw_array, uvd2_rephase_antpos.uvw_array, atol=5e-3)

    # rephase the drift objects to the original pointing and verify that they
    # match
    uvd1_drift.phase(
        uvd1.phase_center_ra,
        uvd1.phase_center_dec,
        uvd1.phase_center_epoch,
        phase_frame="gcrs",
    )
    uvd1_drift_antpos.phase(
        uvd1.phase_center_ra,
        uvd1.phase_center_dec,
        uvd1.phase_center_epoch,
        phase_frame="gcrs",
        use_ant_pos=True,
    )
    # the tolerances here are empirical -- caused by one unphase/phase cycle.
    # the antpos-based phasing differences are based on what was seen in the
    # external phasing test. See the phasing memo in docs/references for
    # details.
    assert np.allclose(uvd1.uvw_array, uvd1_drift.uvw_array, atol=1e-4)
    assert np.allclose(uvd1.uvw_array, uvd1_drift_antpos.uvw_array, atol=5e-3)

    uvd2_drift.phase(
        uvd2.phase_center_ra,
        uvd2.phase_center_dec,
        uvd2.phase_center_epoch,
        phase_frame="gcrs",
    )
    uvd2_drift_antpos.phase(
        uvd2.phase_center_ra,
        uvd2.phase_center_dec,
        uvd2.phase_center_epoch,
        phase_frame="gcrs",
        use_ant_pos=True,
    )
    # the tolerances here are empirical -- caused by one unphase/phase cycle.
    # the antpos-based phasing differences are based on what was seen in the
    # external phasing test. See the phasing memo in docs/references for
    # details.
    assert np.allclose(uvd2.uvw_array, uvd2_drift.uvw_array, atol=1e-4)
    assert np.allclose(uvd2.uvw_array, uvd2_drift_antpos.uvw_array, atol=2e-2)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_set_phase_unknown(casa_uvfits):
    """_set_unknown_phase_type relaxes the phase-center requirements."""
    uv_object = casa_uvfits
    uv_object._set_unknown_phase_type()
    assert uv_object.phase_type == "unknown"
    # none of the phase-center parameters should be required any more
    for param in (
        uv_object._phase_center_epoch,
        uv_object._phase_center_ra,
        uv_object._phase_center_dec,
    ):
        assert not param.required
    assert uv_object.check()
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_blts(paper_uvh5):
    """Selecting on blt_inds keeps exactly those baseline-times (and metadata)."""
    uv_object = paper_uvh5
    old_history = uv_object.history
    # a fixed, deliberately unsorted sample of baseline-time indices
    # fmt: off
    blt_inds = np.array([172, 182, 132, 227, 144, 44, 16, 104, 385, 134, 326, 140, 116,
                         218, 178, 391, 111, 276, 274, 308, 38, 64, 317, 76, 239, 246,
                         34, 39, 83, 184, 208, 60, 374, 295, 118, 337, 261, 21, 375,
                         396, 355, 187, 95, 122, 186, 113, 260, 264, 156, 13, 228, 291,
                         302, 72, 137, 216, 299, 341, 207, 256, 223, 250, 268, 147, 73,
                         32, 142, 383, 221, 203, 258, 286, 324, 265, 170, 236, 8, 275,
                         304, 117, 29, 167, 15, 388, 171, 82, 322, 248, 160, 85, 66,
                         46, 272, 328, 323, 152, 200, 119, 359, 23, 363, 56, 219, 257,
                         11, 307, 336, 289, 136, 98, 37, 163, 158, 80, 125, 40, 298,
                         75, 320, 74, 57, 346, 121, 129, 332, 238, 93, 18, 330, 339,
                         381, 234, 176, 22, 379, 199, 266, 100, 90, 292, 205, 58, 222,
                         350, 109, 273, 191, 368, 88, 101, 65, 155, 2, 296, 306, 398,
                         369, 378, 254, 67, 249, 102, 348, 392, 20, 28, 169, 262, 269,
                         287, 86, 300, 143, 177, 42, 290, 284, 123, 189, 175, 97, 340,
                         242, 342, 331, 282, 235, 344, 63, 115, 78, 30, 226, 157, 133,
                         71, 35, 212, 333])
    # fmt: on
    # select() keeps blts in sorted order, so sort here for comparison
    selected_data = uv_object.data_array[np.sort(blt_inds), :, :, :]
    uv_object2 = uv_object.copy()
    uv_object2.select(blt_inds=blt_inds)
    assert len(blt_inds) == uv_object2.Nblts
    # verify that histories are different
    assert not uvutils._check_histories(old_history, uv_object2.history)
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific baseline-times using pyuvdata.",
        uv_object2.history,
    )
    assert np.all(selected_data == uv_object2.data_array)
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(blt_inds=blt_inds[np.newaxis, :])
    assert len(blt_inds) == uv_object2.Nblts
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific baseline-times using pyuvdata.",
        uv_object2.history,
    )
    assert np.all(selected_data == uv_object2.data_array)
    # check that just doing the metadata works properly
    uv_object3 = uv_object.copy()
    uv_object3.data_array = None
    uv_object3.flag_array = None
    uv_object3.nsample_array = None
    assert uv_object3.metadata_only is True
    uv_object4 = uv_object3.select(blt_inds=blt_inds, inplace=False)
    # every non-data parameter must match the full select; data stays None
    for param in uv_object4:
        param_name = getattr(uv_object4, param).name
        if param_name not in ["data_array", "flag_array", "nsample_array"]:
            assert getattr(uv_object4, param) == getattr(uv_object2, param)
        else:
            assert getattr(uv_object4, param_name) is None
    # also check with inplace=True
    uv_object3.select(blt_inds=blt_inds)
    assert uv_object3 == uv_object4
    # check for errors associated with out of bounds indices
    pytest.raises(ValueError, uv_object.select, blt_inds=np.arange(-10, -5))
    pytest.raises(
        ValueError,
        uv_object.select,
        blt_inds=np.arange(uv_object.Nblts + 1, uv_object.Nblts + 10),
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_antennas(casa_uvfits):
    """Test selecting on antenna_nums and antenna_names, plus error cases.

    Fixes relative to the previous version:
    - ``dtype=np.float`` -> ``dtype=float``: ``np.float`` was a deprecated
      alias for the builtin ``float`` (removed in NumPy 1.24), so the old
      spelling raises AttributeError on current NumPy.
    - the antenna_diameters check used ``assert a, b`` (a tuple-style assert
      where ``b`` is just the failure message, so nothing was compared); it
      now performs a real equality comparison.
    """
    uv_object = casa_uvfits
    old_history = uv_object.history
    unique_ants = np.unique(
        uv_object.ant_1_array.tolist() + uv_object.ant_2_array.tolist()
    )
    ants_to_keep = np.array([0, 19, 11, 24, 3, 23, 1, 20, 21])
    # baseline-times survive the select only if both antennas are kept
    blts_select = [
        (a1 in ants_to_keep) & (a2 in ants_to_keep)
        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)
    ]
    Nblts_selected = np.sum(blts_select)
    uv_object2 = uv_object.copy()
    uv_object2.select(antenna_nums=ants_to_keep)
    assert len(ants_to_keep) == uv_object2.Nants_data
    assert Nblts_selected == uv_object2.Nblts
    for ant in ants_to_keep:
        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in ants_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific antennas using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(antenna_nums=ants_to_keep[np.newaxis, :])
    assert len(ants_to_keep) == uv_object2.Nants_data
    assert Nblts_selected == uv_object2.Nblts
    for ant in ants_to_keep:
        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in ants_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific antennas using pyuvdata.",
        uv_object2.history,
    )
    # now test using antenna_names to specify antennas to keep
    uv_object3 = uv_object.copy()
    ants_to_keep = np.array(sorted(ants_to_keep))
    ant_names = []
    for a in ants_to_keep:
        ind = np.where(uv_object3.antenna_numbers == a)[0][0]
        ant_names.append(uv_object3.antenna_names[ind])
    uv_object3.select(antenna_names=ant_names)
    assert uv_object2 == uv_object3
    # check that it also works with higher dimension array
    uv_object3 = uv_object.copy()
    ants_to_keep = np.array(sorted(ants_to_keep))
    ant_names = []
    for a in ants_to_keep:
        ind = np.where(uv_object3.antenna_numbers == a)[0][0]
        ant_names.append(uv_object3.antenna_names[ind])
    uv_object3.select(antenna_names=[ant_names])
    assert uv_object2 == uv_object3
    # test removing metadata associated with antennas that are no longer present
    # also add (different) antenna_diameters to test downselection
    uv_object.antenna_diameters = 1.0 * np.ones(
        (uv_object.Nants_telescope,), dtype=float
    )
    for i in range(uv_object.Nants_telescope):
        uv_object.antenna_diameters += i
    uv_object4 = uv_object.copy()
    uv_object4.select(antenna_nums=ants_to_keep, keep_all_metadata=False)
    assert uv_object4.Nants_telescope == 9
    assert set(uv_object4.antenna_numbers) == set(ants_to_keep)
    for a in ants_to_keep:
        idx1 = uv_object.antenna_numbers.tolist().index(a)
        idx2 = uv_object4.antenna_numbers.tolist().index(a)
        assert uv_object.antenna_names[idx1] == uv_object4.antenna_names[idx2]
        assert np.allclose(
            uv_object.antenna_positions[idx1, :], uv_object4.antenna_positions[idx2, :]
        )
        # real comparison (the old "assert x, y" tuple form always passed)
        assert uv_object.antenna_diameters[idx1] == uv_object4.antenna_diameters[idx2]
    # remove antenna_diameters from object
    uv_object.antenna_diameters = None
    # check for errors associated with antennas not included in data, bad names
    # or providing numbers and names
    pytest.raises(
        ValueError, uv_object.select, antenna_nums=np.max(unique_ants) + np.arange(1, 3)
    )
    pytest.raises(ValueError, uv_object.select, antenna_names="test1")
    pytest.raises(
        ValueError, uv_object.select, antenna_nums=ants_to_keep, antenna_names=ant_names
    )
def sort_bl(p):
    """Sort a tuple that starts with a pair of antennas, and may have stuff after."""
    first, second = p[0], p[1]
    if first <= second:
        return p
    # swap the leading antenna pair, keep any trailing elements in place
    return (second, first) + p[2:]
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_bls(casa_uvfits):
    """Test select on bls: antenna-pair tuples, baseline numbers, 3-tuples
    including a polarization string, numpy-integer inputs, and the error
    paths for malformed or absent bls arguments.
    """
    uv_object = casa_uvfits
    old_history = uv_object.history
    first_ants = [6, 2, 7, 2, 21, 27, 8]
    second_ants = [0, 20, 8, 1, 2, 3, 22]
    new_unique_ants = np.unique(first_ants + second_ants)
    ant_pairs_to_keep = list(zip(first_ants, second_ants))
    sorted_pairs_to_keep = [sort_bl(p) for p in ant_pairs_to_keep]
    # independently compute which baseline-times should survive the selection
    blts_select = [
        sort_bl((a1, a2)) in sorted_pairs_to_keep
        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)
    ]
    Nblts_selected = np.sum(blts_select)
    uv_object2 = uv_object.copy()
    uv_object2.select(bls=ant_pairs_to_keep)
    sorted_pairs_object2 = [
        sort_bl(p) for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)
    ]
    assert len(new_unique_ants) == uv_object2.Nants_data
    assert Nblts_selected == uv_object2.Nblts
    for ant in new_unique_ants:
        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in new_unique_ants
    for pair in sorted_pairs_to_keep:
        assert pair in sorted_pairs_object2
    for pair in sorted_pairs_object2:
        assert pair in sorted_pairs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific baselines using pyuvdata.",
        uv_object2.history,
    )
    # check using baseline number parameter
    uv_object3 = uv_object.copy()
    bls_nums_to_keep = [
        uv_object.antnums_to_baseline(ant1, ant2) for ant1, ant2 in sorted_pairs_to_keep
    ]
    uv_object3.select(bls=bls_nums_to_keep)
    sorted_pairs_object3 = [
        sort_bl(p) for p in zip(uv_object3.ant_1_array, uv_object3.ant_2_array)
    ]
    assert len(new_unique_ants) == uv_object3.Nants_data
    assert Nblts_selected == uv_object3.Nblts
    for ant in new_unique_ants:
        assert ant in uv_object3.ant_1_array or ant in uv_object3.ant_2_array
    for ant in np.unique(
        uv_object3.ant_1_array.tolist() + uv_object3.ant_2_array.tolist()
    ):
        assert ant in new_unique_ants
    for pair in sorted_pairs_to_keep:
        assert pair in sorted_pairs_object3
    for pair in sorted_pairs_object3:
        assert pair in sorted_pairs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific baselines using pyuvdata.",
        uv_object3.history,
    )
    # check select with polarizations
    first_ants = [6, 2, 7, 2, 21, 27, 8]
    second_ants = [0, 20, 8, 1, 2, 3, 22]
    pols = ["RR", "RR", "RR", "RR", "RR", "RR", "RR"]
    new_unique_ants = np.unique(first_ants + second_ants)
    bls_to_keep = list(zip(first_ants, second_ants, pols))
    sorted_bls_to_keep = [sort_bl(p) for p in bls_to_keep]
    blts_select = [
        sort_bl((a1, a2, "RR")) in sorted_bls_to_keep
        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)
    ]
    Nblts_selected = np.sum(blts_select)
    uv_object2 = uv_object.copy()
    uv_object2.select(bls=bls_to_keep)
    sorted_pairs_object2 = [
        sort_bl(p) + ("RR",)
        for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)
    ]
    assert len(new_unique_ants) == uv_object2.Nants_data
    assert Nblts_selected == uv_object2.Nblts
    for ant in new_unique_ants:
        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in new_unique_ants
    for bl in sorted_bls_to_keep:
        assert bl in sorted_pairs_object2
    for bl in sorted_pairs_object2:
        assert bl in sorted_bls_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to "
        "specific baselines, polarizations using pyuvdata.",
        uv_object2.history,
    )
    # check that you can use numpy integers with out errors:
    first_ants = list(map(np.int32, [6, 2, 7, 2, 21, 27, 8]))
    second_ants = list(map(np.int32, [0, 20, 8, 1, 2, 3, 22]))
    ant_pairs_to_keep = list(zip(first_ants, second_ants))
    uv_object2 = uv_object.select(bls=ant_pairs_to_keep, inplace=False)
    sorted_pairs_object2 = [
        sort_bl(p) for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)
    ]
    assert len(new_unique_ants) == uv_object2.Nants_data
    assert Nblts_selected == uv_object2.Nblts
    for ant in new_unique_ants:
        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in new_unique_ants
    for pair in sorted_pairs_to_keep:
        assert pair in sorted_pairs_object2
    for pair in sorted_pairs_object2:
        assert pair in sorted_pairs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific baselines using pyuvdata.",
        uv_object2.history,
    )
    # check that you can specify a single pair without errors
    uv_object2.select(bls=(0, 6))
    sorted_pairs_object2 = [
        sort_bl(p) for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)
    ]
    assert list(set(sorted_pairs_object2)) == [(0, 6)]
    # check for errors associated with antenna pairs not included in data and bad inputs
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=list(zip(first_ants, second_ants)) + [0, 6])
    assert str(cm.value).startswith("bls must be a list of tuples of antenna numbers")
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=[(uv_object.antenna_names[0], uv_object.antenna_names[1])])
    assert str(cm.value).startswith("bls must be a list of tuples of antenna numbers")
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=(5, 1))
    assert str(cm.value).startswith(
        "Antenna number 5 is not present in the " "ant_1_array or ant_2_array"
    )
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=(0, 5))
    assert str(cm.value).startswith(
        "Antenna number 5 is not present in the " "ant_1_array or ant_2_array"
    )
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=(27, 27))
    assert str(cm.value).startswith("Antenna pair (27, 27) does not have any data")
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=(6, 0, "RR"), polarizations="RR")
    assert str(cm.value).startswith(
        "Cannot provide length-3 tuples and also " "specify polarizations."
    )
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=(6, 0, 8))
    assert str(cm.value).startswith(
        "The third element in each bl must be a " "polarization string"
    )
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=[])
    assert str(cm.value).startswith("bls must be a list of tuples of antenna numbers")
    with pytest.raises(ValueError) as cm:
        uv_object.select(bls=[100])
    assert str(cm.value).startswith("Baseline number 100 is not present in the")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_times(casa_uvfits):
    """Test select on the time axis with a list of discrete times."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    unique_times = np.unique(uv_object.time_array)
    times_to_keep = unique_times[[0, 3, 5, 6, 7, 10, 14]]
    # independently count the baseline-times that should survive
    Nblts_selected = np.sum([t in times_to_keep for t in uv_object.time_array])
    uv_object2 = uv_object.copy()
    uv_object2.select(times=times_to_keep)
    assert len(times_to_keep) == uv_object2.Ntimes
    assert Nblts_selected == uv_object2.Nblts
    for t in times_to_keep:
        assert t in uv_object2.time_array
    for t in np.unique(uv_object2.time_array):
        assert t in times_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific times using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(times=times_to_keep[np.newaxis, :])
    assert len(times_to_keep) == uv_object2.Ntimes
    assert Nblts_selected == uv_object2.Nblts
    for t in times_to_keep:
        assert t in uv_object2.time_array
    for t in np.unique(uv_object2.time_array):
        assert t in times_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific times using pyuvdata.",
        uv_object2.history,
    )
    # check for errors associated with times not included in data
    pytest.raises(
        ValueError,
        uv_object.select,
        times=[np.min(unique_times) - uv_object.integration_time[0]],
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_time_range(casa_uvfits):
    """Test select with a [start, stop] time_range."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    unique_times = np.unique(uv_object.time_array)
    mean_time = np.mean(unique_times)
    time_range = [np.min(unique_times), mean_time]
    # times expected to survive; the comparisons below treat the range as
    # inclusive on both ends
    times_to_keep = unique_times[
        np.nonzero((unique_times <= time_range[1]) & (unique_times >= time_range[0]))
    ]
    Nblts_selected = np.nonzero(
        (uv_object.time_array <= time_range[1])
        & (uv_object.time_array >= time_range[0])
    )[0].size
    uv_object2 = uv_object.copy()
    uv_object2.select(time_range=time_range)
    assert times_to_keep.size == uv_object2.Ntimes
    assert Nblts_selected == uv_object2.Nblts
    for t in times_to_keep:
        assert t in uv_object2.time_array
    for t in np.unique(uv_object2.time_array):
        assert t in times_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific times using pyuvdata.",
        uv_object2.history,
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_time_range_no_data(casa_uvfits):
    """Check for error associated with times not included in data."""
    uv_object = casa_uvfits
    unique_times = np.unique(uv_object.time_array)
    # build a range that ends strictly before the earliest time in the data
    earliest = np.min(unique_times)
    int_time = uv_object.integration_time[0]
    bad_range = [earliest - int_time * 2, earliest - int_time]
    with pytest.raises(ValueError) as cm:
        uv_object.select(time_range=bad_range)
    assert str(cm.value).startswith("No elements in time range")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_time_and_time_range(casa_uvfits):
    """Check for error setting times and time_range."""
    uv_object = casa_uvfits
    unique_times = np.unique(uv_object.time_array)
    time_range = [np.min(unique_times), np.mean(unique_times)]
    times_to_keep = unique_times[[0, 3, 5, 6, 7, 10, 14]]
    # supplying both selection modes at once must raise
    with pytest.raises(ValueError) as cm:
        uv_object.select(time_range=time_range, times=times_to_keep)
    assert str(cm.value).startswith('Only one of "times" and "time_range" can be set')
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_time_range_one_elem(casa_uvfits):
    """Check for error if time_range not length 2."""
    uv_object = casa_uvfits
    unique_times = np.unique(uv_object.time_array)
    # pass a scalar instead of a [start, stop] pair
    with pytest.raises(ValueError) as cm:
        uv_object.select(time_range=np.min(unique_times))
    assert str(cm.value).startswith("time_range must be length 2")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_frequencies_uvfits(casa_uvfits, tmp_path):
    """Test select on the frequency axis and uvfits writing of the results."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    freqs_to_keep = uv_object.freq_array[0, np.arange(12, 22)]
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep)
    assert len(freqs_to_keep) == uv_object2.Nfreqs
    for f in freqs_to_keep:
        assert f in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in freqs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep[np.newaxis, :])
    assert len(freqs_to_keep) == uv_object2.Nfreqs
    for f in freqs_to_keep:
        assert f in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in freqs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check that selecting one frequency works
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep[0])
    assert 1 == uv_object2.Nfreqs
    assert freqs_to_keep[0] in uv_object2.freq_array
    for f in uv_object2.freq_array:
        assert f in [freqs_to_keep[0]]
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check for errors associated with frequencies not included in data
    pytest.raises(
        ValueError,
        uv_object.select,
        frequencies=[np.max(uv_object.freq_array) + uv_object.channel_width],
    )
    # check for warnings and errors associated with unevenly spaced or
    # non-contiguous frequencies
    uv_object2 = uv_object.copy()
    uvtest.checkWarnings(
        uv_object2.select,
        [],
        {"frequencies": uv_object2.freq_array[0, [0, 5, 6]]},
        message=[
            "Selected frequencies are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=2,
    )
    # unevenly spaced frequencies cannot be written to uvfits
    write_file_uvfits = str(tmp_path / "select_test.uvfits")
    pytest.raises(ValueError, uv_object2.write_uvfits, write_file_uvfits)
    uv_object2 = uv_object.copy()
    uvtest.checkWarnings(
        uv_object2.select,
        [],
        {"frequencies": uv_object2.freq_array[0, [0, 2, 4]]},
        message=[
            "Selected frequencies are not contiguous",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=2,
    )
    pytest.raises(ValueError, uv_object2.write_uvfits, write_file_uvfits)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_frequencies_miriad(casa_uvfits, tmp_path):
    """Test select on the frequency axis and miriad writing of the results."""
    pytest.importorskip("pyuvdata._miriad")
    uv_object = casa_uvfits
    old_history = uv_object.history
    freqs_to_keep = uv_object.freq_array[0, np.arange(12, 22)]
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep)
    assert len(freqs_to_keep) == uv_object2.Nfreqs
    for f in freqs_to_keep:
        assert f in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in freqs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep[np.newaxis, :])
    assert len(freqs_to_keep) == uv_object2.Nfreqs
    for f in freqs_to_keep:
        assert f in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in freqs_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check that selecting one frequency works
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep[0])
    assert 1 == uv_object2.Nfreqs
    assert freqs_to_keep[0] in uv_object2.freq_array
    for f in uv_object2.freq_array:
        assert f in [freqs_to_keep[0]]
    assert uvutils._check_histories(
        old_history + " Downselected to specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check for errors associated with frequencies not included in data
    pytest.raises(
        ValueError,
        uv_object.select,
        frequencies=[np.max(uv_object.freq_array) + uv_object.channel_width],
    )
    # check for warnings and errors associated with unevenly spaced or
    # non-contiguous frequencies
    uv_object2 = uv_object.copy()
    uvtest.checkWarnings(
        uv_object2.select,
        [],
        {"frequencies": uv_object2.freq_array[0, [0, 5, 6]]},
        message=[
            "Selected frequencies are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=2,
    )
    # unevenly spaced frequencies cannot be written to miriad
    write_file_miriad = str(tmp_path / "select_test.uvfits")
    pytest.raises(ValueError, uv_object2.write_miriad, write_file_miriad)
    uv_object2 = uv_object.copy()
    uvtest.checkWarnings(
        uv_object2.select,
        [],
        {"frequencies": uv_object2.freq_array[0, [0, 2, 4]]},
        message=[
            "Selected frequencies are not contiguous",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=2,
    )
    pytest.raises(ValueError, uv_object2.write_miriad, write_file_miriad)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_freq_chans(casa_uvfits):
    """Test select with freq_chans, alone and combined with frequencies."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    chans_to_keep = np.arange(12, 22)
    uv_object2 = uv_object.copy()
    uv_object2.select(freq_chans=chans_to_keep)
    assert len(chans_to_keep) == uv_object2.Nfreqs
    for chan in chans_to_keep:
        assert uv_object.freq_array[0, chan] in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in uv_object.freq_array[0, chans_to_keep]
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(freq_chans=chans_to_keep[np.newaxis, :])
    assert len(chans_to_keep) == uv_object2.Nfreqs
    for chan in chans_to_keep:
        assert uv_object.freq_array[0, chan] in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in uv_object.freq_array[0, chans_to_keep]
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific frequencies using pyuvdata.",
        uv_object2.history,
    )
    # Test selecting both channels and frequencies
    # the union of both criteria should be kept
    freqs_to_keep = uv_object.freq_array[0, np.arange(20, 30)]  # Overlaps with chans
    all_chans_to_keep = np.arange(12, 30)
    uv_object2 = uv_object.copy()
    uv_object2.select(frequencies=freqs_to_keep, freq_chans=chans_to_keep)
    assert len(all_chans_to_keep) == uv_object2.Nfreqs
    for chan in all_chans_to_keep:
        assert uv_object.freq_array[0, chan] in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in uv_object.freq_array[0, all_chans_to_keep]
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_polarizations(casa_uvfits, tmp_path):
    """Test select on the polarization axis, including warning/error paths."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    pols_to_keep = [-1, -2]
    uv_object2 = uv_object.copy()
    uv_object2.select(polarizations=pols_to_keep)
    assert len(pols_to_keep) == uv_object2.Npols
    for p in pols_to_keep:
        assert p in uv_object2.polarization_array
    for p in np.unique(uv_object2.polarization_array):
        assert p in pols_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific polarizations using pyuvdata.",
        uv_object2.history,
    )
    # check that it also works with higher dimension array
    uv_object2 = uv_object.copy()
    uv_object2.select(polarizations=[pols_to_keep])
    assert len(pols_to_keep) == uv_object2.Npols
    for p in pols_to_keep:
        assert p in uv_object2.polarization_array
    for p in np.unique(uv_object2.polarization_array):
        assert p in pols_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to " "specific polarizations using pyuvdata.",
        uv_object2.history,
    )
    # check for errors associated with polarizations not included in data
    pytest.raises(ValueError, uv_object2.select, polarizations=[-3, -4])
    # check for warnings and errors associated with unevenly spaced polarizations
    uvtest.checkWarnings(
        uv_object.select,
        [],
        {"polarizations": uv_object.polarization_array[[0, 1, 3]]},
        message=[
            "Selected polarization values are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=2,
    )
    # unevenly spaced polarizations cannot be written to uvfits
    write_file_uvfits = str(tmp_path / "select_test.uvfits")
    pytest.raises(ValueError, uv_object.write_uvfits, write_file_uvfits)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select(casa_uvfits):
    """Test select along all axes at once."""
    # now test selecting along all axes at once
    uv_object = casa_uvfits
    old_history = uv_object.history
    # fmt: off
    blt_inds = np.array([1057, 461, 1090, 354, 528, 654, 882, 775, 369, 906, 748,
                         875, 296, 773, 554, 395, 1003, 476, 762, 976, 1285, 874,
                         717, 383, 1281, 924, 264, 1163, 297, 857, 1258, 1000, 180,
                         1303, 1139, 393, 42, 135, 789, 713, 527, 1218, 576, 100,
                         1311, 4, 653, 724, 591, 889, 36, 1033, 113, 479, 322,
                         118, 898, 1263, 477, 96, 935, 238, 195, 531, 124, 198,
                         992, 1131, 305, 154, 961, 6, 1175, 76, 663, 82, 637,
                         288, 1152, 845, 1290, 379, 1225, 1240, 733, 1172, 937, 1325,
                         817, 416, 261, 1316, 957, 723, 215, 237, 270, 1309, 208,
                         17, 1028, 895, 574, 166, 784, 834, 732, 1022, 1068, 1207,
                         356, 474, 313, 137, 172, 181, 925, 201, 190, 1277, 1044,
                         1242, 702, 567, 557, 1032, 1352, 504, 545, 422, 179, 780,
                         280, 890, 774, 884])
    # fmt: on
    ants_to_keep = np.array([11, 6, 20, 26, 2, 27, 7, 14])
    ant_pairs_to_keep = [(2, 11), (20, 26), (6, 7), (3, 27), (14, 6)]
    sorted_pairs_to_keep = [sort_bl(p) for p in ant_pairs_to_keep]
    freqs_to_keep = uv_object.freq_array[0, np.arange(31, 39)]
    unique_times = np.unique(uv_object.time_array)
    times_to_keep = unique_times[[0, 2, 6, 8, 10, 13, 14]]
    pols_to_keep = [-1, -3]
    # Independently count blts that should be selected
    blts_blt_select = [i in blt_inds for i in np.arange(uv_object.Nblts)]
    blts_ant_select = [
        (a1 in ants_to_keep) & (a2 in ants_to_keep)
        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)
    ]
    blts_pair_select = [
        sort_bl((a1, a2)) in sorted_pairs_to_keep
        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)
    ]
    blts_time_select = [t in times_to_keep for t in uv_object.time_array]
    # all the baseline-time criteria are intersected by select
    Nblts_select = np.sum(
        [
            bi & (ai & pi) & ti
            for (bi, ai, pi, ti) in zip(
                blts_blt_select, blts_ant_select, blts_pair_select, blts_time_select
            )
        ]
    )
    uv_object2 = uv_object.copy()
    uv_object2.select(
        blt_inds=blt_inds,
        antenna_nums=ants_to_keep,
        bls=ant_pairs_to_keep,
        frequencies=freqs_to_keep,
        times=times_to_keep,
        polarizations=pols_to_keep,
    )
    assert Nblts_select == uv_object2.Nblts
    for ant in np.unique(
        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()
    ):
        assert ant in ants_to_keep
    assert len(freqs_to_keep) == uv_object2.Nfreqs
    for f in freqs_to_keep:
        assert f in uv_object2.freq_array
    for f in np.unique(uv_object2.freq_array):
        assert f in freqs_to_keep
    for t in np.unique(uv_object2.time_array):
        assert t in times_to_keep
    assert len(pols_to_keep) == uv_object2.Npols
    for p in pols_to_keep:
        assert p in uv_object2.polarization_array
    for p in np.unique(uv_object2.polarization_array):
        assert p in pols_to_keep
    assert uvutils._check_histories(
        old_history + " Downselected to "
        "specific baseline-times, antennas, "
        "baselines, times, frequencies, "
        "polarizations using pyuvdata.",
        uv_object2.history,
    )
    # test that a ValueError is raised if the selection eliminates all blts
    pytest.raises(ValueError, uv_object.select, times=unique_times[0], antenna_nums=1)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_not_inplace(casa_uvfits):
    """Split the band with inplace=False selects, recombine, and verify the
    round trip reproduces the original object."""
    uv_object = casa_uvfits
    old_history = uv_object.history
    # two non-overlapping halves of the band; the original stays untouched
    lower_half = uv_object.select(freq_chans=np.arange(32), inplace=False)
    upper_half = uv_object.select(freq_chans=np.arange(32, 64), inplace=False)
    uv1 = lower_half
    uv1 += upper_half
    assert uvutils._check_histories(
        old_history
        + " Downselected to specific frequencies using pyuvdata."
        + " Combined data along frequency axis using pyuvdata.",
        uv1.history,
    )
    uv1.history = old_history
    assert uv1 == uv_object
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("metadata_only", [True, False])
def test_conjugate_bls(casa_uvfits, metadata_only):
    """Test conjugate_bls: named conventions, explicit blt-index arrays,
    u/v half-plane conventions (with and without ENU), and error paths.
    Runs on both a full object and a metadata-only object.
    """
    testfile = os.path.join(DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits")
    if not metadata_only:
        uv1 = casa_uvfits
    else:
        uv1 = UVData()
        uv1.read_uvfits(testfile, read_data=False)
    if metadata_only:
        assert uv1.metadata_only
    # file comes in with ant1<ant2
    assert np.min(uv1.ant_2_array - uv1.ant_1_array) >= 0
    # check everything swapped & conjugated when go to ant2<ant1
    uv2 = uv1.copy()
    uv2.conjugate_bls(convention="ant2<ant1")
    assert np.min(uv2.ant_1_array - uv2.ant_2_array) >= 0
    assert np.allclose(uv1.ant_1_array, uv2.ant_2_array)
    assert np.allclose(uv1.ant_2_array, uv2.ant_1_array)
    assert np.allclose(
        uv1.uvw_array,
        -1 * uv2.uvw_array,
        rtol=uv1._uvw_array.tols[0],
        atol=uv1._uvw_array.tols[1],
    )
    if not metadata_only:
        # complicated because of the polarization swaps
        # polarization_array = [-1 -2 -3 -4]
        assert np.allclose(
            uv1.data_array[:, :, :, :2],
            np.conj(uv2.data_array[:, :, :, :2]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[:, :, :, 2],
            np.conj(uv2.data_array[:, :, :, 3]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[:, :, :, 3],
            np.conj(uv2.data_array[:, :, :, 2]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
    # check everything returned to original values with original convention
    uv2.conjugate_bls(convention="ant1<ant2")
    assert uv1 == uv2
    # conjugate a particular set of blts
    blts_to_conjugate = np.arange(uv2.Nblts // 2)
    blts_not_conjugated = np.arange(uv2.Nblts // 2, uv2.Nblts)
    uv2.conjugate_bls(convention=blts_to_conjugate)
    assert np.allclose(
        uv1.ant_1_array[blts_to_conjugate], uv2.ant_2_array[blts_to_conjugate]
    )
    assert np.allclose(
        uv1.ant_2_array[blts_to_conjugate], uv2.ant_1_array[blts_to_conjugate]
    )
    assert np.allclose(
        uv1.ant_1_array[blts_not_conjugated], uv2.ant_1_array[blts_not_conjugated]
    )
    assert np.allclose(
        uv1.ant_2_array[blts_not_conjugated], uv2.ant_2_array[blts_not_conjugated]
    )
    assert np.allclose(
        uv1.uvw_array[blts_to_conjugate],
        -1 * uv2.uvw_array[blts_to_conjugate],
        rtol=uv1._uvw_array.tols[0],
        atol=uv1._uvw_array.tols[1],
    )
    assert np.allclose(
        uv1.uvw_array[blts_not_conjugated],
        uv2.uvw_array[blts_not_conjugated],
        rtol=uv1._uvw_array.tols[0],
        atol=uv1._uvw_array.tols[1],
    )
    if not metadata_only:
        # complicated because of the polarization swaps
        # polarization_array = [-1 -2 -3 -4]
        assert np.allclose(
            uv1.data_array[blts_to_conjugate, :, :, :2],
            np.conj(uv2.data_array[blts_to_conjugate, :, :, :2]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[blts_not_conjugated, :, :, :2],
            uv2.data_array[blts_not_conjugated, :, :, :2],
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[blts_to_conjugate, :, :, 2],
            np.conj(uv2.data_array[blts_to_conjugate, :, :, 3]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[blts_not_conjugated, :, :, 2],
            uv2.data_array[blts_not_conjugated, :, :, 2],
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[blts_to_conjugate, :, :, 3],
            np.conj(uv2.data_array[blts_to_conjugate, :, :, 2]),
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
        assert np.allclose(
            uv1.data_array[blts_not_conjugated, :, :, 3],
            uv2.data_array[blts_not_conjugated, :, :, 3],
            rtol=uv1._data_array.tols[0],
            atol=uv1._data_array.tols[1],
        )
    # check uv half plane conventions
    uv2.conjugate_bls(convention="u<0", use_enu=False)
    assert np.max(uv2.uvw_array[:, 0]) <= 0
    uv2.conjugate_bls(convention="u>0", use_enu=False)
    assert np.min(uv2.uvw_array[:, 0]) >= 0
    uv2.conjugate_bls(convention="v<0", use_enu=False)
    assert np.max(uv2.uvw_array[:, 1]) <= 0
    uv2.conjugate_bls(convention="v>0", use_enu=False)
    assert np.min(uv2.uvw_array[:, 1]) >= 0
    # unphase to drift to test using ENU positions
    uv2.unphase_to_drift(use_ant_pos=True)
    uv2.conjugate_bls(convention="u<0")
    assert np.max(uv2.uvw_array[:, 0]) <= 0
    uv2.conjugate_bls(convention="u>0")
    assert np.min(uv2.uvw_array[:, 0]) >= 0
    uv2.conjugate_bls(convention="v<0")
    assert np.max(uv2.uvw_array[:, 1]) <= 0
    uv2.conjugate_bls(convention="v>0")
    assert np.min(uv2.uvw_array[:, 1]) >= 0
    # test errors
    with pytest.raises(ValueError) as cm:
        uv2.conjugate_bls(convention="foo")
    assert str(cm.value).startswith("convention must be one of")
    with pytest.raises(ValueError) as cm:
        uv2.conjugate_bls(convention=np.arange(5) - 1)
    assert str(cm.value).startswith("If convention is an index array")
    with pytest.raises(ValueError) as cm:
        uv2.conjugate_bls(convention=[uv2.Nblts])
    assert str(cm.value).startswith("If convention is an index array")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_reorder_pols(casa_uvfits):
    """Test reorder_pols: index-array ordering against a manual reorder,
    the default restore, the named "AIPS"/"CASA" conventions, and the error
    paths for an unknown order name and a wrong-length index array.
    """
    # Test function to fix polarization order
    uv1 = casa_uvfits
    uv2 = uv1.copy()
    uv3 = uv1.copy()
    # reorder uv2 manually
    order = [1, 3, 2, 0]
    uv2.polarization_array = uv2.polarization_array[order]
    uv2.data_array = uv2.data_array[:, :, :, order]
    uv2.nsample_array = uv2.nsample_array[:, :, :, order]
    uv2.flag_array = uv2.flag_array[:, :, :, order]
    uv1.reorder_pols(order=order)
    assert uv1 == uv2
    # Restore original order
    uv1 = uv3.copy()
    uv2.reorder_pols()
    assert uv1 == uv2
    uv1.reorder_pols(order="AIPS")
    # check that we have aips ordering
    aips_pols = np.array([-1, -2, -3, -4]).astype(int)
    assert np.all(uv1.polarization_array == aips_pols)
    uv2 = uv1.copy()
    uv2.reorder_pols(order="CASA")
    # check that we have casa ordering
    casa_pols = np.array([-1, -3, -4, -2]).astype(int)
    assert np.all(uv2.polarization_array == casa_pols)
    order = np.array([0, 2, 3, 1])
    assert np.all(uv2.data_array == uv1.data_array[:, :, :, order])
    assert np.all(uv2.flag_array == uv1.flag_array[:, :, :, order])
    uv2.reorder_pols(order="AIPS")
    # check that we have aips ordering again
    assert uv1 == uv2
    # check error on unknown order
    # pass the bad order as a keyword: the old call passed {"order": "foo"}
    # as a *positional* dict, which hit the wrong-type check instead of the
    # unknown-order-name check this is meant to exercise
    pytest.raises(ValueError, uv2.reorder_pols, order="foo")
    # check error if order is an array of the wrong length
    with pytest.raises(ValueError) as cm:
        uv2.reorder_pols(order=[3, 2, 1])
    assert str(cm.value).startswith("If order is an index array, it must")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_reorder_blts(casa_uvfits):
    """Test reorder_blts: the default (time, baseline) ordering in detail,
    all major/minor order combinations, explicit index-array sorting,
    conjugation combined with reorder, and the error paths.
    """
    uv1 = casa_uvfits
    # test default reordering in detail
    uv2 = uv1.copy()
    uv2.reorder_blts()
    assert uv2.blt_order == ("time", "baseline")
    assert np.min(np.diff(uv2.time_array)) >= 0
    for this_time in np.unique(uv2.time_array):
        # within each time, baselines must be sorted and the data, uvws
        # must have moved together with the baseline numbers
        bls_2 = uv2.baseline_array[np.where(uv2.time_array == this_time)]
        bls_1 = uv1.baseline_array[np.where(uv2.time_array == this_time)]
        assert bls_1.shape == bls_2.shape
        assert np.min(np.diff(bls_2)) >= 0
        bl_inds = [np.where(bls_1 == bl)[0][0] for bl in bls_2]
        assert np.allclose(bls_1[bl_inds], bls_2)
        uvw_1 = uv1.uvw_array[np.where(uv2.time_array == this_time)[0], :]
        uvw_2 = uv2.uvw_array[np.where(uv2.time_array == this_time)[0], :]
        assert uvw_1.shape == uvw_2.shape
        assert np.allclose(uvw_1[bl_inds, :], uvw_2)
        data_1 = uv1.data_array[np.where(uv2.time_array == this_time)[0], :, :, :]
        data_2 = uv2.data_array[np.where(uv2.time_array == this_time)[0], :, :, :]
        assert data_1.shape == data_2.shape
        assert np.allclose(data_1[bl_inds, :, :, :], data_2)
    # check that ordering by time, ant1 is identical to time, baseline
    uv3 = uv1.copy()
    uv3.reorder_blts(order="time", minor_order="ant1")
    assert uv3.blt_order == ("time", "ant1")
    assert np.min(np.diff(uv3.time_array)) >= 0
    uv3.blt_order = uv2.blt_order
    assert uv2 == uv3
    uv3.reorder_blts(order="time", minor_order="ant2")
    assert uv3.blt_order == ("time", "ant2")
    assert np.min(np.diff(uv3.time_array)) >= 0
    # check that loopback works
    uv3.reorder_blts()
    assert uv2 == uv3
    # sort with a specified index array
    new_order = np.lexsort((uv3.baseline_array, uv3.time_array))
    uv3.reorder_blts(order=new_order)
    assert uv3.blt_order is None
    assert np.min(np.diff(uv3.time_array)) >= 0
    uv3.blt_order = ("time", "baseline")
    assert uv2 == uv3
    # test sensible defaulting if minor order = major order
    uv3.reorder_blts(order="time", minor_order="time")
    assert uv2 == uv3
    # test all combinations of major, minor order
    uv3.reorder_blts(order="baseline")
    assert uv3.blt_order == ("baseline", "time")
    assert np.min(np.diff(uv3.baseline_array)) >= 0
    uv3.reorder_blts(order="ant1")
    assert uv3.blt_order == ("ant1", "ant2")
    assert np.min(np.diff(uv3.ant_1_array)) >= 0
    uv3.reorder_blts(order="ant1", minor_order="time")
    assert uv3.blt_order == ("ant1", "time")
    assert np.min(np.diff(uv3.ant_1_array)) >= 0
    uv3.reorder_blts(order="ant1", minor_order="baseline")
    assert uv3.blt_order == ("ant1", "baseline")
    assert np.min(np.diff(uv3.ant_1_array)) >= 0
    uv3.reorder_blts(order="ant2")
    assert uv3.blt_order == ("ant2", "ant1")
    assert np.min(np.diff(uv3.ant_2_array)) >= 0
    uv3.reorder_blts(order="ant2", minor_order="time")
    assert uv3.blt_order == ("ant2", "time")
    assert np.min(np.diff(uv3.ant_2_array)) >= 0
    uv3.reorder_blts(order="ant2", minor_order="baseline")
    assert uv3.blt_order == ("ant2", "baseline")
    assert np.min(np.diff(uv3.ant_2_array)) >= 0
    uv3.reorder_blts(order="bda")
    assert uv3.blt_order == ("bda",)
    assert np.min(np.diff(uv3.integration_time)) >= 0
    assert np.min(np.diff(uv3.baseline_array)) >= 0
    # test doing conjugation along with a reorder
    # the file is already conjugated this way, so should be equal
    uv3.reorder_blts(order="time", conj_convention="ant1<ant2")
    assert uv2 == uv3
    # test errors
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order="foo")
    assert str(cm.value).startswith("order must be one of")
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order=np.arange(5))
    assert str(cm.value).startswith("If order is an index array, it must")
    with pytest.raises(ValueError) as cm:
        # use the builtin float: np.float was a deprecated alias removed in
        # numpy 1.24; this is testing the non-integer index-array dtype error
        uv3.reorder_blts(order=np.arange(5, dtype=float))
    assert str(cm.value).startswith("If order is an index array, it must")
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order=np.arange(uv3.Nblts), minor_order="time")
    assert str(cm.value).startswith(
        "Minor order cannot be set if order is an index array"
    )
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order="bda", minor_order="time")
    assert str(cm.value).startswith("minor_order cannot be specified if order is")
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order="baseline", minor_order="ant1")
    assert str(cm.value).startswith("minor_order conflicts with order")
    with pytest.raises(ValueError) as cm:
        uv3.reorder_blts(order="time", minor_order="foo")
    assert str(cm.value).startswith("minor_order can only be one of")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_sum_vis(casa_uvfits):
    """Check sum_vis/diff_vis data, histories, in-place use, and error paths."""
    uv_orig = casa_uvfits
    uv_scaled = uv_orig.copy()
    uv_scaled.data_array = uv_orig.data_array / 2

    # summing two half-amplitude copies recovers the original visibilities
    summed = uv_scaled.sum_vis(uv_scaled)
    assert np.array_equal(summed.data_array, uv_orig.data_array)
    assert uvutils._check_histories(
        uv_scaled.history + " Visibilities summed using pyuvdata.", summed.history
    )

    # differencing the full object with the half object leaves the half data
    diffed = uv_orig.diff_vis(uv_scaled)
    assert np.array_equal(diffed.data_array, uv_scaled.data_array)
    assert uvutils._check_histories(
        uv_orig.history + " Visibilities differenced using pyuvdata.",
        diffed.history,
    )

    # in-place differencing modifies the calling object
    summed.diff_vis(uv_scaled, inplace=True)
    assert np.array_equal(summed.data_array, uv_scaled.data_array)

    # a non-UVData operand is rejected
    with pytest.raises(ValueError) as cm:
        uv_orig.sum_vis("foo")
    assert str(cm.value).startswith("Only UVData (or subclass) objects can be")

    # mismatched UVParameters are rejected
    uv_orig.instrument = "foo"
    with pytest.raises(ValueError) as cm:
        uv_orig.sum_vis(uv_scaled, inplace=True)
    assert str(cm.value).startswith("UVParameter instrument does not match")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_add(casa_uvfits):
    """Test UVData.__add__/__iadd__ along the frequency, polarization and
    baseline-time axes, including out-of-order combines, multi-axis combines,
    non-inplace addition, warning checks, history merging, and combining
    autocorr-only with crosscorr-only objects.
    """
    uv_full = casa_uvfits
    # Add frequencies
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=np.arange(0, 32))
    uv2.select(freq_chans=np.arange(32, 64))
    uv1 += uv2
    # Check history is correct, before replacing and doing a full object check
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific frequencies using pyuvdata. "
        "Combined data along frequency axis "
        "using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add frequencies - out of order
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=np.arange(0, 32))
    uv2.select(freq_chans=np.arange(32, 64))
    uv2 += uv1
    uv2.history = uv_full.history
    assert uv2 == uv_full
    # Add polarizations
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[2:4])
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific polarizations using pyuvdata. "
        "Combined data along polarization axis "
        "using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add polarizations - out of order
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[2:4])
    uv2 += uv1
    uv2.history = uv_full.history
    assert uv2 == uv_full
    # Add times
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(times=times[0 : len(times) // 2])
    uv2.select(times=times[len(times) // 2 :])
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific times using pyuvdata. "
        "Combined data along baseline-time axis "
        "using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add baselines
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    ant_list = list(range(15))  # Roughly half the antennas in the data
    # All blts where ant_1 is in list
    ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ant_list]
    ind2 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] not in ant_list]
    uv1.select(blt_inds=ind1)
    uv2.select(blt_inds=ind2)
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific baseline-times using pyuvdata. "
        "Combined data along baseline-time axis "
        "using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add baselines - out of order
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv3 = uv_full.copy()
    ants = uv_full.get_ants()
    ants1 = ants[0:6]
    ants2 = ants[6:12]
    ants3 = ants[12:]
    # All blts where ant_1 is in list
    ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ants1]
    ind2 = [i for i in range(uv2.Nblts) if uv2.ant_1_array[i] in ants2]
    ind3 = [i for i in range(uv3.Nblts) if uv3.ant_1_array[i] in ants3]
    uv1.select(blt_inds=ind1)
    uv2.select(blt_inds=ind2)
    uv3.select(blt_inds=ind3)
    # reverse all blt-axis arrays of uv3 so the combine must handle
    # out-of-order baseline-times
    uv3.data_array = uv3.data_array[-1::-1, :, :, :]
    uv3.nsample_array = uv3.nsample_array[-1::-1, :, :, :]
    uv3.flag_array = uv3.flag_array[-1::-1, :, :, :]
    uv3.uvw_array = uv3.uvw_array[-1::-1, :]
    uv3.time_array = uv3.time_array[-1::-1]
    uv3.lst_array = uv3.lst_array[-1::-1]
    uv3.ant_1_array = uv3.ant_1_array[-1::-1]
    uv3.ant_2_array = uv3.ant_2_array[-1::-1]
    uv3.baseline_array = uv3.baseline_array[-1::-1]
    uv1 += uv3
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific baseline-times using pyuvdata. "
        "Combined data along baseline-time axis "
        "using pyuvdata. Combined data along "
        "baseline-time axis using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add multiple axes
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv_ref = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(
        times=times[0 : len(times) // 2], polarizations=uv1.polarization_array[0:2]
    )
    uv2.select(
        times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4]
    )
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific times, polarizations using "
        "pyuvdata. Combined data along "
        "baseline-time, polarization axis "
        "using pyuvdata.",
        uv1.history,
    )
    blt_ind1 = np.array(
        [
            ind
            for ind in range(uv_full.Nblts)
            if uv_full.time_array[ind] in times[0 : len(times) // 2]
        ]
    )
    blt_ind2 = np.array(
        [
            ind
            for ind in range(uv_full.Nblts)
            if uv_full.time_array[ind] in times[len(times) // 2 :]
        ]
    )
    # Zero out missing data in reference object
    uv_ref.data_array[blt_ind1, :, :, 2:] = 0.0
    uv_ref.nsample_array[blt_ind1, :, :, 2:] = 0.0
    uv_ref.flag_array[blt_ind1, :, :, 2:] = True
    uv_ref.data_array[blt_ind2, :, :, 0:2] = 0.0
    uv_ref.nsample_array[blt_ind2, :, :, 0:2] = 0.0
    uv_ref.flag_array[blt_ind2, :, :, 0:2] = True
    uv1.history = uv_full.history
    assert uv1 == uv_ref
    # Another combo
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv_ref = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32))
    uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64))
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific times, frequencies using "
        "pyuvdata. Combined data along "
        "baseline-time, frequency axis using "
        "pyuvdata.",
        uv1.history,
    )
    blt_ind1 = np.array(
        [
            ind
            for ind in range(uv_full.Nblts)
            if uv_full.time_array[ind] in times[0 : len(times) // 2]
        ]
    )
    blt_ind2 = np.array(
        [
            ind
            for ind in range(uv_full.Nblts)
            if uv_full.time_array[ind] in times[len(times) // 2 :]
        ]
    )
    # Zero out missing data in reference object
    uv_ref.data_array[blt_ind1, :, 32:, :] = 0.0
    uv_ref.nsample_array[blt_ind1, :, 32:, :] = 0.0
    uv_ref.flag_array[blt_ind1, :, 32:, :] = True
    uv_ref.data_array[blt_ind2, :, 0:32, :] = 0.0
    uv_ref.nsample_array[blt_ind2, :, 0:32, :] = 0.0
    uv_ref.flag_array[blt_ind2, :, 0:32, :] = True
    uv1.history = uv_full.history
    assert uv1 == uv_ref
    # Add without inplace
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(times=times[0 : len(times) // 2])
    uv2.select(times=times[len(times) // 2 :])
    uv1 = uv1 + uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific times using pyuvdata. "
        "Combined data along baseline-time "
        "axis using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Check warnings
    # non-adjacent channel selections should warn about uneven spacing
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=np.arange(0, 32))
    uv2.select(freq_chans=np.arange(33, 64))
    uvtest.checkWarnings(
        uv1.__add__,
        func_args=[uv2],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined frequencies are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=[0])
    uv2.select(freq_chans=[3])
    uvtest.checkWarnings(
        uv1.__iadd__,
        func_args=[uv2],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined frequencies are not contiguous",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    # a sub-tolerance frequency offset should not trigger a spacing warning
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=[0])
    uv2.select(freq_chans=[1])
    uv2.freq_array += uv2._channel_width.tols[1] / 2.0
    uvtest.checkWarnings(
        uv1.__iadd__,
        func_args=[uv2],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=3,
    )
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[3])
    uvtest.checkWarnings(
        uv1.__iadd__,
        func_args=[uv2],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined polarizations are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    # Combining histories
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[2:4])
    uv2.history += " testing the history. AIPS WTSCAL = 1.0"
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific polarizations using pyuvdata. "
        "Combined data along polarization "
        "axis using pyuvdata. testing the history.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # test add of autocorr-only and crosscorr-only objects
    uv_full = UVData()
    uv_full.read_uvh5(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5"))
    bls = uv_full.get_antpairs()
    autos = [bl for bl in bls if bl[0] == bl[1]]
    cross = sorted(set(bls) - set(autos))
    uv_auto = uv_full.select(bls=autos, inplace=False)
    uv_cross = uv_full.select(bls=cross, inplace=False)
    uv1 = uv_auto + uv_cross
    assert uv1.Nbls == uv_auto.Nbls + uv_cross.Nbls
    uv2 = uv_cross + uv_auto
    assert uv2.Nbls == uv_auto.Nbls + uv_cross.Nbls
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_add_drift(casa_uvfits):
    """Test UVData.__add__/__iadd__ on drift-mode (unphased) data along the
    frequency, polarization and baseline-time axes, including multi-axis
    combines, non-inplace addition, warning checks and history merging.
    """
    uv_full = casa_uvfits
    # unphase so the combine logic runs on a drift-mode object
    uv_full.unphase_to_drift()
    # Add frequencies
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=np.arange(0, 32))
    uv2.select(freq_chans=np.arange(32, 64))
    uv1 += uv2
    # Check history is correct, before replacing and doing a full object check
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific frequencies using pyuvdata. "
        "Combined data along frequency "
        "axis using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add polarizations
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[2:4])
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific polarizations using pyuvdata. "
        "Combined data along polarization "
        "axis using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add times
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(times=times[0 : len(times) // 2])
    uv2.select(times=times[len(times) // 2 :])
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific times using pyuvdata. "
        "Combined data along baseline-time "
        "axis using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add baselines
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    ant_list = list(range(15))  # Roughly half the antennas in the data
    # All blts where ant_1 is in list
    ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ant_list]
    ind2 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] not in ant_list]
    uv1.select(blt_inds=ind1)
    uv2.select(blt_inds=ind2)
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific baseline-times using pyuvdata. "
        "Combined data along baseline-time "
        "axis using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add multiple axes
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv_ref = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(
        times=times[0 : len(times) // 2], polarizations=uv1.polarization_array[0:2]
    )
    uv2.select(
        times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4]
    )
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific times, polarizations using "
        "pyuvdata. Combined data along "
        "baseline-time, polarization "
        "axis using pyuvdata.",
        uv1.history,
    )
    blt_ind1 = np.array(
        [
            ind
            for ind in range(uv_full.Nblts)
            if uv_full.time_array[ind] in times[0 : len(times) // 2]
        ]
    )
    blt_ind2 = np.array(
        [
            ind
            for ind in range(uv_full.Nblts)
            if uv_full.time_array[ind] in times[len(times) // 2 :]
        ]
    )
    # Zero out missing data in reference object
    uv_ref.data_array[blt_ind1, :, :, 2:] = 0.0
    uv_ref.nsample_array[blt_ind1, :, :, 2:] = 0.0
    uv_ref.flag_array[blt_ind1, :, :, 2:] = True
    uv_ref.data_array[blt_ind2, :, :, 0:2] = 0.0
    uv_ref.nsample_array[blt_ind2, :, :, 0:2] = 0.0
    uv_ref.flag_array[blt_ind2, :, :, 0:2] = True
    uv1.history = uv_full.history
    assert uv1 == uv_ref
    # Another combo
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv_ref = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32))
    uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64))
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific times, frequencies using "
        "pyuvdata. Combined data along "
        "baseline-time, frequency "
        "axis using pyuvdata.",
        uv1.history,
    )
    blt_ind1 = np.array(
        [
            ind
            for ind in range(uv_full.Nblts)
            if uv_full.time_array[ind] in times[0 : len(times) // 2]
        ]
    )
    blt_ind2 = np.array(
        [
            ind
            for ind in range(uv_full.Nblts)
            if uv_full.time_array[ind] in times[len(times) // 2 :]
        ]
    )
    # Zero out missing data in reference object
    uv_ref.data_array[blt_ind1, :, 32:, :] = 0.0
    uv_ref.nsample_array[blt_ind1, :, 32:, :] = 0.0
    uv_ref.flag_array[blt_ind1, :, 32:, :] = True
    uv_ref.data_array[blt_ind2, :, 0:32, :] = 0.0
    uv_ref.nsample_array[blt_ind2, :, 0:32, :] = 0.0
    uv_ref.flag_array[blt_ind2, :, 0:32, :] = True
    uv1.history = uv_full.history
    assert uv1 == uv_ref
    # Add without inplace
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(times=times[0 : len(times) // 2])
    uv2.select(times=times[len(times) // 2 :])
    uv1 = uv1 + uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific times using pyuvdata. "
        "Combined data along baseline-time "
        "axis using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Check warnings
    # non-adjacent channel selections should warn about uneven spacing
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=np.arange(0, 32))
    uv2.select(freq_chans=np.arange(33, 64))
    uvtest.checkWarnings(
        uv1.__add__,
        func_args=[uv2],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined frequencies are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=[0])
    uv2.select(freq_chans=[3])
    uvtest.checkWarnings(
        uv1.__iadd__,
        func_args=[uv2],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined frequencies are not contiguous",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[3])
    uvtest.checkWarnings(
        uv1.__iadd__,
        func_args=[uv2],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined polarizations are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    # Combining histories
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[2:4])
    uv2.history += " testing the history. AIPS WTSCAL = 1.0"
    uv1 += uv2
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific polarizations using pyuvdata. "
        "Combined data along polarization "
        "axis using pyuvdata. testing the history.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_break_add(casa_uvfits):
    """Verify __iadd__ rejects incompatible operands with a ValueError."""
    base = casa_uvfits
    uv_lo = base.copy()
    uv_lo.select(freq_chans=np.arange(0, 32))

    # operand that is not a UVData instance
    with pytest.raises(ValueError):
        uv_lo.__iadd__(np.zeros(5))

    # one object phased, the other in drift mode
    other = base.copy()
    other.unphase_to_drift()
    with pytest.raises(ValueError):
        uv_lo.__iadd__(other)

    # mismatched visibility units
    other = base.copy()
    other.select(freq_chans=np.arange(32, 64))
    other.vis_units = "Jy"
    with pytest.raises(ValueError):
        uv_lo.__iadd__(other)

    # overlapping data
    other = base.copy()
    with pytest.raises(ValueError):
        uv_lo.__iadd__(other)

    # mismatched integration_time
    other = base.copy()
    other.select(freq_chans=np.arange(32, 64))
    other.integration_time *= 2
    with pytest.raises(ValueError):
        uv_lo.__iadd__(other)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_error_drift_and_rephase(casa_uvfits, test_func, extra_kwargs):
    """Setting phase_center_radec together with unphase_to_drift must fail."""
    uv = casa_uvfits
    combine = getattr(uv, test_func)
    with pytest.raises(ValueError) as err:
        combine(uv, phase_center_radec=(0, 45), unphase_to_drift=True, **extra_kwargs)
    assert str(err.value).startswith(
        "phase_center_radec cannot be set if unphase_to_drift is True."
    )
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_this_phased_unphase_to_drift(uv_phase_time_split, test_func, extra_kwargs):
    """Phased first half + raw second half with unphase_to_drift yields drift data."""
    uv_phase_1, _, _, _, uv_raw_2, uv_raw = uv_phase_time_split
    kwargs = dict(unphase_to_drift=True, inplace=False)
    kwargs.update(extra_kwargs)
    combined = uvtest.checkWarnings(
        getattr(uv_phase_1, test_func),
        func_args=[uv_raw_2],
        func_kwargs=kwargs,
        message=["Unphasing this UVData object to drift"],
    )
    # histories differ; everything else should match the raw object
    combined.history = copy.deepcopy(uv_raw.history)
    # fast_concat does not sort, so impose a common blt ordering first
    combined.reorder_blts(order="time", minor_order="baseline")
    assert combined.phase_type == "drift"
    assert combined == uv_raw
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_other_phased_unphase_to_drift(
    uv_phase_time_split, test_func, extra_kwargs
):
    """Raw first half + phased second half with unphase_to_drift yields drift data."""
    _, uv_phase_2, _, uv_raw_1, _, uv_raw = uv_phase_time_split
    kwargs = dict(unphase_to_drift=True, inplace=False)
    kwargs.update(extra_kwargs)
    combined = uvtest.checkWarnings(
        getattr(uv_raw_1, test_func),
        func_args=[uv_phase_2],
        func_kwargs=kwargs,
        message=["Unphasing other UVData object to drift"],
    )
    # histories differ; everything else should match the raw object
    combined.history = copy.deepcopy(uv_raw.history)
    # fast_concat does not sort, so impose a common blt ordering first
    combined.reorder_blts(order="time", minor_order="baseline")
    assert combined.phase_type == "drift"
    assert combined == uv_raw
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_this_rephase_new_phase_center(
    uv_phase_time_split, test_func, extra_kwargs
):
    """Combining with phase_center_radec rephases 'this' to the requested center."""
    _, _, _, uv_raw_1, uv_raw_2, uv_raw = uv_phase_time_split
    target_radec = (Angle("0d").rad, Angle("-30d").rad)
    # phase the two halves to different centers; 'this' gets a dummy center
    uv_raw_1.phase(ra=0, dec=0, use_ant_pos=True)
    uv_raw_2.phase(ra=target_radec[0], dec=target_radec[1], use_ant_pos=True)
    # the reference object is phased to the requested center
    uv_raw.phase(ra=target_radec[0], dec=target_radec[1], use_ant_pos=True)
    kwargs = dict(inplace=False, phase_center_radec=target_radec, use_ant_pos=True)
    kwargs.update(extra_kwargs)
    combined = uvtest.checkWarnings(
        getattr(uv_raw_1, test_func),
        func_args=[uv_raw_2],
        func_kwargs=kwargs,
        message=["Phasing this UVData object to phase_center_radec"],
    )
    # histories differ; everything else should match the reference
    combined.history = copy.deepcopy(uv_raw.history)
    # fast_concat does not sort, so impose a common blt ordering first
    combined.reorder_blts(order="time", minor_order="baseline")
    assert (combined.phase_center_ra, combined.phase_center_dec) == target_radec
    assert combined == uv_raw
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_other_rephase_new_phase_center(
    uv_phase_time_split, test_func, extra_kwargs
):
    """Combining with phase_center_radec rephases 'other' to the requested center."""
    _, _, _, uv_raw_1, uv_raw_2, uv_raw = uv_phase_time_split
    target_radec = (Angle("0d").rad, Angle("-30d").rad)
    # phase the two halves to different centers; 'other' gets a dummy center
    uv_raw_1.phase(ra=target_radec[0], dec=target_radec[1], use_ant_pos=True)
    uv_raw_2.phase(ra=0, dec=0, use_ant_pos=True)
    # the reference object is phased to the requested center
    uv_raw.phase(ra=target_radec[0], dec=target_radec[1], use_ant_pos=True)
    kwargs = dict(inplace=False, phase_center_radec=target_radec, use_ant_pos=True)
    kwargs.update(extra_kwargs)
    combined = uvtest.checkWarnings(
        getattr(uv_raw_1, test_func),
        func_args=[uv_raw_2],
        func_kwargs=kwargs,
        message=["Phasing other UVData object to phase_center_radec"],
    )
    # histories differ; everything else should match the reference
    combined.history = copy.deepcopy(uv_raw.history)
    # fast_concat does not sort, so impose a common blt ordering first
    combined.reorder_blts(order="time", minor_order="baseline")
    assert combined.phase_type == "phased"
    assert (combined.phase_center_ra, combined.phase_center_dec) == target_radec
    assert combined == uv_raw
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "test_func,extra_kwargs", [("__add__", {}), ("fast_concat", {"axis": "blt"})]
)
def test_add_error_too_long_phase_center(uv_phase_time_split, test_func, extra_kwargs):
    """A 3-element phase_center_radec must raise a ValueError."""
    uv_phase_1, uv_phase_2 = uv_phase_time_split[0], uv_phase_time_split[1]
    bad_radec = (Angle("0d").rad, Angle("-30d").rad, 7)
    kwargs = dict(inplace=False, phase_center_radec=bad_radec)
    kwargs.update(extra_kwargs)
    with pytest.raises(ValueError) as err:
        getattr(uv_phase_1, test_func)(uv_phase_2, **kwargs)
    assert str(err.value).startswith("phase_center_radec should have length 2.")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_fast_concat(casa_uvfits):
    """Test fast_concat along the freq, polarization and blt axes, including
    out-of-order combines (which fast_concat does not reorder), multi-axis
    error cases, non-inplace use, warning checks, history merging, and
    combining autocorr-only with crosscorr-only objects.
    """
    uv_full = casa_uvfits
    # Add frequencies
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=np.arange(0, 32))
    uv2.select(freq_chans=np.arange(32, 64))
    uv1.fast_concat(uv2, "freq", inplace=True)
    # Check history is correct, before replacing and doing a full object check
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific frequencies using pyuvdata. "
        "Combined data along frequency axis "
        "using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add frequencies - out of order
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=np.arange(0, 32))
    uv2.select(freq_chans=np.arange(32, 64))
    uvtest.checkWarnings(
        uv2.fast_concat,
        func_args=[uv1, "freq"],
        func_kwargs={"inplace": True},
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined frequencies are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    assert uv2.Nfreqs == uv_full.Nfreqs
    assert uv2._freq_array != uv_full._freq_array
    assert uv2._data_array != uv_full._data_array
    # reorder frequencies and test that they are equal
    index_array = np.argsort(uv2.freq_array[0, :])
    uv2.freq_array = uv2.freq_array[:, index_array]
    uv2.data_array = uv2.data_array[:, :, index_array, :]
    uv2.nsample_array = uv2.nsample_array[:, :, index_array, :]
    uv2.flag_array = uv2.flag_array[:, :, index_array, :]
    uv2.history = uv_full.history
    assert uv2._freq_array == uv_full._freq_array
    assert uv2 == uv_full
    # Add polarizations
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[2:4])
    uv1.fast_concat(uv2, "polarization", inplace=True)
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific polarizations using pyuvdata. "
        "Combined data along polarization axis "
        "using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add polarizations - out of order
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[2:4])
    uvtest.checkWarnings(
        uv2.fast_concat,
        func_args=[uv1, "polarization"],
        func_kwargs={"inplace": True},
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined polarizations are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    assert uv2._polarization_array != uv_full._polarization_array
    assert uv2._data_array != uv_full._data_array
    # reorder pols
    uv2.reorder_pols()
    uv2.history = uv_full.history
    assert uv2 == uv_full
    # Add times
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(times=times[0 : len(times) // 2])
    uv2.select(times=times[len(times) // 2 :])
    uv1.fast_concat(uv2, "blt", inplace=True)
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific times using pyuvdata. "
        "Combined data along baseline-time axis "
        "using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Add baselines
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    # divide in half to keep in order
    ind1 = np.arange(uv1.Nblts // 2)
    ind2 = np.arange(uv1.Nblts // 2, uv1.Nblts)
    uv1.select(blt_inds=ind1)
    uv2.select(blt_inds=ind2)
    uv1.fast_concat(uv2, "blt", inplace=True)
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific baseline-times using pyuvdata. "
        "Combined data along baseline-time axis "
        "using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    # bugfix: this previously read `assert uv1, uv_full`, which only checked
    # the truthiness of uv1 (uv_full became the assert message) and so could
    # never fail; compare the objects like the parallel sections do.
    assert uv1 == uv_full
    # Add baselines out of order
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(blt_inds=ind1)
    uv2.select(blt_inds=ind2)
    uv2.fast_concat(uv1, "blt", inplace=True)
    # test freq & pol arrays equal
    assert uv2._freq_array == uv_full._freq_array
    assert uv2._polarization_array == uv_full._polarization_array
    # test Nblt length arrays not equal but same shape
    assert uv2._ant_1_array != uv_full._ant_1_array
    assert uv2.ant_1_array.shape == uv_full.ant_1_array.shape
    assert uv2._ant_2_array != uv_full._ant_2_array
    assert uv2.ant_2_array.shape == uv_full.ant_2_array.shape
    assert uv2._uvw_array != uv_full._uvw_array
    assert uv2.uvw_array.shape == uv_full.uvw_array.shape
    assert uv2._time_array != uv_full._time_array
    assert uv2.time_array.shape == uv_full.time_array.shape
    assert uv2._baseline_array != uv_full._baseline_array
    assert uv2.baseline_array.shape == uv_full.baseline_array.shape
    assert uv2._data_array != uv_full._data_array
    assert uv2.data_array.shape == uv_full.data_array.shape
    # reorder blts to enable comparison
    uv2.reorder_blts()
    assert uv2.blt_order == ("time", "baseline")
    uv2.blt_order = None
    uv2.history = uv_full.history
    assert uv2 == uv_full
    # add baselines such that Nants_data needs to change
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    ant_list = list(range(15))  # Roughly half the antennas in the data
    # All blts where ant_1 is in list
    ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ant_list]
    ind2 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] not in ant_list]
    uv1.select(blt_inds=ind1)
    uv2.select(blt_inds=ind2)
    uv2.fast_concat(uv1, "blt", inplace=True)
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific baseline-times using pyuvdata. "
        "Combined data along baseline-time "
        "axis using pyuvdata.",
        uv2.history,
    )
    # test freq & pol arrays equal
    assert uv2._freq_array == uv_full._freq_array
    assert uv2._polarization_array == uv_full._polarization_array
    # test Nblt length arrays not equal but same shape
    assert uv2._ant_1_array != uv_full._ant_1_array
    assert uv2.ant_1_array.shape == uv_full.ant_1_array.shape
    assert uv2._ant_2_array != uv_full._ant_2_array
    assert uv2.ant_2_array.shape == uv_full.ant_2_array.shape
    assert uv2._uvw_array != uv_full._uvw_array
    assert uv2.uvw_array.shape == uv_full.uvw_array.shape
    assert uv2._time_array != uv_full._time_array
    assert uv2.time_array.shape == uv_full.time_array.shape
    assert uv2._baseline_array != uv_full._baseline_array
    assert uv2.baseline_array.shape == uv_full.baseline_array.shape
    assert uv2._data_array != uv_full._data_array
    assert uv2.data_array.shape == uv_full.data_array.shape
    # reorder blts to enable comparison
    uv2.reorder_blts()
    assert uv2.blt_order == ("time", "baseline")
    uv2.blt_order = None
    uv2.history = uv_full.history
    assert uv2 == uv_full
    # Add multiple axes
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(
        times=times[0 : len(times) // 2], polarizations=uv1.polarization_array[0:2]
    )
    uv2.select(
        times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4]
    )
    # fast_concat cannot combine along multiple axes at once
    pytest.raises(ValueError, uv1.fast_concat, uv2, "blt", inplace=True)
    # Another combo
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32))
    uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64))
    pytest.raises(ValueError, uv1.fast_concat, uv2, "blt", inplace=True)
    # Add without inplace
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    times = np.unique(uv_full.time_array)
    uv1.select(times=times[0 : len(times) // 2])
    uv2.select(times=times[len(times) // 2 :])
    uv1 = uv1.fast_concat(uv2, "blt", inplace=False)
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific times using pyuvdata. "
        "Combined data along baseline-time "
        "axis using pyuvdata.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # Check warnings
    # non-adjacent channel selections should warn about uneven spacing
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=np.arange(0, 32))
    uv2.select(freq_chans=np.arange(33, 64))
    uvtest.checkWarnings(
        uv1.fast_concat,
        func_args=[uv2, "freq"],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined frequencies are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=[0])
    uv2.select(freq_chans=[3])
    uvtest.checkWarnings(
        uv1.fast_concat,
        func_args=[uv2, "freq"],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined frequencies are not contiguous",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    # a sub-tolerance frequency offset should not trigger a spacing warning
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(freq_chans=[0])
    uv2.select(freq_chans=[1])
    uv2.freq_array += uv2._channel_width.tols[1] / 2.0
    uvtest.checkWarnings(
        uv1.fast_concat,
        func_args=[uv2, "freq"],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=3,
    )
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[3])
    uvtest.checkWarnings(
        uv1.fast_concat,
        func_args=[uv2, "polarization"],
        message=[
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "Combined polarizations are not evenly spaced",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
        nwarnings=4,
    )
    # Combining histories
    uv1 = uv_full.copy()
    uv2 = uv_full.copy()
    uv1.select(polarizations=uv1.polarization_array[0:2])
    uv2.select(polarizations=uv2.polarization_array[2:4])
    uv2.history += " testing the history. AIPS WTSCAL = 1.0"
    uv1.fast_concat(uv2, "polarization", inplace=True)
    assert uvutils._check_histories(
        uv_full.history + " Downselected to "
        "specific polarizations using pyuvdata. "
        "Combined data along polarization "
        "axis using pyuvdata. testing the history.",
        uv1.history,
    )
    uv1.history = uv_full.history
    assert uv1 == uv_full
    # test add of autocorr-only and crosscorr-only objects
    uv_full = UVData()
    uv_full.read_uvh5(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5"))
    bls = uv_full.get_antpairs()
    autos = [bl for bl in bls if bl[0] == bl[1]]
    cross = sorted(set(bls) - set(autos))
    uv_auto = uv_full.select(bls=autos, inplace=False)
    uv_cross = uv_full.select(bls=cross, inplace=False)
    uv1 = uv_auto.fast_concat(uv_cross, "blt")
    assert uv1.Nbls == uv_auto.Nbls + uv_cross.Nbls
    uv2 = uv_cross.fast_concat(uv_auto, "blt")
    assert uv2.Nbls == uv_auto.Nbls + uv_cross.Nbls
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_fast_concat_errors(casa_uvfits):
    """Check that fast_concat rejects a bad axis name and a non-UVData object."""
    first_half = casa_uvfits.copy()
    second_half = casa_uvfits.copy()
    first_half.select(freq_chans=np.arange(0, 32))
    second_half.select(freq_chans=np.arange(32, 64))
    # An unrecognized concatenation axis must raise.
    with pytest.raises(ValueError):
        first_half.fast_concat(second_half, "foo", inplace=True)
    # Concatenating with an object of a different class must raise.
    with pytest.raises(ValueError):
        first_half.fast_concat(UVCal(), "freq", inplace=True)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds(casa_uvfits):
    """Test _key2inds interpretation of every supported key form.

    _key2inds returns (ind1, ind2, indp): baseline-time indices to take
    directly, baseline-time indices to take conjugated, and a pair of
    polarization index lists (one for the direct selection, one for the
    conjugated selection).
    """
    # Test function to interpret key as antpair, pol
    uv = casa_uvfits
    # Get an antpair/pol combo
    ant1 = uv.ant_1_array[0]
    ant2 = uv.ant_2_array[0]
    pol = uv.polarization_array[0]
    bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]
    ind1, ind2, indp = uv._key2inds((ant1, ant2, pol))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal([0], indp[0])
    # Any of these inputs can also be a tuple of a tuple, so need to be checked twice.
    ind1, ind2, indp = uv._key2inds(((ant1, ant2, pol),))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal([0], indp[0])
    # Combo with pol as string
    ind1, ind2, indp = uv._key2inds((ant1, ant2, uvutils.polnum2str(pol)))
    assert np.array_equal([0], indp[0])
    ind1, ind2, indp = uv._key2inds(((ant1, ant2, uvutils.polnum2str(pol)),))
    assert np.array_equal([0], indp[0])
    # Check conjugation
    ind1, ind2, indp = uv._key2inds((ant2, ant1, pol))
    assert np.array_equal(bltind, ind2)
    assert np.array_equal(np.array([]), ind1)
    assert np.array_equal([0], indp[1])
    # Conjugation with pol as string
    ind1, ind2, indp = uv._key2inds((ant2, ant1, uvutils.polnum2str(pol)))
    assert np.array_equal(bltind, ind2)
    assert np.array_equal(np.array([]), ind1)
    assert np.array_equal([0], indp[1])
    assert np.array_equal([], indp[0])
    # Antpair only
    ind1, ind2, indp = uv._key2inds((ant1, ant2))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.arange(uv.Npols), indp[0])
    # NOTE(review): ((ant1, ant2)) is identical to (ant1, ant2) -- the extra
    # parentheses do not create a nested tuple, so this repeats the previous
    # case rather than exercising a tuple-wrapped antpair; confirm intent.
    ind1, ind2, indp = uv._key2inds(((ant1, ant2)))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.arange(uv.Npols), indp[0])
    # Baseline number only
    ind1, ind2, indp = uv._key2inds(uv.antnums_to_baseline(ant1, ant2))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.arange(uv.Npols), indp[0])
    ind1, ind2, indp = uv._key2inds((uv.antnums_to_baseline(ant1, ant2),))
    assert np.array_equal(bltind, ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.arange(uv.Npols), indp[0])
    # Pol number only
    ind1, ind2, indp = uv._key2inds(pol)
    assert np.array_equal(np.arange(uv.Nblts), ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.array([0]), indp[0])
    # NOTE(review): (pol) is just pol, not a 1-tuple; same for ("LL") below.
    ind1, ind2, indp = uv._key2inds((pol))
    assert np.array_equal(np.arange(uv.Nblts), ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.array([0]), indp[0])
    # Pol string only
    ind1, ind2, indp = uv._key2inds("LL")
    assert np.array_equal(np.arange(uv.Nblts), ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.array([1]), indp[0])
    ind1, ind2, indp = uv._key2inds(("LL"))
    assert np.array_equal(np.arange(uv.Nblts), ind1)
    assert np.array_equal(np.array([]), ind2)
    assert np.array_equal(np.array([1]), indp[0])
    # Test invalid keys
    pytest.raises(KeyError, uv._key2inds, "I")  # pol str not in data
    pytest.raises(KeyError, uv._key2inds, -8)  # pol num not in data
    pytest.raises(KeyError, uv._key2inds, 6)  # bl num not in data
    pytest.raises(KeyError, uv._key2inds, (1, 1))  # ant pair not in data
    pytest.raises(KeyError, uv._key2inds, (1, 1, "rr"))  # ant pair not in data
    pytest.raises(KeyError, uv._key2inds, (0, 1, "xx"))  # pol not in data
    # Test autos are handled correctly
    # (turn the first baseline-time entry into an autocorrelation)
    uv.ant_2_array[0] = uv.ant_1_array[0]
    ind1, ind2, indp = uv._key2inds((ant1, ant1, pol))
    assert np.array_equal(ind1, [0])
    assert np.array_equal(ind2, [])
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols(casa_uvfits):
    """A conjugated antpair key yields blt indices in ind2 with pols remapped."""
    uvd = casa_uvfits
    a1, a2 = uvd.ant_1_array[0], uvd.ant_2_array[0]
    expected_blts = np.nonzero((uvd.ant_1_array == a1) & (uvd.ant_2_array == a2))[0]
    ind1, ind2, indp = uvd._key2inds((a2, a1))
    # Data pols are 'rr', 'll', 'rl', 'lr'; conjugation swaps 'rl' <-> 'lr',
    # so the conjugated pol ordering is [0, 1, 3, 2].
    assert np.array_equal(ind2, expected_blts)
    assert np.array_equal(ind1, np.array([]))
    assert np.array_equal(indp[0], np.array([]))
    assert np.array_equal(indp[1], [0, 1, 3, 2])
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols_fringe(casa_uvfits):
    """Antpair lookup still works when one blt entry of the pair is flipped."""
    uvd = casa_uvfits
    uvd.select(polarizations=["rl"])
    a1, a2 = uvd.ant_1_array[0], uvd.ant_2_array[0]
    # Flip a single occurrence of this baseline.
    uvd.ant_1_array[0], uvd.ant_2_array[0] = a2, a1
    unflipped = np.nonzero((uvd.ant_1_array == a1) & (uvd.ant_2_array == a2))[0]
    ind1, ind2, indp = uvd._key2inds((a1, a2))
    # Only the unflipped entries come back, un-conjugated, with the one pol.
    assert np.array_equal(ind1, unflipped)
    assert np.array_equal(ind2, np.array([]))
    assert np.array_equal(indp[0], np.array([0]))
    assert np.array_equal(indp[1], np.array([]))
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols_bl_fringe(casa_uvfits):
    """Baseline-number lookup still works when one blt entry is flipped."""
    uvd = casa_uvfits
    uvd.select(polarizations=["rl"])
    a1, a2 = uvd.ant_1_array[0], uvd.ant_2_array[0]
    # Flip a single occurrence of this baseline (antennas and baseline number).
    uvd.ant_1_array[0], uvd.ant_2_array[0] = a2, a1
    uvd.baseline_array[0] = uvutils.antnums_to_baseline(a2, a1, uvd.Nants_telescope)
    bl_num = uvutils.antnums_to_baseline(a1, a2, uvd.Nants_telescope)
    unflipped = np.nonzero((uvd.ant_1_array == a1) & (uvd.ant_2_array == a2))[0]
    ind1, ind2, indp = uvd._key2inds(bl_num)
    # Only the unflipped entries come back, un-conjugated, with the one pol.
    assert np.array_equal(ind1, unflipped)
    assert np.array_equal(ind2, np.array([]))
    assert np.array_equal(indp[0], np.array([0]))
    assert np.array_equal(indp[1], np.array([]))
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols_missing_data(casa_uvfits):
    """Conjugate antpair lookup fails when the conjugate pol is not in the data."""
    uvd = casa_uvfits
    # Keep only 'rl'; its conjugate 'lr' is then absent.
    uvd.select(polarizations=["rl"])
    a1, a2 = uvd.ant_1_array[0], uvd.ant_2_array[0]
    with pytest.raises(KeyError):
        uvd._key2inds((a2, a1))
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols_bls(casa_uvfits):
    """Conjugated baseline-number key returns ind2 with pol order remapped."""
    uvd = casa_uvfits
    a1, a2 = uvd.ant_1_array[0], uvd.ant_2_array[0]
    conj_bl = uvutils.antnums_to_baseline(a2, a1, uvd.Nants_telescope)
    expected_blts = np.nonzero((uvd.ant_1_array == a1) & (uvd.ant_2_array == a2))[0]
    ind1, ind2, indp = uvd._key2inds(conj_bl)
    # Data pols are 'rr', 'll', 'rl', 'lr'; conjugation reorders to [0, 1, 3, 2].
    assert np.array_equal(ind2, expected_blts)
    assert np.array_equal(ind1, np.array([]))
    assert np.array_equal(indp[0], np.array([]))
    assert np.array_equal(indp[1], [0, 1, 3, 2])
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_key2inds_conj_all_pols_missing_data_bls(casa_uvfits):
    """Conjugate baseline-number lookup fails when the conjugate pol is absent."""
    uvd = casa_uvfits
    # Keep only 'rl'; its conjugate 'lr' is then absent.
    uvd.select(polarizations=["rl"])
    a1, a2 = uvd.ant_1_array[0], uvd.ant_2_array[0]
    conj_bl = uvutils.antnums_to_baseline(a2, a1, uvd.Nants_telescope)
    with pytest.raises(KeyError):
        uvd._key2inds(conj_bl)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_smart_slicing(casa_uvfits):
    """Test _smart_slicing view/copy behavior across index patterns.

    "reg" in the section comments means regularly spaced indices; the first
    section demonstrates that case returns a read-only view, while irregular
    indices, irregular pol selections, or force_copy=True return a copy.
    Indices passed as ind2 select conjugated data.
    """
    # Test function to slice data
    uv = casa_uvfits
    # ind1 reg, ind2 empty, pol reg
    ind1 = 10 * np.arange(9)
    ind2 = []
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    assert not d.flags.writeable
    # Ensure a view was returned
    uv.data_array[ind1[1], 0, 0, indp[0]] = 5.43
    assert d[1, 0, 0] == uv.data_array[ind1[1], 0, 0, indp[0]]
    # force copy
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []), force_copy=True)
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    assert d.flags.writeable
    # Ensure a copy was returned
    uv.data_array[ind1[1], 0, 0, indp[0]] = 4.3
    assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]
    # ind1 reg, ind2 empty, pol not reg
    ind1 = 10 * np.arange(9)
    ind2 = []
    indp = [0, 1, 3]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    assert not d.flags.writeable
    # Ensure a copy was returned
    uv.data_array[ind1[1], 0, 0, indp[0]] = 1.2
    assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]
    # ind1 not reg, ind2 empty, pol reg
    ind1 = [0, 4, 5]
    ind2 = []
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    assert not d.flags.writeable
    # Ensure a copy was returned
    uv.data_array[ind1[1], 0, 0, indp[0]] = 8.2
    assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]
    # ind1 not reg, ind2 empty, pol not reg
    ind1 = [0, 4, 5]
    ind2 = []
    indp = [0, 1, 3]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    assert not d.flags.writeable
    # Ensure a copy was returned
    uv.data_array[ind1[1], 0, 0, indp[0]] = 3.4
    assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]
    # ind1 empty, ind2 reg, pol reg
    # Note conjugation test ensures the result is a copy, not a view.
    ind1 = []
    ind2 = 10 * np.arange(9)
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))
    dcheck = uv.data_array[ind2, :, :, :]
    dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))
    assert np.all(d == dcheck)
    # ind1 empty, ind2 reg, pol not reg
    ind1 = []
    ind2 = 10 * np.arange(9)
    indp = [0, 1, 3]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))
    dcheck = uv.data_array[ind2, :, :, :]
    dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))
    assert np.all(d == dcheck)
    # ind1 empty, ind2 not reg, pol reg
    ind1 = []
    ind2 = [1, 4, 5, 10]
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))
    dcheck = uv.data_array[ind2, :, :, :]
    dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))
    assert np.all(d == dcheck)
    # ind1 empty, ind2 not reg, pol not reg
    ind1 = []
    ind2 = [1, 4, 5, 10]
    indp = [0, 1, 3]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))
    dcheck = uv.data_array[ind2, :, :, :]
    dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))
    assert np.all(d == dcheck)
    # ind1, ind2 not empty, pol reg
    ind1 = np.arange(20)
    ind2 = np.arange(30, 40)
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, indp))
    dcheck = np.append(
        uv.data_array[ind1, :, :, :], np.conj(uv.data_array[ind2, :, :, :]), axis=0
    )
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    # ind1, ind2 not empty, pol not reg
    ind1 = np.arange(20)
    ind2 = np.arange(30, 40)
    indp = [0, 1, 3]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, indp))
    dcheck = np.append(
        uv.data_array[ind1, :, :, :], np.conj(uv.data_array[ind2, :, :, :]), axis=0
    )
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    # test single element
    ind1 = [45]
    ind2 = []
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp], axis=1)
    assert np.all(d == dcheck)
    # test single element
    # (dcheck is deliberately reused from the previous single-element case:
    # the same blt selected via ind2 must equal its conjugate)
    ind1 = []
    ind2 = [45]
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))
    assert np.all(d == np.conj(dcheck))
    # Full squeeze
    ind1 = [45]
    ind2 = []
    indp = [0, 1]
    d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []), squeeze="full")
    dcheck = uv.data_array[ind1, :, :, :]
    dcheck = np.squeeze(dcheck[:, :, :, indp])
    assert np.all(d == dcheck)
    # Test invalid squeeze
    pytest.raises(
        ValueError,
        uv._smart_slicing,
        uv.data_array,
        ind1,
        ind2,
        (indp, []),
        squeeze="notasqueeze",
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_data(casa_uvfits):
    """Exercise get_data for every supported key form against direct slices."""
    uvd = casa_uvfits
    a1, a2 = uvd.ant_1_array[0], uvd.ant_2_array[0]
    pol = uvd.polarization_array[0]
    blt_sel = np.nonzero((uvd.ant_1_array == a1) & (uvd.ant_2_array == a2))[0]
    expected = np.squeeze(uvd.data_array[blt_sel, :, :, 0])
    # Arg-expanded, string-pol, and single-tuple keys all address the same data.
    for fetched in (
        uvd.get_data(a1, a2, pol),
        uvd.get_data(a1, a2, uvutils.polnum2str(pol)),
        uvd.get_data((a1, a2, pol)),
    ):
        assert np.all(fetched == expected)
    # More than one key tuple is rejected.
    with pytest.raises(ValueError) as err:
        uvd.get_data((a1, a2, pol), (a1, a2, pol))
    assert str(err.value).startswith("no more than 3 key values can be passed")
    # Reversed antpair returns the conjugated visibilities.
    assert np.all(np.conj(uvd.get_data(a2, a1, pol)) == expected)
    # Cross pols conjugate into each other when the baseline is flipped.
    flipped = uvd.get_data(a2, a1, uvd.polarization_array[2])
    direct = uvd.get_data(a1, a2, uvd.polarization_array[3])
    assert np.all(flipped == np.conj(direct))
    # Antpair-only key keeps the full pol axis.
    assert np.all(
        uvd.get_data(a1, a2) == np.squeeze(uvd.data_array[blt_sel, :, :, :])
    )
    # Pol-only key keeps the full blt axis.
    assert np.all(uvd.get_data(pol) == np.squeeze(uvd.data_array[:, :, :, 0]))
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_flags(casa_uvfits):
    """Test get_flags for easy access to the flag array.

    Checks all supported key forms (antpair + pol number, pol string, tuple
    key, conjugated baseline, antpair only, pol only) against direct slices
    of flag_array, and that the returned dtype is boolean.
    """
    uv = casa_uvfits
    # Get an antpair/pol combo
    ant1 = uv.ant_1_array[0]
    ant2 = uv.ant_2_array[0]
    pol = uv.polarization_array[0]
    bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]
    dcheck = np.squeeze(uv.flag_array[bltind, :, :, 0])
    d = uv.get_flags(ant1, ant2, pol)
    assert np.all(dcheck == d)
    d = uv.get_flags(ant1, ant2, uvutils.polnum2str(pol))
    assert np.all(dcheck == d)
    d = uv.get_flags((ant1, ant2, pol))
    assert np.all(dcheck == d)
    with pytest.raises(ValueError) as cm:
        uv.get_flags((ant1, ant2, pol), (ant1, ant2, pol))
    assert str(cm.value).startswith("no more than 3 key values can be passed")
    # Check conjugation (flags are real-valued, so values are unchanged)
    d = uv.get_flags(ant2, ant1, pol)
    assert np.all(dcheck == d)
    # np.bool was deprecated in numpy 1.20 and removed in 1.24; compare
    # against the builtin-bool dtype instead.
    assert d.dtype == np.dtype(bool)
    # Antpair only
    dcheck = np.squeeze(uv.flag_array[bltind, :, :, :])
    d = uv.get_flags(ant1, ant2)
    assert np.all(dcheck == d)
    # Pol number only
    dcheck = np.squeeze(uv.flag_array[:, :, :, 0])
    d = uv.get_flags(pol)
    assert np.all(dcheck == d)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_nsamples(casa_uvfits):
    """Exercise get_nsamples for every supported key form."""
    uvd = casa_uvfits
    a1, a2 = uvd.ant_1_array[0], uvd.ant_2_array[0]
    pol = uvd.polarization_array[0]
    blt_sel = np.nonzero((uvd.ant_1_array == a1) & (uvd.ant_2_array == a2))[0]
    expected = np.squeeze(uvd.nsample_array[blt_sel, :, :, 0])
    # Arg-expanded, string-pol, and single-tuple keys all address the same data.
    for fetched in (
        uvd.get_nsamples(a1, a2, pol),
        uvd.get_nsamples(a1, a2, uvutils.polnum2str(pol)),
        uvd.get_nsamples((a1, a2, pol)),
    ):
        assert np.all(fetched == expected)
    # More than one key tuple is rejected.
    with pytest.raises(ValueError) as err:
        uvd.get_nsamples((a1, a2, pol), (a1, a2, pol))
    assert str(err.value).startswith("no more than 3 key values can be passed")
    # nsamples are real-valued, so the conjugate baseline returns equal values.
    assert np.all(uvd.get_nsamples(a2, a1, pol) == expected)
    # Antpair-only key keeps the full pol axis.
    assert np.all(
        uvd.get_nsamples(a1, a2) == np.squeeze(uvd.nsample_array[blt_sel, :, :, :])
    )
    # Pol-only key keeps the full blt axis.
    assert np.all(uvd.get_nsamples(pol) == np.squeeze(uvd.nsample_array[:, :, :, 0]))
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpair2ind(paper_uvh5):
    """antpair2ind returns integer blt indices for an antenna pair."""
    uvd = paper_uvh5
    inds = uvd.antpair2ind(0, 1, ordered=False)
    # One occurrence per time: index 1 in each 21-baseline block, 19 times.
    np.testing.assert_array_equal(inds, np.arange(1, 380, 21))
    assert np.issubdtype(inds.dtype, np.integer)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpair2ind_conj(paper_uvh5):
    """A conjugated antpair key gives the same indices when ordered=False."""
    uvd = paper_uvh5
    forward = uvd.antpair2ind(0, 1, ordered=False)
    # Key form (tuple) and reversed pair must resolve identically.
    reverse = uvd.antpair2ind((1, 0), ordered=False)
    np.testing.assert_array_equal(forward, reverse)
    assert np.issubdtype(reverse.dtype, np.integer)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpair2ind_ordered(paper_uvh5):
    """ordered=True only matches the pair orientation present in the data."""
    uvd = paper_uvh5
    unordered = uvd.antpair2ind(0, 1, ordered=False)
    # The conjugate pair is absent from the data, so ordered lookup is empty.
    assert uvd.antpair2ind(1, 0, ordered=True).size == 0
    # The pair actually present matches the unordered result.
    ordered = uvd.antpair2ind(0, 1, ordered=True)
    np.testing.assert_array_equal(ordered, unordered)
    assert np.issubdtype(ordered.dtype, np.integer)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpair2ind_autos(paper_uvh5):
    """Autocorrelation lookups agree with and without ordering."""
    uvd = paper_uvh5
    ordered = uvd.antpair2ind(0, 0, ordered=True)
    unordered = uvd.antpair2ind(0, 0, ordered=False)
    np.testing.assert_array_equal(ordered, unordered)
    for result in (ordered, unordered):
        assert np.issubdtype(result.dtype, np.integer)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpair2ind_exceptions(paper_uvh5):
    """antpair2ind rejects malformed keys and a non-boolean ordered flag."""
    uvd = paper_uvh5
    # A single number is not an antenna pair.
    with pytest.raises(ValueError, match="antpair2ind must be fed an antpair tuple"):
        uvd.antpair2ind(1)
    # Non-integer antenna values are rejected.
    with pytest.raises(ValueError, match="antpair2ind must be fed an antpair tuple"):
        uvd.antpair2ind("bar", "foo")
    # ordered must be a real boolean, not truthy.
    with pytest.raises(ValueError, match="ordered must be a boolean"):
        uvd.antpair2ind(0, 1, "foo")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_times(casa_uvfits):
    """get_times returns the time_array entries for a baseline for any key form."""
    uvd = casa_uvfits
    a1, a2 = uvd.ant_1_array[0], uvd.ant_2_array[0]
    pol = uvd.polarization_array[0]
    blt_sel = np.nonzero((uvd.ant_1_array == a1) & (uvd.ant_2_array == a2))[0]
    expected = uvd.time_array[blt_sel]
    # The pol part of the key (and baseline conjugation) never changes times.
    for fetched in (
        uvd.get_times(a1, a2, pol),
        uvd.get_times(a1, a2, uvutils.polnum2str(pol)),
        uvd.get_times((a1, a2, pol)),
        uvd.get_times(a2, a1, pol),
        uvd.get_times(a1, a2),
    ):
        assert np.all(fetched == expected)
    # More than one key tuple is rejected.
    with pytest.raises(ValueError) as err:
        uvd.get_times((a1, a2, pol), (a1, a2, pol))
    assert str(err.value).startswith("no more than 3 key values can be passed")
    # Pol-only key returns the whole time axis.
    assert np.all(uvd.get_times(pol) == uvd.time_array)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_antpairpol_iter(casa_uvfits):
    """Test that antpairpol_iter yields every (ant1, ant2, pol) with its data.

    Each yielded data array must match a direct slice of data_array, and the
    iterator must visit every baseline and every polarization.
    """
    uv = casa_uvfits
    pol_dict = {
        uvutils.polnum2str(uv.polarization_array[i]): i for i in range(uv.Npols)
    }
    # (the original also collected the raw keys into a list, but it was
    # never inspected, so that dead accumulator has been dropped)
    pols = set()
    bls = set()
    for key, d in uv.antpairpol_iter():
        bl = uv.antnums_to_baseline(key[0], key[1])
        blind = np.where(uv.baseline_array == bl)[0]
        bls.add(bl)
        pols.add(key[2])
        # The yielded data must equal the direct slice for this bl/pol.
        dcheck = np.squeeze(uv.data_array[blind, :, :, pol_dict[key[2]]])
        assert np.all(dcheck == d)
    # Every baseline and every polarization must be visited.
    assert len(bls) == len(uv.get_baseline_nums())
    assert len(pols) == uv.Npols
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_ants(casa_uvfits):
    """get_ants returns exactly the antennas appearing in ant_1/ant_2 arrays."""
    uvd = casa_uvfits
    # Mutual inclusion in both directions is equivalent to set equality.
    data_ants = set(uvd.get_ants())
    array_ants = set(uvd.ant_1_array) | set(uvd.ant_2_array)
    assert data_ants == array_ants
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_enu_antpos():
    """get_ENU_antpos with and without centering and data-antenna selection."""
    uvd = UVData()
    uvd.read_uvh5(os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5"))
    # All telescope antennas, uncentered.
    antpos, ants = uvd.get_ENU_antpos(center=False, pick_data_ants=False)
    assert len(ants) == 113
    assert ants[0] == 0
    assert np.isclose(antpos[0, 0], 19.340211050751535)
    # Defaults are center=False, pick_data_ants=False.
    default_pos, ants = uvd.get_ENU_antpos()
    assert np.all(default_pos == antpos)
    # Centering shifts the east coordinate.
    antpos, ants = uvd.get_ENU_antpos(center=True, pick_data_ants=False)
    assert np.isclose(antpos[0, 0], 22.472442651767714)
    # Restricting to antennas present in the data changes the first antenna.
    antpos, ants = uvd.get_ENU_antpos(center=True, pick_data_ants=True)
    assert ants[0] == 9
    assert np.isclose(antpos[0, 0], -0.0026981323386223721)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_telescope_loc_xyz_check(paper_uvh5, tmp_path):
    """A file with a bad telescope location reads only when checks are off."""
    uvd = paper_uvh5
    # Corrupt the location by re-converting an already-converted value.
    uvd.telescope_location = uvutils.XYZ_from_LatLonAlt(*uvd.telescope_location)
    # fix LST values
    uvd.set_lsts_from_time_array()
    out_file = str(tmp_path / "test.uvh5")
    uvd.write_uvh5(out_file, run_check=False, check_extra=False, clobber=True)
    # Reading succeeds when checks are skipped (passing is implicit)...
    uvd.read(out_file, run_check=False)
    # ...but the default checked read rejects the bad location.
    with pytest.raises(ValueError):
        uvd.read(out_file)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_pols(casa_uvfits):
    """get_pols returns the data polarizations as strings (order-insensitive)."""
    found = casa_uvfits.get_pols()
    assert sorted(found) == sorted(["rr", "ll", "lr", "rl"])
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_pols_x_orientation(paper_uvh5):
    """x_orientation maps 'xx' pol strings to physical east/north labels."""
    uvd = paper_uvh5
    # east-oriented x feeds give 'en'; north-oriented give 'ne'.
    for orientation, expected in (("east", ["en"]), ("north", ["ne"])):
        uvd.x_orientation = orientation
        assert uvd.get_pols() == expected
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_get_feedpols(casa_uvfits):
    """get_feedpols returns unique feed letters; pseudo-Stokes pols raise."""
    uvd = casa_uvfits
    assert sorted(uvd.get_feedpols()) == sorted(["r", "l"])
    # Pseudo-Stokes visibilities have no feed decomposition, so this must fail.
    uvd.polarization_array[0] = 1  # pseudo-Stokes I
    with pytest.raises(ValueError):
        uvd.get_feedpols()
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_parse_ants(casa_uvfits):
    """Test parse_ants translation of ant_str into antpairs and polarizations.

    parse_ants returns (ant_pairs_nums, polarizations); either element is
    None when the string places no constraint on that axis. Covers the
    keywords 'all'/'auto'/'cross', pseudo-Stokes names, single antennas,
    baselines (with per-antenna pol letters and parenthesized groups),
    '-'-prefixed avoidance terms, and warnings for antennas/pols not in the
    data. The casa_uvfits fixture has no autos; the uvh5 file read at the
    end has both autos and crosses.
    """
    # Test function to get correct antenna pairs and polarizations
    uv = casa_uvfits
    # All baselines
    ant_str = "all"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    assert isinstance(ant_pairs_nums, type(None))
    assert isinstance(polarizations, type(None))
    # Auto correlations
    ant_str = "auto"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    assert Counter(ant_pairs_nums) == Counter([])
    assert isinstance(polarizations, type(None))
    # Cross correlations
    ant_str = "cross"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    assert Counter(uv.get_antpairs()) == Counter(ant_pairs_nums)
    assert isinstance(polarizations, type(None))
    # pseudo-Stokes params
    ant_str = "pI,pq,pU,pv"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    pols_expected = [4, 3, 2, 1]
    assert isinstance(ant_pairs_nums, type(None))
    assert Counter(polarizations) == Counter(pols_expected)
    # Unparsible string
    ant_str = "none"
    pytest.raises(ValueError, uv.parse_ants, ant_str)
    # Single antenna number
    ant_str = "0"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    # fmt: off
    ant_pairs_expected = [(0, 1), (0, 2), (0, 3), (0, 6), (0, 7), (0, 8),
                          (0, 11), (0, 14), (0, 18), (0, 19), (0, 20),
                          (0, 21), (0, 22), (0, 23), (0, 24), (0, 26),
                          (0, 27)]
    # fmt: on
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
    # Single antenna number not in the data
    ant_str = "10"
    ant_pairs_nums, polarizations = uvtest.checkWarnings(
        uv.parse_ants, [ant_str], {}, nwarnings=1, message="Warning: Antenna"
    )
    assert isinstance(ant_pairs_nums, type(None))
    assert isinstance(polarizations, type(None))
    # Single antenna number with polarization, both not in the data
    ant_str = "10x"
    ant_pairs_nums, polarizations = uvtest.checkWarnings(
        uv.parse_ants,
        [ant_str],
        {},
        nwarnings=2,
        message=["Warning: Antenna", "Warning: Polarization"],
    )
    assert isinstance(ant_pairs_nums, type(None))
    assert isinstance(polarizations, type(None))
    # Multiple antenna numbers as list
    ant_str = "22,26"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    # fmt: off
    ant_pairs_expected = [(0, 22), (0, 26), (1, 22), (1, 26), (2, 22), (2, 26),
                          (3, 22), (3, 26), (6, 22), (6, 26), (7, 22),
                          (7, 26), (8, 22), (8, 26), (11, 22), (11, 26),
                          (14, 22), (14, 26), (18, 22), (18, 26),
                          (19, 22), (19, 26), (20, 22), (20, 26),
                          (21, 22), (21, 26), (22, 23), (22, 24),
                          (22, 26), (22, 27), (23, 26), (24, 26),
                          (26, 27)]
    # fmt: on
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
    # Single baseline
    ant_str = "1_3"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 3)]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
    # Single baseline with polarization
    ant_str = "1l_3r"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 3)]
    pols_expected = [-4]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)
    # Single baseline with single polarization in first entry
    ant_str = "1l_3,2x_3"
    ant_pairs_nums, polarizations = uvtest.checkWarnings(
        uv.parse_ants, [ant_str], {}, nwarnings=1, message="Warning: Polarization"
    )
    ant_pairs_expected = [(1, 3), (2, 3)]
    pols_expected = [-2, -4]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)
    # Single baseline with single polarization in last entry
    ant_str = "1_3l,2_3x"
    ant_pairs_nums, polarizations = uvtest.checkWarnings(
        uv.parse_ants, [ant_str], {}, nwarnings=1, message="Warning: Polarization"
    )
    ant_pairs_expected = [(1, 3), (2, 3)]
    pols_expected = [-2, -3]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)
    # Multiple baselines as list
    ant_str = "1_2,1_3,1_11"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 2), (1, 3), (1, 11)]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
    # Multiples baselines with polarizations as list
    ant_str = "1r_2l,1l_3l,1r_11r"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 2), (1, 3), (1, 11)]
    pols_expected = [-1, -2, -3]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)
    # Specific baselines with parenthesis
    ant_str = "(1,3)_11"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 11), (3, 11)]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
    # Specific baselines with parenthesis
    ant_str = "1_(3,11)"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 3), (1, 11)]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
    # Antenna numbers with polarizations
    ant_str = "(1l,2r)_(3l,6r)"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 3), (1, 6), (2, 3), (2, 6)]
    pols_expected = [-1, -2, -3, -4]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)
    # Antenna numbers with - for avoidance
    ant_str = "1_(-3,11)"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 11)]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
    # Remove specific antenna number
    ant_str = "1,-3"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [
        (0, 1),
        (1, 2),
        (1, 6),
        (1, 7),
        (1, 8),
        (1, 11),
        (1, 14),
        (1, 18),
        (1, 19),
        (1, 20),
        (1, 21),
        (1, 22),
        (1, 23),
        (1, 24),
        (1, 26),
        (1, 27),
    ]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
    # Remove specific baseline (same expected antenna pairs as above example)
    ant_str = "1,-1_3"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
    # Antenna numbers with polarizations and - for avoidance
    ant_str = "1l_(-3r,11l)"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 11)]
    pols_expected = [-2]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)
    # Antenna numbers and pseudo-Stokes parameters
    ant_str = "(1l,2r)_(3l,6r),pI,pq"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 3), (1, 6), (2, 3), (2, 6)]
    pols_expected = [2, 1, -1, -2, -3, -4]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)
    # Multiple baselines with multiple polarizations, one pol to be removed
    ant_str = "1l_2,1l_3,-1l_3r"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = [(1, 2), (1, 3)]
    pols_expected = [-2]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)
    # Multiple baselines with multiple polarizations, one pol (not in data)
    # to be removed
    ant_str = "1l_2,1l_3,-1x_3y"
    ant_pairs_nums, polarizations = uvtest.checkWarnings(
        uv.parse_ants, [ant_str], {}, nwarnings=1, message="Warning: Polarization"
    )
    ant_pairs_expected = [(1, 2), (1, 3)]
    pols_expected = [-2, -4]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)
    # Test print toggle on single baseline with polarization
    ant_str = "1l_2l"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str, print_toggle=True)
    ant_pairs_expected = [(1, 2)]
    pols_expected = [-2]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert Counter(polarizations) == Counter(pols_expected)
    # Test ant_str='auto' on file with auto correlations
    uv = UVData()
    testfile = os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5")
    uv.read(testfile)
    ant_str = "auto"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_nums = [
        9,
        10,
        20,
        22,
        31,
        43,
        53,
        64,
        65,
        72,
        80,
        81,
        88,
        89,
        96,
        97,
        104,
        105,
        112,
    ]
    ant_pairs_autos = [(ant_i, ant_i) for ant_i in ant_nums]
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_autos)
    assert isinstance(polarizations, type(None))
    # Test cross correlation extraction on data with auto + cross
    ant_str = "cross"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_cross = list(itertools.combinations(ant_nums, 2))
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_cross)
    assert isinstance(polarizations, type(None))
    # Remove only polarization of single baseline
    ant_str = "all,-9x_10x"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = ant_pairs_autos + ant_pairs_cross
    ant_pairs_expected.remove((9, 10))
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
    # Test appending all to beginning of strings that start with -
    ant_str = "-9"
    ant_pairs_nums, polarizations = uv.parse_ants(ant_str)
    ant_pairs_expected = ant_pairs_autos + ant_pairs_cross
    for ant_i in ant_nums:
        ant_pairs_expected.remove((9, ant_i))
    assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)
    assert isinstance(polarizations, type(None))
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_select_with_ant_str(casa_uvfits):
    """Exercise UVData.select with the ant_str argument.

    Walks through the ant_str mini-language: the 'all'/'cross'/'auto'
    keywords, single antenna numbers, explicit baselines, per-feed
    polarization suffixes (e.g. '1l_3r'), parenthesized groups, and '-'
    prefixes for avoidance, checking the selected baselines and
    polarizations after each call. Note the fixture object is mutated
    partway through (polarization_array is overwritten to test
    pseudo-Stokes selection, and `uv` is rebound to a uvh5 file for the
    auto/cross keyword checks).
    """
    # Test select function with ant_str argument
    uv = casa_uvfits
    inplace = False
    # All baselines
    ant_str = "all"
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(uv.get_antpairs())
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Cross correlations
    ant_str = "cross"
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(uv.get_antpairs())
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # All baselines in data are cross correlations
    # Single antenna number
    ant_str = "0"
    ant_pairs = [
        (0, 1),
        (0, 2),
        (0, 3),
        (0, 6),
        (0, 7),
        (0, 8),
        (0, 11),
        (0, 14),
        (0, 18),
        (0, 19),
        (0, 20),
        (0, 21),
        (0, 22),
        (0, 23),
        (0, 24),
        (0, 26),
        (0, 27),
    ]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Single antenna number not present in data
    ant_str = "10"
    uv2 = uvtest.checkWarnings(
        uv.select,
        [],
        {"ant_str": ant_str, "inplace": inplace},
        nwarnings=2,
        message=[
            "Warning: Antenna",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
    )
    # Multiple antenna numbers as list
    ant_str = "22,26"
    ant_pairs = [
        (0, 22),
        (0, 26),
        (1, 22),
        (1, 26),
        (2, 22),
        (2, 26),
        (3, 22),
        (3, 26),
        (6, 22),
        (6, 26),
        (7, 22),
        (7, 26),
        (8, 22),
        (8, 26),
        (11, 22),
        (11, 26),
        (14, 22),
        (14, 26),
        (18, 22),
        (18, 26),
        (19, 22),
        (19, 26),
        (20, 22),
        (20, 26),
        (21, 22),
        (21, 26),
        (22, 23),
        (22, 24),
        (22, 26),
        (22, 27),
        (23, 26),
        (24, 26),
        (26, 27),
    ]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Single baseline
    ant_str = "1_3"
    ant_pairs = [(1, 3)]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Single baseline with polarization
    ant_str = "1l_3r"
    ant_pairs = [(1, 3)]
    pols = ["lr"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Single baseline with single polarization in first entry
    ant_str = "1l_3,2x_3"
    # x,y pols not present in data
    uv2 = uvtest.checkWarnings(
        uv.select,
        [],
        {"ant_str": ant_str, "inplace": inplace},
        nwarnings=2,
        message=[
            "Warning: Polarization",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
    )
    # with polarizations in data
    ant_str = "1l_3,2_3"
    ant_pairs = [(1, 3), (2, 3)]
    pols = ["ll", "lr"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Single baseline with single polarization in last entry
    ant_str = "1_3l,2_3x"
    # x,y pols not present in data
    uv2 = uvtest.checkWarnings(
        uv.select,
        [],
        {"ant_str": ant_str, "inplace": inplace},
        nwarnings=2,
        message=[
            "Warning: Polarization",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
    )
    # with polarizations in data
    ant_str = "1_3l,2_3"
    ant_pairs = [(1, 3), (2, 3)]
    pols = ["ll", "rl"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Multiple baselines as list
    ant_str = "1_2,1_3,1_10"
    # Antenna number 10 not in data
    uv2 = uvtest.checkWarnings(
        uv.select,
        [],
        {"ant_str": ant_str, "inplace": inplace},
        nwarnings=2,
        message=[
            "Warning: Antenna",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
    )
    ant_pairs = [(1, 2), (1, 3)]
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Multiples baselines with polarizations as list
    ant_str = "1r_2l,1l_3l,1r_11r"
    ant_pairs = [(1, 2), (1, 3), (1, 11)]
    pols = ["rr", "ll", "rl"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Specific baselines with parenthesis
    ant_str = "(1,3)_11"
    ant_pairs = [(1, 11), (3, 11)]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Specific baselines with parenthesis
    ant_str = "1_(3,11)"
    ant_pairs = [(1, 3), (1, 11)]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Antenna numbers with polarizations
    ant_str = "(1l,2r)_(3l,6r)"
    ant_pairs = [(1, 3), (1, 6), (2, 3), (2, 6)]
    pols = ["rr", "ll", "rl", "lr"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Antenna numbers with - for avoidance
    ant_str = "1_(-3,11)"
    ant_pairs = [(1, 11)]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    ant_str = "(-1,3)_11"
    ant_pairs = [(3, 11)]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Remove specific antenna number
    ant_str = "1,-3"
    ant_pairs = [
        (0, 1),
        (1, 2),
        (1, 6),
        (1, 7),
        (1, 8),
        (1, 11),
        (1, 14),
        (1, 18),
        (1, 19),
        (1, 20),
        (1, 21),
        (1, 22),
        (1, 23),
        (1, 24),
        (1, 26),
        (1, 27),
    ]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Remove specific baseline
    ant_str = "1,-1_3"
    ant_pairs = [
        (0, 1),
        (1, 2),
        (1, 6),
        (1, 7),
        (1, 8),
        (1, 11),
        (1, 14),
        (1, 18),
        (1, 19),
        (1, 20),
        (1, 21),
        (1, 22),
        (1, 23),
        (1, 24),
        (1, 26),
        (1, 27),
    ]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Antenna numbers with polarizations and - for avoidance
    ant_str = "1l_(-3r,11l)"
    ant_pairs = [(1, 11)]
    pols = ["ll"]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Test pseudo-Stokes params with select
    ant_str = "pi,pQ"
    pols = ["pQ", "pI"]
    # overwrite the fixture's polarizations with pseudo-Stokes values (pI..pV)
    uv.polarization_array = np.array([4, 3, 2, 1])
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(uv.get_antpairs())
    assert Counter(uv2.get_pols()) == Counter(pols)
    # Test ant_str = 'auto' on file with auto correlations
    uv = UVData()
    testfile = os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA.uvh5")
    uv.read(testfile)
    ant_str = "auto"
    ant_nums = [
        9,
        10,
        20,
        22,
        31,
        43,
        53,
        64,
        65,
        72,
        80,
        81,
        88,
        89,
        96,
        97,
        104,
        105,
        112,
    ]
    ant_pairs_autos = [(ant_i, ant_i) for ant_i in ant_nums]
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs_autos)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Test cross correlation extraction on data with auto + cross
    ant_str = "cross"
    ant_pairs_cross = list(itertools.combinations(ant_nums, 2))
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs_cross)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Remove only polarization of single baseline
    ant_str = "all,-9x_10x"
    ant_pairs = ant_pairs_autos + ant_pairs_cross
    ant_pairs.remove((9, 10))
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
    # Test appending all to beginning of strings that start with -
    ant_str = "-9"
    ant_pairs = ant_pairs_autos + ant_pairs_cross
    for ant_i in ant_nums:
        ant_pairs.remove((9, ant_i))
    uv2 = uv.select(ant_str=ant_str, inplace=inplace)
    assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)
    assert Counter(uv2.get_pols()) == Counter(uv.get_pols())
# shared error message for ant_str combined with other selection keywords
_ANT_STR_CONFLICT_MSG = (
    "Cannot provide ant_str with antenna_nums, antenna_names, bls, or "
    "polarizations."
)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize(
    "kwargs,message",
    [
        ({"ant_str": "", "antenna_nums": []}, _ANT_STR_CONFLICT_MSG),
        ({"ant_str": "", "antenna_names": []}, _ANT_STR_CONFLICT_MSG),
        ({"ant_str": "", "bls": []}, _ANT_STR_CONFLICT_MSG),
        ({"ant_str": "", "polarizations": []}, _ANT_STR_CONFLICT_MSG),
        ({"ant_str": "auto"}, "There is no data matching ant_str=auto in this object."),
        (
            {"ant_str": "pI,pq,pU,pv"},
            "Polarization 4 is not present in the polarization_array",
        ),
        ({"ant_str": "none"}, "Unparsible argument none"),
    ],
)
def test_select_with_ant_str_errors(casa_uvfits, kwargs, message):
    """Each invalid ant_str / keyword combination must raise ValueError."""
    uv_obj = casa_uvfits
    with pytest.raises(ValueError, match=message):
        uv_obj.select(**kwargs)
def test_set_uvws_from_antenna_pos():
    """Test set_uvws_from_antenna_positions with phased data.

    Verifies that the method raises on phased data unless unphasing is
    requested, rejects invalid phase-frame names (per the error messages,
    the second and third positional arguments are orig_phase_frame and
    output_phase_frame), and that recomputing uvws through GCRS reproduces
    the original uvw_array to within a loose tolerance.
    """
    # Test set_uvws_from_antenna_positions function with phased data
    uv_object = UVData()
    testfile = os.path.join(DATA_PATH, "1133866760.uvfits")
    uv_object.read_uvfits(testfile)
    orig_uvw_array = np.copy(uv_object.uvw_array)
    # phased data without allowing unphasing -> error
    with pytest.raises(ValueError) as cm:
        uv_object.set_uvws_from_antenna_positions()
    assert str(cm.value).startswith("UVW calculation requires unphased data.")
    # "xyz" is not a valid orig_phase_frame
    with pytest.raises(ValueError) as cm:
        uvtest.checkWarnings(
            uv_object.set_uvws_from_antenna_positions,
            [True, "xyz"],
            message="Data will be unphased",
        )
    assert str(cm.value).startswith("Invalid parameter orig_phase_frame.")
    # "xyz" is not a valid output_phase_frame either
    with pytest.raises(ValueError) as cm:
        uvtest.checkWarnings(
            uv_object.set_uvws_from_antenna_positions,
            [True, "gcrs", "xyz"],
            message="Data will be unphased",
        )
    assert str(cm.value).startswith("Invalid parameter output_phase_frame.")
    # valid call: unphase, recompute uvws, rephase through gcrs
    uvtest.checkWarnings(
        uv_object.set_uvws_from_antenna_positions,
        [True, "gcrs", "gcrs"],
        message="Data will be unphased",
    )
    # allow up to 2 m of difference after the round trip
    max_diff = np.amax(np.absolute(np.subtract(orig_uvw_array, uv_object.uvw_array)))
    assert np.isclose(max_diff, 0.0, atol=2)
def test_get_antenna_redundancies():
    """Compare antenna-position redundancies from UVData and uvutils.

    Checks that get_redundancies(use_antpos=True, conjugate_bls=True)
    rewrites the baseline array so every group baseline appears in the
    data, and that the groups/centers/lengths agree with
    uvutils.get_antenna_redundancies computed from ENU antenna positions.
    """
    uv0 = UVData()
    uv0.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    old_bl_array = np.copy(uv0.baseline_array)
    red_gps, centers, lengths = uv0.get_redundancies(
        use_antpos=True, include_autos=False, conjugate_bls=True
    )
    # new and old baseline Numbers are not the same (different conjugation)
    assert not np.allclose(uv0.baseline_array, old_bl_array)
    # assert all baselines are in the data (because it's conjugated to match)
    for i, gp in enumerate(red_gps):
        for bl in gp:
            assert bl in uv0.baseline_array
    # conjugate data differently
    uv0.conjugate_bls(convention="ant1<ant2")
    new_red_gps, new_centers, new_lengths, conjs = uv0.get_redundancies(
        use_antpos=True, include_autos=False, include_conjugates=True
    )
    # no conjugation indices are returned in this case
    assert conjs is None
    apos, anums = uv0.get_ENU_antpos()
    new_red_gps, new_centers, new_lengths = uvutils.get_antenna_redundancies(
        anums, apos, include_autos=False
    )
    # all redundancy info is the same
    assert red_gps == new_red_gps
    assert np.allclose(centers, new_centers)
    assert np.allclose(lengths, new_lengths)
@pytest.mark.parametrize("method", ("select", "average"))
@pytest.mark.parametrize("reconjugate", (True, False))
@pytest.mark.parametrize("flagging_level", ("none", "some", "all"))
def test_redundancy_contract_expand(method, reconjugate, flagging_level):
    """Round-trip compress_by_redundancy / inflate_by_redundancy.

    Identical data are assigned to every member of each redundant group so
    compression is lossless; the test then checks nsample/flag handling
    for both compression methods at the three flagging levels, verifies
    in-place vs. not-in-place compression agree, that
    inflate -> compress -> inflate is stable, and that inflation restores
    the original object (after resetting parameters that legitimately
    differ, such as nsample_array for the "average" method).
    """
    # Test that a UVData object can be reduced to one baseline from each redundant group
    # and restored to its original form.
    uv0 = UVData()
    uv0.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    # Fails at lower precision because some baselines fall into multiple
    # redundant groups
    tol = 0.02
    # Assign identical data to each redundant group:
    red_gps, centers, lengths = uv0.get_redundancies(
        tol=tol, use_antpos=True, conjugate_bls=True
    )
    for gp_ind, gp in enumerate(red_gps):
        for bl in gp:
            inds = np.where(bl == uv0.baseline_array)
            uv0.data_array[inds] *= 0
            uv0.data_array[inds] += complex(gp_ind)
    index_bls = [gp[0] for gp in red_gps]
    if flagging_level == "none":
        assert np.all(~uv0.flag_array)
    elif flagging_level == "some":
        # flag all the index baselines in a redundant group
        for bl in index_bls:
            bl_locs = np.where(uv0.baseline_array == bl)
            uv0.flag_array[bl_locs, :, :, :] = True
    elif flagging_level == "all":
        uv0.flag_array[:] = True
        uv0.check()
        assert np.all(uv0.flag_array)
    if reconjugate:
        uv0.conjugate_bls()
    uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)
    if method == "average":
        gp_bl_use = []
        nbls_group = []
        for gp in red_gps:
            bls_init = [bl for bl in gp if bl in uv0.baseline_array]
            nbls_group.append(len(bls_init))
            bl_use = [bl for bl in gp if bl in uv2.baseline_array]
            if len(bl_use) == 0:
                # not all possible baselines were present in uv0
                gp_bl_use.append(None)
            else:
                assert len(bl_use) == 1
                gp_bl_use.append(bl_use[0])
        for gp_ind, bl in enumerate(gp_bl_use):
            if bl is None:
                continue
            # nsamples counts the baselines that went into the average;
            # with "some" flagging the flagged index baseline is excluded
            if flagging_level == "none" or flagging_level == "all":
                assert np.all(uv2.get_nsamples(bl) == nbls_group[gp_ind])
            else:
                assert np.all(uv2.get_nsamples(bl) == max((nbls_group[gp_ind] - 1), 1))
        if flagging_level == "all":
            assert np.all(uv2.flag_array)
        else:
            for gp_ind, bl in enumerate(gp_bl_use):
                if nbls_group[gp_ind] > 1:
                    assert np.all(~uv2.get_flags(bl))
    else:
        assert np.all(uv2.nsample_array == 1)
        if flagging_level == "some" or flagging_level == "all":
            assert np.all(uv2.flag_array)
        else:
            assert np.all(~uv2.flag_array)
    # Compare in-place to separated compression.
    uv3 = uv0.copy()
    uv3.compress_by_redundancy(method=method, tol=tol)
    assert uv2 == uv3
    # check inflating gets back to the original
    with pytest.warns(
        UserWarning, match="Missing some redundant groups. Filling in available data."
    ):
        uv2.inflate_by_redundancy(tol=tol)
    # Confirm that we get the same result looping inflate -> compress -> inflate.
    uv3 = uv2.compress_by_redundancy(method=method, tol=tol, inplace=False)
    with pytest.warns(
        UserWarning, match="Missing some redundant groups. Filling in available data."
    ):
        uv3.inflate_by_redundancy(tol=tol)
    if method == "average":
        # with average, the nsample_array goes up by the number of baselines
        # averaged together.
        assert not np.allclose(uv3.nsample_array, uv2.nsample_array)
        # reset it to test other parameters
        uv3.nsample_array = uv2.nsample_array
    uv3.history = uv2.history
    assert uv2 == uv3
    uv2.history = uv0.history
    # Inflation changes the baseline ordering into the order of the redundant groups.
    # reorder bls for comparison
    uv0.reorder_blts(conj_convention="u>0")
    uv2.reorder_blts(conj_convention="u>0")
    uv2._uvw_array.tols = [0, tol]
    if method == "average":
        # with average, the nsample_array goes up by the number of baselines
        # averaged together.
        assert not np.allclose(uv2.nsample_array, uv0.nsample_array)
        # reset it to test other parameters
        uv2.nsample_array = uv0.nsample_array
    if flagging_level == "some":
        if method == "select":
            # inflated array will be entirely flagged
            assert np.all(uv2.flag_array)
            assert not np.allclose(uv0.flag_array, uv2.flag_array)
            uv2.flag_array = uv0.flag_array
        else:
            # flag arrays will not match -- inflated array will mostly be unflagged
            # it will only be flagged if only one in group
            assert not np.allclose(uv0.flag_array, uv2.flag_array)
            uv2.flag_array = uv0.flag_array
    assert uv2 == uv0
@pytest.mark.parametrize("method", ("select", "average"))
@pytest.mark.parametrize("flagging_level", ("none", "some", "all"))
def test_redundancy_contract_expand_variable_data(method, flagging_level):
    """Compress/inflate by redundancy when group members differ in data.

    Only the index baseline of each group carries data in the object to
    compress (zeros elsewhere), while the comparison object has data on
    every member. "select" should then reproduce the index-baseline
    values exactly, while "average" dilutes them by the group size
    (checked via get_data(bl) == uv1 data / len(group)); flags determine
    which samples contribute.
    """
    # Test that a UVData object can be reduced to one baseline from each redundant group
    # and restored to its original form.
    uv0 = UVData()
    uv0.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    # Fails at lower precision because some baselines fall into multiple
    # redundant groups
    tol = 0.02
    # Assign identical data to each redundant group in comparison object
    # Assign data to the index baseline and zeros elsewhere in the one to compress
    red_gps, centers, lengths = uv0.get_redundancies(
        tol=tol, use_antpos=True, conjugate_bls=True
    )
    index_bls = [gp[0] for gp in red_gps]
    uv0.data_array *= 0
    uv1 = uv0.copy()
    for gp_ind, gp in enumerate(red_gps):
        for bl in gp:
            inds = np.where(bl == uv0.baseline_array)
            uv1.data_array[inds] += complex(gp_ind)
            if bl in index_bls:
                uv0.data_array[inds] += complex(gp_ind)
    if flagging_level == "none":
        assert np.all(~uv0.flag_array)
    elif flagging_level == "some":
        # flag all the non index baselines in a redundant group
        uv0.flag_array[:, :, :, :] = True
        for bl in index_bls:
            bl_locs = np.where(uv0.baseline_array == bl)
            uv0.flag_array[bl_locs, :, :, :] = False
    elif flagging_level == "all":
        uv0.flag_array[:] = True
        uv0.check()
        assert np.all(uv0.flag_array)
    uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)
    # inflate to get back to the original size
    with pytest.warns(
        UserWarning, match="Missing some redundant groups. Filling in available data."
    ):
        uv2.inflate_by_redundancy(tol=tol)
    uv2.history = uv1.history
    # Inflation changes the baseline ordering into the order of the redundant groups.
    # reorder bls for comparison
    uv1.reorder_blts(conj_convention="u>0")
    uv2.reorder_blts(conj_convention="u>0")
    uv2._uvw_array.tols = [0, tol]
    if method == "select":
        if flagging_level == "all":
            assert uv2._flag_array != uv1._flag_array
            uv2.flag_array = uv1.flag_array
        assert uv2 == uv1
    else:
        if flagging_level == "some":
            for gp in red_gps:
                bls_init = [bl for bl in gp if bl in uv1.baseline_array]
                for bl in bls_init:
                    assert np.all(uv2.get_data(bl) == uv1.get_data(bl))
                    assert np.all(uv2.get_nsamples(bl) == uv1.get_nsamples(bl))
        else:
            assert uv2.data_array.min() < uv1.data_array.min()
            assert np.all(uv2.data_array <= uv1.data_array)
            for gp in red_gps:
                bls_init = [bl for bl in gp if bl in uv1.baseline_array]
                for bl in bls_init:
                    # averaging the single data-carrying baseline over the
                    # group divides the value by the group size
                    assert np.all(
                        uv2.get_data(bl) == (uv1.get_data(bl) / len(bls_init))
                    )
                    assert np.all(uv2.get_nsamples(bl) == len(bls_init))
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("method", ("select", "average"))
def test_redundancy_contract_expand_nblts_not_nbls_times_ntimes(method, casa_uvfits):
    """Test compress/inflate by redundancy when Nblts != Nbls * Ntimes.

    Uses the casa_uvfits fixture (irregular baseline-time grid), assigns
    identical data to each redundant group, compresses, re-inflates, and
    checks the result matches the original on the baseline-times that
    survive compression.
    """
    uv0 = casa_uvfits
    # check that Nblts != Nbls * Ntimes
    assert uv0.Nblts != uv0.Nbls * uv0.Ntimes
    tol = 1.0
    # Assign identical data to each redundant group:
    red_gps, centers, lengths = uv0.get_redundancies(
        tol=tol, use_antpos=True, conjugate_bls=True
    )
    for i, gp in enumerate(red_gps):
        for bl in gp:
            inds = np.where(bl == uv0.baseline_array)
            uv0.data_array[inds, ...] *= 0
            uv0.data_array[inds, ...] += complex(i)
    if method == "average":
        with pytest.warns(
            UserWarning,
            match="Index baseline in the redundant group does not have all the "
            "times, compressed object will be missing those times.",
        ):
            uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)
    else:
        uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)
    # check inflating gets back to the original
    # Bug fix: this previously passed ``func_args={tol: tol}`` -- a dict where
    # checkWarnings expects a positional-argument list -- which only worked
    # because unpacking the dict yields its single key, whose value equals tol.
    # Pass the tolerance explicitly as a keyword argument instead.
    uvtest.checkWarnings(
        uv2.inflate_by_redundancy,
        func_kwargs={"tol": tol},
        nwarnings=3,
        message=[
            "Missing some redundant groups. Filling in available data.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
            "The uvw_array does not match the expected values given the antenna "
            "positions.",
        ],
    )
    uv2.history = uv0.history
    # Inflation changes the baseline ordering into the order of the redundant groups.
    # reorder bls for comparison
    uv0.reorder_blts()
    uv2.reorder_blts()
    uv2._uvw_array.tols = [0, tol]
    # match up baseline-times present in both objects; keep the indices of
    # those dropped by compression as plain ints so list.remove below works
    # even if a (bl, t) pair maps to multiple blt indices
    blt_inds = []
    missing_inds = []
    for bl, t in zip(uv0.baseline_array, uv0.time_array):
        if (bl, t) in zip(uv2.baseline_array, uv2.time_array):
            this_ind = np.where((uv2.baseline_array == bl) & (uv2.time_array == t))[0]
            blt_inds.append(this_ind[0])
        else:
            # this is missing because of the compress_by_redundancy step
            missing_inds.extend(
                np.where((uv0.baseline_array == bl) & (uv0.time_array == t))[
                    0
                ].tolist()
            )
    uv3 = uv2.select(blt_inds=blt_inds, inplace=False)
    orig_inds_keep = list(np.arange(uv0.Nblts))
    for ind in missing_inds:
        orig_inds_keep.remove(ind)
    uv1 = uv0.select(blt_inds=orig_inds_keep, inplace=False)
    if method == "average":
        # the nsample array in the original object varies, so they
        # don't come out the same
        assert not np.allclose(uv3.nsample_array, uv1.nsample_array)
        uv3.nsample_array = uv1.nsample_array
    assert uv3 == uv1
def test_compress_redundancy_variable_inttime():
    """Check averaging warns when integration times differ within a group.

    Halves the integration time of every non-index baseline in uv0 and
    verifies that compress_by_redundancy(method="average") warns once per
    (multi-baseline group, time) and still produces the same result as
    compressing the uniform-integration-time copy uv1.
    """
    uv0 = UVData()
    uv0.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    tol = 0.05
    ntimes_in = uv0.Ntimes
    # Assign identical data to each redundant group:
    red_gps, centers, lengths = uv0.get_redundancies(
        tol=tol, use_antpos=True, conjugate_bls=True
    )
    index_bls = [gp[0] for gp in red_gps]
    uv0.data_array *= 0
    # set different int time for index baseline in object to compress
    uv1 = uv0.copy()
    ave_int_time = np.average(uv0.integration_time)
    # count how many baselines of each group are actually in the data
    nbls_group = np.zeros(len(red_gps))
    for gp_ind, gp in enumerate(red_gps):
        for bl in gp:
            inds = np.where(bl == uv0.baseline_array)
            if inds[0].size > 0:
                nbls_group[gp_ind] += 1
                uv1.data_array[inds] += complex(gp_ind)
                uv0.data_array[inds] += complex(gp_ind)
                if bl not in index_bls:
                    uv0.integration_time[inds] = ave_int_time / 2
    assert uv0._integration_time != uv1._integration_time
    with pytest.warns(
        UserWarning,
        match="Integrations times are not identical in a redundant "
        "group. Averaging anyway but this may cause unexpected "
        "behavior.",
    ) as warn_record:
        uv0.compress_by_redundancy(method="average", tol=tol)
    # one warning per multi-baseline group per time
    assert len(warn_record) == np.sum(nbls_group > 1) * ntimes_in
    uv1.compress_by_redundancy(method="average", tol=tol)
    assert uv0 == uv1
@pytest.mark.parametrize("method", ("select", "average"))
def test_compress_redundancy_metadata_only(method):
    """Compressing a metadata-only copy must match the compressed full
    object once its data-like arrays are stripped."""
    uvd = UVData()
    uvd.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    tol = 0.05
    # give every member of a redundant group identical data
    red_gps, centers, lengths = uvd.get_redundancies(
        tol=tol, use_antpos=True, conjugate_bls=True
    )
    for grp_ind, grp in enumerate(red_gps):
        for baseline in grp:
            locs = np.nonzero(baseline == uvd.baseline_array)
            uvd.data_array[locs] = complex(grp_ind)
    meta_only = uvd.copy(metadata_only=True)
    meta_only.compress_by_redundancy(method=method, tol=tol, inplace=True)
    uvd.compress_by_redundancy(method=method, tol=tol)
    # strip the data-like attributes so the two objects are comparable
    uvd.data_array = None
    uvd.flag_array = None
    uvd.nsample_array = None
    assert uvd == meta_only
def test_compress_redundancy_wrong_method():
    """An unrecognized compression method must raise ValueError."""
    uvd = UVData()
    uvd.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    with pytest.raises(ValueError, match="method must be one of"):
        uvd.compress_by_redundancy(method="foo", tol=0.05, inplace=True)
@pytest.mark.parametrize("method", ("select", "average"))
def test_redundancy_missing_groups(method, tmp_path):
    """Inflate a compressed object that is missing redundant groups.

    After keeping only the first num_select baseline groups and writing/
    reading through uvfits, inflation should warn and fill only what is
    available; recompressing recovers exactly num_select groups.
    """
    # Check that if I try to inflate a compressed UVData that is missing
    # redundant groups, it will raise the right warnings and fill only what
    # data are available.
    uv0 = UVData()
    uv0.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    tol = 0.02
    num_select = 19
    uv0.compress_by_redundancy(method=method, tol=tol)
    fname = str(tmp_path / "temp_hera19_missingreds.uvfits")
    bls = np.unique(uv0.baseline_array)[:num_select]  # First num_select baseline groups
    uv0.select(bls=[uv0.baseline_to_antnums(bl) for bl in bls])
    uv0.write_uvfits(fname)
    uv1 = UVData()
    uv1.read_uvfits(fname)
    assert uv0 == uv1  # Check that writing compressed files causes no issues.
    with pytest.warns(
        UserWarning, match="Missing some redundant groups. Filling in available data."
    ):
        uv1.inflate_by_redundancy(tol=tol)
    uv2 = uv1.compress_by_redundancy(method=method, tol=tol, inplace=False)
    assert np.unique(uv2.baseline_array).size == num_select
def test_quick_redundant_vs_redundant_test_array():
    """Verify the quick redundancy calc returns the same groups as a known array."""
    uv = UVData()
    uv.read_uvfits(
        os.path.join(DATA_PATH, "fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits")
    )
    uv.select(times=uv.time_array[0])
    uv.unphase_to_drift()
    uv.conjugate_bls(convention="u>0", use_enu=True)
    tol = 0.05
    # a quick and dirty redundancy calculation
    unique_bls, baseline_inds = np.unique(uv.baseline_array, return_index=True)
    uvw_vectors = np.take(uv.uvw_array, baseline_inds, axis=0)
    # pairwise uvw separations between all unique baselines
    uvw_diffs = np.expand_dims(uvw_vectors, axis=0) - np.expand_dims(
        uvw_vectors, axis=1
    )
    uvw_diffs = np.linalg.norm(uvw_diffs, axis=2)
    # each row holds the baseline numbers within tol of one another; 0 = not redundant
    reds = np.where(uvw_diffs < tol, unique_bls, 0)
    reds = np.ma.masked_where(reds == 0, reds)
    groups = []
    for bl in reds:
        grp = []
        grp.extend(bl.compressed())
        for other_bls in reds:
            # NOTE(review): this tests the FULL compressed array, not the
            # current row ``bl`` -- possibly intended to be
            # ``set(bl.compressed())``; confirm before changing
            if set(reds.compressed()).issubset(other_bls.compressed()):
                grp.extend(other_bls.compressed())
        grp = np.unique(grp).tolist()
        groups.append(grp)
    # pad groups to a common length so np.unique can deduplicate rows
    pad = len(max(groups, key=len))
    groups = np.array([i + [-1] * (pad - len(i)) for i in groups])
    groups = np.unique(groups, axis=0)
    groups = [[bl for bl in grp if bl != -1] for grp in groups]
    groups.sort(key=len)
    redundant_groups, centers, lengths, conj_inds = uv.get_redundancies(
        tol=tol, include_conjugates=True
    )
    redundant_groups.sort(key=len)
    assert groups == redundant_groups
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_redundancy_finder_when_nblts_not_nbls_times_ntimes(casa_uvfits):
    """Test the redundancy finder functions when Nblts != Nbls * Ntimes."""
    tol = 1  # meter
    uv = casa_uvfits
    uv.conjugate_bls(convention="u>0", use_enu=True)
    # check that Nblts != Nbls * Ntimes
    assert uv.Nblts != uv.Nbls * uv.Ntimes
    # a quick and dirty redundancy calculation
    unique_bls, baseline_inds = np.unique(uv.baseline_array, return_index=True)
    uvw_vectors = np.take(uv.uvw_array, baseline_inds, axis=0)
    # pairwise uvw separations between all unique baselines
    uvw_diffs = np.expand_dims(uvw_vectors, axis=0) - np.expand_dims(
        uvw_vectors, axis=1
    )
    uvw_diffs = np.linalg.norm(uvw_diffs, axis=2)
    # each row holds the baseline numbers within tol of one another; 0 = not redundant
    reds = np.where(uvw_diffs < tol, unique_bls, 0)
    reds = np.ma.masked_where(reds == 0, reds)
    groups = []
    for bl in reds:
        grp = []
        grp.extend(bl.compressed())
        for other_bls in reds:
            # NOTE(review): this tests the FULL compressed array, not the
            # current row ``bl`` -- possibly intended to be
            # ``set(bl.compressed())``; confirm before changing
            if set(reds.compressed()).issubset(other_bls.compressed()):
                grp.extend(other_bls.compressed())
        grp = np.unique(grp).tolist()
        groups.append(grp)
    # pad groups to a common length so np.unique can deduplicate rows
    pad = len(max(groups, key=len))
    groups = np.array([i + [-1] * (pad - len(i)) for i in groups])
    groups = np.unique(groups, axis=0)
    groups = [[bl for bl in grp if bl != -1] for grp in groups]
    groups.sort(key=len)
    redundant_groups, centers, lengths, conj_inds = uv.get_redundancies(
        tol=tol, include_conjugates=True
    )
    redundant_groups.sort(key=len)
    assert groups == redundant_groups
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_overlapping_data_add(casa_uvfits, tmp_path):
    """Test recombining objects split along blt and polarization axes.

    Splits the data into four pieces, recombines them in-place and
    not-in-place, checks that adding with the to-be-overwritten object
    second raises, and verifies a multi-file read reproduces the combined
    object.
    """
    # read in test data
    uv = casa_uvfits
    # slice into four objects
    blts1 = np.arange(500)
    blts2 = np.arange(500, 1360)
    uv1 = uv.select(polarizations=[-1, -2], blt_inds=blts1, inplace=False)
    uv2 = uv.select(polarizations=[-3, -4], blt_inds=blts1, inplace=False)
    uv3 = uv.select(polarizations=[-1, -2], blt_inds=blts2, inplace=False)
    uv4 = uv.select(polarizations=[-3, -4], blt_inds=blts2, inplace=False)
    # combine and check for equality
    uvfull = uv1 + uv2
    uvfull += uv3
    uvfull += uv4
    extra_history = (
        "Downselected to specific baseline-times, polarizations using pyuvdata. "
        "Combined data along polarization axis using pyuvdata. Combined data along "
        "baseline-time axis using pyuvdata. Overwrote invalid data using pyuvdata."
    )
    assert uvutils._check_histories(uvfull.history, uv.history + extra_history)
    uvfull.history = uv.history  # make histories match
    assert uv == uvfull
    # check combination not-in-place
    uvfull = uv1 + uv2
    uvfull += uv3
    uvfull = uvfull + uv4
    uvfull.history = uv.history  # make histories match
    assert uv == uvfull
    # test raising error for adding objects incorrectly (i.e., having the object
    # with data to be overwritten come second)
    uvfull = uv1 + uv2
    uvfull += uv3
    pytest.raises(ValueError, uv4.__iadd__, uvfull)
    # Bug fix: this previously passed uv4 twice (uv4.__add__(uv4, uvfull)), so
    # uvfull landed on __add__'s next parameter and the intended
    # "overwritten object second" case was never exercised; mirror the
    # __iadd__ call above instead.
    pytest.raises(ValueError, uv4.__add__, uvfull)
    # write individual objects out, and make sure that we can read in the list
    uv1_out = str(tmp_path / "uv1.uvfits")
    uv1.write_uvfits(uv1_out)
    uv2_out = str(tmp_path / "uv2.uvfits")
    uv2.write_uvfits(uv2_out)
    uv3_out = str(tmp_path / "uv3.uvfits")
    uv3.write_uvfits(uv3_out)
    uv4_out = str(tmp_path / "uv4.uvfits")
    uv4.write_uvfits(uv4_out)
    uvfull = UVData()
    # pass the file list as an ndarray to check array input to read
    uvfull.read(np.array([uv1_out, uv2_out, uv3_out, uv4_out]))
    assert uvutils._check_histories(uvfull.history, uv.history + extra_history)
    uvfull.history = uv.history  # make histories match
    assert uvfull == uv
    # clean up after ourselves
    os.remove(uv1_out)
    os.remove(uv2_out)
    os.remove(uv3_out)
    os.remove(uv4_out)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_lsts_from_time_with_only_unique(paper_uvh5):
    """
    Check set_lsts_from_time_array (which may use only unique time values)
    against LSTs computed directly for every element of the time array.
    """
    uvd = paper_uvh5
    latitude, longitude, altitude = uvd.telescope_location_lat_lon_alt_degrees
    # reference LSTs computed for the full time array
    expected_lsts = uvutils.get_lst_for_time(
        uvd.time_array, latitude, longitude, altitude
    )
    uvd.set_lsts_from_time_array()
    assert np.array_equal(expected_lsts, uvd.lst_array)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_lsts_from_time_with_only_unique_background(paper_uvh5):
    """
    Check the background-process path of set_lsts_from_time_array against
    LSTs computed directly for every element of the time array.
    """
    uvd = paper_uvh5
    latitude, longitude, altitude = uvd.telescope_location_lat_lon_alt_degrees
    # reference LSTs computed for the full time array
    expected_lsts = uvutils.get_lst_for_time(
        uvd.time_array, latitude, longitude, altitude
    )
    # run in a background process and wait for it to finish
    proc = uvd.set_lsts_from_time_array(background=True)
    proc.join()
    assert np.array_equal(expected_lsts, uvd.lst_array)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_copy(casa_uvfits):
    """Verify copy() and copy(metadata_only=True) produce equal objects."""
    uvd = casa_uvfits
    # a full copy equals its source
    assert uvd.copy() == uvd
    metadata_copy = uvd.copy(metadata_only=True)
    assert metadata_copy.metadata_only
    # stripping the data-like attributes makes the source equal to the
    # metadata-only copy
    for param_name in uvd._data_params:
        setattr(uvd, param_name, None)
    assert metadata_copy == uvd
    # copying the now metadata-only source still round-trips
    assert uvd.copy() == uvd
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time(hera_uvh5):
    """Test the upsample_in_time method"""
    uvd = hera_uvh5
    uvd.phase_to_time(Time(uvd.time_array[0], format="jd"))
    # baseline ordering makes the later get_data lookups predictable
    uvd.reorder_blts(order="baseline")
    # stash pre-upsampling values
    original_size = uvd.data_array.size
    original_wf = uvd.get_data(0, 1)
    # nothing should be flagged yet
    assert not uvd.flag_array.any()
    original_ns = uvd.get_nsamples(0, 1)
    # target half of the smallest integration time
    target_int_time = np.amin(uvd.integration_time) / 2.0
    uvd.upsample_in_time(target_int_time, blt_order="baseline")
    assert np.allclose(uvd.integration_time, target_int_time)
    # the data arrays should double in size
    assert uvd.data_array.size == 2 * original_size
    # upsampled data should match the original samples
    upsampled_wf = uvd.get_data(0, 1)
    assert np.isclose(original_wf[0, 0, 0], upsampled_wf[0, 0, 0])
    # with no flags, nsamples carry through unchanged
    upsampled_ns = uvd.get_nsamples(0, 1)
    assert np.isclose(original_ns[0, 0, 0], upsampled_ns[0, 0, 0])
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_with_flags(hera_uvh5):
    """Test the upsample_in_time method with flags.

    With one input sample flagged, the data and nsamples are still copied
    through, but every output sample derived from the flagged input must
    itself be flagged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    # flag the first (0, 1) sample, then upsample
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[0], 0, 0, 0] = True
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    # data and nsamples should be changed as normal, but flagged
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0])
    out_flags = uv_object.get_flags(0, 1)
    # both output samples derived from the flagged input carry the flag
    assert np.all(out_flags[:2, 0, 0])
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_noninteger_resampling(hera_uvh5):
    """Test the upsample_in_time method with a non-integer resampling factor"""
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time: 0.75x the minimum, which forces a
    # non-integer upsampling request
    max_integration_time = np.amin(uv_object.integration_time) * 0.75
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    # the upsampling factor is rounded up to 2, so the resulting integration
    # time is half the original, i.e. target * 0.5 / 0.75
    assert np.allclose(uv_object.integration_time, max_integration_time * 0.5 / 0.75)
    # we should double the size of the data arrays
    assert uv_object.data_array.size == 2 * init_data_size
    # output data should be the same as the input
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_errors(hera_uvh5):
"""Test errors and warnings raised by upsample_in_time"""
uv_object = hera_uvh5
uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
# test using a too-small integration time
max_integration_time = 1e-3 * np.amin(uv_object.integration_time)
with pytest.raises(ValueError) as cm:
uv_object.upsample_in_time(max_integration_time)
assert str(cm.value).startswith("Decreasing the integration time by more than")
# catch a warning for doing no work
uv_object2 = uv_object.copy()
max_integration_time = 2 * np.amax(uv_object.integration_time)
uvtest.checkWarnings(
uv_object.upsample_in_time,
[max_integration_time],
message="All values in integration_time array are already shorter",
)
assert uv_object == uv_object2
return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_summing_correlator_mode(hera_uvh5):
    """Test the upsample_in_time method with summing correlator mode"""
    uv = hera_uvh5
    uv.phase_to_time(Time(uv.time_array[0], format="jd"))
    # order by baseline so the (0, 1) waterfalls grabbed before and after
    # upsampling line up row-for-row
    uv.reorder_blts(order="baseline")
    # stash pre-upsampling values for comparison
    size_before = uv.data_array.size
    wf_before = uv.get_data(0, 1)
    # sanity check: nothing is flagged yet
    assert np.nonzero(uv.flag_array)[0].size == 0
    ns_before = uv.get_nsamples(0, 1)
    # halve the integration time; summing mode divides data among new samples
    target_int_time = np.amin(uv.integration_time) / 2.0
    uv.upsample_in_time(
        target_int_time, blt_order="baseline", summing_correlator_mode=True
    )
    assert np.allclose(uv.integration_time, target_int_time)
    # the data arrays should double in size
    assert uv.data_array.size == 2 * size_before
    # in summing mode each output sample holds half the input value
    wf_after = uv.get_data(0, 1)
    assert np.isclose(wf_before[0, 0, 0] / 2, wf_after[0, 0, 0])
    # nsamples are unchanged because nothing was flagged
    ns_after = uv.get_nsamples(0, 1)
    assert np.isclose(ns_before[0, 0, 0], ns_after[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_summing_correlator_mode_with_flags(hera_uvh5):
    """Test the upsample_in_time method with summing correlator mode and flags.

    In summing mode the data is split (halved) between the new samples; a
    flagged input must still propagate its flag to every derived output.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # flag the first (0, 1) sample, then upsample
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[0], 0, 0, 0] = True
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    uv_object.upsample_in_time(
        max_integration_time, blt_order="baseline", summing_correlator_mode=True
    )
    # data and nsamples should be changed as normal, but flagged
    out_wf = uv_object.get_data(0, 1)
    # summing mode: each output sample carries half the input value
    assert np.isclose(init_wf[0, 0, 0] / 2, out_wf[0, 0, 0])
    out_flags = uv_object.get_flags(0, 1)
    # both output samples derived from the flagged input carry the flag
    assert np.all(out_flags[:2, 0, 0])
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_summing_correlator_mode_nonint_resampling(hera_uvh5):
    """Test the upsample_in_time method with summing correlator mode
    and non-integer resampling
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # use a non-integer resampling factor
    # change the target integration time: 0.75x the minimum
    max_integration_time = np.amin(uv_object.integration_time) * 0.75
    uv_object.upsample_in_time(
        max_integration_time, blt_order="baseline", summing_correlator_mode=True
    )
    # the upsampling factor is rounded up to 2, so the resulting integration
    # time is half the original, i.e. target * 0.5 / 0.75
    assert np.allclose(uv_object.integration_time, max_integration_time * 0.5 / 0.75)
    # we should double the size of the data arrays
    assert uv_object.data_array.size == 2 * init_data_size
    # output data should be half the input (summing mode)
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[0, 0, 0] / 2, out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_partial_upsample_in_time(hera_uvh5):
    """Test the upsample_in_time method with non-uniform upsampling.

    Baseline (0, 1) is given half the integration time of the others, so when
    upsampling to the new minimum only the other baselines (e.g. (0, 2)) need
    to change; (0, 1) should pass through untouched.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # change a whole baseline's integration time
    bl_inds = uv_object.antpair2ind(0, 1)
    uv_object.integration_time[bl_inds] = uv_object.integration_time[0] / 2.0
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_wf_01 = uv_object.get_data(0, 1)
    init_wf_02 = uv_object.get_data(0, 2)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns_01 = uv_object.get_nsamples(0, 1)
    init_ns_02 = uv_object.get_nsamples(0, 2)
    # change the target integration time to the (new) minimum
    max_integration_time = np.amin(uv_object.integration_time)
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    assert np.allclose(uv_object.integration_time, max_integration_time)
    # (0, 1) already met the target, so its data is exactly unchanged
    out_wf_01 = uv_object.get_data(0, 1)
    out_wf_02 = uv_object.get_data(0, 2)
    assert np.all(init_wf_01 == out_wf_01)
    # (0, 2) was upsampled by 2: values copied through, waterfall doubled
    assert np.isclose(init_wf_02[0, 0, 0], out_wf_02[0, 0, 0])
    assert init_wf_02.size * 2 == out_wf_02.size
    # this should be true because there are no flags
    out_ns_01 = uv_object.get_nsamples(0, 1)
    out_ns_02 = uv_object.get_nsamples(0, 2)
    assert np.allclose(out_ns_01, init_ns_01)
    assert np.isclose(init_ns_02[0, 0, 0], out_ns_02[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_drift(hera_uvh5):
    """Test the upsample_in_time method on drift mode data.

    No phase_to_time here; allow_drift=True upsamples the unphased data
    directly.
    """
    uv_object = hera_uvh5
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    uv_object.upsample_in_time(
        max_integration_time, blt_order="baseline", allow_drift=True
    )
    assert np.allclose(uv_object.integration_time, max_integration_time)
    # we should double the size of the data arrays
    assert uv_object.data_array.size == 2 * init_data_size
    # output data should be the same
    out_wf = uv_object.get_data(0, 1)
    # we need a "large" tolerance given the "large" data
    new_tol = 1e-2 * np.amax(np.abs(uv_object.data_array))
    assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0], atol=new_tol)
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_in_time_drift_no_phasing(hera_uvh5):
    """Test the upsample_in_time method on drift mode data without phasing.

    The object is NOT phased by the test; allow_drift=False makes the method
    handle the phasing itself, so the output only matches the input to within
    a loose tolerance.
    """
    uv_object = hera_uvh5
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    # upsample with allow_drift=False (the method phases internally)
    uv_object.upsample_in_time(
        max_integration_time, blt_order="baseline", allow_drift=False
    )
    assert np.allclose(uv_object.integration_time, max_integration_time)
    # we should double the size of the data arrays
    assert uv_object.data_array.size == 2 * init_data_size
    # output data should be similar, but somewhat different because of the phasing
    out_wf = uv_object.get_data(0, 1)
    # we need a "large" tolerance given the "large" data
    new_tol = 1e-2 * np.amax(np.abs(uv_object.data_array))
    assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0], atol=new_tol)
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time(hera_uvh5):
    """Test the downsample_in_time method.

    Checks a factor-of-2 downsample via min_int_time, then verifies that the
    n_times_to_avg path produces the same object (modulo history).
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # keep a pristine copy for the n_times_to_avg comparison below
    uv_object2 = uv_object.copy()
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline", minor_order="time"
    )
    # Should have half the size of the data array and all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    assert uv_object.data_array.size * 2 == init_data_size
    # output data should be the average
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", minor_order="time"
    )
    # histories are different when n_times_to_avg is set vs min_int_time
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
    # the averaging must not leak masked arrays into the public attributes
    assert not isinstance(uv_object.data_array, np.ma.MaskedArray)
    assert not isinstance(uv_object.nsample_array, np.ma.MaskedArray)
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_partial_flags(hera_uvh5):
    """Test the downsample_in_time method with partial flagging.

    When one of the two averaged samples is flagged, the output takes the
    unflagged value, nsample is halved, and the output is NOT flagged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # add flags and try again. With one of the 2 inputs flagged, the data should
    # just be the unflagged value and nsample should be half the unflagged one
    # and the output should not be flagged.
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[0], 0, 0, 0] = True
    # copy taken after flagging so the n_times_to_avg run sees the same flags
    uv_object2 = uv_object.copy()
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline", minor_order="time"
    )
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[1, 0, 0], out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that there are still no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", minor_order="time"
    )
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_totally_flagged(hera_uvh5):
    """Test the downsample_in_time method with totally flagged integrations.

    When every input point being averaged is flagged, data and nsample come
    out as if unflagged, but the output sample must be flagged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # flag both inputs of the first averaged pair. When all the input points
    # are flagged, data and nsample should have the same results as no flags
    # but the output should be flagged
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[:2], 0, 0, 0] = True
    # copy is taken after flagging so the n_times_to_avg run sees the same
    # flags (an earlier pre-flagging copy here would be dead/incorrect)
    uv_object2 = uv_object.copy()
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline", minor_order="time"
    )
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that the new sample is flagged
    out_flag = uv_object.get_flags(0, 1)
    assert out_flag[0, 0, 0]
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", minor_order="time"
    )
    # histories differ between the min_int_time and n_times_to_avg paths
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_uneven_samples(hera_uvh5):
    """Test the downsample_in_time method with uneven downsampling.

    A factor-of-3 downsample of 20 integrations leaves a ragged remainder;
    with keep_ragged=False that remainder is dropped, so every output sample
    ends up with exactly the target integration time.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # keep a pristine copy for the n_times_to_avg comparison below
    uv_object2 = uv_object.copy()
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # test with a downsample factor that doesn't go evenly into the
    # number of samples
    min_integration_time = original_int_time * 3.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        keep_ragged=False,
    )
    # make sure integration time is correct
    # in this case (keep_ragged=False), all integration times should be the
    # target one because the ragged remainder was dropped
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    # as usual, the new data should be the average of the input data (3 points now)
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.mean(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=3, blt_order="baseline", minor_order="time", keep_ragged=False
    )
    # histories differ between the min_int_time and n_times_to_avg paths
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_uneven_samples_keep_ragged(hera_uvh5):
    """Test downsample_in_time with uneven downsampling and keep_ragged=True."""
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # keep a pristine copy for the n_times_to_avg comparison below
    uv_object2 = uv_object.copy()
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # test again with a downsample factor that doesn't go evenly into the
    # number of samples
    min_integration_time = original_int_time * 3.0
    # this time keep the ragged remainder (keep_ragged=True)
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        keep_ragged=True,
    )
    # as usual, the new data should be the average of the input data
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.mean(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])
    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=3, blt_order="baseline", minor_order="time", keep_ragged=True
    )
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode(hera_uvh5):
    """Test the downsample_in_time method with summing correlator mode.

    In summing mode downsampled data is the SUM of the inputs rather than the
    average; nsample is still averaged.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        summing_correlator_mode=True,
    )
    # Should have half the size of the data array and all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    assert uv_object.data_array.size * 2 == init_data_size
    # output data should be the sum
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]), out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode_partial_flags(hera_uvh5):
    """Test the downsample_in_time method with summing correlator mode and
    partial flags
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # add flags and try again. With one of the 2 inputs flagged, the data should
    # just be the unflagged value and nsample should be half the unflagged one
    # and the output should not be flagged.
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[0], 0, 0, 0] = True
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        summing_correlator_mode=True,
    )
    # with only one unflagged input, the "sum" is just that value
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(init_wf[1, 0, 0], out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that there are still no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode_totally_flagged(hera_uvh5):
    """Test the downsample_in_time method with summing correlator mode and
    totally flagged integrations.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # flag both inputs and try again. When all the input points are flagged,
    # data and nsample should have the same results as no flags but the output
    # should be flagged
    inds01 = uv_object.antpair2ind(0, 1)
    uv_object.flag_array[inds01[:2], 0, 0, 0] = True
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        summing_correlator_mode=True,
    )
    # summing mode: output is the sum of the (flagged) inputs
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]), out_wf[0, 0, 0])
    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that the new sample is flagged
    out_flag = uv_object.get_flags(0, 1)
    assert out_flag[0, 0, 0]
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode_uneven_samples(hera_uvh5):
    """Test the downsample_in_time method with summing correlator mode and
    uneven samples.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # test again with a downsample factor that doesn't go evenly into the
    # number of samples
    min_integration_time = original_int_time * 3.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        keep_ragged=False,
        summing_correlator_mode=True,
    )
    # Only some baselines have an even number of times, so the output integration time
    # is not uniformly the same. For the test case, we'll have *either* the original
    # integration time or twice that.
    assert np.all(
        np.logical_or(
            np.isclose(uv_object.integration_time, original_int_time),
            np.isclose(uv_object.integration_time, min_integration_time),
        )
    )
    # in summing mode, the new data should be the sum of the input data (3 points now)
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.sum(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])
    # make sure nsamples is correct (nsample is averaged, not summed)
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(np.mean(init_ns[0:3, 0, 0]), out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode_uneven_samples_drop_ragged(
    hera_uvh5,
):
    """Test the downsample_in_time method with summing correlator mode and
    uneven samples, dropping ragged ones.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # test again with keep_ragged=False
    min_integration_time = original_int_time * 3.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        keep_ragged=False,
        summing_correlator_mode=True,
    )
    # make sure integration time is correct
    # in this case, all integration times should be the target one
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    # in summing mode, the new data should be the sum of the input data
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.sum(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])
    # make sure nsamples is correct (nsample is averaged, not summed)
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(np.mean(init_ns[0:3, 0, 0]), out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_partial_downsample_in_time(hera_uvh5):
    """Test the downsample_in_time method without uniform downsampling.

    Baseline (0, 1) is given twice the integration time of the others, so when
    downsampling to the new maximum only the other baselines (e.g. (0, 2))
    need averaging; (0, 1) should pass through untouched.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # change a whole baseline's integration time
    bl_inds = uv_object.antpair2ind(0, 1)
    uv_object.integration_time[bl_inds] = uv_object.integration_time[0] * 2.0
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")
    # save some values for later
    init_wf_01 = uv_object.get_data(0, 1)
    init_wf_02 = uv_object.get_data(0, 2)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns_01 = uv_object.get_nsamples(0, 1)
    init_ns_02 = uv_object.get_nsamples(0, 2)
    # change the target integration time to the (new) maximum
    min_integration_time = np.amax(uv_object.integration_time)
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline"
    )
    # Should have all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    # (0, 1) already met the target, so its data is exactly unchanged;
    # (0, 2) was averaged pairwise
    out_wf_01 = uv_object.get_data(0, 1)
    out_wf_02 = uv_object.get_data(0, 2)
    assert np.all(init_wf_01 == out_wf_01)
    assert np.isclose(
        (init_wf_02[0, 0, 0] + init_wf_02[1, 0, 0]) / 2.0, out_wf_02[0, 0, 0]
    )
    # this should be true because there are no flags
    out_ns_01 = uv_object.get_nsamples(0, 1)
    out_ns_02 = uv_object.get_nsamples(0, 2)
    assert np.allclose(out_ns_01, init_ns_01)
    assert np.isclose(
        (init_ns_02[0, 0, 0] + init_ns_02[1, 0, 0]) / 2.0, out_ns_02[0, 0, 0]
    )
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_drift(hera_uvh5):
    """Test the downsample_in_time method on drift mode data"""
    uv = hera_uvh5

    # order by baseline/time so the indexing below is predictable
    uv.reorder_blts(order="baseline", minor_order="time")
    uv_copy = uv.copy()

    # stash pre-downsampling values
    size_before = uv.data_array.size
    wf_before = uv.get_data(0, 1)
    int_time_before = np.amax(uv.integration_time)

    # nothing should be flagged yet
    assert np.nonzero(uv.flag_array)[0].size == 0
    ns_before = uv.get_nsamples(0, 1)

    # downsample by a factor of 2, staying in drift mode
    target_int_time = int_time_before * 2.0
    uv.downsample_in_time(
        min_int_time=target_int_time, blt_order="baseline", allow_drift=True
    )

    # data array halves and every integration hits the target length
    assert np.all(np.isclose(uv.integration_time, target_int_time))
    assert uv.data_array.size * 2 == size_before

    # output is the pairwise average
    wf_after = uv.get_data(0, 1)
    assert np.isclose(
        (wf_before[0, 0, 0] + wf_before[1, 0, 0]) / 2.0, wf_after[0, 0, 0]
    )

    # with no flags, nsample averages the same way
    ns_after = uv.get_nsamples(0, 1)
    assert np.isclose(
        (ns_before[0, 0, 0] + ns_before[1, 0, 0]) / 2.0, ns_after[0, 0, 0]
    )

    # still nothing flagged
    assert np.nonzero(uv.flag_array)[0].size == 0

    # the n_times_to_avg route must agree apart from history
    uv_copy.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", allow_drift=True
    )
    assert uv.history != uv_copy.history
    uv_copy.history = uv.history
    assert uv == uv_copy
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_drift_no_phasing(hera_uvh5):
    """Test the downsample_in_time method on drift mode data without phasing"""
    uv_object = hera_uvh5
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # change the target integration time
    min_integration_time = original_int_time * 2.0
    # try again with allow_drift=False
    # (presumably the data is phased before averaging and unphased after;
    # confirm against downsample_in_time's docs)
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline", allow_drift=False,
    )
    # Should have half the size of the data array and all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    assert uv_object.data_array.size * 2 == init_data_size
    # output data should be similar to the average, but somewhat different
    # because of the phasing
    out_wf = uv_object.get_data(0, 1)
    # use a loose tolerance scaled to the data amplitude
    new_tol = 5e-2 * np.amax(np.abs(uv_object.data_array))
    assert np.isclose(
        (init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0], atol=new_tol
    )
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # Compare doing it with n_times_to_avg; only the history should differ
    uv_object2.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", minor_order="time"
    )
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_nsample_precision(hera_uvh5):
    """Test the downsample_in_time method with a half-precision nsample_array"""
    uv = hera_uvh5
    uv.phase_to_time(Time(uv.time_array[0], format="jd"))

    # order by baseline/time so the indexing below is predictable
    uv.reorder_blts(order="baseline", minor_order="time")

    # stash pre-downsampling values
    wf_before = uv.get_data(0, 1)
    int_time_before = np.amax(uv.integration_time)

    # nothing should be flagged yet
    assert np.nonzero(uv.flag_array)[0].size == 0
    ns_before = uv.get_nsamples(0, 1)

    target_int_time = int_time_before * 2.0

    # flag one of the two samples that will be averaged together: the
    # output should then be the unflagged value, nsample half the
    # unflagged one, and the result unflagged
    inds01 = uv.antpair2ind(0, 1)
    uv.flag_array[inds01[0], 0, 0, 0] = True
    uv_copy = uv.copy()

    # drop nsample_array to half precision before downsampling
    uv.nsample_array = uv.nsample_array.astype(np.float16)
    uv.downsample_in_time(
        min_int_time=target_int_time, blt_order="baseline", minor_order="time"
    )

    wf_after = uv.get_data(0, 1)
    assert np.isclose(wf_before[1, 0, 0], wf_after[0, 0, 0])

    # nsample is halved and keeps its half-precision dtype
    ns_after = uv.get_nsamples(0, 1)
    assert np.isclose((ns_before[1, 0, 0]) / 2.0, ns_after[0, 0, 0])
    assert uv.nsample_array.dtype.type is np.float16

    # still nothing flagged
    assert np.nonzero(uv.flag_array)[0].size == 0

    # the n_times_to_avg route must agree apart from history
    uv_copy.nsample_array = uv_copy.nsample_array.astype(np.float16)
    uv_copy.downsample_in_time(
        n_times_to_avg=2, blt_order="baseline", minor_order="time"
    )
    assert uv.history != uv_copy.history
    uv_copy.history = uv.history
    assert uv == uv_copy
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_errors(hera_uvh5):
    """Test various errors and warnings are raised"""
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # raise an error if set neither min_int_time and n_times_to_avg
    with pytest.raises(
        ValueError, match="Either min_int_time or n_times_to_avg must be set."
    ):
        uv_object.downsample_in_time()
    # raise an error if set both min_int_time and n_times_to_avg
    with pytest.raises(
        ValueError, match="Only one of min_int_time or n_times_to_avg can be set."
    ):
        uv_object.downsample_in_time(
            min_int_time=2 * np.amin(uv_object.integration_time), n_times_to_avg=2
        )
    # raise an error if only one time
    uv_object2 = uv_object.copy()
    uv_object2.select(times=uv_object2.time_array[0])
    with pytest.raises(
        ValueError, match="Only one time in this object, cannot downsample."
    ):
        uv_object2.downsample_in_time(n_times_to_avg=2)
    # raise an error for a too-large integration time
    max_integration_time = 1e3 * np.amax(uv_object.integration_time)
    with pytest.raises(
        ValueError, match="Increasing the integration time by more than"
    ):
        uv_object.downsample_in_time(min_int_time=max_integration_time)
    # catch a warning for doing no work (target shorter than all existing
    # integration times); the object must be left unchanged
    uv_object2 = uv_object.copy()
    max_integration_time = 0.5 * np.amin(uv_object.integration_time)
    with pytest.warns(
        UserWarning, match="All values in the integration_time array are already longer"
    ):
        uv_object.downsample_in_time(min_int_time=max_integration_time)
    assert uv_object == uv_object2
    del uv_object2
    # raise an error if n_times_to_avg is not an integer
    with pytest.raises(ValueError, match="n_times_to_avg must be an integer."):
        uv_object.downsample_in_time(n_times_to_avg=2.5)
    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # make a gap in the times to check a warning about that
    # (shift the last integration of baseline (0, 1) one step later)
    inds01 = uv_object.antpair2ind(0, 1)
    initial_int_time = uv_object.integration_time[inds01[0]]
    # time array is in jd, integration time is in sec
    uv_object.time_array[inds01[-1]] += initial_int_time / (24 * 3600)
    uv_object.Ntimes += 1
    min_integration_time = 2 * np.amin(uv_object.integration_time)
    # confirm the gap is actually present before downsampling
    times_01 = uv_object.get_times(0, 1)
    assert np.unique(np.diff(times_01)).size > 1
    with pytest.warns(UserWarning, match=("There is a gap in the times of baseline")):
        uv_object.downsample_in_time(min_int_time=min_integration_time)
    # Should have half the size of the data array and all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    assert uv_object.data_array.size * 2 == init_data_size
    # output data should be the average
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_int_time_mismatch_warning(hera_uvh5):
    """Test warning in downsample_in_time about mismatch between integration
    times and the time between integrations.
    """
    uv = hera_uvh5
    uv.phase_to_time(Time(uv.time_array[0], format="jd"))

    # order by baseline/time so the indexing below is predictable
    uv.reorder_blts(order="baseline", minor_order="time")

    # stash pre-downsampling values
    size_before = uv.data_array.size
    wf_before = uv.get_data(0, 1)

    # nothing should be flagged yet
    assert np.nonzero(uv.flag_array)[0].size == 0
    ns_before = uv.get_nsamples(0, 1)

    # shrink the recorded integration times so they disagree with the
    # actual spacing between integrations, then downsample by 2
    uv.integration_time *= 0.5
    target_int_time = 2 * np.amin(uv.integration_time)
    with pytest.warns(
        UserWarning, match="The time difference between integrations is not the same"
    ) as record:
        uv.downsample_in_time(min_int_time=target_int_time)
    # one warning per baseline pair plus the uvw check
    assert len(record) == 11

    # data array halves and every integration hits the target length
    assert np.all(np.isclose(uv.integration_time, target_int_time))
    assert uv.data_array.size * 2 == size_before

    # output is the pairwise average
    wf_after = uv.get_data(0, 1)
    assert np.isclose(
        (wf_before[0, 0, 0] + wf_before[1, 0, 0]) / 2.0, wf_after[0, 0, 0]
    )

    # with no flags, nsample averages the same way
    ns_after = uv.get_nsamples(0, 1)
    assert np.isclose(
        (ns_before[0, 0, 0] + ns_before[1, 0, 0]) / 2.0, ns_after[0, 0, 0]
    )
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_varying_integration_time(hera_uvh5):
    """Test downsample_in_time handling of file with integration time changing
    within a baseline
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)
    # test handling (& warnings) with varying integration time in a baseline
    # First, change both integration time & time array to match:
    # double the last two integrations and shift their times so the
    # integration_time and time_array stay mutually consistent
    inds01 = uv_object.antpair2ind(0, 1)
    initial_int_time = uv_object.integration_time[inds01[0]]
    # time array is in jd, integration time is in sec
    uv_object.time_array[inds01[-2]] += (initial_int_time / 2) / (24 * 3600)
    uv_object.time_array[inds01[-1]] += (3 * initial_int_time / 2) / (24 * 3600)
    uv_object.integration_time[inds01[-2:]] += initial_int_time
    uv_object.Ntimes = np.unique(uv_object.time_array).size
    min_integration_time = 2 * np.amin(uv_object.integration_time)
    # check that there are no warnings about inconsistencies between
    # integration_time & time_array (the only warning is the uvw check)
    with pytest.warns(
        UserWarning, match="The uvw_array does not match the expected values",
    ) as record:
        uv_object.downsample_in_time(min_int_time=min_integration_time)
    assert len(record) == 1
    # Should have all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))
    out_wf = uv_object.get_data(0, 1)
    # the last 2 input samples were already at the target length, so only
    # the first n-2 samples are pairwise averaged
    n_times_in = init_wf.shape[0]
    n_times_out = out_wf.shape[0]
    assert n_times_out == (n_times_in - 2) / 2 + 2
    # output data should be the average for the first set
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # last 2 time samples should be identical to initial ones
    assert np.isclose(init_wf[-1, 0, 0], out_wf[-1, 0, 0])
    assert np.isclose(init_wf[-2, 0, 0], out_wf[-2, 0, 0])
    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    assert np.isclose(init_ns[-1, 0, 0], out_ns[-1, 0, 0])
    # bug fix: was out_ns[2, 0, 0] — the second-to-last output sample must
    # match the second-to-last input, mirroring the data check above
    assert np.isclose(init_ns[-2, 0, 0], out_ns[-2, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_varying_int_time_partial_flags(hera_uvh5):
    """Test downsample_in_time handling of file with integration time changing
    within a baseline and partial flagging.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    # downselect to 14 times and one baseline
    uv_object.select(times=np.unique(uv_object.time_array)[:14])
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    # change last 2 integrations to be twice as long
    # (so 12 normal length, 2 double length)
    # change integration time & time array to match
    inds01 = uv_object.antpair2ind(0, 1)
    initial_int_time = uv_object.integration_time[inds01[0]]
    # time array is in jd, integration time is in sec
    uv_object.time_array[inds01[-2]] += (initial_int_time / 2) / (24 * 3600)
    uv_object.time_array[inds01[-1]] += (3 * initial_int_time / 2) / (24 * 3600)
    uv_object.integration_time[inds01[-2:]] += initial_int_time
    uv_object.Ntimes = np.unique(uv_object.time_array).size
    # add a flag on last time
    uv_object.flag_array[inds01[-1], :, :, :] = True
    # add a flag on third to last time
    uv_object.flag_array[inds01[-3], :, :, :] = True
    uv_object2 = uv_object.copy()
    # two-stage downsample (factor 4 then 8) on uv_object ...
    with pytest.warns(
        UserWarning, match="The uvw_array does not match the expected values",
    ) as record:
        uv_object.downsample_in_time(min_int_time=4 * initial_int_time)
    assert len(record) == 1
    # NOTE(review): pytest.warns(None) is deprecated in pytest>=7 —
    # consider warnings.catch_warnings when updating pytest
    with pytest.warns(None) as record:
        uv_object.downsample_in_time(min_int_time=8 * initial_int_time)
    assert len(record) == 0
    # ... must match a single-stage factor-8 downsample on the copy
    with pytest.warns(
        UserWarning, match="The uvw_array does not match the expected values",
    ) as record:
        uv_object2.downsample_in_time(min_int_time=8 * initial_int_time)
    assert len(record) == 1
    # only the history should differ between the two routes
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_varying_integration_time_warning(hera_uvh5):
    """Test downsample_in_time handling of file with integration time changing
    within a baseline, but without adjusting the time_array so there is a mismatch.
    """
    uv = hera_uvh5
    uv.phase_to_time(Time(uv.time_array[0], format="jd"))

    # order by baseline/time so the indexing below is predictable
    uv.reorder_blts(order="baseline", minor_order="time")

    # stash pre-downsampling values
    wf_before = uv.get_data(0, 1)

    # nothing should be flagged yet
    assert np.nonzero(uv.flag_array)[0].size == 0
    ns_before = uv.get_nsamples(0, 1)

    # lengthen the last two integration times WITHOUT moving the time
    # array, creating the mismatch the method should warn about
    inds01 = uv.antpair2ind(0, 1)
    first_int_time = uv.integration_time[inds01[0]]
    uv.integration_time[inds01[-2:]] += first_int_time
    target_int_time = 2 * np.amin(uv.integration_time)
    with pytest.warns(
        UserWarning, match="The time difference between integrations is different than"
    ):
        uv.downsample_in_time(min_int_time=target_int_time)

    # every integration should now be at the target length
    assert np.all(np.isclose(uv.integration_time, target_int_time))

    # output is the pairwise average
    wf_after = uv.get_data(0, 1)
    assert np.isclose(
        (wf_before[0, 0, 0] + wf_before[1, 0, 0]) / 2.0, wf_after[0, 0, 0]
    )

    # with no flags, nsample averages the same way
    ns_after = uv.get_nsamples(0, 1)
    assert np.isclose(
        (ns_before[0, 0, 0] + ns_before[1, 0, 0]) / 2.0, ns_after[0, 0, 0]
    )
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:Data will be unphased and rephased")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_downsample_in_time(hera_uvh5):
    """Test round trip works"""
    uv_object = hera_uvh5
    # set uvws from antenna positions so they'll agree later.
    # the fact that this is required is a bit concerning, it means that
    # our calculated uvws from the antenna positions do not match what's in the file
    uv_object.set_uvws_from_antenna_positions()
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()
    # upsample by a factor of 2
    max_integration_time = np.amin(uv_object.integration_time) / 2.0
    uv_object.upsample_in_time(max_integration_time, blt_order="baseline")
    assert np.amax(uv_object.integration_time) <= max_integration_time
    new_Nblts = uv_object.Nblts
    # check that calling upsample again with the same max_integration_time
    # gives warning and does nothing
    uvtest.checkWarnings(
        uv_object.upsample_in_time,
        func_args=[max_integration_time],
        func_kwargs={"blt_order": "baseline"},
        message="All values in the integration_time array are " "already longer",
    )
    assert uv_object.Nblts == new_Nblts
    # check that calling upsample again with the almost the same max_integration_time
    # gives warning and does nothing
    # (small_number is just under the integration_time absolute tolerance)
    small_number = 0.9 * uv_object._integration_time.tols[1]
    uvtest.checkWarnings(
        uv_object.upsample_in_time,
        func_args=[max_integration_time - small_number],
        func_kwargs={"blt_order": "baseline"},
        message="All values in the integration_time array are " "already longer",
    )
    assert uv_object.Nblts == new_Nblts
    # downsample back to the original integration time
    uv_object.downsample_in_time(
        min_int_time=np.amin(uv_object2.integration_time), blt_order="baseline"
    )
    # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we
    # do in testing if the iers url is down. See conftest.py for more info.
    if iers.conf.auto_max_age is None:
        uv_object._lst_array.tols = (0, 1e-4)
    # make sure that history is correct
    assert (
        "Upsampled data to 0.939524 second integration time using pyuvdata."
        in uv_object.history
    )
    assert (
        "Downsampled data to 1.879048 second integration time using pyuvdata."
        in uv_object.history
    )
    # overwrite history and check for equality: the round trip is lossless
    uv_object.history = uv_object2.history
    assert uv_object == uv_object2
    # check that calling downsample again with the same min_integration_time
    # gives warning and does nothing
    with pytest.warns(
        UserWarning, match="All values in the integration_time array are already longer"
    ):
        uv_object.downsample_in_time(
            min_int_time=np.amin(uv_object2.integration_time), blt_order="baseline"
        )
    assert uv_object.Nblts == uv_object2.Nblts
    # check that calling upsample again with the almost the same min_integration_time
    # gives warning and does nothing
    with pytest.warns(
        UserWarning, match="All values in the integration_time array are already longer"
    ):
        uv_object.upsample_in_time(
            np.amin(uv_object2.integration_time) + small_number, blt_order="baseline"
        )
    assert uv_object.Nblts == uv_object2.Nblts
    return
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:Data will be unphased and rephased")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_downsample_in_time_odd_resample(hera_uvh5):
    """Test round trip works with odd resampling"""
    uv = hera_uvh5
    # set uvws from antenna positions so they'll agree later.
    # the fact that this is required is a bit concerning, it means that
    # our calculated uvws from the antenna positions do not match what's in the file
    uv.set_uvws_from_antenna_positions()
    uv.phase_to_time(Time(uv.time_array[0], format="jd"))

    # order by baseline/time so the indexing below is predictable
    uv.reorder_blts(order="baseline", minor_order="time")
    uv_copy = uv.copy()

    # use an odd resampling factor (3) this time
    target_int_time = np.amin(uv.integration_time) / 3.0
    uv.upsample_in_time(target_int_time, blt_order="baseline")
    assert np.amax(uv.integration_time) <= target_int_time
    uv.downsample_in_time(np.amin(uv_copy.integration_time), blt_order="baseline")

    # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we
    # do in testing if the iers url is down. See conftest.py for more info.
    if iers.conf.auto_max_age is None:
        uv._lst_array.tols = (0, 1e-4)

    # both resampling steps must be recorded in the history
    assert (
        "Upsampled data to 0.626349 second integration time using pyuvdata."
        in uv.history
    )
    assert (
        "Downsampled data to 1.879048 second integration time using pyuvdata."
        in uv.history
    )

    # apart from history, the round trip must be lossless
    uv.history = uv_copy.history
    assert uv == uv_copy
@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_upsample_downsample_in_time_metadata_only(hera_uvh5):
    """Test round trip works with metadata-only objects"""
    uv = hera_uvh5

    # strip the data-like arrays to get a metadata-only object
    uv.data_array = None
    uv.flag_array = None
    uv.nsample_array = None

    # set uvws from antenna positions so they'll agree later.
    # the fact that this is required is a bit concerning, it means that
    # our calculated uvws from the antenna positions do not match what's in the file
    uv.set_uvws_from_antenna_positions()
    uv.phase_to_time(Time(uv.time_array[0], format="jd"))

    # order by baseline/time so the indexing below is predictable
    uv.reorder_blts(order="baseline", minor_order="time")
    uv_copy = uv.copy()

    # upsample by 2 and then downsample back to the original
    target_int_time = np.amin(uv.integration_time) / 2.0
    uv.upsample_in_time(target_int_time, blt_order="baseline")
    assert np.amax(uv.integration_time) <= target_int_time
    uv.downsample_in_time(np.amin(uv_copy.integration_time), blt_order="baseline")

    # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we
    # do in testing if the iers url is down. See conftest.py for more info.
    if iers.conf.auto_max_age is None:
        uv._lst_array.tols = (0, 1e-4)

    # both resampling steps must be recorded in the history
    assert (
        "Upsampled data to 0.939524 second integration time using pyuvdata."
        in uv.history
    )
    assert (
        "Downsampled data to 1.879048 second integration time using pyuvdata."
        in uv.history
    )

    # apart from history, the round trip must be lossless
    uv.history = uv_copy.history
    assert uv == uv_copy
@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time(bda_test_file):
    """Test the resample_in_time method"""
    # Note this file has slight variations in the delta t between integrations
    # that causes our gap test to issue a warning, but the variations are small
    # We aren't worried about them, so we filter those warnings
    uv = bda_test_file

    # stash the initial data for one baseline at each integration length:
    # 2s, 4s, 8s and 16s respectively
    data_2s_before = uv.get_data((1, 136))
    data_4s_before = uv.get_data((1, 137))
    data_8s_before = uv.get_data((1, 138))
    data_16s_before = uv.get_data((136, 137))

    uv.resample_in_time(8)

    # everything should land exactly on the 8s target
    assert np.all(np.isclose(uv.integration_time, 8))

    data_2s_after = uv.get_data((1, 136))
    data_4s_after = uv.get_data((1, 137))
    data_8s_after = uv.get_data((1, 138))
    data_16s_after = uv.get_data((136, 137))

    # sizes: 2s shrinks 4x, 4s shrinks 2x, 8s is untouched, 16s doubles
    assert data_2s_after.size * 4 == data_2s_before.size
    assert data_4s_after.size * 2 == data_4s_before.size
    assert data_8s_after.size == data_8s_before.size
    assert data_16s_after.size / 2 == data_16s_before.size

    # downsampled values are means; untouched/upsampled values carry over
    assert np.isclose(np.mean(data_2s_before[0:4, 0, 0]), data_2s_after[0, 0, 0])
    assert np.isclose(np.mean(data_4s_before[0:2, 0, 0]), data_4s_after[0, 0, 0])
    assert np.isclose(data_8s_before[0, 0, 0], data_8s_after[0, 0, 0])
    assert np.isclose(data_16s_before[0, 0, 0], data_16s_after[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_downsample_only(bda_test_file):
    """Test resample_in_time with downsampling only"""
    # Note this file has slight variations in the delta t between integrations
    # that causes our gap test to issue a warning, but the variations are small
    # We aren't worried about them, so we filter those warnings
    uv_object = bda_test_file
    # save some initial info
    # 2s integration time
    init_data_1_136 = uv_object.get_data((1, 136))
    # 4s integration time
    init_data_1_137 = uv_object.get_data((1, 137))
    # 8s integration time
    init_data_1_138 = uv_object.get_data((1, 138))
    # 16s integration time
    init_data_136_137 = uv_object.get_data((136, 137))
    # resample again, with only_downsample set
    uv_object.resample_in_time(8, only_downsample=True)
    # Should have all greater than or equal to the target integration time:
    # shorter integrations are averaged up to 8s, the 16s ones are left alone
    assert np.all(
        np.logical_or(
            np.isclose(uv_object.integration_time, 8),
            np.isclose(uv_object.integration_time, 16),
        )
    )
    # 2s integration time
    out_data_1_136 = uv_object.get_data((1, 136))
    # 4s integration time
    out_data_1_137 = uv_object.get_data((1, 137))
    # 8s integration time
    out_data_1_138 = uv_object.get_data((1, 138))
    # 16s integration time
    out_data_136_137 = uv_object.get_data((136, 137))
    # check array sizes make sense (16s baseline is NOT upsampled here)
    assert out_data_1_136.size * 4 == init_data_1_136.size
    assert out_data_1_137.size * 2 == init_data_1_137.size
    assert out_data_1_138.size == init_data_1_138.size
    assert out_data_136_137.size == init_data_136_137.size
    # check some values
    assert np.isclose(np.mean(init_data_1_136[0:4, 0, 0]), out_data_1_136[0, 0, 0])
    assert np.isclose(np.mean(init_data_1_137[0:2, 0, 0]), out_data_1_137[0, 0, 0])
    assert np.isclose(init_data_1_138[0, 0, 0], out_data_1_138[0, 0, 0])
    assert np.isclose(init_data_136_137[0, 0, 0], out_data_136_137[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_only_upsample(bda_test_file):
    """Test resample_in_time with only upsampling"""
    # Note this file has slight variations in the delta t between integrations
    # that causes our gap test to issue a warning, but the variations are small
    # We aren't worried about them, so we filter those warnings
    uv_object = bda_test_file
    # save some initial info
    # 2s integration time
    init_data_1_136 = uv_object.get_data((1, 136))
    # 4s integration time
    init_data_1_137 = uv_object.get_data((1, 137))
    # 8s integration time
    init_data_1_138 = uv_object.get_data((1, 138))
    # 16s integration time
    init_data_136_137 = uv_object.get_data((136, 137))
    # again, with only_upsample set
    uv_object.resample_in_time(8, only_upsample=True)
    # Should have all less than or equal to the target integration time:
    # the 16s integrations are split down to 8s, shorter ones are left alone
    assert np.all(
        np.logical_or(
            np.logical_or(
                np.isclose(uv_object.integration_time, 2.0),
                np.isclose(uv_object.integration_time, 4.0),
            ),
            np.isclose(uv_object.integration_time, 8.0),
        )
    )
    # 2s integration time
    out_data_1_136 = uv_object.get_data((1, 136))
    # 4s integration time
    out_data_1_137 = uv_object.get_data((1, 137))
    # 8s integration time
    out_data_1_138 = uv_object.get_data((1, 138))
    # 16s integration time
    out_data_136_137 = uv_object.get_data((136, 137))
    # check array sizes make sense (only the 16s baseline grows)
    assert out_data_1_136.size == init_data_1_136.size
    assert out_data_1_137.size == init_data_1_137.size
    assert out_data_1_138.size == init_data_1_138.size
    assert out_data_136_137.size / 2 == init_data_136_137.size
    # check some values
    assert np.isclose(init_data_1_136[0, 0, 0], out_data_1_136[0, 0, 0])
    assert np.isclose(init_data_1_137[0, 0, 0], out_data_1_137[0, 0, 0])
    assert np.isclose(init_data_1_138[0, 0, 0], out_data_1_138[0, 0, 0])
    assert np.isclose(init_data_136_137[0, 0, 0], out_data_136_137[0, 0, 0])
    return
@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_partial_flags(bda_test_file):
    """Test resample_in_time with partial flags"""
    # Note this file has slight variations in the delta t between integrations
    # that causes our gap test to issue a warning, but the variations are small
    # We aren't worried about them, so we filter those warnings
    uv = bda_test_file

    # restrict to a single baseline for simplicity
    uv.select(bls=[(1, 136)])
    # flag the first time sample
    uv.flag_array[0, :, :, :] = True
    uv_single_stage = uv.copy()

    # downsampling in two stages (target 4s, then 8s) ...
    uv.resample_in_time(4.0, only_downsample=True)
    uv.resample_in_time(8.0, only_downsample=True)
    # ... must match going straight to 8s, apart from history
    uv_single_stage.resample_in_time(8.0, only_downsample=True)
    assert uv.history != uv_single_stage.history
    uv_single_stage.history = uv.history
    assert uv == uv_single_stage
    return
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_downsample_in_time_mwa():
    """
    Test resample in time works with numerical weirdnesses.

    In particular, when min_int_time is not quite an integer multiple of
    integration_time. This test broke with a prior bug (see issue 773).
    """
    filename = os.path.join(DATA_PATH, "mwa_integration_time.uvh5")
    uv = UVData()
    uv.read(filename)
    uv.phase_to_time(np.mean(uv.time_array))
    uv_object2 = uv.copy()
    # all data within 5 milliseconds of 2 second integrations
    assert np.allclose(uv.integration_time, 2, atol=5e-3)
    min_int_time = 4.0
    uv.resample_in_time(min_int_time, only_downsample=True, keep_ragged=False)
    # every integration must reach the target despite the ~ms jitter
    assert np.all(uv.integration_time > (min_int_time - 5e-3))
    # Now do the human expected thing:
    # averaging pairs of times should give the same factor-of-2 reduction
    init_data = uv_object2.get_data((61, 58))
    uv_object2.downsample_in_time(n_times_to_avg=2, keep_ragged=False)
    assert uv_object2.Ntimes == 5
    out_data = uv_object2.get_data((61, 58))
    assert np.isclose(np.mean(init_data[0:2, 0, 0]), out_data[0, 0, 0])
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_warning():
    """Check that an unreachable target time only warns and leaves data alone."""
    uv = UVData()
    uv.read(os.path.join(DATA_PATH, "mwa_integration_time.uvh5"))
    reference = uv.copy()
    with pytest.warns(
        UserWarning, match="No resampling will be done because target time"
    ):
        uv.resample_in_time(3, keep_ragged=False)
    # the object must be unchanged
    assert reference == uv
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average(uvdata_data):
    """Test averaging in frequency."""
    eq_coeffs = np.tile(
        # use the builtin float: the np.float alias was deprecated in numpy
        # 1.20 and removed in 1.24, and it was always identical to float
        np.arange(uvdata_data.uv_object.Nfreqs, dtype=float),
        (uvdata_data.uv_object.Nants_telescope, 1),
    )
    uvdata_data.uv_object.eq_coeffs = eq_coeffs
    uvdata_data.uv_object.check()
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # averaging over channels with varying eq_coeffs should warn
    uvtest.checkWarnings(
        uvdata_data.uv_object.frequency_average,
        [2],
        message="eq_coeffs vary by frequency",
    )
    # pairwise averaging halves the channel count
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_coeffs = eq_coeffs.reshape(
        uvdata_data.uv_object2.Nants_telescope,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.eq_coeffs - expected_coeffs)) == 0
    # no flagging, so the following is true
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # with nothing flagged, no masked arrays should leak out of the averaging
    assert not isinstance(uvdata_data.uv_object.data_array, np.ma.MaskedArray)
    assert not isinstance(uvdata_data.uv_object.nsample_array, np.ma.MaskedArray)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_uneven(uvdata_data):
    """Test averaging in frequency with a number that is not a factor of Nfreqs."""
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # 7 does not divide Nfreqs evenly, so the trailing channel is dropped
    # with a warning
    with pytest.warns(
        UserWarning,
        match="Nfreqs does not divide by `n_chan_to_avg` evenly. The final 1 "
        "frequencies will be excluded, to control which frequencies to exclude, "
        "use a select to control.",
    ):
        uvdata_data.uv_object.frequency_average(7)
    assert uvdata_data.uv_object2.Nfreqs % 7 != 0
    # floor division: the remainder channel was discarded
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs // 7)
    expected_freqs = uvdata_data.uv_object2.freq_array[
        :, np.arange((uvdata_data.uv_object2.Nfreqs // 7) * 7)
    ]
    expected_freqs = expected_freqs.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs // 7), 7
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    # no flagging, so the following is true
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    # slice off the excluded trailing channel before reshaping for the average
    expected_data = expected_data[
        :, :, 0 : ((uvdata_data.uv_object2.Nfreqs // 7) * 7), :
    ]
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs // 7),
        7,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_flagging(uvdata_data):
    """Test averaging in frequency with flagging all samples averaged."""
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # apply some flagging for testing
    inds01 = uvdata_data.uv_object.antpair2ind(0, 1)
    # flag both input channels that feed output channel 0 on the first
    # (0, 1) baseline-time
    uvdata_data.uv_object.flag_array[inds01[0], :, 0:2, :] = True
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols * 2
    )
    uvdata_data.uv_object.frequency_average(2)
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    # when all input samples are flagged, the averaged output stays flagged
    # (one output channel x Npols flags remain, only on that baseline-time)
    assert np.sum(uvdata_data.uv_object.flag_array[inds01[0], :, 0, :]) == 4
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols
    )
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array[inds01[1:], :, 0, :])[0].size == 0
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_flagging_partial(uvdata_data):
    """Test averaging in frequency with flagging only one sample averaged."""
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # apply some flagging for testing
    inds01 = uvdata_data.uv_object.antpair2ind(0, 1)
    # flag only one of the two channels that feed output channel 0
    uvdata_data.uv_object.flag_array[inds01[0], :, 0, :] = True
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols
    )
    uvdata_data.uv_object.frequency_average(2)
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    # with only channel 0 flagged, output channel 0 equals the unflagged
    # input channel 1 rather than the mean of both
    expected_data[0, :, 0, :] = uvdata_data.uv_object2.data_array[inds01[0], :, 1, :]
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_flagging_full_and_partial(uvdata_data):
    """
    Test averaging in frequency with flagging all of one and only one of
    another sample averaged.
    """
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # apply some flagging for testing
    inds01 = uvdata_data.uv_object.antpair2ind(0, 1)
    # channels 0-1 (fully flagged pair) and channel 2 (half of the next pair)
    uvdata_data.uv_object.flag_array[inds01[0], :, 0:3, :] = True
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols * 3
    )
    uvdata_data.uv_object.frequency_average(2)
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    # output channel 1 had one flagged input, so it equals the unflagged
    # input channel 3
    expected_data[0, :, 1, :] = uvdata_data.uv_object2.data_array[inds01[0], :, 3, :]
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    # only the fully-flagged output channel keeps its flags
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols
    )
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_flagging_partial_twostage(uvdata_data):
    """
    Test averaging in frequency in two stages with flagging only one sample averaged.
    """
    uv_obj = uvdata_data.uv_object
    # the object starts out completely unflagged
    assert not np.any(uv_obj.flag_array)
    # flag a single channel on the first (0, 1) baseline-time
    blt_inds = uv_obj.antpair2ind(0, 1)
    uv_obj.flag_array[blt_inds[0], :, 0, :] = True
    assert np.nonzero(uv_obj.flag_array)[0].size == uv_obj.Npols
    one_stage = uv_obj.copy()
    # average by 2 twice versus by 4 once; the results must match exactly
    uv_obj.frequency_average(2)
    uv_obj.frequency_average(2)
    one_stage.frequency_average(4)
    assert uv_obj == one_stage
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_summing_corr_mode(uvdata_data):
    """Test averaging in frequency."""
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    uvdata_data.uv_object.frequency_average(2, summing_correlator_mode=True)
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    # no flagging, so the following is true
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    # in summing_correlator_mode the channels are summed, not averaged
    expected_data = expected_data.reshape(reshape_tuple).sum(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # no masked arrays should leak out of the averaging
    assert not isinstance(uvdata_data.uv_object.data_array, np.ma.MaskedArray)
    assert not isinstance(uvdata_data.uv_object.nsample_array, np.ma.MaskedArray)
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_propagate_flags(uvdata_data):
    """
    Test averaging in frequency with flagging all of one and only one of
    another sample averaged, and propagating flags. Data should be identical,
    but flags should be slightly different compared to other test of the same
    name.
    """
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # apply some flagging for testing
    inds01 = uvdata_data.uv_object.antpair2ind(0, 1)
    # channels 0-1 (fully flagged pair) and channel 2 (half of the next pair)
    uvdata_data.uv_object.flag_array[inds01[0], :, 0:3, :] = True
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == uvdata_data.uv_object.Npols * 3
    )
    uvdata_data.uv_object.frequency_average(2, propagate_flags=True)
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    # the partially-flagged output channel still equals its unflagged input
    expected_data[0, :, 1, :] = uvdata_data.uv_object2.data_array[inds01[0], :, 3, :]
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    # Twice as many flags should exist compared to test of previous name.
    assert (
        np.nonzero(uvdata_data.uv_object.flag_array)[0].size
        == 2 * uvdata_data.uv_object.Npols
    )
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average_nsample_precision(uvdata_data):
    """Test averaging in frequency with a half-precision nsample_array."""
    eq_coeffs = np.tile(
        # use the builtin float: the np.float alias was deprecated in numpy
        # 1.20 and removed in 1.24, and it was always identical to float
        np.arange(uvdata_data.uv_object.Nfreqs, dtype=float),
        (uvdata_data.uv_object.Nants_telescope, 1),
    )
    uvdata_data.uv_object.eq_coeffs = eq_coeffs
    uvdata_data.uv_object.check()
    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    # change precision of the nsample array
    uvdata_data.uv_object.nsample_array = uvdata_data.uv_object.nsample_array.astype(
        np.float16
    )
    uvtest.checkWarnings(
        uvdata_data.uv_object.frequency_average,
        [2],
        message="eq_coeffs vary by frequency",
    )
    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        uvdata_data.uv_object2.Nspws, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0
    expected_coeffs = eq_coeffs.reshape(
        uvdata_data.uv_object2.Nants_telescope,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.eq_coeffs - expected_coeffs)) == 0
    # no flagging, so the following is true
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    reshape_tuple = (
        expected_data.shape[0],
        uvdata_data.uv_object2.Nspws,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0
    assert not isinstance(uvdata_data.uv_object.data_array, np.ma.MaskedArray)
    assert not isinstance(uvdata_data.uv_object.nsample_array, np.ma.MaskedArray)
    # make sure we still have a half-precision nsample_array
    assert uvdata_data.uv_object.nsample_array.dtype.type is np.float16
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_remove_eq_coeffs_divide(uvdata_data):
    """Test using the remove_eq_coeffs method with divide convention."""
    # give eq_coeffs to the object
    eq_coeffs = np.empty(
        (uvdata_data.uv_object.Nants_telescope, uvdata_data.uv_object.Nfreqs),
        # builtin float: the np.float alias was deprecated in numpy 1.20 and
        # removed in 1.24; it was always identical to float
        dtype=float,
    )
    # per-antenna coefficient is antenna number + 1 so each pair is distinct
    for i, ant in enumerate(uvdata_data.uv_object.antenna_numbers):
        eq_coeffs[i, :] = ant + 1
    uvdata_data.uv_object.eq_coeffs = eq_coeffs
    uvdata_data.uv_object.eq_coeffs_convention = "divide"
    uvdata_data.uv_object.remove_eq_coeffs()
    # make sure the right coefficients were removed
    for key in uvdata_data.uv_object.get_antpairs():
        eq1 = key[0] + 1
        eq2 = key[1] + 1
        blt_inds = uvdata_data.uv_object.antpair2ind(key)
        norm_data = uvdata_data.uv_object.data_array[blt_inds, 0, :, :]
        unnorm_data = uvdata_data.uv_object2.data_array[blt_inds, 0, :, :]
        # divide convention: removal divides out the product of the two
        # antenna coefficients
        assert np.allclose(norm_data, unnorm_data / (eq1 * eq2))
    return
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_remove_eq_coeffs_multiply(uvdata_data):
    """Test using the remove_eq_coeffs method with multiply convention."""
    # give eq_coeffs to the object
    eq_coeffs = np.empty(
        (uvdata_data.uv_object.Nants_telescope, uvdata_data.uv_object.Nfreqs),
        # builtin float: the np.float alias was deprecated in numpy 1.20 and
        # removed in 1.24; it was always identical to float
        dtype=float,
    )
    # per-antenna coefficient is antenna number + 1 so each pair is distinct
    for i, ant in enumerate(uvdata_data.uv_object.antenna_numbers):
        eq_coeffs[i, :] = ant + 1
    uvdata_data.uv_object.eq_coeffs = eq_coeffs
    uvdata_data.uv_object.eq_coeffs_convention = "multiply"
    uvdata_data.uv_object.remove_eq_coeffs()
    # make sure the right coefficients were removed
    for key in uvdata_data.uv_object.get_antpairs():
        eq1 = key[0] + 1
        eq2 = key[1] + 1
        blt_inds = uvdata_data.uv_object.antpair2ind(key)
        norm_data = uvdata_data.uv_object.data_array[blt_inds, 0, :, :]
        unnorm_data = uvdata_data.uv_object2.data_array[blt_inds, 0, :, :]
        # multiply convention: removal multiplies by the product of the two
        # antenna coefficients
        assert np.allclose(norm_data, unnorm_data * (eq1 * eq2))
    return
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_remove_eq_coeffs_errors(uvdata_data):
    """Test errors raised by remove_eq_coeffs method."""
    # raise error when eq_coeffs are not defined
    with pytest.raises(ValueError) as cm:
        uvdata_data.uv_object.remove_eq_coeffs()
    assert str(cm.value).startswith("The eq_coeffs attribute must be defined")
    # raise error when eq_coeffs are defined but not eq_coeffs_convention
    uvdata_data.uv_object.eq_coeffs = np.ones(
        (uvdata_data.uv_object.Nants_telescope, uvdata_data.uv_object.Nfreqs)
    )
    with pytest.raises(ValueError) as cm:
        uvdata_data.uv_object.remove_eq_coeffs()
    assert str(cm.value).startswith(
        "The eq_coeffs_convention attribute must be defined"
    )
    # raise error when convention is not a valid choice
    uvdata_data.uv_object.eq_coeffs_convention = "foo"
    with pytest.raises(ValueError) as cm:
        uvdata_data.uv_object.remove_eq_coeffs()
    assert str(cm.value).startswith("Got unknown convention foo. Must be one of")
    return
@pytest.mark.parametrize(
    "read_func,filelist",
    [
        ("read_miriad", [os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcA")] * 2),
        (
            "read_mwa_corr_fits",
            [[mwa_corr_files[0:2], [mwa_corr_files[0], mwa_corr_files[2]]]],
        ),
        ("read_uvh5", [os.path.join(DATA_PATH, "zen.2458661.23480.HH.uvh5")] * 2),
        (
            "read_uvfits",
            [os.path.join(DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits")] * 2,
        ),
        (
            "read_ms",
            [
                os.path.join(DATA_PATH, "multi_1.ms"),
                os.path.join(DATA_PATH, "multi_2.ms"),
            ],
        ),
        (
            "read_fhd",
            [
                list(np.array(fhd_files)[[0, 1, 2, 4, 6, 7]]),
                list(np.array(fhd_files)[[0, 2, 3, 5, 6, 7]]),
            ],
        ),
    ],
)
def test_multifile_read_errors(read_func, filelist):
    """Passing multiple files to a file-type-specific read method raises."""
    uv = UVData()
    with pytest.raises(ValueError) as cm:
        getattr(uv, read_func)(filelist)
    assert str(cm.value).startswith(
        "Reading multiple files from class specific read functions is no "
        "longer supported."
    )
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_multifile_read_check(hera_uvh5, tmp_path):
    """Test setting skip_bad_files=True when reading in files"""
    uvTrue = hera_uvh5
    uvh5_file = os.path.join(DATA_PATH, "zen.2458661.23480.HH.uvh5")
    # Create a test file and remove header info to 'corrupt' it
    testfile = str(tmp_path / "zen.2458661.23480.HH.uvh5")
    uvTrue.write_uvh5(testfile)
    with h5py.File(testfile, "r+") as h5f:
        del h5f["Header/ant_1_array"]
    uv = UVData()
    # Test that the expected error arises
    with pytest.raises(KeyError) as cm:
        uv.read(testfile, skip_bad_files=False)
    assert "Unable to open object (object 'ant_1_array' doesn't exist)" in str(cm.value)
    # Test when the corrupted file is at the beginning, skip_bad_files=False
    fileList = [testfile, uvh5_file]
    with pytest.raises(KeyError) as cm:
        with pytest.warns(UserWarning, match="Failed to read"):
            uv.read(fileList, skip_bad_files=False)
    assert "Unable to open object (object 'ant_1_array' doesn't exist)" in str(cm.value)
    assert uv != uvTrue
    # Test when the corrupted file is at the beginning, skip_bad_files=True
    fileList = [testfile, uvh5_file]
    with pytest.warns(UserWarning, match="Failed to read") as record:
        uv.read(fileList, skip_bad_files=True)
    # exactly two warnings: the uvw position check and the read failure
    assert len(record) == 2
    assert str(record[1].message).startswith("Failed to read")
    assert str(record[0].message).startswith(
        "The uvw_array does not match the expected values given the antenna positions."
    )
    assert uv == uvTrue
    # Test when the corrupted file is at the end of a list
    fileList = [uvh5_file, testfile]
    with pytest.warns(UserWarning, match="Failed to read") as cm:
        uv.read(fileList, skip_bad_files=True)
    # Check that the uncorrupted file was still read in
    assert uv == uvTrue
    os.remove(testfile)
    return
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("err_type", ["KeyError", "ValueError"])
def test_multifile_read_check_long_list(hera_uvh5, tmp_path, err_type):
    """
    Test KeyError catching by setting skip_bad_files=True when
    reading in files for a list of length >2
    """
    # Create mini files for testing
    uv = hera_uvh5
    fileList = []
    # split the fixture into four small time-slice files
    for i in range(0, 4):
        uv2 = uv.select(
            times=np.unique(uv.time_array)[i * 5 : i * 5 + 4], inplace=False
        )
        fname = str(tmp_path / f"minifile_{i}.uvh5")
        fileList.append(fname)
        uv2.write_uvh5(fname)
    # corrupt the last file: either delete a header dataset (KeyError on
    # read) or make the metadata internally inconsistent (ValueError)
    if err_type == "KeyError":
        with h5py.File(fileList[-1], "r+") as h5f:
            del h5f["Header/ant_1_array"]
    elif err_type == "ValueError":
        with h5py.File(fileList[-1], "r+") as h5f:
            h5f["Header/antenna_numbers"][3] = 85
            h5f["Header/ant_1_array"][2] = 1024
    # Test with corrupted file as last file in list, skip_bad_files=True
    uvTest = UVData()
    uvtest.checkWarnings(
        uvTest.read,
        func_args=[fileList[0:4]],
        func_kwargs={"skip_bad_files": True},
        nwarnings=10,
        message=(
            [
                "The uvw_array does not match the expected values given the "
                "antenna positions."
            ]
            * 9
            + ["Failed to read"]
        ),
    )
    uvTrue = UVData()
    uvTrue.read(fileList[0:3], skip_bad_files=True)
    assert uvTest == uvTrue
    # Repeat above test, but with corrupted file as first file in list
    # first restore a clean copy of the last file
    os.remove(fileList[3])
    uv2 = uv.select(times=np.unique(uv.time_array)[15:19], inplace=False)
    fname = str(tmp_path / f"minifile_{3}.uvh5")
    uv2.write_uvh5(fname)
    if err_type == "KeyError":
        with h5py.File(fileList[0], "r+") as h5f:
            del h5f["Header/ant_1_array"]
    elif err_type == "ValueError":
        with h5py.File(fileList[0], "r+") as h5f:
            h5f["Header/antenna_numbers"][3] = 85
            h5f["Header/ant_1_array"][2] = 1024
    uvTest = UVData()
    with pytest.warns(UserWarning):
        uvTest.read(fileList[0:4], skip_bad_files=True)
    uvTrue = UVData()
    uvTrue.read(fileList[1:4], skip_bad_files=True)
    assert uvTest == uvTrue
    # Repeat above test, but with corrupted file in the middle of the list
    os.remove(fileList[0])
    uv2 = uv.select(times=np.unique(uv.time_array)[0:4], inplace=False)
    fname = str(tmp_path / f"minifile_{0}.uvh5")
    uv2.write_uvh5(fname)
    if err_type == "KeyError":
        with h5py.File(fileList[1], "r+") as h5f:
            del h5f["Header/ant_1_array"]
    elif err_type == "ValueError":
        with h5py.File(fileList[1], "r+") as h5f:
            h5f["Header/antenna_numbers"][3] = 85
            h5f["Header/ant_1_array"][2] = 1024
    uvTest = UVData()
    with pytest.warns(UserWarning):
        uvTest.read(fileList[0:4], skip_bad_files=True)
    uvTrue = UVData()
    uvTrue.read([fileList[0], fileList[2], fileList[3]], skip_bad_files=True)
    assert uvTest == uvTrue
    # Test with corrupted file in middle of list, but with skip_bad_files=False
    uvTest = UVData()
    if err_type == "KeyError":
        with pytest.raises(KeyError, match="Unable to open object"):
            with pytest.warns(UserWarning, match="Failed to read"):
                uvTest.read(fileList[0:4], skip_bad_files=False)
    elif err_type == "ValueError":
        with pytest.raises(ValueError, match="Nants_data must be equal to"):
            with pytest.warns(UserWarning, match="Failed to read"):
                uvTest.read(fileList[0:4], skip_bad_files=False)
    uvTrue = UVData()
    uvTrue.read([fileList[0], fileList[2], fileList[3]], skip_bad_files=False)
    assert uvTest != uvTrue
    os.remove(fileList[0])
    os.remove(fileList[1])
    os.remove(fileList[2])
    os.remove(fileList[3])
    return
def test_deprecation_warnings_set_phased():
    """
    Test the deprecation warnings in set_phased et al.
    """
    uv = UVData()
    # first call set_phased
    with pytest.warns(DeprecationWarning, match="`set_phased` is deprecated"):
        uv.set_phased()
    # phased objects require the phase center parameters
    assert uv.phase_type == "phased"
    assert uv._phase_center_epoch.required is True
    assert uv._phase_center_ra.required is True
    assert uv._phase_center_dec.required is True
    # now call set_drift
    with pytest.warns(DeprecationWarning, match="`set_drift` is deprecated"):
        uv.set_drift()
    # drift objects do not require the phase center parameters
    assert uv.phase_type == "drift"
    assert uv._phase_center_epoch.required is False
    assert uv._phase_center_ra.required is False
    assert uv._phase_center_dec.required is False
    # now call set_unknown_phase_type
    with pytest.warns(
        DeprecationWarning, match="`set_unknown_phase_type` is deprecated"
    ):
        uv.set_unknown_phase_type()
    assert uv.phase_type == "unknown"
    assert uv._phase_center_epoch.required is False
    assert uv._phase_center_ra.required is False
    assert uv._phase_center_dec.required is False
    return
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not in known_telescopes.")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_read_background_lsts():
    """Test reading a file with the lst calc in the background."""
    testfile = os.path.join(DATA_PATH, "day2_TDEM0003_10s_norx_1src_1spw.uvfits")
    # read the same file twice, once with the LST calculation running in a
    # background thread; both reads must produce identical objects
    results = []
    for background in (False, True):
        uv_obj = UVData()
        uv_obj.read(testfile, background_lsts=background)
        results.append(uv_obj)
    assert results[0] == results[1]
|
from django.core.exceptions import ObjectDoesNotExist
from corehq.apps.cleanup.management.commands.populate_sql_model_from_couch_model import PopulateSQLCommand
from corehq.apps.commtrack.models import (
SQLAlertConfig,
SQLActionConfig,
SQLConsumptionConfig,
SQLStockLevelsConfig,
SQLStockRestoreConfig,
AlertConfig,
ConsumptionConfig,
StockLevelsConfig,
StockRestoreConfig,
)
class Command(PopulateSQLCommand):
    """Couch-to-SQL migration for CommtrackConfig documents.

    Syncs top-level scalar attributes, the four one-to-one submodel
    configs (alert / consumption / stock levels / OTA restore), and the
    list of action configs.
    """

    @classmethod
    def couch_doc_type(cls):
        # consistency fix: classmethod receiver renamed self -> cls to
        # match the other classmethods in this class
        return 'CommtrackConfig'

    @classmethod
    def sql_class(cls):
        # imported lazily to avoid a circular import at module load time
        from corehq.apps.commtrack.models import SQLCommtrackConfig
        return SQLCommtrackConfig

    @classmethod
    def commit_adding_migration(cls):
        return None

    @classmethod
    def diff_couch_and_sql(cls, doc, obj):
        """Return a newline-joined description of couch/SQL differences, or None."""
        diffs = []
        for attr in cls.attrs_to_sync():
            diffs.append(cls.diff_attr(attr, doc, obj))
        diffs.extend(cls.diff_lists(doc.get('actions', []), obj.all_actions, [
            'action', 'subaction', '_keyword', 'caption'
        ]))
        for spec in cls.one_to_one_submodels():
            # NOTE(review): stock level values apparently need numeric
            # normalization before comparison -- confirm against the
            # SQLStockLevelsConfig field types
            normalize = float if spec["sql_class"] == SQLStockLevelsConfig else None
            sql_submodel = getattr(obj, spec['sql_class'].__name__.lower())
            couch_submodel = doc.get(spec['couch_attr'], {})
            for attr in spec['fields']:
                diffs.append(cls.diff_attr(attr, couch_submodel, sql_submodel, normalize=normalize))
        diffs = [d for d in diffs if d]
        return "\n".join(diffs) if diffs else None

    @classmethod
    def attrs_to_sync(cls):
        """Top-level scalar attributes copied straight from couch to SQL."""
        return [
            "domain",
            "use_auto_emergency_levels",
            "sync_consumption_fixtures",
            "use_auto_consumption",
            "individual_consumption_defaults",
        ]

    @classmethod
    def one_to_one_submodels(cls):
        """Specs mapping each couch sub-document to its one-to-one SQL model.

        Each spec gives the SQL/couch classes, the couch attribute holding
        the sub-document, the fields to copy, and an optional ``wrap``
        callable that normalizes legacy couch field names first.
        """
        return [
            {
                "sql_class": SQLAlertConfig,
                "couch_class": AlertConfig,
                "couch_attr": "alert_config",
                "fields": ['stock_out_facilities', 'stock_out_commodities', 'stock_out_rates', 'non_report'],
            },
            {
                "sql_class": SQLConsumptionConfig,
                "couch_class": ConsumptionConfig,
                "couch_attr": "consumption_config",
                "fields": [
                    'min_transactions', 'min_window', 'optimal_window',
                    'use_supply_point_type_default_consumption', 'exclude_invalid_periods',
                ]
            },
            {
                "sql_class": SQLStockLevelsConfig,
                "couch_class": StockLevelsConfig,
                "couch_attr": "stock_levels_config",
                "fields": ['emergency_level', 'understock_threshold', 'overstock_threshold'],
            },
            {
                "sql_class": SQLStockRestoreConfig,
                "couch_class": StockRestoreConfig,
                "couch_attr": "ota_restore_config",
                "fields": [
                    'section_to_consumption_types', 'force_consumption_case_types', 'use_dynamic_product_list',
                ],
                "wrap": cls._wrap_stock_restore_config,
            },
        ]

    @classmethod
    def _wrap_stock_restore_config(cls, doc):
        """Rename the legacy force_to_consumption_case_types key in place."""
        if 'force_to_consumption_case_types' in doc:
            realval = doc['force_to_consumption_case_types']
            oldval = doc.get('force_consumption_case_types')
            # keep an already-populated new-style value over the legacy one
            if realval and not oldval:
                doc['force_consumption_case_types'] = realval
                del doc['force_to_consumption_case_types']
        return doc

    @classmethod
    def _wrap_action_config(cls, data):
        """Normalize legacy action_type/name keys on an action dict."""
        if 'action_type' in data:
            data['action'] = data['action_type']
            del data['action_type']
        if 'name' in data:
            if data['name'] == 'lost':
                data['subaction'] = 'loss'
            del data['name']
        return data

    def update_or_create_sql_object(self, doc):
        """Create or update the SQL model (plus submodels) for one couch doc.

        Returns:
            tuple: (model, created) where ``created`` is True for a new row.
        """
        try:
            model = self.sql_class().objects.get(couch_id=doc['_id'])
            created = False
        except ObjectDoesNotExist:
            model = self.sql_class()(couch_id=doc['_id'])
            created = True
        for attr in self.attrs_to_sync():
            setattr(model, attr, doc.get(attr))
        for spec in self.one_to_one_submodels():
            couch_submodel = doc.get(spec['couch_attr'])
            if 'wrap' in spec:
                couch_submodel = spec['wrap'](couch_submodel)
            setattr(model, spec['sql_class'].__name__.lower(), spec['sql_class'](**{
                field: couch_submodel.get(field)
                for field in spec['fields']
            }))
        sql_actions = []
        # robustness: tolerate docs with no 'actions' key
        for a in doc.get('actions', []):
            a = self._wrap_action_config(a)
            # bug fix: read the action fields from the wrapped action dict
            # `a`, not from the parent `doc`, which has none of these keys
            # -- previously every action was saved with all-None fields
            sql_actions.append(SQLActionConfig(
                action=a.get('action'),
                subaction=a.get('subaction'),
                _keyword=a.get('_keyword'),
                caption=a.get('caption'),
            ))
        model.set_actions(sql_actions)
        model.save()
        # the submodels need the FK to the saved parent before they can save
        for spec in self.one_to_one_submodels():
            submodel = getattr(model, spec['sql_class'].__name__.lower())
            submodel.commtrack_config = model
            submodel.save()
        return (model, created)
Fix action saving in the CommtrackConfig couch-to-SQL migration: read each action's fields from the wrapped action dict instead of the parent document, which always yielded None.
from django.core.exceptions import ObjectDoesNotExist
from corehq.apps.cleanup.management.commands.populate_sql_model_from_couch_model import PopulateSQLCommand
from corehq.apps.commtrack.models import (
SQLAlertConfig,
SQLActionConfig,
SQLConsumptionConfig,
SQLStockLevelsConfig,
SQLStockRestoreConfig,
AlertConfig,
ConsumptionConfig,
StockLevelsConfig,
StockRestoreConfig,
)
class Command(PopulateSQLCommand):
@classmethod
def couch_doc_type(self):
return 'CommtrackConfig'
@classmethod
def sql_class(self):
from corehq.apps.commtrack.models import SQLCommtrackConfig
return SQLCommtrackConfig
@classmethod
def commit_adding_migration(cls):
return None
@classmethod
def diff_couch_and_sql(cls, doc, obj):
diffs = []
for attr in cls.attrs_to_sync():
diffs.append(cls.diff_attr(attr, doc, obj))
diffs.extend(cls.diff_lists(doc.get('actions', []), obj.all_actions, [
'action', 'subaction', '_keyword', 'caption'
]))
for spec in cls.one_to_one_submodels():
normalize = float if spec["sql_class"] == SQLStockLevelsConfig else None
sql_submodel = getattr(obj, spec['sql_class'].__name__.lower())
couch_submodel = doc.get(spec['couch_attr'], {})
for attr in spec['fields']:
diffs.append(cls.diff_attr(attr, couch_submodel, sql_submodel, normalize=normalize))
diffs = [d for d in diffs if d]
return "\n".join(diffs) if diffs else None
@classmethod
def attrs_to_sync(cls):
return [
"domain",
"use_auto_emergency_levels",
"sync_consumption_fixtures",
"use_auto_consumption",
"individual_consumption_defaults",
]
@classmethod
def one_to_one_submodels(cls):
return [
{
"sql_class": SQLAlertConfig,
"couch_class": AlertConfig,
"couch_attr": "alert_config",
"fields": ['stock_out_facilities', 'stock_out_commodities', 'stock_out_rates', 'non_report'],
},
{
"sql_class": SQLConsumptionConfig,
"couch_class": ConsumptionConfig,
"couch_attr": "consumption_config",
"fields": [
'min_transactions', 'min_window', 'optimal_window',
'use_supply_point_type_default_consumption', 'exclude_invalid_periods',
]
},
{
"sql_class": SQLStockLevelsConfig,
"couch_class": StockLevelsConfig,
"couch_attr": "stock_levels_config",
"fields": ['emergency_level', 'understock_threshold', 'overstock_threshold'],
},
{
"sql_class": SQLStockRestoreConfig,
"couch_class": StockRestoreConfig,
"couch_attr": "ota_restore_config",
"fields": [
'section_to_consumption_types', 'force_consumption_case_types', 'use_dynamic_product_list',
],
"wrap": cls._wrap_stock_restore_config,
},
]
@classmethod
def _wrap_stock_restore_config(cls, doc):
if 'force_to_consumption_case_types' in doc:
realval = doc['force_to_consumption_case_types']
oldval = doc.get('force_consumption_case_types')
if realval and not oldval:
doc['force_consumption_case_types'] = realval
del doc['force_to_consumption_case_types']
return doc
@classmethod
def _wrap_action_config(cls, data):
if 'action_type' in data:
data['action'] = data['action_type']
del data['action_type']
if 'name' in data:
if data['name'] == 'lost':
data['subaction'] = 'loss'
del data['name']
return data
def update_or_create_sql_object(self, doc):
    """Create or update the SQL mirror of the given couch document.

    Returns a ``(model, created)`` tuple where ``created`` is True when a
    new SQL row was made for this couch ``_id``.
    """
    try:
        model = self.sql_class().objects.get(couch_id=doc['_id'])
        created = False
    except ObjectDoesNotExist:
        model = self.sql_class()(couch_id=doc['_id'])
        created = True
    # Copy plain scalar attributes straight across.
    for attr in self.attrs_to_sync():
        setattr(model, attr, doc.get(attr))
    # Rebuild each one-to-one submodel from its couch sub-document.
    for spec in self.one_to_one_submodels():
        couch_submodel = doc.get(spec['couch_attr'])
        # NOTE(review): if the couch attribute is absent this is None and
        # the .get(field) calls below would raise AttributeError —
        # presumably every doc carries these sub-documents; confirm.
        if 'wrap' in spec:
            couch_submodel = spec['wrap'](couch_submodel)
        setattr(model, spec['sql_class'].__name__.lower(), spec['sql_class'](**{
            field: couch_submodel.get(field)
            for field in spec['fields']
        }))
    # Actions are rebuilt wholesale as an ordered list on the SQL side.
    sql_actions = []
    for a in doc['actions']:
        a = self._wrap_action_config(a)
        sql_actions.append(SQLActionConfig(
            action=a.get('action'),
            subaction=a.get('subaction'),
            _keyword=a.get('_keyword'),
            caption=a.get('caption'),
        ))
    model.set_actions(sql_actions)
    model.save()
    # Submodels can only point back at the parent once it has been saved.
    for spec in self.one_to_one_submodels():
        submodel = getattr(model, spec['sql_class'].__name__.lower())
        submodel.commtrack_config = model
        submodel.save()
    return (model, created)
|
import collections
import contextlib
import copy
import warnings
import numpy
import six
from chainer import cuda
from chainer import initializers
from chainer import variable
def _is_shape(value):
if value is None:
return True
elif isinstance(value, collections.Sequence):
try:
return all(int(x) for x in value)
except TypeError:
return False
try:
return int(value)
except TypeError:
return False
def _ensure_shape_dtype(value):
    """Normalize ``value`` to a ``(shape, dtype)`` pair.

    A bare shape is paired with the default float32 dtype ``'f'``;
    anything else is assumed to already be a (shape, dtype) pair and is
    returned unchanged.
    """
    if not _is_shape(value):
        return value
    return value, 'f'
class Link(object):
"""Building block of model definitions.
Link is a building block of neural network models that support various
features like handling parameters, defining network fragments,
serialization, etc.
Link is the primitive structure for the model definitions. It supports
management of parameter variables and *persistent values* that should be
incorporated to serialization.
Parameter is an instance of :class:`~chainer.Parameter` registered to a
link. A :class:`~chainer.Parameter` object can be registered as a
parameter of the link by assigning it to an attribute within *an
initialization scope*, which is a code surrounded by a
:meth:`init_scope` context manager using the ``with`` statement.
Persistent values are arrays, scalars, or any other serializable values
registered via :meth:`register_persistent` or :meth:`add_persistent`.
.. note::
Whereas arbitrary serializable objects can be registered as persistent
values, it is strongly recommended to just register values that should
be treated as results of learning. A typical example of persistent
values is ones computed during training and required for testing, e.g.
running statistics for batch normalization.
Parameters and persistent values are referred by their names. They can be
accessed as attributes of the links. Link class itself manages the lists
of names of parameters and persistent values to distinguish parameters and
persistent values from other attributes.
Link can be composed into more complex models. This composition feature is
supported by child classes like :class:`Chain` and :class:`ChainList`. One
can create a chain by combining one or more links. See the documents for
these classes for details.
As noted above, Link supports the serialization protocol of the
:class:`~chainer.Serializer` class. **Note that only parameters and
persistent values are saved and loaded.** Other attributes are considered
as a part of user program (i.e. a part of network definition). In order to
construct a link from saved file, other attributes must be identically
reconstructed by user codes.
.. admonition:: Example
This is a simple example of custom link definition. Chainer itself also
provides many links defined under the :mod:`~chainer.links` module. They
might serve as examples, too.
Consider we want to define a simple primitive link that implements a
fully-connected layer based on the :func:`~functions.linear` function.
Note that this function takes input units, a weight variable, and a bias
variable as arguments. Then, the fully-connected layer can be defined as
follows::
import chainer
import chainer.functions as F
from chainer import initializers
import numpy as np
class LinearLayer(chainer.Link):
def __init__(self, n_in, n_out):
super(LinearLayer, self).__init__()
with self.init_scope():
self.W = chainer.Parameter(
initializers.Normal(), (n_out, n_in))
self.b = chainer.Parameter(
initializers.Zero(), (n_out,))
def __call__(self, x):
return F.linear(x, self.W, self.b)
This example shows that a user can define arbitrary parameters and use
them in any methods. Links typically implement the ``__call__``
operator, although they can also provide other methods to implement the
forward propagation.
Args:
params: *(deprecated since v2.0.0)* Names, shapes, and optional dtypes
of initial parameters. The keywords are used as the parameter
names and the corresponding values consist either of the shape or
a tuple of shape and a dtype ``(shape, dtype)``. If only the shape
is supplied, the default dtype will be used.
Attributes:
name (str): Name of this link, given by the parent chain (if exists).
"""
def __init__(self, **params):
self._params = set()
self._persistent = set()
self._cpu = True
self._device_id = None
self._within_init_scope = False
self.name = None
for name, value in six.iteritems(params):
# Note: deprecation warning will be raised in add_param
shape, dtype = _ensure_shape_dtype(value)
self.add_param(name, shape, dtype=dtype)
@property
def xp(self):
"""Array module for this link.
Depending on which of CPU/GPU this link is on, this property returns
:mod:`numpy` or :mod:`cupy`.
"""
return numpy if self._cpu else cuda.cupy
@property
def within_init_scope(self):
"""True if the current code is inside of an initialization scope.
See :meth:`init_scope` for the details of the initialization scope.
"""
return getattr(self, '_within_init_scope', False)
@contextlib.contextmanager
def init_scope(self):
"""Creates an initialization scope.
This method returns a context manager object that enables registration
of parameters (and links for :class:`~chainer.Chain`) by an assignment.
A :class:`~chainer.Parameter` object can be automatically registered
by assigning it to an attribute under this context manager.
.. admonition:: Example
In most cases, the parameter registration is done in the
initializer method. Using the ``init_scope`` method, we can
simply assign a :class:`~chainer.Parameter` object to register
it to the link.
.. code-block:: python
class MyLink(chainer.Link):
def __init__(self):
super().__init__()
with self.init_scope():
self.W = chainer.Parameter(0, (10, 5))
self.b = chainer.Parameter(0, (5,))
"""
old_flag = self.within_init_scope
self._within_init_scope = True
try:
yield
finally:
self._within_init_scope = old_flag
def __setattr__(self, name, value):
if self.within_init_scope and isinstance(value, variable.Parameter):
value.name = name
if not self._cpu:
value.to_gpu(self._device_id)
self._params.add(name)
self._persistent.discard(name)
super(Link, self).__setattr__(name, value)
def __delattr__(self, name):
self._params.discard(name)
self._persistent.discard(name)
super(Link, self).__delattr__(name)
def add_param(self, name, shape=None, dtype=numpy.float32,
initializer=None):
"""Registers a parameter to the link.
.. deprecated:: v2.0.0
Assign a :class:`~chainer.Parameter` object directly to an
attribute within :meth:`an initialization scope <init_scope>`
instead. For example, the following code
.. code-block:: python
link.add_param('W', shape=(5, 3))
can be replaced by the following assignment.
.. code-block:: python
with self.init_scope():
link.W = chainer.Parameter(None, (5, 3))
The latter one is easier for IDEs to keep track of the attribute's
type.
Args:
name (str): Name of the parameter. This name is also used as the
attribute name.
shape (int or tuple of ints): Shape of the parameter array. If it
is omitted, the parameter variable is left uninitialized.
dtype: Data type of the parameter array.
initializer: If it is not ``None``, the data is initialized with
the given initializer. If it is an array, the data is directly
initialized by it. If it is callable, it is used as a weight
initializer. Note that in these cases, ``dtype`` argument is
ignored.
"""
warnings.warn('''\
Parameter registeration via Link.__init__ and Link.add_param are deprecated.
Assign a Parameter object directly to an attribute within a \
"with Link.init_scope():" block instead.
''', DeprecationWarning)
if name in self.__dict__:
raise AttributeError(
'cannot register a new parameter %s: attribute exists'
% name)
if initializer is None:
initializer = initializers.NaN(dtype)
param = variable.Parameter(initializer, shape)
with self.init_scope():
setattr(self, name, param)
def add_persistent(self, name, value):
"""Registers a persistent value to the link.
The registered value is saved and loaded on serialization and
deserialization. The value is set to an attribute of the link.
Args:
name (str): Name of the persistent value. This name is also used
for the attribute name.
value: Value to be registered.
"""
d = self.__dict__
if name in d:
raise AttributeError(
'cannot register a new persistent value %s: attribute exists'
% name)
self._persistent.add(name)
self._params.discard(name)
d[name] = value
def register_persistent(self, name):
"""Registers an attribute of a given name as a persistent value.
This is a convenient method to register an existing attribute as a
persistent value. If ``name`` has been already registered as a
parameter, this method removes it from the list of parameter names
and re-registers it as a persistent value.
Args:
name (str): Name of the attribute to be registered.
"""
if not hasattr(self, name):
raise AttributeError(
'cannot register non-existent attribute %s as a persistent '
'value' % name)
self._persistent.add(name)
self._params.discard(name)
def copy(self):
    """Copies the link hierarchy to new one.

    The whole hierarchy rooted by this link is copied. The copy is
    basically shallow, except that the parameter variables are also
    shallowly copied. It means that the parameter variables of copied one
    are different from ones of original link, while they share the data
    arrays.

    The name of the link is reset on the copy, since the copied instance
    does not belong to the original parent chain (even if exists).

    Returns:
        Link: Copied link object.
    """
    ret = copy.copy(self)
    # Fresh name sets so later registrations don't leak between the two.
    ret._params = set(self._params)
    ret._persistent = set(self._persistent)
    # The copy is detached from any parent chain.
    ret.name = None
    d = ret.__dict__
    for name in ret._params:
        # Shallow-copy each parameter: a new variable object that still
        # shares the underlying data array with the original.
        d[name] = copy.copy(d[name])
        # NOTE(review): grad is reset on the copy here, so gradient arrays
        # are NOT shared despite what older docs suggest — confirm intended.
        d[name].grad = None
    return ret
def to_cpu(self):
"""Copies parameter variables and persistent values to CPU.
This method does not handle non-registered attributes. If some of such
attributes must be copied to CPU, the link implementation must
override this method to do so.
Returns: self
"""
if self._cpu:
return self
d = self.__dict__
for name in self._params:
d[name].to_cpu()
for name in self._persistent:
value = d[name]
if isinstance(value, cuda.ndarray):
d[name] = value.get()
self._cpu = True
self._device_id = None
return self
def to_gpu(self, device=None):
"""Copies parameter variables and persistent values to GPU.
This method does not handle non-registered attributes. If some of such
attributes must be copied to GPU, the link implementation must
override this method to do so.
Args:
device: Target device specifier. If omitted, the current device is
used.
Returns: self
"""
cuda.check_cuda_available()
if not self._cpu:
return self
d = self.__dict__
with cuda._get_device(device):
for name in self._params:
d[name].to_gpu()
for name in self._persistent:
value = d[name]
if isinstance(value, numpy.ndarray):
d[name] = cuda.to_gpu(value)
self._device_id = cuda.cupy.cuda.get_device_id()
self._cpu = False
return self
def params(self, include_uninit=True):
"""Returns a generator of all parameters under the link hierarchy.
Args:
include_uninit (bool): If ``True``, it also generates uninitialized
parameters.
Returns:
A generator object that generates all parameters.
"""
d = self.__dict__
for name in self._params:
if include_uninit or d[name].data is not None:
yield d[name]
def namedparams(self, include_uninit=True):
"""Returns a generator of all (path, param) pairs under the hierarchy.
Args:
include_uninit (bool): If ``True``, it also generates uninitialized
parameters.
Returns:
A generator object that generates all (path, parameter) pairs. The
paths are relative from this link.
"""
d = self.__dict__
for name in self._params:
if include_uninit or d[name].data is not None:
yield '/' + name, d[name]
def links(self, skipself=False):
"""Returns a generator of all links under the hierarchy.
Args:
skipself (bool): If ``True``, then the generator skips this link
and starts with the first child link.
Returns:
A generator object that generates all links.
"""
if not skipself:
yield self
def namedlinks(self, skipself=False):
"""Returns a generator of all (path, link) pairs under the hierarchy.
Args:
skipself (bool): If ``True``, then the generator skips this link
and starts with the first child link.
Returns:
A generator object that generates all (path, link) pairs.
"""
if not skipself:
yield '/', self
def children(self):
"""Returns a generator of all child links.
Returns:
A generator object that generates all child links.
"""
if 0:
yield
def copyparams(self, link):
"""Copies all parameters from given link.
This method copies data arrays of all parameters in the hierarchy. The
copy is even done across the host and devices. Note that this method
does not copy the gradient arrays.
Args:
link (Link): Source link object.
"""
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].copydata(src[name])
def cleargrads(self):
"""Clears all gradient arrays.
This method should be called before the backward computation at every
iteration of the optimization.
"""
for param in self.params():
param.cleargrad()
def zerograds(self):
"""Initializes all gradient arrays by zero.
This method can be used for the same purpose of cleargrads, but less
efficient. This method is left for backward compatibility.
.. deprecated:: v1.15
Use :meth:`cleargrads` instead.
"""
warnings.warn(
'Link.zerograds is deprecated. Use Link.cleargrads instead.',
DeprecationWarning)
for param in self.params():
param.zerograd()
def addgrads(self, link):
"""Accumulates gradient values from given link.
This method adds each gradient array of the given link to corresponding
gradient array of this link. The accumulation is even done across
host and different devices.
Args:
link (Link): Source link object.
"""
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].addgrad(src[name])
def enable_update(self):
"""Enables update rules of all parameters under the link hierarchy.
This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the
update rule of each parameter variable to ``True``.
"""
for param in self.params():
rule = param.update_rule
if rule is not None:
rule.enabled = True
def disable_update(self):
    """Disables update rules of all parameters under the link hierarchy.

    This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the
    update rule of each parameter variable to ``False``.
    """
    for param in self.params():
        rule = param.update_rule
        if rule is not None:
            rule.enabled = False
@property
def update_enabled(self):
"""``True`` if at least one parameter has an update rule enabled."""
for param in self.params():
rule = param.update_rule
if rule is not None and rule.enabled:
return True
return False
def serialize(self, serializer):
"""Serializes the link object.
Args:
serializer (~chainer.AbstractSerializer): Serializer object.
"""
d = self.__dict__
for name in self._params:
param = d[name]
data = serializer(name, param.data)
if param.data is None and data is not None:
# Initialize the parameter here
param.initialize(data.shape)
if isinstance(param.data, numpy.ndarray):
numpy.copyto(param.data, data)
else:
param.data.set(numpy.asarray(data))
for name in self._persistent:
d[name] = serializer(name, d[name])
class Chain(Link):
"""Composable link with object-like interface.
Composability is one of the most important features of neural nets. Neural
net models consist of many reusable fragments, and each model itself might
be embedded into a larger learnable system. Chain enables us to write a
neural net based on composition, without bothering about routine works like
collecting parameters, serialization, copying the structure with parameters
shared, etc.
This class actually provides a way to compose one or more links into one
structure. A chain can contain one or more *child links*. Child link is a
link registered to the chain with its own name. The child link is stored to
an attribute of the chain with the name. User can write a whole model or a
fragment of neural nets as a child class of Chain.
Each chain itself is also a link. Therefore, one can combine chains into
higher-level chains. In this way, links and chains construct a *link
hierarchy*. Link hierarchy forms a tree structure, where each node is
identified by the path from the root. The path is represented by a string
like a file path in UNIX, consisting of names of nodes on the path, joined
by slashes ``/``.
A child link can be added just by assigning it to an attribute of the
chain within :meth:`an initialization scope <chainer.Link.init_scope>`.
The registered child link is saved and loaded on serialization and
deserialization, and involved in the optimization. The registered link
is called a child. The child link is accessible via :meth:`children`
generator, which returns a generator running through the children in
registered order.
On registration of a child link, its :attr:`~Link.name` attribute is also
set (or overwritten if the link has already been registered to another
chain).
.. admonition:: Example
This is a simple example of custom chain definition. Chainer itself also
provides some chains defined under the :mod:`~chainer.links` module.
They might serve as examples, too.
Consider we want to define a multi-layer perceptron consisting of two
hidden layers with rectifiers as activation functions. We can use the
:class:`~chainer.links.Linear` link as a building block::
import chainer
import chainer.functions as F
import chainer.links as L
class MultiLayerPerceptron(chainer.Chain):
def __init__(self, n_in, n_hidden, n_out):
                super(MultiLayerPerceptron, self).__init__()
with self.init_scope():
self.layer1 = L.Linear(n_in, n_hidden)
self.layer2 = L.Linear(n_hidden, n_hidden)
self.layer3 = L.Linear(n_hidden, n_out)
def __call__(self, x):
# Forward propagation
h1 = F.relu(self.layer1(x))
h2 = F.relu(self.layer2(h1))
return self.layer3(h2)
Child links are registered via the assignment within a
``with self.init_scope():`` block. The forward propagation is often
implemented as The ``__call__`` operator as the above example, though
it is not mandatory.
Args:
links: Child links. The keywords are used as their names. The names are
also set to the links.
.. deprecated:: v2.0.0
Assign child links directly to attributes, instead.
"""
def __init__(self, **links):
super(Chain, self).__init__()
self._children = set()
for name, link in six.iteritems(links):
self.add_link(name, link)
def __getitem__(self, name):
"""Equivalent to getattr."""
return getattr(self, name)
def __setattr__(self, name, value):
if self.within_init_scope and isinstance(value, Link):
if hasattr(self, name):
raise AttributeError(
'cannot register a new link %s: attribute exists' % name)
value.name = name
self._children.add(name)
super(Chain, self).__setattr__(name, value)
def __delattr__(self, name):
self._children.discard(name)
super(Chain, self).__delattr__(name)
def add_link(self, name, link):
"""Registers a child link to this chain.
.. deprecated:: v2.0.0
Assign the child link directly to an attribute within
:meth:`an initialization scope <chainer.Link.init_scope>`, instead.
For example, the following code
.. code-block:: python
chain.add_link('l1', L.Linear(3, 5))
can be replaced by the following line.
.. code-block:: python
with self.init_scope():
chain.l1 = L.Linear(3, 5)
The latter one is easier for IDEs to keep track of the attribute's
type.
Args:
name (str): Name of the child link. This name is also used as the
attribute name.
link (Link): The link object to be registered.
"""
warnings.warn('''\
Child link registeration via Chain.__init__ and Chain.add_link are deprecated.
Assign a Link object directly to an attribute within a \
"with link.init_scope():" block instead.
''', DeprecationWarning)
if name in self.__dict__:
raise AttributeError(
'cannot register a new link %s: attribute exists' % name)
if not isinstance(link, Link):
raise TypeError('cannot register a non-link object as a child')
with self.init_scope():
setattr(self, name, link)
def copy(self):
ret = super(Chain, self).copy()
ret._children = set(ret._children)
d = ret.__dict__
for name in ret._children:
# copy child links recursively
copied = d[name].copy()
copied.name = name
d[name] = copied
return ret
def to_cpu(self):
super(Chain, self).to_cpu()
d = self.__dict__
for name in self._children:
d[name].to_cpu()
return self
def to_gpu(self, device=None):
with cuda._get_device(device):
super(Chain, self).to_gpu()
d = self.__dict__
for name in self._children:
d[name].to_gpu()
return self
def params(self, include_uninit=True):
for param in super(Chain, self).params(include_uninit):
yield param
d = self.__dict__
for name in self._children:
for param in d[name].params(include_uninit):
yield param
def namedparams(self, include_uninit=True):
for ret in super(Chain, self).namedparams(include_uninit):
yield ret
d = self.__dict__
for name in self._children:
prefix = '/' + name
for path, param in d[name].namedparams(include_uninit):
yield prefix + path, param
def links(self, skipself=False):
if not skipself:
yield self
d = self.__dict__
for name in self._children:
for link in d[name].links():
yield link
def namedlinks(self, skipself=False):
if not skipself:
yield '/', self
d = self.__dict__
for name in self._children:
child = d[name]
prefix = '/' + name
yield prefix, child
for path, link in d[name].namedlinks(True):
yield prefix + path, link
def children(self):
d = self.__dict__
for name in self._children:
yield d[name]
def copyparams(self, link):
super(Chain, self).copyparams(link)
src = link.__dict__
dst = self.__dict__
for name in self._children:
dst[name].copyparams(src[name])
def addgrads(self, link):
super(Chain, self).addgrads(link)
src = link.__dict__
dst = self.__dict__
for name in self._children:
dst[name].addgrads(src[name])
def serialize(self, serializer):
super(Chain, self).serialize(serializer)
d = self.__dict__
for name in self._children:
d[name].serialize(serializer[name])
class ChainList(Link):
"""Composable link with list-like interface.
This is another example of compositional link. Unlike :class:`Chain`, this
class can be used like a list of child links. Each child link is indexed by
a non-negative integer, and it maintains the current number of registered
child links. The :meth:`add_link` method inserts a new link at the end of
the list. It is useful to write a chain with arbitrary number of child
links, e.g. an arbitrarily deep multi-layer perceptron.
Note that this class does not implement all methods of :class:`list`.
Args:
links: Initial child links.
"""
def __init__(self, *links):
super(ChainList, self).__init__()
self._children = []
for link in links:
self.add_link(link)
def __getitem__(self, index):
"""Returns the child at given index.
Args:
index (int): Index of the child in the list.
Returns:
Link: The ``index``-th child link.
"""
return self._children[index]
def __iter__(self):
return iter(self._children)
def __len__(self):
"""Returns the number of children."""
return len(self._children)
def append(self, link):
    """Registers a child link and adds it to the tail of the list.

    This is equivalent to :meth:`add_link`. This method has been added to
    emulate the ``list`` interface.

    Args:
        link (Link): The link object to be registered.
    """
    self.add_link(link)
def add_link(self, link):
"""Registers a child link and adds it to the tail of the list.
Args:
link (Link): The link object to be registered.
"""
link.name = str(len(self._children))
self._children.append(link)
def copy(self):
ret = super(ChainList, self).copy()
ret._children = list(ret._children) # copy
children = ret._children
for i, child in enumerate(children):
child = child.copy()
child.name = str(i)
children[i] = child
return ret
def to_cpu(self):
super(ChainList, self).to_cpu()
for link in self._children:
link.to_cpu()
return self
def to_gpu(self, device=None):
with cuda._get_device(device):
super(ChainList, self).to_gpu()
for link in self._children:
link.to_gpu()
return self
def params(self, include_uninit=True):
for param in super(ChainList, self).params(include_uninit):
yield param
for link in self._children:
for param in link.params(include_uninit):
yield param
def namedparams(self, include_uninit=True):
for ret in super(ChainList, self).namedparams(include_uninit):
yield ret
for idx, link in enumerate(self._children):
prefix = '/%d' % idx
for path, param in link.namedparams(include_uninit):
yield prefix + path, param
def links(self, skipself=False):
if not skipself:
yield self
for child in self._children:
for link in child.links():
yield link
def namedlinks(self, skipself=False):
if not skipself:
yield '/', self
for idx, child in enumerate(self._children):
prefix = '/%d' % idx
yield prefix, child
for path, link in child.namedlinks(True):
yield prefix + path, link
def children(self):
for child in self._children:
yield child
def copyparams(self, link):
super(ChainList, self).copyparams(link)
for idx, child in enumerate(self._children):
child.copyparams(link[idx])
def addgrads(self, link):
super(ChainList, self).addgrads(link)
for idx, child in enumerate(self._children):
child.addgrads(link[idx])
def serialize(self, serializer):
super(ChainList, self).serialize(serializer)
for idx, child in enumerate(self._children):
child.serialize(serializer['%d' % idx])
fix typo
import collections
import contextlib
import copy
import warnings
import numpy
import six
from chainer import cuda
from chainer import initializers
from chainer import variable
def _is_shape(value):
if value is None:
return True
elif isinstance(value, collections.Sequence):
try:
return all(int(x) for x in value)
except TypeError:
return False
try:
return int(value)
except TypeError:
return False
def _ensure_shape_dtype(value):
    """Normalize ``value`` to a ``(shape, dtype)`` pair.

    A bare shape is paired with the default float32 dtype ``'f'``;
    anything else is assumed to already be a (shape, dtype) pair and is
    returned unchanged.
    """
    if not _is_shape(value):
        return value
    return value, 'f'
class Link(object):
"""Building block of model definitions.
Link is a building block of neural network models that support various
features like handling parameters, defining network fragments,
serialization, etc.
Link is the primitive structure for the model definitions. It supports
management of parameter variables and *persistent values* that should be
incorporated to serialization.
Parameter is an instance of :class:`~chainer.Parameter` registered to a
link. A :class:`~chainer.Parameter` object can be registered as a
parameter of the link by assigning it to an attribute within *an
initialization scope*, which is a code surrounded by a
:meth:`init_scope` context manager using the ``with`` statement.
Persistent values are arrays, scalars, or any other serializable values
registered via :meth:`register_persistent` or :meth:`add_persistent`.
.. note::
Whereas arbitrary serializable objects can be registered as persistent
values, it is strongly recommended to just register values that should
be treated as results of learning. A typical example of persistent
values is ones computed during training and required for testing, e.g.
running statistics for batch normalization.
Parameters and persistent values are referred by their names. They can be
accessed as attributes of the links. Link class itself manages the lists
of names of parameters and persistent values to distinguish parameters and
persistent values from other attributes.
Link can be composed into more complex models. This composition feature is
supported by child classes like :class:`Chain` and :class:`ChainList`. One
can create a chain by combining one or more links. See the documents for
these classes for details.
As noted above, Link supports the serialization protocol of the
:class:`~chainer.Serializer` class. **Note that only parameters and
persistent values are saved and loaded.** Other attributes are considered
as a part of user program (i.e. a part of network definition). In order to
construct a link from saved file, other attributes must be identically
reconstructed by user codes.
.. admonition:: Example
This is a simple example of custom link definition. Chainer itself also
provides many links defined under the :mod:`~chainer.links` module. They
might serve as examples, too.
Consider we want to define a simple primitive link that implements a
fully-connected layer based on the :func:`~functions.linear` function.
Note that this function takes input units, a weight variable, and a bias
variable as arguments. Then, the fully-connected layer can be defined as
follows::
import chainer
import chainer.functions as F
from chainer import initializers
import numpy as np
class LinearLayer(chainer.Link):
def __init__(self, n_in, n_out):
super(LinearLayer, self).__init__()
with self.init_scope():
self.W = chainer.Parameter(
initializers.Normal(), (n_out, n_in))
self.b = chainer.Parameter(
initializers.Zero(), (n_out,))
def __call__(self, x):
return F.linear(x, self.W, self.b)
This example shows that a user can define arbitrary parameters and use
them in any methods. Links typically implement the ``__call__``
operator, although they can also provide other methods to implement the
forward propagation.
Args:
params: *(deprecated since v2.0.0)* Names, shapes, and optional dtypes
of initial parameters. The keywords are used as the parameter
names and the corresponding values consist either of the shape or
a tuple of shape and a dtype ``(shape, dtype)``. If only the shape
is supplied, the default dtype will be used.
Attributes:
name (str): Name of this link, given by the parent chain (if exists).
"""
def __init__(self, **params):
self._params = set()
self._persistent = set()
self._cpu = True
self._device_id = None
self._within_init_scope = False
self.name = None
for name, value in six.iteritems(params):
# Note: deprecation warning will be raised in add_param
shape, dtype = _ensure_shape_dtype(value)
self.add_param(name, shape, dtype=dtype)
@property
def xp(self):
"""Array module for this link.
Depending on which of CPU/GPU this link is on, this property returns
:mod:`numpy` or :mod:`cupy`.
"""
return numpy if self._cpu else cuda.cupy
@property
def within_init_scope(self):
"""True if the current code is inside of an initialization scope.
See :meth:`init_scope` for the details of the initialization scope.
"""
return getattr(self, '_within_init_scope', False)
@contextlib.contextmanager
def init_scope(self):
"""Creates an initialization scope.
This method returns a context manager object that enables registration
of parameters (and links for :class:`~chainer.Chain`) by an assignment.
A :class:`~chainer.Parameter` object can be automatically registered
by assigning it to an attribute under this context manager.
.. admonition:: Example
In most cases, the parameter registration is done in the
initializer method. Using the ``init_scope`` method, we can
simply assign a :class:`~chainer.Parameter` object to register
it to the link.
.. code-block:: python
class MyLink(chainer.Link):
def __init__(self):
super().__init__()
with self.init_scope():
self.W = chainer.Parameter(0, (10, 5))
self.b = chainer.Parameter(0, (5,))
"""
old_flag = self.within_init_scope
self._within_init_scope = True
try:
yield
finally:
self._within_init_scope = old_flag
def __setattr__(self, name, value):
if self.within_init_scope and isinstance(value, variable.Parameter):
value.name = name
if not self._cpu:
value.to_gpu(self._device_id)
self._params.add(name)
self._persistent.discard(name)
super(Link, self).__setattr__(name, value)
def __delattr__(self, name):
self._params.discard(name)
self._persistent.discard(name)
super(Link, self).__delattr__(name)
def add_param(self, name, shape=None, dtype=numpy.float32,
initializer=None):
"""Registers a parameter to the link.
.. deprecated:: v2.0.0
Assign a :class:`~chainer.Parameter` object directly to an
attribute within :meth:`an initialization scope <init_scope>`
instead. For example, the following code
.. code-block:: python
link.add_param('W', shape=(5, 3))
can be replaced by the following assignment.
.. code-block:: python
with self.init_scope():
link.W = chainer.Parameter(None, (5, 3))
The latter one is easier for IDEs to keep track of the attribute's
type.
Args:
name (str): Name of the parameter. This name is also used as the
attribute name.
shape (int or tuple of ints): Shape of the parameter array. If it
is omitted, the parameter variable is left uninitialized.
dtype: Data type of the parameter array.
initializer: If it is not ``None``, the data is initialized with
the given initializer. If it is an array, the data is directly
initialized by it. If it is callable, it is used as a weight
initializer. Note that in these cases, ``dtype`` argument is
ignored.
"""
warnings.warn('''\
Parameter registeration via Link.__init__ and Link.add_param are deprecated.
Assign a Parameter object directly to an attribute within a \
"with Link.init_scope():" block instead.
''', DeprecationWarning)
if name in self.__dict__:
raise AttributeError(
'cannot register a new parameter %s: attribute exists'
% name)
if initializer is None:
initializer = initializers.NaN(dtype)
param = variable.Parameter(initializer, shape)
with self.init_scope():
setattr(self, name, param)
def add_persistent(self, name, value):
"""Registers a persistent value to the link.
The registered value is saved and loaded on serialization and
deserialization. The value is set to an attribute of the link.
Args:
name (str): Name of the persistent value. This name is also used
for the attribute name.
value: Value to be registered.
"""
d = self.__dict__
if name in d:
raise AttributeError(
'cannot register a new persistent value %s: attribute exists'
% name)
self._persistent.add(name)
self._params.discard(name)
d[name] = value
def register_persistent(self, name):
"""Registers an attribute of a given name as a persistent value.
This is a convenient method to register an existing attribute as a
persistent value. If ``name`` has been already registered as a
parameter, this method removes it from the list of parameter names
and re-registers it as a persistent value.
Args:
name (str): Name of the attribute to be registered.
"""
if not hasattr(self, name):
raise AttributeError(
'cannot register non-existent attribute %s as a persistent '
'value' % name)
self._persistent.add(name)
self._params.discard(name)
    def copy(self):
        """Copies the link hierarchy to new one.

        The whole hierarchy rooted by this link is copied. The copy is
        basically shallow, except that the parameter variables are also
        shallowly copied. It means that the parameter variables of copied one
        are different from ones of original link, while they share the data and
        gradient arrays.

        The name of the link is reset on the copy, since the copied instance
        does not belong to the original parent chain (even if exists).

        Returns:
            Link: Copied link object.
        """
        ret = copy.copy(self)
        # fresh registries so later registrations don't leak between copies
        ret._params = set(self._params)
        ret._persistent = set(self._persistent)
        # the copy has no parent chain yet
        ret.name = None
        d = ret.__dict__
        for name in ret._params:
            # shallow-copy each Parameter: new variable object, shared data
            d[name] = copy.copy(d[name])
            d[name].grad = None
        return ret
def to_cpu(self):
"""Copies parameter variables and persistent values to CPU.
This method does not handle non-registered attributes. If some of such
attributes must be copied to CPU, the link implementation must
override this method to do so.
Returns: self
"""
if self._cpu:
return self
d = self.__dict__
for name in self._params:
d[name].to_cpu()
for name in self._persistent:
value = d[name]
if isinstance(value, cuda.ndarray):
d[name] = value.get()
self._cpu = True
self._device_id = None
return self
    def to_gpu(self, device=None):
        """Copies parameter variables and persistent values to GPU.

        This method does not handle non-registered attributes. If some of such
        attributes must be copied to GPU, the link implementation must
        override this method to do so.

        Args:
            device: Target device specifier. If omitted, the current device is
                used.

        Returns: self
        """
        cuda.check_cuda_available()
        if not self._cpu:
            # already on a GPU; nothing to move
            return self
        d = self.__dict__
        # all transfers happen under the target device context
        with cuda._get_device(device):
            for name in self._params:
                d[name].to_gpu()
            for name in self._persistent:
                value = d[name]
                if isinstance(value, numpy.ndarray):
                    d[name] = cuda.to_gpu(value)
            # record the device while still inside the context
            self._device_id = cuda.cupy.cuda.get_device_id()
        self._cpu = False
        return self
def params(self, include_uninit=True):
"""Returns a generator of all parameters under the link hierarchy.
Args:
include_uninit (bool): If ``True``, it also generates uninitialized
parameters.
Returns:
A generator object that generates all parameters.
"""
d = self.__dict__
for name in self._params:
if include_uninit or d[name].data is not None:
yield d[name]
def namedparams(self, include_uninit=True):
"""Returns a generator of all (path, param) pairs under the hierarchy.
Args:
include_uninit (bool): If ``True``, it also generates uninitialized
parameters.
Returns:
A generator object that generates all (path, parameter) pairs. The
paths are relative from this link.
"""
d = self.__dict__
for name in self._params:
if include_uninit or d[name].data is not None:
yield '/' + name, d[name]
def links(self, skipself=False):
"""Returns a generator of all links under the hierarchy.
Args:
skipself (bool): If ``True``, then the generator skips this link
and starts with the first child link.
Returns:
A generator object that generates all links.
"""
if not skipself:
yield self
def namedlinks(self, skipself=False):
"""Returns a generator of all (path, link) pairs under the hierarchy.
Args:
skipself (bool): If ``True``, then the generator skips this link
and starts with the first child link.
Returns:
A generator object that generates all (path, link) pairs.
"""
if not skipself:
yield '/', self
def children(self):
"""Returns a generator of all child links.
Returns:
A generator object that generates all child links.
"""
if 0:
yield
def copyparams(self, link):
"""Copies all parameters from given link.
This method copies data arrays of all parameters in the hierarchy. The
copy is even done across the host and devices. Note that this method
does not copy the gradient arrays.
Args:
link (Link): Source link object.
"""
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].copydata(src[name])
def cleargrads(self):
"""Clears all gradient arrays.
This method should be called before the backward computation at every
iteration of the optimization.
"""
for param in self.params():
param.cleargrad()
def zerograds(self):
"""Initializes all gradient arrays by zero.
This method can be used for the same purpose of cleargrads, but less
efficient. This method is left for backward compatibility.
.. deprecated:: v1.15
Use :meth:`cleargrads` instead.
"""
warnings.warn(
'Link.zerograds is deprecated. Use Link.cleargrads instead.',
DeprecationWarning)
for param in self.params():
param.zerograd()
def addgrads(self, link):
"""Accumulates gradient values from given link.
This method adds each gradient array of the given link to corresponding
gradient array of this link. The accumulation is even done across
host and different devices.
Args:
link (Link): Source link object.
"""
src = link.__dict__
dst = self.__dict__
for name in self._params:
dst[name].addgrad(src[name])
def enable_update(self):
"""Enables update rules of all parameters under the link hierarchy.
This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the
update rule of each parameter variable to ``True``.
"""
for param in self.params():
rule = param.update_rule
if rule is not None:
rule.enabled = True
    def disable_update(self):
        """Disables update rules of all parameters under the link hierarchy.

        This method sets the :attr:`~chainer.UpdateRule.enabled` flag of the
        update rule of each parameter variable to ``False``.
        """
        for param in self.params():
            rule = param.update_rule
            if rule is not None:
                rule.enabled = False
@property
def update_enabled(self):
"""``True`` if at least one parameter has an update rule enabled."""
for param in self.params():
rule = param.update_rule
if rule is not None and rule.enabled:
return True
return False
    def serialize(self, serializer):
        """Serializes the link object.

        On save, each parameter's data and each persistent value are
        passed through the serializer; on load, uninitialized parameters
        are initialized from the loaded arrays before the data is copied.

        Args:
            serializer (~chainer.AbstractSerializer): Serializer object.
        """
        d = self.__dict__
        for name in self._params:
            param = d[name]
            data = serializer(name, param.data)
            if param.data is None and data is not None:
                # loading into an uninitialized parameter: allocate first
                param.initialize(data.shape)
                if isinstance(param.data, numpy.ndarray):
                    numpy.copyto(param.data, data)
                else:
                    # GPU-resident parameter: upload via set()
                    param.data.set(numpy.asarray(data))
        for name in self._persistent:
            d[name] = serializer(name, d[name])
class Chain(Link):

    """Composable link with object-like interface.

    Composability is one of the most important features of neural nets. Neural
    net models consist of many reusable fragments, and each model itself might
    be embedded into a larger learnable system. Chain enables us to write a
    neural net based on composition, without bothering about routine works like
    collecting parameters, serialization, copying the structure with parameters
    shared, etc.

    This class actually provides a way to compose one or more links into one
    structure. A chain can contain one or more *child links*. Child link is a
    link registered to the chain with its own name. The child link is stored to
    an attribute of the chain with the name. User can write a whole model or a
    fragment of neural nets as a child class of Chain.

    Each chain itself is also a link. Therefore, one can combine chains into
    higher-level chains. In this way, links and chains construct a *link
    hierarchy*. Link hierarchy forms a tree structure, where each node is
    identified by the path from the root. The path is represented by a string
    like a file path in UNIX, consisting of names of nodes on the path, joined
    by slashes ``/``.

    A child link can be added just by assigning it to an attribute of the
    chain within :meth:`an initialization scope <chainer.Link.init_scope>`.

    The registered child link is saved and loaded on serialization and
    deserialization, and involved in the optimization. The registered link
    is called a child. The child link is accessible via :meth:`children`
    generator, which returns a generator running through the children in
    registered order.

    On registration of a child link, its :attr:`~Link.name` attribute is also
    set (or overwritten if the link has already been registered to another
    chain).

    .. admonition:: Example

       This is a simple example of custom chain definition. Chainer itself also
       provides some chains defined under the :mod:`~chainer.links` module.
       They might serve as examples, too.

       Consider we want to define a multi-layer perceptron consisting of two
       hidden layers with rectifiers as activation functions. We can use the
       :class:`~chainer.links.Linear` link as a building block::

          import chainer
          import chainer.functions as F
          import chainer.links as L

          class MultiLayerPerceptron(chainer.Chain):

              def __init__(self, n_in, n_hidden, n_out):
                  super(MultiLayerPerceptron, self).__init__()
                  with self.init_scope():
                      self.layer1 = L.Linear(n_in, n_hidden)
                      self.layer2 = L.Linear(n_hidden, n_hidden)
                      self.layer3 = L.Linear(n_hidden, n_out)

              def __call__(self, x):
                  # Forward propagation
                  h1 = F.relu(self.layer1(x))
                  h2 = F.relu(self.layer2(h1))
                  return self.layer3(h2)

       Child links are registered via the assignment within a
       ``with self.init_scope():`` block. The forward propagation is often
       implemented as the ``__call__`` operator as the above example, though
       it is not mandatory.

    Args:
        links: Child links. The keywords are used as their names. The names are
            also set to the links.

            .. deprecated:: v2.0.0

               Assign child links directly to attributes, instead.

    """

    def __init__(self, **links):
        super(Chain, self).__init__()
        self._children = set()
        for name, link in six.iteritems(links):
            self.add_link(name, link)

    def __getitem__(self, name):
        """Equivalent to getattr."""
        return getattr(self, name)

    def __setattr__(self, name, value):
        # Assigning a Link inside an init scope registers it as a child.
        if self.within_init_scope and isinstance(value, Link):
            if hasattr(self, name):
                raise AttributeError(
                    'cannot register a new link %s: attribute exists' % name)
            value.name = name
            self._children.add(name)
        super(Chain, self).__setattr__(name, value)

    def __delattr__(self, name):
        self._children.discard(name)
        super(Chain, self).__delattr__(name)

    def add_link(self, name, link):
        """Registers a child link to this chain.

        .. deprecated:: v2.0.0

           Assign the child link directly to an attribute within
           :meth:`an initialization scope <chainer.Link.init_scope>`, instead.
           For example, the following code

           .. code-block:: python

              chain.add_link('l1', L.Linear(3, 5))

           can be replaced by the following line.

           .. code-block:: python

              with self.init_scope():
                  chain.l1 = L.Linear(3, 5)

           The latter one is easier for IDEs to keep track of the attribute's
           type.

        Args:
            name (str): Name of the child link. This name is also used as the
                attribute name.
            link (Link): The link object to be registered.

        """
        # Fixed typo in the user-facing message: "registeration" -> "registration"
        warnings.warn('''\
Child link registration via Chain.__init__ and Chain.add_link are deprecated.
Assign a Link object directly to an attribute within a \
"with link.init_scope():" block instead.
''', DeprecationWarning)
        if name in self.__dict__:
            raise AttributeError(
                'cannot register a new link %s: attribute exists' % name)
        if not isinstance(link, Link):
            raise TypeError('cannot register a non-link object as a child')
        with self.init_scope():
            setattr(self, name, link)

    def copy(self):
        ret = super(Chain, self).copy()
        ret._children = set(ret._children)
        d = ret.__dict__
        for name in ret._children:
            # copy child links recursively
            copied = d[name].copy()
            copied.name = name
            d[name] = copied
        return ret

    def to_cpu(self):
        super(Chain, self).to_cpu()
        d = self.__dict__
        for name in self._children:
            d[name].to_cpu()
        return self

    def to_gpu(self, device=None):
        with cuda._get_device(device):
            super(Chain, self).to_gpu()
            d = self.__dict__
            for name in self._children:
                d[name].to_gpu()
        return self

    def params(self, include_uninit=True):
        for param in super(Chain, self).params(include_uninit):
            yield param
        d = self.__dict__
        for name in self._children:
            for param in d[name].params(include_uninit):
                yield param

    def namedparams(self, include_uninit=True):
        for ret in super(Chain, self).namedparams(include_uninit):
            yield ret
        d = self.__dict__
        for name in self._children:
            prefix = '/' + name
            for path, param in d[name].namedparams(include_uninit):
                yield prefix + path, param

    def links(self, skipself=False):
        if not skipself:
            yield self
        d = self.__dict__
        for name in self._children:
            for link in d[name].links():
                yield link

    def namedlinks(self, skipself=False):
        if not skipself:
            yield '/', self
        d = self.__dict__
        for name in self._children:
            child = d[name]
            prefix = '/' + name
            yield prefix, child
            for path, link in d[name].namedlinks(True):
                yield prefix + path, link

    def children(self):
        d = self.__dict__
        for name in self._children:
            yield d[name]

    def copyparams(self, link):
        super(Chain, self).copyparams(link)
        src = link.__dict__
        dst = self.__dict__
        for name in self._children:
            dst[name].copyparams(src[name])

    def addgrads(self, link):
        super(Chain, self).addgrads(link)
        src = link.__dict__
        dst = self.__dict__
        for name in self._children:
            dst[name].addgrads(src[name])

    def serialize(self, serializer):
        super(Chain, self).serialize(serializer)
        d = self.__dict__
        for name in self._children:
            # each child serializes under its own sub-serializer
            d[name].serialize(serializer[name])
class ChainList(Link):

    """Composable link with list-like interface.

    This is another example of compositional link. Unlike :class:`Chain`, this
    class can be used like a list of child links. Each child link is indexed by
    a non-negative integer, and it maintains the current number of registered
    child links. The :meth:`add_link` method inserts a new link at the end of
    the list. It is useful to write a chain with arbitrary number of child
    links, e.g. an arbitrarily deep multi-layer perceptron.

    Note that this class does not implement all methods of :class:`list`.

    Args:
        links: Initial child links.

    """

    def __init__(self, *links):
        super(ChainList, self).__init__()
        # ordered list of child links; index doubles as the child's name
        self._children = []
        for link in links:
            self.add_link(link)

    def __getitem__(self, index):
        """Returns the child at given index.

        Args:
            index (int): Index of the child in the list.

        Returns:
            Link: The ``index``-th child link.

        """
        return self._children[index]

    def __iter__(self):
        return iter(self._children)

    def __len__(self):
        """Returns the number of children."""
        return len(self._children)

    def append(self, link):
        """Registers a child link and adds it to the tail of the list.

        This is equivalent to :meth:`add_link`. This method has been added to
        emulate the ``list`` interface.

        Args:
            link (Link): The link object to be registered.

        """
        self.add_link(link)

    def add_link(self, link):
        """Registers a child link and adds it to the tail of the list.

        Args:
            link (Link): The link object to be registered.

        """
        # the child's name is its position in the list
        link.name = str(len(self._children))
        self._children.append(link)

    def copy(self):
        ret = super(ChainList, self).copy()
        ret._children = list(ret._children)  # copy
        children = ret._children
        for i, child in enumerate(children):
            # copy child links recursively, renaming by index
            child = child.copy()
            child.name = str(i)
            children[i] = child
        return ret

    def to_cpu(self):
        super(ChainList, self).to_cpu()
        for link in self._children:
            link.to_cpu()
        return self

    def to_gpu(self, device=None):
        with cuda._get_device(device):
            super(ChainList, self).to_gpu()
            for link in self._children:
                link.to_gpu()
        return self

    def params(self, include_uninit=True):
        for param in super(ChainList, self).params(include_uninit):
            yield param
        for link in self._children:
            for param in link.params(include_uninit):
                yield param

    def namedparams(self, include_uninit=True):
        for ret in super(ChainList, self).namedparams(include_uninit):
            yield ret
        for idx, link in enumerate(self._children):
            prefix = '/%d' % idx
            for path, param in link.namedparams(include_uninit):
                yield prefix + path, param

    def links(self, skipself=False):
        if not skipself:
            yield self
        for child in self._children:
            for link in child.links():
                yield link

    def namedlinks(self, skipself=False):
        if not skipself:
            yield '/', self
        for idx, child in enumerate(self._children):
            prefix = '/%d' % idx
            yield prefix, child
            for path, link in child.namedlinks(True):
                yield prefix + path, link

    def children(self):
        for child in self._children:
            yield child

    def copyparams(self, link):
        super(ChainList, self).copyparams(link)
        for idx, child in enumerate(self._children):
            child.copyparams(link[idx])

    def addgrads(self, link):
        super(ChainList, self).addgrads(link)
        for idx, child in enumerate(self._children):
            child.addgrads(link[idx])

    def serialize(self, serializer):
        super(ChainList, self).serialize(serializer)
        for idx, child in enumerate(self._children):
            # each child serializes under its index key
            child.serialize(serializer['%d' % idx])
|
import argparse
from rasa.cli.arguments.default_arguments import (
add_domain_param,
add_stories_param,
add_model_param,
add_endpoint_param,
)
from rasa.cli.arguments.train import (
add_force_param,
add_data_param,
add_config_param,
add_out_param,
add_debug_plots_param,
add_dump_stories_param,
add_augmentation_param,
)
def set_interactive_arguments(parser: argparse.ArgumentParser):
    """Specifies CLI arguments for ``rasa interactive``.

    Registers the model/data/endpoint options on *parser* and groups the
    training-related options under a "Train Arguments" section.
    """
    add_model_param(parser, default=None)
    add_data_param(parser)
    add_skip_visualization_param(parser)
    add_endpoint_param(
        parser,
        help_text="Configuration file for the model server and the connectors as a yml file.",
    )

    train_arguments = parser.add_argument_group("Train Arguments")
    add_config_param(train_arguments)
    add_domain_param(train_arguments)
    add_out_param(train_arguments)
    # Expose the same training knobs as `rasa interactive core` — the module
    # already imports these helpers; previously they were never used here.
    add_augmentation_param(train_arguments)
    add_debug_plots_param(train_arguments)
    add_dump_stories_param(train_arguments)
    add_force_param(train_arguments)
def set_interactive_core_arguments(parser: argparse.ArgumentParser):
    """Specifies CLI arguments for ``rasa interactive core``."""
    add_model_param(parser, model_name="Rasa Core", default=None)
    add_stories_param(parser)
    add_skip_visualization_param(parser)
    add_endpoint_param(
        parser,
        help_text=(
            "Configuration file for the model server and the connectors as a "
            "yml file."
        ),
    )

    # training-related options live in their own help section
    group = parser.add_argument_group("Train Arguments")
    for register in (
        add_config_param,
        add_domain_param,
        add_out_param,
        add_augmentation_param,
        add_debug_plots_param,
        add_dump_stories_param,
    ):
        register(group)
def add_skip_visualization_param(parser: argparse.ArgumentParser):
    """Adds the ``--skip-visualization`` flag (default ``False``) to *parser*."""
    parser.add_argument(
        "--skip-visualization",
        action="store_true",
        default=False,
        help="Disables plotting the visualization during interactive learning.",
    )
Add more train arguments to rasa interactive.
import argparse
from rasa.cli.arguments.default_arguments import (
add_domain_param,
add_stories_param,
add_model_param,
add_endpoint_param,
)
from rasa.cli.arguments.train import (
add_force_param,
add_data_param,
add_config_param,
add_out_param,
add_debug_plots_param,
add_dump_stories_param,
add_augmentation_param,
)
def set_interactive_arguments(parser: argparse.ArgumentParser):
    """Specifies CLI arguments for ``rasa interactive``."""
    add_model_param(parser, default=None)
    add_data_param(parser)
    add_skip_visualization_param(parser)
    add_endpoint_param(
        parser,
        help_text=(
            "Configuration file for the model server and the connectors as a "
            "yml file."
        ),
    )

    # training-related options live in their own help section
    group = parser.add_argument_group("Train Arguments")
    for register in (
        add_config_param,
        add_domain_param,
        add_out_param,
        add_augmentation_param,
        add_debug_plots_param,
        add_dump_stories_param,
        add_force_param,
    ):
        register(group)
def set_interactive_core_arguments(parser: argparse.ArgumentParser):
    """Specifies CLI arguments for ``rasa interactive core``."""
    add_model_param(parser, model_name="Rasa Core", default=None)
    add_stories_param(parser)
    add_skip_visualization_param(parser)
    add_endpoint_param(
        parser,
        help_text=(
            "Configuration file for the model server and the connectors as a "
            "yml file."
        ),
    )

    # training-related options live in their own help section
    group = parser.add_argument_group("Train Arguments")
    for register in (
        add_config_param,
        add_domain_param,
        add_out_param,
        add_augmentation_param,
        add_debug_plots_param,
        add_dump_stories_param,
    ):
        register(group)
def add_skip_visualization_param(parser: argparse.ArgumentParser):
    """Adds the ``--skip-visualization`` flag (default ``False``) to *parser*."""
    parser.add_argument(
        "--skip-visualization",
        action="store_true",
        default=False,
        help="Disables plotting the visualization during interactive learning.",
    )
|
from abc import ABCMeta, abstractmethod
from firedrake import exp, Interpolator, conditional, Function, \
min_value, max_value
__all__ = ["Condensation"]
class Physics(object, metaclass=ABCMeta):
    """Abstract base class for Gusto physics processes.

    Subclasses implement :meth:`apply`, which advances the fields the
    process owns by one time step.

    :arg state: :class:`.State` object.
    """

    def __init__(self, state):
        # the simulation state this process reads from and writes to
        self.state = state

    @abstractmethod
    def apply(self):
        """Computes the values of the affected fields at the next time step."""
        pass
class Condensation(Physics):
    """
    The process of condensation of water vapour
    into liquid water and evaporation of liquid
    water into water vapour, with the associated
    latent heat changes.

    :arg state: :class:`.State.` object.
    """

    def __init__(self, state):
        super(Condensation, self).__init__(state)

        # obtain our fields
        self.theta = state.fields('theta')
        self.water_v = state.fields('water_v')
        self.water_c = state.fields('water_c')
        rho = state.fields('rho')

        # declare function space
        Vt = self.theta.function_space()

        param = self.state.parameters

        # define some parameters as attributes
        dt = self.state.timestepping.dt
        R_d = param.R_d
        p_0 = param.p_0
        kappa = param.kappa
        cp = param.cp
        cv = param.cv
        c_pv = param.c_pv
        c_pl = param.c_pl
        c_vv = param.c_vv
        R_v = param.R_v
        L_v0 = param.L_v0
        T_0 = param.T_0
        w_sat1 = param.w_sat1
        w_sat2 = param.w_sat2
        w_sat3 = param.w_sat3
        w_sat4 = param.w_sat4

        # make useful fields (UFL expressions referencing the live Functions)
        Pi = ((R_d * rho * self.theta / p_0)
              ** (kappa / (1.0 - kappa)))
        T = Pi * self.theta * R_d / (R_d + self.water_v * R_v)
        p = p_0 * Pi ** (1.0 / kappa)
        L_v = L_v0 - (c_pl - c_pv) * (T - T_0)
        R_m = R_d + R_v * self.water_v
        c_pml = cp + c_pv * self.water_v + c_pl * self.water_c
        c_vml = cv + c_vv * self.water_v + c_pl * self.water_c

        # use Tetens' formula to calculate w_sat
        w_sat = (w_sat1 /
                 (p * exp(w_sat2 * (T - T_0) / (T - w_sat3)) - w_sat4))

        # make appropriate condensation rate
        dot_r_cond = ((self.water_v - w_sat) /
                      (dt * (1.0 + ((L_v ** 2.0 * w_sat) /
                                    (cp * R_v * T ** 2.0)))))

        # make cond_rate function, that needs to be the same for all updates in one time step
        self.cond_rate = Function(Vt)

        # adjust cond rate so negative concentrations don't occur
        self.lim_cond_rate = Interpolator(conditional(dot_r_cond < 0,
                                                      max_value(dot_r_cond, - self.water_c / dt),
                                                      min_value(dot_r_cond, self.water_v / dt)), self.cond_rate)

        # tell the prognostic fields what to update to
        self.water_v_new = Interpolator(self.water_v - dt * self.cond_rate, Vt)
        self.water_c_new = Interpolator(self.water_c + dt * self.cond_rate, Vt)
        self.theta_new = Interpolator(self.theta *
                                      (1.0 + dt * self.cond_rate *
                                       (cv * L_v / (c_vml * cp * T) -
                                        R_v * cv * c_pml / (R_m * cp * c_vml))), Vt)

    def apply(self):
        self.lim_cond_rate.interpolate()
        # Fix: theta must be updated first. theta_new's expression depends on
        # water_v and water_c (through T, L_v, R_m, c_pml, c_vml), so updating
        # the moisture fields before interpolating theta_new would evaluate
        # theta from post-update moisture instead of the old time level.
        # water_v_new / water_c_new depend only on the moisture fields and
        # cond_rate, so they are unaffected by the theta update.
        self.theta.assign(self.theta_new.interpolate())
        self.water_v.assign(self.water_v_new.interpolate())
        self.water_c.assign(self.water_c_new.interpolate())
Introduce fallout.
from abc import ABCMeta, abstractmethod
from gusto.transport_equation import EmbeddedDGAdvection
from gusto.advection import SSPRK3
from firedrake import exp, Interpolator, conditional, Function, \
min_value, max_value, as_vector
__all__ = ["Condensation", "Fallout"]
class Physics(object, metaclass=ABCMeta):
    """Abstract base class for Gusto physics processes.

    Subclasses implement :meth:`apply`, which advances the fields the
    process owns by one time step.

    :arg state: :class:`.State` object.
    """

    def __init__(self, state):
        # the simulation state this process reads from and writes to
        self.state = state

    @abstractmethod
    def apply(self):
        """Computes the values of the affected fields at the next time step."""
        pass
class Condensation(Physics):
    """
    The process of condensation of water vapour
    into liquid water and evaporation of liquid
    water into water vapour, with the associated
    latent heat changes.

    :arg state: :class:`.State.` object.
    """

    def __init__(self, state):
        super(Condensation, self).__init__(state)

        # obtain our fields
        self.theta = state.fields('theta')
        self.water_v = state.fields('water_v')
        self.water_c = state.fields('water_c')
        rho = state.fields('rho')

        # declare function space
        Vt = self.theta.function_space()

        param = self.state.parameters

        # define some parameters as attributes
        dt = self.state.timestepping.dt
        R_d = param.R_d
        p_0 = param.p_0
        kappa = param.kappa
        cp = param.cp
        cv = param.cv
        c_pv = param.c_pv
        c_pl = param.c_pl
        c_vv = param.c_vv
        R_v = param.R_v
        L_v0 = param.L_v0
        T_0 = param.T_0
        w_sat1 = param.w_sat1
        w_sat2 = param.w_sat2
        w_sat3 = param.w_sat3
        w_sat4 = param.w_sat4

        # make useful fields (UFL expressions referencing the live Functions,
        # so they always evaluate against the fields' current values)
        Pi = ((R_d * rho * self.theta / p_0)
              ** (kappa / (1.0 - kappa)))
        T = Pi * self.theta * R_d / (R_d + self.water_v * R_v)
        p = p_0 * Pi ** (1.0 / kappa)
        L_v = L_v0 - (c_pl - c_pv) * (T - T_0)
        R_m = R_d + R_v * self.water_v
        c_pml = cp + c_pv * self.water_v + c_pl * self.water_c
        c_vml = cv + c_vv * self.water_v + c_pl * self.water_c

        # use Tetens' formula to calculate the saturation mixing ratio w_sat
        w_sat = (w_sat1 /
                 (p * exp(w_sat2 * (T - T_0) / (T - w_sat3)) - w_sat4))

        # make appropriate condensation rate
        dot_r_cond = ((self.water_v - w_sat) /
                      (dt * (1.0 + ((L_v ** 2.0 * w_sat) /
                                    (cp * R_v * T ** 2.0)))))

        # make cond_rate function, that needs to be the same for all updates in one time step
        self.cond_rate = Function(Vt)

        # adjust cond rate so negative concentrations don't occur
        self.lim_cond_rate = Interpolator(conditional(dot_r_cond < 0,
                                                      max_value(dot_r_cond, - self.water_c / dt),
                                                      min_value(dot_r_cond, self.water_v / dt)), self.cond_rate)

        # tell the prognostic fields what to update to
        self.water_v_new = Interpolator(self.water_v - dt * self.cond_rate, Vt)
        self.water_c_new = Interpolator(self.water_c + dt * self.cond_rate, Vt)
        self.theta_new = Interpolator(self.theta *
                                      (1.0 + dt * self.cond_rate *
                                       (cv * L_v / (c_vml * cp * T) -
                                        R_v * cv * c_pml / (R_m * cp * c_vml))), Vt)

    def apply(self):
        # freeze the limited condensation rate for this step
        self.lim_cond_rate.interpolate()
        # theta must be updated first: theta_new's expression depends on the
        # moisture fields, while water_v_new / water_c_new do not depend on theta
        self.theta.assign(self.theta_new.interpolate())
        self.water_v.assign(self.water_v_new.interpolate())
        self.water_c.assign(self.water_c_new.interpolate())
class Fallout(Physics):
    """
    The fallout (sedimentation) process of hydrometeors.

    Rain is advected downwards at a fixed terminal velocity using an
    embedded-DG advection scheme with outflow boundary conditions.

    :arg state :class: `.State.` object.
    """

    def __init__(self, state):
        super(Fallout, self).__init__(state)

        self.state = state
        self.rain = state.fields('rain')

        # function spaces
        Vt = self.rain.function_space()
        Vu = state.fields('u').function_space()

        # introduce sedimentation rate
        # for now assume all rain falls at terminal velocity
        terminal_velocity = 10  # in m/s
        self.v = state.fields("rainfall_velocity", Vu)
        self.v.project(as_vector([0, -terminal_velocity]))

        # (removed a stray no-op `state.fields` expression statement here)
        advection_equation = EmbeddedDGAdvection(
            state, Vt, equation_form="advective", outflow=True)
        self.advection_method = SSPRK3(state, self.rain, advection_equation)

    def apply(self):
        # advect rain downwards with the fixed rainfall velocity field
        for _ in range(self.state.timestepping.maxk):
            self.advection_method.update_ubar(self.v, self.v, 0)
            self.advection_method.apply(self.rain, self.rain)
|
""" Karr Lab build utilities
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2017-08-02
:Copyright: 2016, Karr Lab
:License: MIT
"""
from glob import glob
from junit2htmlreport.parser import Junit as JunitParser
from nose2unitth.core import Converter as Nose2UnitthConverter
import iocapture
import os
import pysftp
import shutil
import subprocess
import tempfile
class BuildHelper(object):
    """ Utility class to help build projects:

    * Run tests
    * Generate HTML test history reports
    * Generate HTML coverage reports
    * Generate HTML API documentation
    * Archive reports to lab server and Coveralls

    Attributes:
        code_server_hostname (:obj:`str`): hostname of server where reports should be uploaded
        code_server_username (:obj:`str`): username for server where reports should be uploaded
        code_server_password (:obj:`str`): password for server where reports should be uploaded
        code_server_base_dir (:obj:`str`): base directory on server where reports should be uploaded
        project_name (:obj:`str`): name of project, e.g. GitHub repository name
        build_num (:obj:`int`): CircleCI build number
        package_dir (:obj:`str`): package directories to generate coverage reports for and document
        proj_tests_dir (:obj:`str`): local directory with test code
        proj_tests_nose_latest_filename (:obj:`str`): file name to store latest XML test report
        proj_tests_nose_dir (:obj:`str`): local directory where the test reports generated by nose should be saved
        proj_tests_unitth_dir (:obj:`str`): local directory where UnitTH input should be saved
        proj_tests_html_dir (:obj:`str`): local directory where HTML test history report should be saved
        proj_cov_filename (:obj:`str`): file name where coverage report should be saved
        proj_cov_html_dir (:obj:`str`): local directory where HTML coverage report should be saved
        proj_docs_dir (:obj:`str`): local directory with Sphinx configuration
        proj_docs_static_dir (:obj:`str`): local directory of static documentation files
        proj_docs_source_dir (:obj:`str`): local directory of source documentation files created by sphinx-apidoc
        proj_docs_build_html_dir (:obj:`str`): local directory where generated HTML documentation should be saved
        serv_tests_nose_dir (:obj:`str`): server directory where the test reports generated by nose should be saved
        serv_tests_unitth_dir (:obj:`str`): server directory where UnitTH input should be saved
        serv_tests_html_dir (:obj:`str`): server directory where HTML test history report should be saved
        serv_cov_html_dir (:obj:`str`): server directory where HTML coverage report should be saved
        serv_docs_build_html_dir (:obj:`str`): server directory where generated HTML documentation should be saved
        build_artifacts_dir (:obj:`str`): directory which CircleCI will record with each build
        build_test_dir (:obj:`str`): directory where CircleCI will look for test results
        _sftp (:obj:`pysftp.Connection`): sFTP connection to lab server
    """

    DEFAULT_CODE_SERVER_HOSTNAME = 'code.karrlab.org'
    # :obj:`str`: default hostname of server where reports should be uploaded

    DEFAULT_CODE_SERVER_USERNAME = 'karrlab_code'
    # :obj:`str`: default username for server where reports should be uploaded

    DEFAULT_CODE_SERVER_BASE_DIR = '/home/karrlab_code/code.karrlab.org'
    # :obj:`str`: default base directory on server where reports should be uploaded

    DEFAULT_PROJ_TESTS_DIR = 'tests'
    # :obj:`str`: default local directory with test code

    DEFAULT_PROJ_TESTS_NOSE_LATEST_FILENAME = 'latest.xml'
    # :obj:`str`: default file name to store latest XML test report

    DEFAULT_PROJ_TESTS_NOSE_DIR = 'tests/reports/nose'
    # :obj:`str`: default local directory where the test reports generated by nose should be saved

    DEFAULT_PROJ_TESTS_UNITTH_DIR = 'tests/reports/unitth'
    # :obj:`str`: default local directory where UnitTH input should be saved

    DEFAULT_PROJ_TESTS_HTML_DIR = 'tests/reports/html'
    # :obj:`str`: default local directory where HTML test history report should be saved

    DEFAULT_PROJ_COV_FILENAME = '.coverage'
    # :obj:`str`: default coverage file name

    DEFAULT_PROJ_COV_HTML_DIR = 'tests/reports/coverage'
    # :obj:`str`: default local directory where HTML coverage report should be saved

    DEFAULT_PROJ_DOCS_DIR = 'docs'
    # :obj:`str`: default local directory with Sphinx configuration

    DEFAULT_PROJ_DOCS_STATIC_DIR = 'docs/_static'
    # :obj:`str`: default local directory of static documentation files

    DEFAULT_PROJ_DOCS_SOURCE_DIR = 'docs/source'
    # :obj:`str`: default local directory of source documentation files created by sphinx-apidoc

    DEFAULT_PROJ_DOCS_BUILD_HTML_DIR = 'docs/_build/html'
    # :obj:`str`: default local directory where generated HTML documentation should be saved

    DEFAULT_SERV_TESTS_NOSE_DIR = 'tests/nose'
    # :obj:`str`: default server directory where the test reports generated by nose should be saved

    DEFAULT_SERV_TESTS_UNITTH_DIR = 'tests/unitth'
    # :obj:`str`: default server directory where UnitTH input should be saved

    DEFAULT_SERV_TESTS_HTML_DIR = 'tests/html'
    # :obj:`str`: default server directory where HTML test history report should be saved

    DEFAULT_SERV_COV_HTML_DIR = 'tests/coverage'
    # :obj:`str`: default server directory where HTML coverage report should be saved

    DEFAULT_SERV_DOCS_BUILD_HTML_DIR = 'docs'
    # :obj:`str`: default server directory where generated HTML documentation should be saved

    def __init__(self):
        """ Construct build helper """

        # get settings from environment variables
        self.code_server_hostname = os.getenv('CODE_SERVER_HOSTNAME', BuildHelper.DEFAULT_CODE_SERVER_HOSTNAME)
        self.code_server_username = os.getenv('CODE_SERVER_USERNAME', BuildHelper.DEFAULT_CODE_SERVER_USERNAME)
        self.code_server_password = os.getenv('CODE_SERVER_PASSWORD')
        self.code_server_base_dir = os.getenv('CODE_SERVER_BASE_DIR', BuildHelper.DEFAULT_CODE_SERVER_BASE_DIR)

        self.project_name = os.getenv('CIRCLE_PROJECT_REPONAME', '')
        # float() first so values like '12.0' are also accepted
        self.build_num = int(float(os.getenv('CIRCLE_BUILD_NUM', 0)))
        # convention: the package directory is the lower-cased repo name with dashes replaced
        self.package_dir = self.project_name.lower().replace('-', '_')

        self.proj_tests_dir = BuildHelper.DEFAULT_PROJ_TESTS_DIR
        self.proj_tests_nose_latest_filename = BuildHelper.DEFAULT_PROJ_TESTS_NOSE_LATEST_FILENAME
        self.proj_tests_nose_dir = BuildHelper.DEFAULT_PROJ_TESTS_NOSE_DIR
        self.proj_tests_unitth_dir = BuildHelper.DEFAULT_PROJ_TESTS_UNITTH_DIR
        self.proj_tests_html_dir = BuildHelper.DEFAULT_PROJ_TESTS_HTML_DIR
        self.proj_cov_filename = BuildHelper.DEFAULT_PROJ_COV_FILENAME
        self.proj_cov_html_dir = BuildHelper.DEFAULT_PROJ_COV_HTML_DIR
        self.proj_docs_dir = BuildHelper.DEFAULT_PROJ_DOCS_DIR
        self.proj_docs_static_dir = BuildHelper.DEFAULT_PROJ_DOCS_STATIC_DIR
        self.proj_docs_source_dir = BuildHelper.DEFAULT_PROJ_DOCS_SOURCE_DIR
        self.proj_docs_build_html_dir = BuildHelper.DEFAULT_PROJ_DOCS_BUILD_HTML_DIR

        self.serv_tests_nose_dir = BuildHelper.DEFAULT_SERV_TESTS_NOSE_DIR
        self.serv_tests_unitth_dir = BuildHelper.DEFAULT_SERV_TESTS_UNITTH_DIR
        self.serv_tests_html_dir = BuildHelper.DEFAULT_SERV_TESTS_HTML_DIR
        self.serv_cov_html_dir = BuildHelper.DEFAULT_SERV_COV_HTML_DIR
        self.serv_docs_build_html_dir = BuildHelper.DEFAULT_SERV_DOCS_BUILD_HTML_DIR

        self.build_artifacts_dir = os.getenv('CIRCLE_ARTIFACTS')
        self.build_test_dir = os.getenv('CIRCLE_TEST_REPORTS')

        # sFTP connection is created lazily by connect_to_lab_server()
        self._sftp = None

    ########################
    # Installing dependencies
    ########################
    def install_requirements(self):
        """ Install requirements """

        # requirements for package
        subprocess.check_call(['sudo', 'pip', 'install', '-r', 'requirements.txt'])

        # requirements for testing and documentation
        subprocess.check_call(['sudo', 'apt-get', 'install', 'libffi-dev'])
        subprocess.check_call(['sudo', 'pip', 'install', '-r', os.path.join(self.proj_tests_dir, 'requirements.txt')])
        subprocess.check_call(['sudo', 'pip', 'install', '-r', os.path.join(self.proj_docs_dir, 'requirements.txt')])

    ########################
    # Running tests
    ########################
    def run_tests(self, test_path='tests', with_xml_report=False, with_coverage=False):
        """ Run unit tests located at `test_path`.

        Optionally, generate an XML test report and/or assess coverage.

        Args:
            test_path (:obj:`str`, optional): path to tests that should be run
            with_xml_report (:obj:`bool`, optional): whether or not to save an XML test report
            with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed

        Raises:
            :obj:`BuildHelperError`: if coverage is requested but the package directory is not set
        """
        cmd = ['nosetests', test_path]

        abs_nose_latest_filename = os.path.join(self.proj_tests_nose_dir, self.proj_tests_nose_latest_filename)
        if with_xml_report:
            cmd.append('--with-xunit')
            cmd.append('--xunit-file=%s' % abs_nose_latest_filename)
            if not os.path.isdir(self.proj_tests_nose_dir):
                os.makedirs(self.proj_tests_nose_dir)

        if with_coverage:
            if not self.package_dir:
                raise BuildHelperError('Package directory not set')
            cmd.append('--with-coverage')
            cmd.append('--cover-package=%s' % self.package_dir)

        subprocess.check_call(cmd)

        # expose the latest report where CircleCI collects test results
        if with_xml_report and self.build_test_dir:
            shutil.copyfile(abs_nose_latest_filename, os.path.join(self.build_test_dir, 'nose.xml'))

    def make_and_archive_reports(self):
        """ Make and archive reports:

        * Generate HTML test history reports
        * Generate HTML coverage reports
        * Generate HTML API documentation
        * Archive reports to lab server and Coveralls
        """

        # test reports:
        # create directory with test result history
        self.download_nose_test_report_history_from_lab_server()
        shutil.copyfile(
            os.path.join(self.proj_tests_nose_dir, self.proj_tests_nose_latest_filename),
            os.path.join(self.proj_tests_nose_dir, "%d.xml" % self.build_num)
        )

        # make report of test history
        self.make_test_history_report()

        # copy test history to lab server
        self.archive_test_reports()

        # coverage:
        # create HTML report
        self.make_html_coverage_report()

        # copy coverage report to artifacts directory, Coveralls, and lab server
        self.archive_coverage_report()

        # documentation
        self.make_documentation()
        self.archive_documentation()

    ########################
    # Test reports
    ########################
    def download_nose_test_report_history_from_lab_server(self):
        """ Download XML test report history from lab server """
        if not os.path.isdir(self.proj_tests_nose_dir):
            os.makedirs(self.proj_tests_nose_dir)
        # clear any stale per-build reports before downloading fresh copies
        for report_filename in glob(os.path.join(self.proj_tests_nose_dir, "[0-9]*.xml")):
            os.remove(report_filename)

        sftp = self.connect_to_lab_server()
        with iocapture.capture():
            with sftp.cd(os.path.join(self.code_server_base_dir, self.project_name)):
                sftp.makedirs(self.serv_tests_nose_dir)
                sftp.get_d(self.serv_tests_nose_dir, self.proj_tests_nose_dir)

    def make_test_history_report(self):
        """ Make an HTML test history report from a directory of nose-style XML test reports """

        # Make XML and HTML test reports that are readable by UnitTH
        for build_file_path in glob(os.path.join(self.proj_tests_nose_dir, "[0-9]*.xml")):
            build_base_name = os.path.basename(build_file_path)
            build_num = os.path.splitext(build_base_name)[0]

            # Split nose-style XML report into UnitTH-style reports for each package
            if not os.path.isdir(os.path.join(self.proj_tests_unitth_dir, build_num)):
                os.makedirs(os.path.join(self.proj_tests_unitth_dir, build_num))
            Nose2UnitthConverter.run(build_file_path, os.path.join(self.proj_tests_unitth_dir, build_num))

            # Make HTML report from nose-style test XML report
            # note: open in text mode and use file.write(); the previous
            # `print >> html_file, ...` was Python 2-only syntax
            with open(os.path.join(self.proj_tests_unitth_dir, build_num, 'index.html'), 'w') as html_file:
                html_file.write(JunitParser(build_file_path).html())

        # Make HTML test history report
        if not os.path.isdir(self.proj_tests_html_dir):
            os.makedirs(self.proj_tests_html_dir)
        subprocess.check_call(''
                              + 'java '
                              + '-Dunitth.generate.exectimegraphs=true '
                              + '-Dunitth.xml.report.filter= '
                              + '-Dunitth.html.report.path=. '
                              + ('-Dunitth.report.dir=%s ' % self.proj_tests_html_dir)
                              + '-jar lib/unitth/unitth.jar '
                              + os.path.join(self.proj_tests_unitth_dir, '*'),
                              shell=True)

    def archive_test_reports(self):
        """ Archive test report:

        * Upload XML and HTML test reports to lab server
        """
        self.upload_test_reports_to_lab_server()

    def upload_test_reports_to_lab_server(self):
        """ Upload XML and HTML test reports to lab server """
        sftp = self.connect_to_lab_server()
        with iocapture.capture():
            with sftp.cd(os.path.join(self.code_server_base_dir, self.project_name)):
                sftp.makedirs(self.serv_tests_nose_dir)
                sftp.makedirs(self.serv_tests_unitth_dir)
                sftp.makedirs(self.serv_tests_html_dir)
                sftp.makedirs(os.path.join(self.serv_tests_unitth_dir, '%d' % self.build_num))
                sftp.put(os.path.join(self.proj_tests_nose_dir, '%d.xml' % self.build_num),
                         os.path.join(self.serv_tests_nose_dir, '%d.xml' % self.build_num))
                sftp.put_r(os.path.join(self.proj_tests_unitth_dir, '%d' % self.build_num),
                           os.path.join(self.serv_tests_unitth_dir, '%d' % self.build_num))
                sftp.put_r(self.proj_tests_html_dir, self.serv_tests_html_dir)

    ########################
    # Coverage reports
    ########################
    def make_html_coverage_report(self):
        """ Make HTML coverage report from `proj_cov_filename` """
        if not os.path.isdir(self.proj_cov_html_dir):
            os.makedirs(self.proj_cov_html_dir)
        # remove stale files with an explicit loop; a bare map() is a
        # no-op on Python 3 because map is lazy
        for stale_filename in glob(os.path.join(self.proj_cov_html_dir, '*')):
            os.remove(stale_filename)
        subprocess.check_call(['coverage', 'html', '-d', self.proj_cov_html_dir])

    def archive_coverage_report(self):
        """ Archive coverage report:

        * Copy report to artifacts directory
        * Upload report to Coveralls
        * Upload HTML report to lab server
        """

        # copy to artifacts directory
        self.copy_coverage_report_to_artifacts_directory()

        # upload to Coveralls
        self.upload_coverage_report_to_coveralls()

        # upload to lab server
        self.upload_html_coverage_report_to_lab_server()

    def copy_coverage_report_to_artifacts_directory(self):
        """ Copy coverage report to CircleCI artifacts directory """
        if self.build_artifacts_dir:
            shutil.copyfile(self.proj_cov_filename, os.path.join(self.build_artifacts_dir, self.proj_cov_filename))

    def upload_coverage_report_to_coveralls(self):
        """ Upload coverage report to Coveralls (skipped unless a repo token is configured) """
        if os.getenv('COVERALLS_REPO_TOKEN'):
            subprocess.check_call(['coveralls'])

    def upload_html_coverage_report_to_lab_server(self):
        """ Upload HTML coverage report to lab server """
        sftp = self.connect_to_lab_server()
        with iocapture.capture():
            with sftp.cd(os.path.join(self.code_server_base_dir, self.project_name)):
                sftp.makedirs(self.serv_cov_html_dir)
                sftp.put_r(self.proj_cov_html_dir, self.serv_cov_html_dir)

    ########################
    # Documentation
    ########################
    def make_documentation(self):
        """ Make HTML documentation using Sphinx for one or more packages. Save documentation to `proj_docs_build_html_dir`

        Raises:
            :obj:`BuildHelperError`: if the package directory is not set
        """

        # create `proj_docs_static_dir`, if necessary
        # (was the nonexistent `sys.mkdir`, which raised AttributeError)
        if not os.path.isdir(self.proj_docs_static_dir):
            os.makedirs(self.proj_docs_static_dir)

        # compile API documentation
        if not self.package_dir:
            raise BuildHelperError('Package directory not set')
        subprocess.check_call(['sphinx-apidoc', '-f', '-o', self.proj_docs_source_dir, self.package_dir])

        # build HTML documentation
        subprocess.check_call(['sphinx-build', self.proj_docs_dir, self.proj_docs_build_html_dir])

    def archive_documentation(self):
        """ Archive documentation:

        * Upload documentation to lab server
        """
        self.upload_documentation_to_lab_server()

    def upload_documentation_to_lab_server(self):
        """ Upload documentation to lab server """
        sftp = self.connect_to_lab_server()
        with iocapture.capture():
            with sftp.cd(os.path.join(self.code_server_base_dir, self.project_name)):
                sftp.makedirs(self.serv_docs_build_html_dir)
                sftp.put_r(self.proj_docs_build_html_dir, self.serv_docs_build_html_dir)

    def connect_to_lab_server(self):
        """ Connect to lab server, caching the connection on first use

        Returns:
            :obj:`pysftp.Connection`: open sFTP connection

        Raises:
            :obj:`BuildHelperError`: if the project name or code server password is not set
        """
        if not self.project_name:
            raise BuildHelperError('Project name not set')
        if not self.code_server_password:
            raise BuildHelperError('Code server password must be set')

        if not self._sftp:
            # host key checking is disabled for this internal server
            cnopts = pysftp.CnOpts()
            cnopts.hostkeys = None
            with iocapture.capture():
                self._sftp = pysftp.Connection(self.code_server_hostname,
                                               username=self.code_server_username,
                                               password=self.code_server_password,
                                               cnopts=cnopts
                                               )
                self._sftp.makedirs(os.path.join(self.code_server_base_dir, self.project_name))
        return self._sftp

    def disconnect_from_lab_server(self):
        """ Disconnect from lab server """
        if self._sftp:
            self._sftp.close()
            self._sftp = None
class BuildHelperError(Exception):
    """ Base class for errors raised by :obj:`BuildHelper` """
    pass
Commit message: remove `sudo` from the pip and apt-get commands in `install_requirements`.
""" Karr Lab build utilities
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2017-08-02
:Copyright: 2016, Karr Lab
:License: MIT
"""
from glob import glob
from junit2htmlreport.parser import Junit as JunitParser
from nose2unitth.core import Converter as Nose2UnitthConverter
import iocapture
import os
import pysftp
import shutil
import subprocess
import tempfile
class BuildHelper(object):
""" Utility class to help build projects:
* Run tests
* Generate HTML test history reports
* Generate HTML coverage reports
* Generate HTML API documentation
* Archive reports to lab server and Coveralls
Attributes:
code_server_hostname (:obj:`str`): hostname of server where reports should be uploaded
code_server_username (:obj:`str`): username for server where reports should be uploaded
code_server_password (:obj:`str`): password for server where reports should be uploaded
code_server_base_dir (:obj:`str`): base directory on server where reports should be uploaded
project_name (:obj:`str`): name of project, e.g. GitHub repository name
build_num (:obj:`int`): CircleCI build number
package_dir (:obj:`str`): package directories to generate coverage reports for and document
proj_tests_dir (:obj:`str`): local directory with test code
proj_tests_nose_latest_filename (:obj:`str`): file name to store latest XML test report
proj_tests_nose_dir (:obj:`str`): local directory where the test reports generated by nose should be saved
proj_tests_unitth_dir (:obj:`str`): local directory where UnitTH input should be saved
proj_tests_html_dir (:obj:`str`): local directory where HTML test history report should be saved
proj_cov_filename (:obj:`str`): file name where coverage report should be saved
proj_cov_html_dir (:obj:`str`): local directory where HTML coverage report should be saved
proj_docs_dir (:obj:`str`): local directory with Sphinx configuration
proj_docs_static_dir (:obj:`str`): local directory of static documentation files
proj_docs_source_dir (:obj:`str`): local directory of source documentation files created by sphinx-apidoc
proj_docs_build_html_dir (:obj:`str`): local directory where generated HTML documentation should be saved
serv_tests_nose_dir (:obj:`str`): server directory where the test reports generated by nose should be saved
serv_tests_unitth_dir (:obj:`str`): server directory where UnitTH input should be saved
serv_tests_html_dir (:obj:`str`): server directory where HTML test history report should be saved
serv_cov_html_dir (:obj:`str`): server directory where HTML coverage report should be saved
serv_docs_build_html_dir (:obj:`str`): server directory where generated HTML documentation should be saved
build_artifacts_dir (:obj:`str`): directory which CircleCI will record with each build
build_test_dir (:obj:`str`): directory where CircleCI will look for test results
_sftp (:obj:`pysftp.Connection`): sFTP connection to lab server
"""
DEFAULT_CODE_SERVER_HOSTNAME = 'code.karrlab.org'
# :obj:`str`: default hostname of server where reports should be uploaded
DEFAULT_CODE_SERVER_USERNAME = 'karrlab_code'
# :obj:`str`: default username for server where reports should be uploaded
DEFAULT_CODE_SERVER_BASE_DIR = '/home/karrlab_code/code.karrlab.org'
# :obj:`str`: default base directory on server where reports should be uploaded
DEFAULT_PROJ_TESTS_DIR = 'tests'
# :obj:`str`: default local directory with test code
DEFAULT_PROJ_TESTS_NOSE_LATEST_FILENAME = 'latest.xml'
# :obj:`str`: default file name to store latest XML test report
DEFAULT_PROJ_TESTS_NOSE_DIR = 'tests/reports/nose'
# :obj:`str`: default local directory where the test reports generated by nose should be saved
DEFAULT_PROJ_TESTS_UNITTH_DIR = 'tests/reports/unitth'
# :obj:`str`: default local directory where UnitTH input should be saved
DEFAULT_PROJ_TESTS_HTML_DIR = 'tests/reports/html'
# :obj:`str`: default local directory where HTML test history report should be saved
DEFAULT_PROJ_COV_FILENAME = '.coverage'
# :obj:`str`: default coverage file name
DEFAULT_PROJ_COV_HTML_DIR = 'tests/reports/coverage'
# :obj:`str`: default local directory where HTML coverage report should be saved
DEFAULT_PROJ_DOCS_DIR = 'docs'
# :obj:`str`: default local directory with Sphinx configuration
DEFAULT_PROJ_DOCS_STATIC_DIR = 'docs/_static'
# :obj:`str`: default local directory of static documentation files
DEFAULT_PROJ_DOCS_SOURCE_DIR = 'docs/source'
# :obj:`str`: default local directory of source documentation files created by sphinx-apidoc
DEFAULT_PROJ_DOCS_BUILD_HTML_DIR = 'docs/_build/html'
# :obj:`str`: default local directory where generated HTML documentation should be saved
DEFAULT_SERV_TESTS_NOSE_DIR = 'tests/nose'
# :obj:`str`: default server directory where the test reports generated by nose should be saved
DEFAULT_SERV_TESTS_UNITTH_DIR = 'tests/unitth'
# :obj:`str`: default server directory where UnitTH input should be saved
DEFAULT_SERV_TESTS_HTML_DIR = 'tests/html'
# :obj:`str`: default server directory where HTML test history report should be saved
DEFAULT_SERV_COV_HTML_DIR = 'tests/coverage'
# :obj:`str`: default server directory where HTML coverage report should be saved
DEFAULT_SERV_DOCS_BUILD_HTML_DIR = 'docs'
# :obj:`str`: default server directory where generated HTML documentation should be saved
def __init__(self):
""" Construct build helper """
# get settings from environment variables
self.code_server_hostname = os.getenv('CODE_SERVER_HOSTNAME', BuildHelper.DEFAULT_CODE_SERVER_HOSTNAME)
self.code_server_username = os.getenv('CODE_SERVER_USERNAME', BuildHelper.DEFAULT_CODE_SERVER_USERNAME)
self.code_server_password = os.getenv('CODE_SERVER_PASSWORD')
self.code_server_base_dir = os.getenv('CODE_SERVER_BASE_DIR', BuildHelper.DEFAULT_CODE_SERVER_BASE_DIR)
self.project_name = os.getenv('CIRCLE_PROJECT_REPONAME', '')
self.build_num = int(float(os.getenv('CIRCLE_BUILD_NUM', 0)))
self.package_dir = self.project_name.lower().replace('-', '_')
self.proj_tests_dir = BuildHelper.DEFAULT_PROJ_TESTS_DIR
self.proj_tests_nose_latest_filename = BuildHelper.DEFAULT_PROJ_TESTS_NOSE_LATEST_FILENAME
self.proj_tests_nose_dir = BuildHelper.DEFAULT_PROJ_TESTS_NOSE_DIR
self.proj_tests_unitth_dir = BuildHelper.DEFAULT_PROJ_TESTS_UNITTH_DIR
self.proj_tests_html_dir = BuildHelper.DEFAULT_PROJ_TESTS_HTML_DIR
self.proj_cov_filename = BuildHelper.DEFAULT_PROJ_COV_FILENAME
self.proj_cov_html_dir = BuildHelper.DEFAULT_PROJ_COV_HTML_DIR
self.proj_docs_dir = BuildHelper.DEFAULT_PROJ_DOCS_DIR
self.proj_docs_static_dir = BuildHelper.DEFAULT_PROJ_DOCS_STATIC_DIR
self.proj_docs_source_dir = BuildHelper.DEFAULT_PROJ_DOCS_SOURCE_DIR
self.proj_docs_build_html_dir = BuildHelper.DEFAULT_PROJ_DOCS_BUILD_HTML_DIR
self.serv_tests_nose_dir = BuildHelper.DEFAULT_SERV_TESTS_NOSE_DIR
self.serv_tests_unitth_dir = BuildHelper.DEFAULT_SERV_TESTS_UNITTH_DIR
self.serv_tests_html_dir = BuildHelper.DEFAULT_SERV_TESTS_HTML_DIR
self.serv_cov_html_dir = BuildHelper.DEFAULT_SERV_COV_HTML_DIR
self.serv_docs_build_html_dir = BuildHelper.DEFAULT_SERV_DOCS_BUILD_HTML_DIR
self.build_artifacts_dir = os.getenv('CIRCLE_ARTIFACTS')
self.build_test_dir = os.getenv('CIRCLE_TEST_REPORTS')
self._sftp = None
########################
# Installing dependencies
########################
def install_requirements(self):
""" Install requirements """
# requirements for package
subprocess.check_call(['pip', 'install', '-r', 'requirements.txt'])
# requirements for testing and documentation
subprocess.check_call(['apt-get', 'install', 'libffi-dev'])
subprocess.check_call(['pip', 'install', '-r', os.path.join(self.proj_tests_dir, 'requirements.txt')])
subprocess.check_call(['pip', 'install', '-r', os.path.join(self.proj_docs_dir, 'requirements.txt')])
########################
# Running tests
########################
def run_tests(self, test_path='tests', with_xml_report=False, with_coverage=False):
""" Run unit tests located at `test_path`.
Optionally, generate a coverage report.
Optionally, save the results to `xml_file`.
Args:
test_path (:obj:`str`, optional): path to tests that should be run
with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
xml_file (:obj:`str`, optional): path to save test results
Raises:
:obj:`BuildHelperError`: If package directory not set
"""
cmd = ['nosetests', test_path]
abs_nose_latest_filename = os.path.join(self.proj_tests_nose_dir, self.proj_tests_nose_latest_filename)
if with_xml_report:
cmd.append('--with-xunit')
cmd.append('--xunit-file=%s' % abs_nose_latest_filename)
if not os.path.isdir(self.proj_tests_nose_dir):
os.makedirs(self.proj_tests_nose_dir)
if with_coverage:
if not self.package_dir:
raise BuildHelperError('Package directory not set')
cmd.append('--with-coverage')
cmd.append('--cover-package=%s' % self.package_dir)
subprocess.check_call(cmd)
if with_xml_report and self.build_test_dir:
shutil.copyfile(abs_nose_latest_filename, os.path.join(self.build_test_dir, 'nose.xml'))
def make_and_archive_reports(self):
""" Make and archive reports;
* Generate HTML test history reports
* Generate HTML coverage reports
* Generate HTML API documentation
* Archive reports to lab server and Coveralls
"""
""" test reports """
# create directory with test result history
self.download_nose_test_report_history_from_lab_server()
shutil.copyfile(
os.path.join(self.proj_tests_nose_dir, self.proj_tests_nose_latest_filename),
os.path.join(self.proj_tests_nose_dir, "%d.xml" % self.build_num)
)
# make report of test history
self.make_test_history_report()
# copy test history to lab server
self.archive_test_reports()
""" coverage """
# Create HTML report
self.make_html_coverage_report()
# Copy coverage report to artifacts directory
# Upload coverage report to Coveralls
# Upload coverage report to lab server
self.archive_coverage_report()
""" documentation """
self.make_documentation()
self.archive_documentation()
########################
# Test reports
########################
def download_nose_test_report_history_from_lab_server(self):
""" Download XML test report history from lab server """
if not os.path.isdir(self.proj_tests_nose_dir):
os.makedirs(self.proj_tests_nose_dir)
for report_filename in glob(os.path.join(self.proj_tests_nose_dir, "[0-9]*.xml")):
os.remove(report_filename)
sftp = self.connect_to_lab_server()
with iocapture.capture() as captured:
with sftp.cd(os.path.join(self.code_server_base_dir, self.project_name)):
sftp.makedirs(self.serv_tests_nose_dir)
sftp.get_d(self.serv_tests_nose_dir, self.proj_tests_nose_dir)
def make_test_history_report(self):
""" Make an HTML test history report from a directory of nose-style XML test reports
Raises:
:obj:`BuildHelperError`:
"""
# Make XML and HTML test reports that are readable UnitTH
for build_file_path in glob(os.path.join(self.proj_tests_nose_dir, "[0-9]*.xml")):
build_base_name = os.path.basename(build_file_path)
build_num = os.path.splitext(build_base_name)[0]
# Split nose-style XML report into UnitTH-style reports for each package
if not os.path.isdir(os.path.join(self.proj_tests_unitth_dir, build_num)):
os.makedirs(os.path.join(self.proj_tests_unitth_dir, build_num))
Nose2UnitthConverter.run(build_file_path, os.path.join(self.proj_tests_unitth_dir, build_num))
# Make HTML report from nose-style test XML report
with open(os.path.join(os.path.join(self.proj_tests_unitth_dir, build_num, 'index.html')), 'wb') as html_file:
print >> html_file, JunitParser(build_file_path).html()
# Make HTML test history report
if not os.path.isdir(self.proj_tests_html_dir):
os.makedirs(self.proj_tests_html_dir)
subprocess.check_call(''
+ 'java '
+ '-Dunitth.generate.exectimegraphs=true '
+ '-Dunitth.xml.report.filter= '
+ '-Dunitth.html.report.path=. '
+ ('-Dunitth.report.dir=%s ' % self.proj_tests_html_dir)
+ '-jar lib/unitth/unitth.jar '
+ os.path.join(self.proj_tests_unitth_dir, '*'),
shell=True)
def archive_test_reports(self):
""" Archive test report:
* Upload XML and HTML test reports to lab server
"""
self.upload_test_reports_to_lab_server()
def upload_test_reports_to_lab_server(self):
""" Upload XML and HTML test reports to lab server """
sftp = self.connect_to_lab_server()
with iocapture.capture() as captured:
with sftp.cd(os.path.join(self.code_server_base_dir, self.project_name)):
sftp.makedirs(self.serv_tests_nose_dir)
sftp.makedirs(self.serv_tests_unitth_dir)
sftp.makedirs(self.serv_tests_html_dir)
sftp.makedirs(os.path.join(self.serv_tests_unitth_dir, '%d' % self.build_num))
sftp.put(os.path.join(self.proj_tests_nose_dir, '%d.xml' % self.build_num),
os.path.join(self.serv_tests_nose_dir, '%d.xml' % self.build_num))
sftp.put_r(os.path.join(self.proj_tests_unitth_dir, '%d' % self.build_num),
os.path.join(self.serv_tests_unitth_dir, '%d' % self.build_num))
sftp.put_r(self.proj_tests_html_dir, self.serv_tests_html_dir)
########################
# Coverage reports
########################
def make_html_coverage_report(self):
""" Make HTML coverage report from `proj_cov_filename` """
if not os.path.isdir(self.proj_cov_html_dir):
os.makedirs(self.proj_cov_html_dir)
map(os.remove, glob(os.path.join(self.proj_cov_html_dir, '*')))
subprocess.check_call(['coverage', 'html', '-d', self.proj_cov_html_dir])
def archive_coverage_report(self):
""" Archive coverage report:
* Copy report to artifacts directory
* Upload report to Coveralls
* Upload HTML report to lab server
"""
# copy to artifacts directory
self.copy_coverage_report_to_artifacts_directory()
# upload to Coveralls
self.upload_coverage_report_to_coveralls()
# upload to lab server
self.upload_html_coverage_report_to_lab_server()
def copy_coverage_report_to_artifacts_directory(self):
""" Copy coverage report to CircleCI artifacts directory """
if self.build_artifacts_dir:
shutil.copyfile(self.proj_cov_filename, os.path.join(self.build_artifacts_dir, self.proj_cov_filename))
def upload_coverage_report_to_coveralls(self):
""" Upload coverage report to Coveralls """
if os.getenv('COVERALLS_REPO_TOKEN'):
subprocess.check_call('coveralls')
def upload_html_coverage_report_to_lab_server(self):
""" Upload HTML coverage report to lab server """
sftp = self.connect_to_lab_server()
with iocapture.capture() as captured:
with sftp.cd(os.path.join(self.code_server_base_dir, self.project_name)):
sftp.makedirs(self.serv_cov_html_dir)
sftp.put_r(self.proj_cov_html_dir, self.serv_cov_html_dir)
########################
# Documentation
########################
def make_documentation(self):
""" Make HTML documentation using Sphinx for one or more packages. Save documentation to `proj_docs_build_html_dir`
Raises:
:obj:`BuildHelperError`: If project name or code server password not set
"""
# create `proj_docs_static_dir`, if necessary
if not os.path.isdir(self.proj_docs_static_dir):
sys.mkdir(self.proj_docs_static_dir)
# compile API documentation
if not self.package_dir:
raise BuildHelperError('Package directory not set')
subprocess.check_call(['sphinx-apidoc', '-f', '-o', self.proj_docs_source_dir, self.package_dir])
# build HTML documentation
subprocess.check_call(['sphinx-build', self.proj_docs_dir, self.proj_docs_build_html_dir])
def archive_documentation(self):
""" Archive documentation:
* Upload documentation to lab server
"""
self.upload_documentation_to_lab_server()
def upload_documentation_to_lab_server(self):
""" Upload documentation to lab server """
sftp = self.connect_to_lab_server()
with iocapture.capture() as captured:
with sftp.cd(os.path.join(self.code_server_base_dir, self.project_name)):
sftp.makedirs(self.serv_docs_build_html_dir)
sftp.put_r(self.proj_docs_build_html_dir, self.serv_docs_build_html_dir)
def connect_to_lab_server(self):
""" Connect to lab server
Raises:
:obj:`BuildHelperError`: If project name or code server password not set
"""
if not self.project_name:
raise BuildHelperError('Project name not set')
if not self.code_server_password:
raise BuildHelperError('Code server password must be set')
if not self._sftp:
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
with iocapture.capture() as captured:
self._sftp = pysftp.Connection(self.code_server_hostname,
username=self.code_server_username,
password=self.code_server_password,
cnopts=cnopts
)
self._sftp.makedirs(os.path.join(self.code_server_base_dir, self.project_name))
return self._sftp
def disconnect_from_lab_server(self):
    """ Close the cached SFTP connection to the lab server, if any. """
    if not self._sftp:
        # nothing to tear down
        return
    self._sftp.close()
    self._sftp = None
class BuildHelperError(Exception):
    """ Base error raised for build-helper failures. """
|
"""
Class description of a DNA chain built of base pairs
"""
from __future__ import division, unicode_literals, print_function
import basepair
import numpy as np
import matplotlib.pyplot as plt
import rotations as r
from mpl_toolkits.mplot3d import Axes3D # NOQA
from copy import deepcopy
import rotations as rot
BP_SEPARATION = 3.32 # Angstrom
BP_ROTATION = 36 / 180. * np.pi # degrees
class DNAChain(object):
    """A DNA chain built of base pairs stacked along the +z axis.

    Each genome character becomes one ``basepair.BasePair``; consecutive
    pairs are separated by BP_SEPARATION angstrom and rotated about z by
    BP_ROTATION (the helical twist).
    """

    def __init__(self, genome, chain=0):
        """
        DNAChain(genome)

        Construct a DNA chain from a genome of GATC

        args:
            genome: string of G/A/T/C characters specifying base order
        kwargs:
            chain: integer chain index stored on every base pair
        """
        self.basepairs_chain0 = self.makeFromGenome(genome, chain=chain)
        self.basepairs = self.basepairs_chain0
        self.center_in_z()

    @staticmethod
    def makeFromGenome(genome, chain=0):
        """Return a list of BasePair objects, one per genome character."""
        dnachain = []
        position = np.array([0, 0, 0], dtype=float)
        rotation = np.array([0, 0, 0], dtype=float)
        index = 0
        for char in genome:
            # NOTE: debug print of each appended base removed; construction
            # of large chains should be silent
            dnachain.append(
                basepair.BasePair(char, chain=chain, position=position,
                                  rotation=rotation, index=index))
            position += np.array([0., 0., BP_SEPARATION])
            rotation += np.array([0., 0., BP_ROTATION])
            index += 1
        return dnachain

    @staticmethod
    def turnChain(chain):
        """Bend a straight chain (built along z) through a 90 degree arc.

        Molecules are modified in place; the same list is returned.
        NOTE(review): assumes the chain spans a non-zero z range -- a
        single-plane chain would divide by zero. TODO confirm callers.
        """
        zmax = 0
        zmin = 0
        for pair in chain:
            for (name, mol) in pair.iterMolecules():
                if mol.position[2] < zmin:
                    zmin = mol.position[2]
                elif mol.position[2] > zmax:
                    zmax = mol.position[2]
        zrange = zmax - zmin
        radius = 2. * zrange / np.pi
        for pair in chain:
            for (name, mol) in pair.iterMolecules():
                # Translation of the frame - new center position
                theta = np.pi / 2. * (mol.position[2] - zmin) / zrange
                neworigin = np.array([radius * (1 - np.cos(theta)),
                                      0.,
                                      radius * np.sin(theta) - radius])
                # rotation of the frame
                oldframe = np.array([mol.position[0], mol.position[1], 0])
                yrotation = np.pi / 2. * (mol.position[2] - zmin) / zrange
                newframe = np.dot(r.roty(yrotation), oldframe)
                mol.position[0] = neworigin[0] + newframe[0]
                mol.position[1] = neworigin[1] + newframe[1]
                mol.position[2] = neworigin[2] + newframe[2]
                mol.rotate(np.array([0, yrotation, 0]))
        return chain

    @staticmethod
    def turnAndTwistChain(chain):
        """Like turnChain, but also rotates each molecule about x while
        turning, producing a twist along the arc."""
        zmax = 0
        zmin = 0
        for pair in chain:
            for (name, mol) in pair.iterMolecules():
                if mol.position[2] < zmin:
                    zmin = mol.position[2]
                elif mol.position[2] > zmax:
                    zmax = mol.position[2]
        zrange = zmax - zmin
        radius = 2. * zrange / np.pi
        for pair in chain:
            for (name, mol) in pair.iterMolecules():
                # Translation of the frame - new center position
                theta = np.pi / 2. * (mol.position[2] - zmin) / zrange
                neworigin = np.array([radius * (1 - np.cos(theta)),
                                      0.,
                                      radius * np.sin(theta) - radius])
                # rotation of the frame
                oldframe = np.array([mol.position[0], mol.position[1], 0])
                yang = np.pi / 2. * (mol.position[2] - zmin) / zrange
                xang = np.pi / 2. * (mol.position[2] - zmin) / zrange
                # stray debug print of mol.position[2] removed
                newframe = np.dot(r.rotx(xang), np.dot(r.roty(yang), oldframe))
                mol.position[0] = neworigin[0] + newframe[0]
                mol.position[1] = neworigin[1] + newframe[1]
                mol.position[2] = neworigin[2] + newframe[2]
                mol.rotate(np.array([xang, yang, 0]))
        return chain

    def center_in_z(self):
        """
        DNAChain.center_in_z()

        Center the molecule around the z=0 plane
        """
        minz = 0
        maxz = 0
        for bp in self.basepairs:
            for (name, mol) in bp.iterMolecules():
                if mol.position[2] < minz:
                    minz = mol.position[2]
                elif mol.position[2] > maxz:
                    maxz = mol.position[2]
        ztrans = (minz - maxz)/2. - minz
        translation = np.array([0., 0., ztrans])
        for bp in self.basepairs:
            bp.translate(translation)
        return None

    def to_text(self, seperator=" "):
        """
        Return a description of the molecules in the chain as text
        """
        key = "# NAME SHAPE CHAIN_ID STRAND_ID BP_INDEX " +\
              "SIZE_X SIZE_Y SIZE_Z POS_X " +\
              "POS_Y POS_Z ROT_X ROT_Y ROT_Z\n"
        output = [key]
        for pair in self.basepairs:
            output.append(pair.to_text(seperator=seperator))
        return "".join(output)

    def to_plot(self):
        """
        Return a matplotlib.Figure instance with molecules plotted
        """
        sugars = []
        triphosphates = []
        bases = []
        bps = ["guanine", "adenine", "thymine", "cytosine"]
        for pair in self.basepairs:
            for (name, molecule) in pair.iterMolecules():
                if molecule.name.lower() == "dnasugar":
                    sugars.append(molecule.position)
                elif molecule.name.lower() == "triphosphate":
                    triphosphates.append(molecule.position)
                elif molecule.name.lower() in bps:
                    bases.append(molecule.position)
        # Plotting
        # BUGFIX: zip() returns a lazy iterator on Python 3 (this module
        # imports print_function, so py3 compatibility is intended); the
        # transposed coordinate columns must be materialized before the
        # [0]/[1]/[2] indexing below.
        bases = list(zip(*map(list, bases)))
        triphosphates = list(zip(*map(list, triphosphates)))
        sugars = list(zip(*map(list, sugars)))
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(bases[0], bases[1], bases[2], c="0.6", s=20)
        ax.scatter(triphosphates[0], triphosphates[1], triphosphates[2], c="y",
                   s=20)
        ax.scatter(sugars[0], sugars[1], sugars[2], c="r", s=20)
        return fig

    def to_surface_plot(self):
        """
        Plot the surfaces of each molecule in the chain.

        Avoid this with large chains, this assumes each molecule is an ellipse
        """
        def ellipse_xyz(center, extent, rotation=np.zeros([3])):
            # sample an ellipsoid surface, then rotate every sample point
            # about the center with the molecule's Euler matrix
            rmatrix = rot.eulerMatrix(*rotation)
            [a, b, c] = extent
            u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
            x = a * np.cos(u) * np.sin(v) + center[0]
            y = b * np.sin(u) * np.sin(v) + center[1]
            z = c * np.cos(v) + center[2]
            for ii in range(0, len(x)):
                for jj in range(0, len(x[ii])):
                    row = np.array([x[ii][jj], y[ii][jj], z[ii][jj]]) - center
                    xp, yp, zp = np.dot(rmatrix, row.transpose())
                    x[ii][jj] = xp + center[0]
                    y[ii][jj] = yp + center[1]
                    z[ii][jj] = zp + center[2]
            return x, y, z

        sugars = []
        triphosphates = []
        bases = []
        bps = ["guanine", "adenine", "thymine", "cytosine"]
        for pair in self.basepairs:
            for (name, molecule) in pair.iterMolecules():
                if molecule.name.lower() == "dnasugar":
                    sugars.append((molecule.position, molecule.dimensions,
                                   molecule.rotation))
                elif molecule.name.lower() == "triphosphate":
                    triphosphates.append((molecule.position,
                                          molecule.dimensions,
                                          molecule.rotation))
                elif molecule.name.lower() in bps:
                    bases.append((molecule.position, molecule.dimensions,
                                  molecule.rotation))
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        for base in bases:
            x, y, z = ellipse_xyz(base[0], base[1], rotation=base[2])
            ax.plot_wireframe(x, y, z, color="0.6")
        for phosphate in triphosphates:
            x, y, z = ellipse_xyz(phosphate[0], phosphate[1],
                                  rotation=phosphate[2])
            ax.plot_wireframe(x, y, z, color="y")
        for sugar in sugars:
            x, y, z = ellipse_xyz(sugar[0], sugar[1], rotation=sugar[2])
            ax.plot_wireframe(x, y, z, color="r")
        return fig
class TurnedDNAChain(DNAChain):
    """A straight DNA chain bent through a 90 degree arc."""

    def __init__(self, genome):
        """Construct a DNA chain from a genome of GATC that turns 90 degrees."""
        super(TurnedDNAChain, self).__init__(genome)
        self.turnDNA()

    def turnDNA(self):
        """Bend self.basepairs in place via DNAChain.turnChain."""
        self.basepairs = DNAChain.turnChain(self.basepairs)
        return None
class TurnedTwistedDNAChain(DNAChain):
    """A DNA chain that turns 90 degrees and twists as it turns."""

    def __init__(self, genome):
        """Construct a turning, twisting DNA chain from a genome of GATC."""
        super(TurnedTwistedDNAChain, self).__init__(genome)
        self.turnAndTwistDNA()

    def turnAndTwistDNA(self):
        """Apply DNAChain.turnAndTwistChain to self.basepairs in place."""
        self.basepairs = DNAChain.turnAndTwistChain(self.basepairs)
        return None
class DoubleDNAChain(DNAChain):
    """Two parallel straight DNA chains offset symmetrically along y."""

    def __init__(self, genome, separation):
        """
        DoubleDNAChain(genome, separation)

        args:
            genome: string of GATC specifying genome order
            separation: separation of each strand from the center in angstroms
        """
        super(DoubleDNAChain, self).__init__(genome)
        self.duplicateDNA(separation)

    def duplicateDNA(self, separation):
        """Clone chain0, shift the two copies to +/- separation/2 on y and
        renumber them as chains 1 and 2."""
        offset = np.array([0., separation / 2., 0.], dtype=float)
        self.basepairs_chain1 = deepcopy(self.basepairs_chain0)
        shifted = [(self.basepairs_chain0, offset),
                   (self.basepairs_chain1, -1 * offset)]
        for chain_id, (pairs, shift) in enumerate(shifted, start=1):
            for bp in pairs:
                bp.translate(shift)
                bp.setNewChain(chain_id)
        self.basepairs = self.basepairs_chain0 + self.basepairs_chain1
class TurnedDoubleDNAChain(TurnedDNAChain, DoubleDNAChain):
    """Two parallel DNA chains bent through a 90 degree arc."""

    def __init__(self, genome, separation):
        """
        args:
            genome: string of GATC specifying genome order
            separation: separation of each strand from the center in angstroms
        """
        # BUGFIX: the original called self.makeFromGenome(genome) and
        # discarded its return value, so basepairs_chain0 was never set and
        # duplicateDNA() raised AttributeError. Run the normal DNAChain
        # construction (builds chain0 and centers it) instead.
        DNAChain.__init__(self, genome)
        self.duplicateDNA(separation=separation)
        self.turnDNA()
class TurnedTwistedDoubleDNAChain(TurnedTwistedDNAChain, DoubleDNAChain):
    """Two parallel DNA chains that turn 90 degrees and twist."""

    def __init__(self, genome, separation):
        """
        args:
            genome: string of GATC specifying genome order
            separation: separation of each strand from the center in angstroms
        """
        # BUGFIX: the original discarded makeFromGenome()'s return value, so
        # basepairs_chain0 was never set and duplicateDNA() raised
        # AttributeError. Use the standard DNAChain construction instead.
        DNAChain.__init__(self, genome)
        self.duplicateDNA(separation=separation)
        self.turnAndTwistDNA()
class FourStrandDNAChain(DNAChain):
    """Four parallel straight DNA chains placed at +/-y and +/-x."""

    def __init__(self, genome, separation):
        """
        FourStrandDNAChain(genome, separation)

        Construct four parallel straight DNA chains

        args:
            genome: string of GATC specifying genome order
            separation: separation of each strand from the center in angstroms
        """
        super(FourStrandDNAChain, self).__init__(genome)
        self.makeFourStrands(separation)

    def makeFourStrands(self, separation):
        """Clone chain0 three times, shift the four copies to +/-y and +/-x,
        and renumber them as chains 0..3."""
        half_y = np.array([0., separation / 2., 0.], dtype=float)
        half_x = np.array([separation / 2., 0., 0.], dtype=float)
        self.basepairs_chain1 = deepcopy(self.basepairs_chain0)
        self.basepairs_chain2 = deepcopy(self.basepairs_chain0)
        self.basepairs_chain3 = deepcopy(self.basepairs_chain0)
        chains = (self.basepairs_chain0, self.basepairs_chain1,
                  self.basepairs_chain2, self.basepairs_chain3)
        shifts = (half_y, -1 * half_y, half_x, -1 * half_x)
        for chain_id, (pairs, shift) in enumerate(zip(chains, shifts)):
            for bp in pairs:
                bp.translate(shift)
                bp.setNewChain(chain_id)
        self.basepairs = (self.basepairs_chain0 + self.basepairs_chain1 +
                          self.basepairs_chain2 + self.basepairs_chain3)
class FourStrandTurnedDNAChain(DNAChain):
    # Four DNA chains bent through a 90 degree arc; the two chains offset in
    # x sit on smaller/larger arcs, so their genomes are shortened/extended.
    def __init__(self, genome, separation, twist=False):
        """
        FourStrandTurnedDNAChain(genome, separation)

        Construct four DNA chains that turn 90 degrees

        args:
            genome: string of GATC specifying genome order
            separation: separation of each strand from the center in angstroms
        kwargs:
            twist: boolean, add a 90 deg twist to each chain
        """
        DNAChain.__init__(self, genome)
        translation_y = np.array([0., separation / 2., 0.], dtype=float)
        # x-offset strands are also dropped by separation/2 in z
        translation_x = np.array([separation / 2., 0., -separation / 2.],
                                 dtype=float)
        if twist is True:
            transform = self.turnAndTwistChain
        else:
            transform = self.turnChain
        # arc radius of the reference (middle) chain; inner/outer chain
        # lengths are scaled by their radius ratio so base-pair spacing
        # stays roughly constant along each arc
        radiusMiddleChain =\
            len(self.basepairs_chain0) * BP_SEPARATION * 2 / np.pi
        radiusInnerChain = (radiusMiddleChain - separation / 2.)
        radiusOuterChain = (radiusMiddleChain + separation / 2.)
        self.basepairs_chain1 = DNAChain(genome, chain=1).basepairs
        chain2Length = \
            int(np.floor(radiusInnerChain / radiusMiddleChain * len(genome)))
        chain3Length = \
            int(np.floor(radiusOuterChain / radiusMiddleChain * len(genome)))
        # repeat the genome so the longer outer chain can be filled
        longGenome = genome * int(np.ceil(radiusOuterChain /
                                          radiusMiddleChain))
        genome_chain2 = genome[:chain2Length]
        self.basepairs_chain2 = DNAChain(genome_chain2, chain=2).basepairs
        genome_chain3 = longGenome[:chain3Length]
        self.basepairs_chain3 = DNAChain(genome_chain3, chain=3).basepairs
        # pdb.set_trace()
        # bend every chain, then move each onto its own offset
        self.basepairs_chain0 = transform(self.basepairs_chain0)
        for bp in self.basepairs_chain0:
            bp.translate(translation_y)
        self.basepairs_chain1 = transform(self.basepairs_chain1)
        for bp in self.basepairs_chain1:
            bp.translate(-1 * translation_y)
        self.basepairs_chain2 = transform(self.basepairs_chain2)
        for bp in self.basepairs_chain2:
            bp.translate(translation_x)
        self.basepairs_chain3 = transform(self.basepairs_chain3)
        for bp in self.basepairs_chain3:
            bp.translate(-1 * translation_x)
        self.basepairs = self.basepairs_chain0 + self.basepairs_chain1 + \
            self.basepairs_chain2 + self.basepairs_chain3
        return None
Changed DNA strand ids for four strand motif
"""
Class description of a DNA chain built of base pairs
"""
from __future__ import division, unicode_literals, print_function
import basepair
import numpy as np
import matplotlib.pyplot as plt
import rotations as r
from mpl_toolkits.mplot3d import Axes3D # NOQA
from copy import deepcopy
import rotations as rot
BP_SEPARATION = 3.32 # Angstrom
BP_ROTATION = 36 / 180. * np.pi # degrees
class DNAChain(object):
    """A DNA chain built of base pairs stacked along the +z axis.

    Each genome character becomes one ``basepair.BasePair``; consecutive
    pairs are separated by BP_SEPARATION angstrom and rotated about z by
    BP_ROTATION (the helical twist).
    """

    def __init__(self, genome, chain=0):
        """
        DNAChain(genome)

        Construct a DNA chain from a genome of GATC

        args:
            genome: string of G/A/T/C characters specifying base order
        kwargs:
            chain: integer chain index stored on every base pair
        """
        self.basepairs_chain0 = self.makeFromGenome(genome, chain=chain)
        self.basepairs = self.basepairs_chain0
        self.center_in_z()

    @staticmethod
    def makeFromGenome(genome, chain=0):
        """Return a list of BasePair objects, one per genome character."""
        dnachain = []
        position = np.array([0, 0, 0], dtype=float)
        rotation = np.array([0, 0, 0], dtype=float)
        index = 0
        for char in genome:
            # NOTE: debug print of each appended base removed; construction
            # of large chains should be silent
            dnachain.append(
                basepair.BasePair(char, chain=chain, position=position,
                                  rotation=rotation, index=index))
            position += np.array([0., 0., BP_SEPARATION])
            rotation += np.array([0., 0., BP_ROTATION])
            index += 1
        return dnachain

    @staticmethod
    def turnChain(chain):
        """Bend a straight chain (built along z) through a 90 degree arc.

        Molecules are modified in place; the same list is returned.
        NOTE(review): assumes the chain spans a non-zero z range -- a
        single-plane chain would divide by zero. TODO confirm callers.
        """
        zmax = 0
        zmin = 0
        for pair in chain:
            for (name, mol) in pair.iterMolecules():
                if mol.position[2] < zmin:
                    zmin = mol.position[2]
                elif mol.position[2] > zmax:
                    zmax = mol.position[2]
        zrange = zmax - zmin
        radius = 2. * zrange / np.pi
        for pair in chain:
            for (name, mol) in pair.iterMolecules():
                # Translation of the frame - new center position
                theta = np.pi / 2. * (mol.position[2] - zmin) / zrange
                neworigin = np.array([radius * (1 - np.cos(theta)),
                                      0.,
                                      radius * np.sin(theta) - radius])
                # rotation of the frame
                oldframe = np.array([mol.position[0], mol.position[1], 0])
                yrotation = np.pi / 2. * (mol.position[2] - zmin) / zrange
                newframe = np.dot(r.roty(yrotation), oldframe)
                mol.position[0] = neworigin[0] + newframe[0]
                mol.position[1] = neworigin[1] + newframe[1]
                mol.position[2] = neworigin[2] + newframe[2]
                mol.rotate(np.array([0, yrotation, 0]))
        return chain

    @staticmethod
    def turnAndTwistChain(chain):
        """Like turnChain, but also rotates each molecule about x while
        turning, producing a twist along the arc."""
        zmax = 0
        zmin = 0
        for pair in chain:
            for (name, mol) in pair.iterMolecules():
                if mol.position[2] < zmin:
                    zmin = mol.position[2]
                elif mol.position[2] > zmax:
                    zmax = mol.position[2]
        zrange = zmax - zmin
        radius = 2. * zrange / np.pi
        for pair in chain:
            for (name, mol) in pair.iterMolecules():
                # Translation of the frame - new center position
                theta = np.pi / 2. * (mol.position[2] - zmin) / zrange
                neworigin = np.array([radius * (1 - np.cos(theta)),
                                      0.,
                                      radius * np.sin(theta) - radius])
                # rotation of the frame
                oldframe = np.array([mol.position[0], mol.position[1], 0])
                yang = np.pi / 2. * (mol.position[2] - zmin) / zrange
                xang = np.pi / 2. * (mol.position[2] - zmin) / zrange
                # stray debug print of mol.position[2] removed
                newframe = np.dot(r.rotx(xang), np.dot(r.roty(yang), oldframe))
                mol.position[0] = neworigin[0] + newframe[0]
                mol.position[1] = neworigin[1] + newframe[1]
                mol.position[2] = neworigin[2] + newframe[2]
                mol.rotate(np.array([xang, yang, 0]))
        return chain

    def center_in_z(self):
        """
        DNAChain.center_in_z()

        Center the molecule around the z=0 plane
        """
        minz = 0
        maxz = 0
        for bp in self.basepairs:
            for (name, mol) in bp.iterMolecules():
                if mol.position[2] < minz:
                    minz = mol.position[2]
                elif mol.position[2] > maxz:
                    maxz = mol.position[2]
        ztrans = (minz - maxz)/2. - minz
        translation = np.array([0., 0., ztrans])
        for bp in self.basepairs:
            bp.translate(translation)
        return None

    def to_text(self, seperator=" "):
        """
        Return a description of the molecules in the chain as text
        """
        key = "# NAME SHAPE CHAIN_ID STRAND_ID BP_INDEX " +\
              "SIZE_X SIZE_Y SIZE_Z POS_X " +\
              "POS_Y POS_Z ROT_X ROT_Y ROT_Z\n"
        output = [key]
        for pair in self.basepairs:
            output.append(pair.to_text(seperator=seperator))
        return "".join(output)

    def to_plot(self):
        """
        Return a matplotlib.Figure instance with molecules plotted
        """
        sugars = []
        triphosphates = []
        bases = []
        bps = ["guanine", "adenine", "thymine", "cytosine"]
        for pair in self.basepairs:
            for (name, molecule) in pair.iterMolecules():
                if molecule.name.lower() == "dnasugar":
                    sugars.append(molecule.position)
                elif molecule.name.lower() == "triphosphate":
                    triphosphates.append(molecule.position)
                elif molecule.name.lower() in bps:
                    bases.append(molecule.position)
        # Plotting
        # BUGFIX: zip() returns a lazy iterator on Python 3 (this module
        # imports print_function, so py3 compatibility is intended); the
        # transposed coordinate columns must be materialized before the
        # [0]/[1]/[2] indexing below.
        bases = list(zip(*map(list, bases)))
        triphosphates = list(zip(*map(list, triphosphates)))
        sugars = list(zip(*map(list, sugars)))
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(bases[0], bases[1], bases[2], c="0.6", s=20)
        ax.scatter(triphosphates[0], triphosphates[1], triphosphates[2], c="y",
                   s=20)
        ax.scatter(sugars[0], sugars[1], sugars[2], c="r", s=20)
        return fig

    def to_surface_plot(self):
        """
        Plot the surfaces of each molecule in the chain.

        Avoid this with large chains, this assumes each molecule is an ellipse
        """
        def ellipse_xyz(center, extent, rotation=np.zeros([3])):
            # sample an ellipsoid surface, then rotate every sample point
            # about the center with the molecule's Euler matrix
            rmatrix = rot.eulerMatrix(*rotation)
            [a, b, c] = extent
            u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
            x = a * np.cos(u) * np.sin(v) + center[0]
            y = b * np.sin(u) * np.sin(v) + center[1]
            z = c * np.cos(v) + center[2]
            for ii in range(0, len(x)):
                for jj in range(0, len(x[ii])):
                    row = np.array([x[ii][jj], y[ii][jj], z[ii][jj]]) - center
                    xp, yp, zp = np.dot(rmatrix, row.transpose())
                    x[ii][jj] = xp + center[0]
                    y[ii][jj] = yp + center[1]
                    z[ii][jj] = zp + center[2]
            return x, y, z

        sugars = []
        triphosphates = []
        bases = []
        bps = ["guanine", "adenine", "thymine", "cytosine"]
        for pair in self.basepairs:
            for (name, molecule) in pair.iterMolecules():
                if molecule.name.lower() == "dnasugar":
                    sugars.append((molecule.position, molecule.dimensions,
                                   molecule.rotation))
                elif molecule.name.lower() == "triphosphate":
                    triphosphates.append((molecule.position,
                                          molecule.dimensions,
                                          molecule.rotation))
                elif molecule.name.lower() in bps:
                    bases.append((molecule.position, molecule.dimensions,
                                  molecule.rotation))
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        for base in bases:
            x, y, z = ellipse_xyz(base[0], base[1], rotation=base[2])
            ax.plot_wireframe(x, y, z, color="0.6")
        for phosphate in triphosphates:
            x, y, z = ellipse_xyz(phosphate[0], phosphate[1],
                                  rotation=phosphate[2])
            ax.plot_wireframe(x, y, z, color="y")
        for sugar in sugars:
            x, y, z = ellipse_xyz(sugar[0], sugar[1], rotation=sugar[2])
            ax.plot_wireframe(x, y, z, color="r")
        return fig
class TurnedDNAChain(DNAChain):
    """A straight DNA chain bent through a 90 degree arc."""

    def __init__(self, genome):
        """Construct a DNA chain from a genome of GATC that turns 90 degrees."""
        super(TurnedDNAChain, self).__init__(genome)
        self.turnDNA()

    def turnDNA(self):
        """Bend self.basepairs in place via DNAChain.turnChain."""
        self.basepairs = DNAChain.turnChain(self.basepairs)
        return None
class TurnedTwistedDNAChain(DNAChain):
    """A DNA chain that turns 90 degrees and twists as it turns."""

    def __init__(self, genome):
        """Construct a turning, twisting DNA chain from a genome of GATC."""
        super(TurnedTwistedDNAChain, self).__init__(genome)
        self.turnAndTwistDNA()

    def turnAndTwistDNA(self):
        """Apply DNAChain.turnAndTwistChain to self.basepairs in place."""
        self.basepairs = DNAChain.turnAndTwistChain(self.basepairs)
        return None
class DoubleDNAChain(DNAChain):
    """Two parallel straight DNA chains offset symmetrically along y."""

    def __init__(self, genome, separation):
        """
        DoubleDNAChain(genome, separation)

        args:
            genome: string of GATC specifying genome order
            separation: separation of each strand from the center in angstroms
        """
        super(DoubleDNAChain, self).__init__(genome)
        self.duplicateDNA(separation)

    def duplicateDNA(self, separation):
        """Clone chain0, shift the two copies to +/- separation/2 on y and
        renumber them as chains 1 and 2."""
        offset = np.array([0., separation / 2., 0.], dtype=float)
        self.basepairs_chain1 = deepcopy(self.basepairs_chain0)
        shifted = [(self.basepairs_chain0, offset),
                   (self.basepairs_chain1, -1 * offset)]
        for chain_id, (pairs, shift) in enumerate(shifted, start=1):
            for bp in pairs:
                bp.translate(shift)
                bp.setNewChain(chain_id)
        self.basepairs = self.basepairs_chain0 + self.basepairs_chain1
class TurnedDoubleDNAChain(TurnedDNAChain, DoubleDNAChain):
    """Two parallel DNA chains bent through a 90 degree arc."""

    def __init__(self, genome, separation):
        """
        args:
            genome: string of GATC specifying genome order
            separation: separation of each strand from the center in angstroms
        """
        # BUGFIX: the original called self.makeFromGenome(genome) and
        # discarded its return value, so basepairs_chain0 was never set and
        # duplicateDNA() raised AttributeError. Run the normal DNAChain
        # construction (builds chain0 and centers it) instead.
        DNAChain.__init__(self, genome)
        self.duplicateDNA(separation=separation)
        self.turnDNA()
class TurnedTwistedDoubleDNAChain(TurnedTwistedDNAChain, DoubleDNAChain):
    """Two parallel DNA chains that turn 90 degrees and twist."""

    def __init__(self, genome, separation):
        """
        args:
            genome: string of GATC specifying genome order
            separation: separation of each strand from the center in angstroms
        """
        # BUGFIX: the original discarded makeFromGenome()'s return value, so
        # basepairs_chain0 was never set and duplicateDNA() raised
        # AttributeError. Use the standard DNAChain construction instead.
        DNAChain.__init__(self, genome)
        self.duplicateDNA(separation=separation)
        self.turnAndTwistDNA()
class FourStrandDNAChain(DNAChain):
    """Four parallel straight DNA chains placed at +/-y and +/-x."""

    def __init__(self, genome, separation):
        """
        FourStrandDNAChain(genome, separation)

        Construct four parallel straight DNA chains

        args:
            genome: string of GATC specifying genome order
            separation: separation of each strand from the center in angstroms
        """
        super(FourStrandDNAChain, self).__init__(genome)
        self.makeFourStrands(separation)

    def makeFourStrands(self, separation):
        """Clone chain0 three times, shift the four copies to +/-y and +/-x,
        and renumber them as chains 0..3."""
        half_y = np.array([0., separation / 2., 0.], dtype=float)
        half_x = np.array([separation / 2., 0., 0.], dtype=float)
        self.basepairs_chain1 = deepcopy(self.basepairs_chain0)
        self.basepairs_chain2 = deepcopy(self.basepairs_chain0)
        self.basepairs_chain3 = deepcopy(self.basepairs_chain0)
        chains = (self.basepairs_chain0, self.basepairs_chain1,
                  self.basepairs_chain2, self.basepairs_chain3)
        shifts = (half_y, -1 * half_y, half_x, -1 * half_x)
        for chain_id, (pairs, shift) in enumerate(zip(chains, shifts)):
            for bp in pairs:
                bp.translate(shift)
                bp.setNewChain(chain_id)
        self.basepairs = (self.basepairs_chain0 + self.basepairs_chain1 +
                          self.basepairs_chain2 + self.basepairs_chain3)
class FourStrandTurnedDNAChain(DNAChain):
    # Four DNA chains bent through a 90 degree arc; chain ids run
    # anticlockwise from the +y strand (see docstring below), and the two
    # x-offset chains get genomes scaled by their arc-radius ratio.
    def __init__(self, genome, separation, twist=False):
        """
        FourStrandTurnedDNAChain(genome, separation)

        Construct four DNA chains that turn 90 degrees.

        Chain indices are assigned anticlockwise starting from the +y strand.

        args:
            genome: string of GATC specifying genome order
            separation: separation of each strand from the center in angstroms
        kwargs:
            twist: boolean, add a 90 deg twist to each chain
        """
        DNAChain.__init__(self, genome)
        translation_y = np.array([0., separation / 2., 0.], dtype=float)
        # x-offset strands are also dropped by separation/2 in z
        translation_x = np.array([separation / 2., 0., -separation / 2.],
                                 dtype=float)
        if twist is True:
            transform = self.turnAndTwistChain
        else:
            transform = self.turnChain
        # arc radius of the reference (middle) chain; inner/outer chain
        # lengths are scaled by their radius ratio so base-pair spacing
        # stays roughly constant along each arc
        radiusMiddleChain =\
            len(self.basepairs_chain0) * BP_SEPARATION * 2 / np.pi
        radiusInnerChain = (radiusMiddleChain - separation / 2.)
        radiusOuterChain = (radiusMiddleChain + separation / 2.)
        self.basepairs_chain2 = DNAChain(genome, chain=2).basepairs
        chain2Length = \
            int(np.floor(radiusInnerChain / radiusMiddleChain * len(genome)))
        chain3Length = \
            int(np.floor(radiusOuterChain / radiusMiddleChain * len(genome)))
        # repeat the genome so the longer outer chain can be filled
        longGenome = genome * int(np.ceil(radiusOuterChain /
                                          radiusMiddleChain))
        genome_chain2 = genome[:chain2Length]
        # NOTE(review): the shortened genome feeds chain 3 and the extended
        # one feeds chain 1 here -- id assignment differs from the earlier
        # revision of this class; presumably intentional per the docstring
        self.basepairs_chain3 = DNAChain(genome_chain2, chain=3).basepairs
        genome_chain3 = longGenome[:chain3Length]
        self.basepairs_chain1 = DNAChain(genome_chain3, chain=1).basepairs
        # pdb.set_trace()
        # bend every chain, then move each onto its own offset
        self.basepairs_chain0 = transform(self.basepairs_chain0)
        for bp in self.basepairs_chain0:
            bp.translate(translation_y)
        self.basepairs_chain1 = transform(self.basepairs_chain1)
        for bp in self.basepairs_chain1:
            bp.translate(-1 * translation_x)
        self.basepairs_chain2 = transform(self.basepairs_chain2)
        for bp in self.basepairs_chain2:
            bp.translate(-1 * translation_y)
        self.basepairs_chain3 = transform(self.basepairs_chain3)
        for bp in self.basepairs_chain3:
            bp.translate(translation_x)
        self.basepairs = self.basepairs_chain0 + self.basepairs_chain1 + \
            self.basepairs_chain2 + self.basepairs_chain3
        return None
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo.config import cfg
import testtools
from quantum.plugins.linuxbridge.agent import linuxbridge_quantum_agent
from quantum.plugins.linuxbridge.common import constants as lconst
from quantum.tests import base
class TestLinuxBridge(base.BaseTestCase):
    """Exercises LinuxBridgeManager.ensure_physical_in_bridge."""

    def setUp(self):
        super(TestLinuxBridge, self).setUp()
        self.addCleanup(cfg.CONF.reset)
        interface_mappings = {'physnet1': 'eth1'}
        root_helper = cfg.CONF.AGENT.root_helper
        self.linux_bridge = linuxbridge_quantum_agent.LinuxBridgeManager(
            interface_mappings, root_helper)

    def test_ensure_physical_in_bridge_invalid(self):
        # unmapped physical network -> falsy result, no bridge ensured
        result = self.linux_bridge.ensure_physical_in_bridge('network_id',
                                                             'physnetx',
                                                             7)
        self.assertFalse(result)

    def test_ensure_physical_in_bridge_flat(self):
        with mock.patch.object(self.linux_bridge,
                               'ensure_flat_bridge') as flat_bridge_func:
            # return value irrelevant here; the original bound it to an
            # unused local
            self.linux_bridge.ensure_physical_in_bridge(
                'network_id', 'physnet1', lconst.FLAT_VLAN_ID)
        self.assertTrue(flat_bridge_func.called)

    def test_ensure_physical_in_bridge_vlan(self):
        with mock.patch.object(self.linux_bridge,
                               'ensure_vlan_bridge') as vlan_bridge_func:
            self.linux_bridge.ensure_physical_in_bridge(
                'network_id', 'physnet1', 7)
        self.assertTrue(vlan_bridge_func.called)
class TestLinuxBridgeAgent(base.BaseTestCase):
    # Exercises the error-handling paths of the agent's daemon_loop.
    def setUp(self):
        super(TestLinuxBridgeAgent, self).setUp()
        # replace LinuxBridgeManager with a mock so no real bridges are used
        self.lbmgr_patcher = mock.patch('quantum.plugins.linuxbridge.agent.'
                                        'linuxbridge_quantum_agent.'
                                        'LinuxBridgeManager')
        self.lbmgr_mock = self.lbmgr_patcher.start()
        self.addCleanup(self.lbmgr_patcher.stop)

    def test_update_devices_failed(self):
        # daemon_loop must survive update_devices raising; the otherwise
        # endless loop is aborted by making LOG.info raise on its 3rd call
        lbmgr_instance = self.lbmgr_mock.return_value
        lbmgr_instance.update_devices.side_effect = RuntimeError
        agent = linuxbridge_quantum_agent.LinuxBridgeQuantumAgentRPC({},
                                                                     0,
                                                                     None)
        raise_exception = [0]

        def info_mock(msg):
            # allow two loop iterations, then break out via RuntimeError
            if raise_exception[0] < 2:
                raise_exception[0] += 1
            else:
                raise RuntimeError()
        with mock.patch.object(linuxbridge_quantum_agent.LOG, 'info') as log:
            log.side_effect = info_mock
            with testtools.ExpectedException(RuntimeError):
                agent.daemon_loop()
            self.assertEqual(3, log.call_count)

    def test_process_network_devices_failed(self):
        # same loop-abort pattern, but the failure is injected into
        # process_network_devices instead of update_devices
        device_info = {'current': [1, 2, 3]}
        lbmgr_instance = self.lbmgr_mock.return_value
        lbmgr_instance.update_devices.return_value = device_info
        agent = linuxbridge_quantum_agent.LinuxBridgeQuantumAgentRPC({},
                                                                     0,
                                                                     None)
        raise_exception = [0]

        def info_mock(msg):
            if raise_exception[0] < 2:
                raise_exception[0] += 1
            else:
                raise RuntimeError()
        # contextlib.nested is Python 2 only; this code base predates py3
        with contextlib.nested(
            mock.patch.object(linuxbridge_quantum_agent.LOG, 'info'),
            mock.patch.object(agent, 'process_network_devices')
        ) as (log, process_network_devices):
            log.side_effect = info_mock
            process_network_devices.side_effect = RuntimeError
            with testtools.ExpectedException(RuntimeError):
                agent.daemon_loop()
            self.assertEqual(3, log.call_count)
Set fake rpc implementation in test_lb_quantum_agent
Fixes bug #1151806
Change-Id: I1a57f8b926960ecfa5ee832e7af8d9f9a2ad177e
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo.config import cfg
import testtools
from quantum.plugins.linuxbridge.agent import linuxbridge_quantum_agent
from quantum.plugins.linuxbridge.common import constants as lconst
from quantum.tests import base
class TestLinuxBridge(base.BaseTestCase):
    """Exercises LinuxBridgeManager.ensure_physical_in_bridge."""

    def setUp(self):
        super(TestLinuxBridge, self).setUp()
        self.addCleanup(cfg.CONF.reset)
        interface_mappings = {'physnet1': 'eth1'}
        root_helper = cfg.CONF.AGENT.root_helper
        self.linux_bridge = linuxbridge_quantum_agent.LinuxBridgeManager(
            interface_mappings, root_helper)

    def test_ensure_physical_in_bridge_invalid(self):
        # unmapped physical network -> falsy result, no bridge ensured
        result = self.linux_bridge.ensure_physical_in_bridge('network_id',
                                                             'physnetx',
                                                             7)
        self.assertFalse(result)

    def test_ensure_physical_in_bridge_flat(self):
        with mock.patch.object(self.linux_bridge,
                               'ensure_flat_bridge') as flat_bridge_func:
            # return value irrelevant here; the original bound it to an
            # unused local
            self.linux_bridge.ensure_physical_in_bridge(
                'network_id', 'physnet1', lconst.FLAT_VLAN_ID)
        self.assertTrue(flat_bridge_func.called)

    def test_ensure_physical_in_bridge_vlan(self):
        with mock.patch.object(self.linux_bridge,
                               'ensure_vlan_bridge') as vlan_bridge_func:
            self.linux_bridge.ensure_physical_in_bridge(
                'network_id', 'physnet1', 7)
        self.assertTrue(vlan_bridge_func.called)
class TestLinuxBridgeAgent(base.BaseTestCase):
    # Exercises the error-handling paths of the agent's daemon_loop.
    def setUp(self):
        super(TestLinuxBridgeAgent, self).setUp()
        # use the in-process fake RPC backend so no real messaging bus is
        # required when the agent is instantiated
        cfg.CONF.set_override('rpc_backend',
                              'quantum.openstack.common.rpc.impl_fake')
        # replace LinuxBridgeManager with a mock so no real bridges are used
        self.lbmgr_patcher = mock.patch('quantum.plugins.linuxbridge.agent.'
                                        'linuxbridge_quantum_agent.'
                                        'LinuxBridgeManager')
        self.lbmgr_mock = self.lbmgr_patcher.start()
        self.addCleanup(self.lbmgr_patcher.stop)

    def test_update_devices_failed(self):
        # daemon_loop must survive update_devices raising; the otherwise
        # endless loop is aborted by making LOG.info raise on its 3rd call
        lbmgr_instance = self.lbmgr_mock.return_value
        lbmgr_instance.update_devices.side_effect = RuntimeError
        agent = linuxbridge_quantum_agent.LinuxBridgeQuantumAgentRPC({},
                                                                     0,
                                                                     None)
        raise_exception = [0]

        def info_mock(msg):
            # allow two loop iterations, then break out via RuntimeError
            if raise_exception[0] < 2:
                raise_exception[0] += 1
            else:
                raise RuntimeError()
        with mock.patch.object(linuxbridge_quantum_agent.LOG, 'info') as log:
            log.side_effect = info_mock
            with testtools.ExpectedException(RuntimeError):
                agent.daemon_loop()
            self.assertEqual(3, log.call_count)

    def test_process_network_devices_failed(self):
        # same loop-abort pattern, but the failure is injected into
        # process_network_devices instead of update_devices
        device_info = {'current': [1, 2, 3]}
        lbmgr_instance = self.lbmgr_mock.return_value
        lbmgr_instance.update_devices.return_value = device_info
        agent = linuxbridge_quantum_agent.LinuxBridgeQuantumAgentRPC({},
                                                                     0,
                                                                     None)
        raise_exception = [0]

        def info_mock(msg):
            if raise_exception[0] < 2:
                raise_exception[0] += 1
            else:
                raise RuntimeError()
        # contextlib.nested is Python 2 only; this code base predates py3
        with contextlib.nested(
            mock.patch.object(linuxbridge_quantum_agent.LOG, 'info'),
            mock.patch.object(agent, 'process_network_devices')
        ) as (log, process_network_devices):
            log.side_effect = info_mock
            process_network_devices.side_effect = RuntimeError
            with testtools.ExpectedException(RuntimeError):
                agent.daemon_loop()
            self.assertEqual(3, log.call_count)
|
class IOStreamWrapperMixin(object):
    """Mixin that forwards the standard io readiness/introspection API to a
    wrapped ``stream`` attribute supplied by the host class."""

    @property
    def closed(self):
        # True once the underlying stream has been closed
        return self.stream.closed

    def seekable(self):
        return self.stream.seekable()

    def readable(self):
        return self.stream.readable()

    def writable(self):
        return self.stream.writable()

    def fileno(self):
        return self.stream.fileno()

    def __del__(self):
        # best-effort cleanup on garbage collection; close() is expected to
        # be provided by the host class
        if not self.closed:
            self.close()
def check_file_like_for_writing(f):
    """Return True when *f* exposes the writer file-like API:
    ``write``, ``tell``, ``flush`` and ``close``."""
    # BUGFIX: the last attribute name was misspelled as "close)" (stray
    # parenthesis inside the string), so the check failed for every object
    check = (
        hasattr(f, "write") and hasattr(f, "tell") and
        hasattr(f, 'flush') and hasattr(f, 'close'))
    return check
Fix typo in attributes check for a file like object (#6)
Fixes #5
class IOStreamWrapperMixin(object):
    """Mixin that forwards the standard io readiness/introspection API to a
    wrapped ``stream`` attribute supplied by the host class."""

    @property
    def closed(self):
        # True once the underlying stream has been closed
        return self.stream.closed

    def seekable(self):
        return self.stream.seekable()

    def readable(self):
        return self.stream.readable()

    def writable(self):
        return self.stream.writable()

    def fileno(self):
        return self.stream.fileno()

    def __del__(self):
        # best-effort cleanup on garbage collection; close() is expected to
        # be provided by the host class
        if not self.closed:
            self.close()
def check_file_like_for_writing(f):
    """Return True when *f* exposes the writer file-like API:
    ``write``, ``tell``, ``flush`` and ``close``."""
    required = ("write", "tell", "flush", "close")
    return all(hasattr(f, attr) for attr in required)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
'''
A Bookmark attaches a set of Tags and a single Object ('class Filename' in the
included example).
'''
from sqlalchemy import Table
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import UniqueConstraint
from sqlalchemy import Integer
from sqlalchemy.orm import relationship
from sqlalchemy.orm import backref
from sqlalchemy.ext.associationproxy import association_proxy
from kcl.sqlalchemy.get_one_or_create import get_one_or_create
from kcl.sqlalchemy.BaseMixin import BASE
# Timestamps on bookmarks results in 'duplicate' bookmarks
# so dont put timestamps on bookmarks
# Many-to-many association table linking Bookmark rows to Tag rows.
# The composite primary key plus the explicit UniqueConstraint prevent
# attaching the same tag to a bookmark more than once.
tagbookmarks_table = \
    Table('tagbookmarks', BASE.metadata,
          Column('bookmark_id',
                 Integer,
                 ForeignKey("bookmark.id"),
                 primary_key=True),
          Column('tag_id',
                 Integer,
                 ForeignKey("tag.id"),
                 primary_key=True),
          UniqueConstraint('bookmark_id', 'tag_id'))
# NOTE: defined at module level on purpose -- BookmarkClassConstructor
# installs this as a classmethod on the dynamically generated Bookmark class.
@classmethod
def construct(cls, session, tag, **kwargs):
    """Get or create a Bookmark matching **kwargs and attach *tag* to it."""
    result = get_one_or_create(session, cls, **kwargs)
    result.tag_rel.add(tag)
    return result
def bookmark_repr(self):
    """__repr__ installed on the generated Bookmark class: the bookmarked
    object followed by its tag set."""
    target = getattr(self, self.target_name)
    return ' '.join([str(target), str(self.tags)])
class BookmarkClassConstructor():
    # Factory for a concrete ``Bookmark`` ORM class bound to one mapped
    # class (e.g. Filename). Instantiating it returns a brand-new *type*,
    # not a BookmarkClassConstructor instance: __new__ assembles the
    # attribute dict and hands it straight to type().
    def __new__(cls, mapper_to_bookmark):
        future_class_attr = {}
        future_class_attr['id'] = Column(Integer, primary_key=True)
        # many-to-many tag link through the tagbookmarks association table
        future_class_attr['tag_rel'] = relationship("Tag",
                                                    secondary=lambda: tagbookmarks_table,
                                                    collection_class=set,
                                                    backref=backref('bookmarks'))
        future_class_attr['tags'] = association_proxy('tag_rel', 'tag')
        target_class_name = mapper_to_bookmark.__name__
        target_name = target_class_name.lower().split('.')[-1]  # 'filename' usually
        # foreign key + relationship pointing at the bookmarked object's table
        future_class_attr[target_name+'_id'] = Column(Integer, ForeignKey(target_name+'.id'), unique=False, nullable=False)
        future_class_attr[target_name] = relationship(target_class_name, backref='bookmarks')
        future_class_attr['target_class_name'] = target_class_name
        future_class_attr['target_name'] = target_name
        # install the module-level helpers as members of the generated class
        future_class_attr['construct'] = construct
        future_class_attr['__repr__'] = bookmark_repr
        return type('Bookmark', (BASE,), future_class_attr)
auto-commit
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
'''
A Bookmark attaches a set of Tags and a single Object ('class Filename' in the
included example).
'''
from sqlalchemy import Table
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import UniqueConstraint
from sqlalchemy import Integer
from sqlalchemy.orm import relationship
from sqlalchemy.orm import backref
from sqlalchemy.ext.associationproxy import association_proxy
from kcl.sqlalchemy.get_one_or_create import get_one_or_create
from kcl.sqlalchemy.BaseMixin import BASE
# Timestamps on bookmarks results in 'duplicate' bookmarks
# so dont put timestamps on bookmarks
# Association table implementing the many-to-many Bookmark<->Tag link.
# Both columns form the composite primary key; the explicit UniqueConstraint
# additionally documents that a (bookmark, tag) pair may appear only once.
tagbookmarks_table = \
    Table('tagbookmarks', BASE.metadata,
          Column('bookmark_id',
                 Integer,
                 ForeignKey("bookmark.id"),
                 primary_key=True),
          Column('tag_id',
                 Integer,
                 ForeignKey("tag.id"),
                 primary_key=True),
          UniqueConstraint('bookmark_id', 'tag_id'))
@classmethod
def construct(cls, session, tag, **kwargs):
    """Get-or-create a bookmark row and attach *tag* to it.

    Defined at module level and injected into the generated Bookmark class
    by BookmarkClassConstructor (becomes its ``construct`` classmethod).

    :param session: SQLAlchemy session used for the lookup/insert.
    :param tag: Tag instance added to the bookmark's tag set.
    :param kwargs: column values identifying the bookmark row.
    :return: the (possibly pre-existing) Bookmark instance.
    """
    result = get_one_or_create(session, cls, **kwargs)
    result.tag_rel.add(tag)
    return result
def bookmark_repr(self):
    """Human-readable form: the bookmarked target followed by its tag set."""
    target = getattr(self, self.target_name)
    return '{} {}'.format(target, self.tags)
class BookmarkClassConstructor():
    """Factory producing a concrete ``Bookmark`` SQLAlchemy model class.

    ``__new__`` returns a brand-new class named ``Bookmark`` (derived from
    ``BASE``) rather than an instance. The attribute dict is assembled
    dynamically so the bookmark can point at an arbitrary mapped class,
    plus an optional second "placeholder" mapped class.
    """
    def __new__(cls, mapper_to_bookmark, mapper_to_bookmark_placeholder=False):
        # Attribute dict of the dynamically created Bookmark class.
        future_class_attr = {}
        future_class_attr['id'] = Column(Integer, primary_key=True)
        # Many-to-many tag set; 'tags' is an association proxy so callers
        # work with plain tag values instead of Tag rows.
        future_class_attr['tag_rel'] = relationship("Tag",
                                                    secondary=lambda: tagbookmarks_table,
                                                    collection_class=set,
                                                    backref=backref('bookmarks'))
        future_class_attr['tags'] = association_proxy('tag_rel', 'tag')
        target_class_name = mapper_to_bookmark.__name__
        target_name = target_class_name.lower().split('.')[-1]  # 'filename' usually
        # FK + relationship to the bookmarked target, named after its class.
        future_class_attr[target_name+'_id'] = Column(Integer, ForeignKey(target_name+'.id'), unique=False, nullable=False)
        future_class_attr[target_name] = relationship(target_class_name, backref='bookmarks')
        future_class_attr['target_class_name'] = target_class_name
        future_class_attr['target_name'] = target_name
        if mapper_to_bookmark_placeholder:
            # Optional secondary target (e.g. a byte offset within the
            # bookmarked file), wired exactly like the primary target.
            target_class_name_placeholder = mapper_to_bookmark_placeholder.__name__
            target_name_placeholder = target_class_name_placeholder.lower().split('.')[-1]  # byteoffset in the filename case
            future_class_attr[target_name_placeholder+'_id'] = Column(Integer, ForeignKey(target_name_placeholder+'.id'), unique=False, nullable=False)
            future_class_attr[target_name_placeholder] = relationship(target_class_name_placeholder, backref='bookmarks')
            future_class_attr['target_class_name_placeholder'] = target_class_name_placeholder
            future_class_attr['target_name_placeholder'] = target_name_placeholder
        # Shared behaviour defined at module level.
        future_class_attr['construct'] = construct
        future_class_attr['__repr__'] = bookmark_repr
        return type('Bookmark', (BASE,), future_class_attr)
|
# pip install neo4j-driver wordcloud matplotlib pillow image
import argparse
import csv
import matplotlib
import numpy as np
import os
import random
import wordcloud
from neo4j.v1 import GraphDatabase, basic_auth
matplotlib.use('Agg')
import matplotlib.pyplot
import re
def grey_color_func(word, font_size, position, orientation, random_state=None, **kwargs):
    """Color callback for wordcloud.recolor: random hue in the 120-160 range.

    NOTE(review): with saturation and lightness both at 100% every word
    renders as pure white regardless of hue — confirm this is intentional.
    """
    hue = random.randint(120, 160)
    return "hsl({}, 100%, 100%)".format(hue)
def get_most_common_attributes(db_driver, n_attributes, force=True):
attr_types = []
if n_attributes == 0:
return attr_types
else:
if not force and os.path.isfile('neo4j-analysis/csv/attr_common.csv'):
with open('neo4j-analysis/csv/attr_common.csv', 'r') as f:
attr_re = re.compile("(?P<type>(?:\w+\s?)+)\s\((?P<usage_count>\d+)\)")
csv_reader = csv.reader(f)
n = 0
while n < n_attributes:
row = next(csv_reader)
value = row[0]
result = attr_re.match(value)
attr_types.append((str(result.group('type')), int(result.group('usage_count'))))
n += 1
else:
print 'Querying database for the %d most common attribute types' % n_attributes
with db_driver.session() as session:
results = session.run("MATCH (:Sample)-[u:hasAttribute]->(a:Attribute) "
"RETURN a.type AS type, COUNT(u) AS usage_count "
"ORDER BY usage_count DESC "
"LIMIT {n_attributes}", {"n_attributes": n_attributes})
for result in results:
attr_types.append((result["type"], result["usage_count"]))
return attr_types
def get_usage_count(db_driver, attr_type):
with db_driver.session() as session:
results = session.run("MATCH (s:Sample)-[u:hasAttribute]->(:Attribute{type:{attr_type}}) "
"RETURN COUNT(u) AS usage_count", {"attr_type": attr_type})
for result in results:
return result["usage_count"]
def generate_summary(args, db_driver):
print "generating summary of most common attribute types and values"
generate_summary_spreadsheet(args, db_driver)
generate_summary_plots(args, db_driver)
generate_summary_wordcloud(args, db_driver)
print "generated summary of most common attribute types and values"
def generate_summary_spreadsheet(args, db_driver):
print "generating summary spreadsheet of most common attribute types and values"
common = get_most_common_attributes(db_driver, 100, force=True)
try:
os.makedirs("neo4j-analysis/csv")
except OSError:
pass
with open("neo4j-analysis/csv/attr_common.csv", "w") as outfile:
csvout = csv.writer(outfile)
for attr in common:
row = ["{} ({})".format(attr[0], attr[1])]
with db_driver.session() as session2:
cypher = "MATCH (s:Sample)-[u:hasAttribute]->(a:Attribute)-->(t:AttributeType{name: {attr_type}}), \
(a:Attribute)-->(v:AttributeValue) \
RETURN v.name AS value, COUNT(u) AS usage_count ORDER BY usage_count DESC LIMIT 10"
results2 = session2.run(cypher, {"attr_type": attr[0]})
for result2 in results2:
row.append("{} ({})".format(result2["value"], result2["usage_count"]))
csvout.writerow(row)
print "generated summary spreadsheet of most common attribute types and values"
def generate_summary_plots(args, db_driver):
print "generating summary plots of most common attribute types and values"
cypher = "MATCH (s:Sample)-->(a:Attribute) \
WITH s, COUNT(DISTINCT a) AS attr_count \
RETURN attr_count, COUNT(s) as samples_count \
ORDER BY attr_count ASC"
n_attr = []
n_samples = []
with db_driver.session() as session:
results = session.run(cypher)
for record in results:
n_attr.append(record["attr_count"])
n_samples.append(record["samples_count"])
fig = matplotlib.pyplot.figure(figsize=(12, 6))
axis = fig.add_axes((0.0, 0.0, 1.0, 1.0), title="Frequency distribution of number of attributes on each sample")
axis.bar(n_attr, n_samples)
axis.set_yscale("log")
axis.set_xlabel("Number of attributes")
axis.set_ylabel("Frequency")
try:
os.makedirs("neo4j-analysis/plot")
except OSError:
pass
fig.savefig("neo4j-analysis/plot/freq-of-number-attrs.png", bbox_inches='tight')
"""
There are some samples that have many many attributes. Typically, these are survey results
e.g. SAMEA4394014
"""
print "generated summary plots of most common attribute types and values"
def generate_summary_wordcloud(args, db_driver):
max_words = args.wordcloud_entries
if max_words < 1:
return
freq = []
print "generating wordcloud of most common attribute types and values"
common = get_most_common_attributes(db_driver, max_words)
for attr in common:
freq.append((attr[0], attr[1]))
wc = wordcloud.WordCloud(width=640, height=512, scale=2.0, max_words=max_words).generate_from_frequencies(freq)
wc.recolor(color_func=grey_color_func, random_state=3)
try:
os.makedirs("neo4j-analysis/word_clouds")
except OSError:
pass
wc.to_file("neo4j-analysis/word_clouds/cloud-types.png")
print "generated wordcloud of most common attribute types and values"
def generate_wordcloud_of_attribute(args, db_driver, attr_type, usage_count):
max_words = args.wordcloud_entries
if max_words < 1:
return
freq2 = []
print "generating wordcloud of values of", attr_type
with db_driver.session() as session2:
cypher = "MATCH (:Sample)-[u:hasAttribute]->(a:Attribute)-->(t:AttributeType{name:{attr_type}}), (a:Attribute)-->(v:AttributeValue) " \
"RETURN v.name AS value, COUNT(u) AS usage_count ORDER BY usage_count DESC LIMIT {max_words}"
cypher = "MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}}) \
RETURN a.value AS value, count(u) AS usage_count \
ORDER BY count(u) DESC \
LIMIT {max_words}"
results2 = session2.run(cypher, {"attr_type":attr_type, "max_words":max_words})
for result2 in results2:
freq2.append((result2["value"], result2["usage_count"]))
wc = wordcloud.WordCloud(width=640, height=512, scale=2.0, max_words=max_words).generate_from_frequencies(freq2)
wc.recolor(color_func=grey_color_func, random_state=3)
try:
os.makedirs("neo4j-analysis/word_clouds")
except OSError:
pass
wc.to_file("neo4j-analysis/word_clouds/cloud-values-{:07d}-{}.png".format(usage_count,attr_type))
print "generated wordcloud of values of", attr_type
def attribute_value_mapped(args, db_driver, attr_type, usage_count):
cypher = "MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}}) " \
"RETURN COUNT(u) AS usage_count, COUNT(a.iri) AS mapped "
with db_driver.session() as session:
result = session.run(cypher, {"attr_type": attr_type})
for record in result:
prop = float(record["mapped"]) / float(usage_count)
print "for type '{:s}' ontologies terms are mapped for {:.0%} of uses".format(attr_type, prop)
return prop
def attribute_value_mapped_label_match(args, db_driver, attr_type, usage_count):
cypher = 'MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}})-->(:OntologyTerm)-->(eo:EfoOntologyTerm) \
WHERE eo.label = a.value OR a.value IN eo.`synonyms[]`\
RETURN COUNT(u) AS label_match_count'
with db_driver.session() as session:
result = session.run(cypher, {"attr_type": attr_type})
for record in result:
prop = float(record["label_match_count"]) / float(usage_count)
print "for type '{:s}' ontologies terms have the same value for {:.0%} of uses".format(attr_type, prop)
return prop
def attribute_value_coverage(args, db_driver, attr_type, usage_count, prop, maxcount):
with db_driver.session() as session:
cypher = "MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}}) \
RETURN a.value, count(u) AS count_s \
ORDER BY count(u) DESC \
LIMIT {maxcount}"
result = session.run(cypher, {"attr_type":attr_type, "maxcount":maxcount})
running_total = 0
i = 0
for record in result:
i += 1
running_total += record["count_s"]
if running_total > float(usage_count)*prop:
print "for type '{:s}' the top {:d} values cover {:.0%} of uses".format(attr_type,i,prop)
return i
if i >= maxcount:
print "for type '{:s}' the top {:d} values do not cover {:.0%} of uses".format(attr_type,maxcount,prop)
return None
def number_of_values_per_type(args, db_driver):
print "generating spreadsheet with number of values for each attribute type"
try:
os.makedirs("neo4j-analysis/csv")
except OSError:
pass
with open("neo4j-analysis/csv/num_values_distribution.csv", "w") as fileout:
csvout = csv.writer(fileout)
cypher = "MATCH (s:Sample)-->(a:Attribute)-->(at:AttributeType) \
WITH at.name AS attr_type, COUNT(DISTINCT a.value) AS n_values, COUNT(s) AS n_samples \
RETURN attr_type, n_values, n_samples, toFloat(n_values)/toFloat(n_samples) AS ratio \
ORDER BY n_samples DESC \
LIMIT 50"
print "%s, %s, %s, %s" % ("Attribute type", "Number of values", "Number of samples", "Ratio")
csvout.writerow(["Attribute type", "Number of values", "Number of samples", "Ratio"])
values = []
with db_driver.session() as session:
results = session.run(cypher)
for record in results:
record_tuple = (record["attr_type"],
record["n_values"],
record["n_samples"],
record["ratio"])
# print "%s, %d, %d, %.2f" % record_tuple
values.append(record_tuple)
csvout.writerow([x for x in record_tuple])
attr_types, n_values, n_samples, ratios = map(list, zip(*values))
counts = np.bincount(n_values)
stats = {"mean": np.mean(n_values), "median": np.median(n_values), "mode": np.argmax(counts)}
print "Mean: %d" % (stats["mean"])
print "Median: %d" % (stats["median"])
print "Mode: %d" % (stats["mode"])
fig = matplotlib.pyplot.figure(figsize=(24, 18))
ax1 = fig.add_subplot(211)
ax1.bar(np.arange(len(n_values)), n_values, align="center")
ax1.set_yscale("log")
ax1.set_xticks(np.arange(len(n_values)))
ax1.set_ylabel("Number of attribute values")
ax2 = fig.add_subplot(212)
ax2.bar(np.arange(len(n_values)), ratios, align="center")
ax2.set_xticks(np.arange(len(n_values)))
ax2.set_xticklabels(attr_types, rotation=90)
ax2.set_xlabel("Attribute types")
ax2.set_ylabel("Diversity of values")
try:
os.makedirs("neo4j-analysis/plot")
except OSError:
pass
fig.savefig("neo4j-analysis/plot/value-diversity.png", bbox_inches='tight')
def attribute_value_child_of_type(args, db_driver, attr_type, usage_count):
with db_driver.session() as session:
cypher = \
"MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}}) " \
"OPTIONAL MATCH (a)-[:hasIri]->(:OntologyTerm)-[:inEfo]->(:EfoOntologyTerm)" \
"-[:hasParent*1..]->(eo:EfoOntologyTerm{label:{onto_term}}) " \
"RETURN count(distinct u) as count, eo IS NULL as ontology_missing"
results = session.run(cypher, {"attr_type": attr_type, "onto_term": attr_type.lower()})
values = dict()
for record in results:
if record["ontology_missing"]:
values["missing"] = record["count"]
else:
values["not_missing"] = record["count"]
count = values["not_missing"] if "not_missing" in values else 0
percentage = 100 * (float(count) / usage_count)
print "for type {:s} ontologies terms are mapped to a child term are {:02f}% of uses".format(attr_type, percentage)
def attribute_values_mapped_to_obsolete_terms(args, db_driver, attr_type, usage_count):
with db_driver.session() as session:
cypher = \
"MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}})" \
"-->(o:OntologyTerm)-[inefo:inEfo]->(efo:EfoOntologyTerm{obsolete:'True'}) " \
"RETURN DISTINCT a.value AS value, COUNT(u) AS count, o.iri AS iri"
results = session.run(cypher, {"attr_type": attr_type})
total = 0
for record in results:
total += record["count"]
percentage = 100 * (float(total) / usage_count)
print "for type {:s} ontologies terms are mapped to an obsolete ontology term " \
"are {:02f}% of uses".format(attr_type, percentage)
# def attribute_values_matching_efo_label(args, db_driver, attr_type, usage_count ):
# print "generating value matching to efo label spreadsheet"
# max_words = 1000
# with db_driver.session() as session2:
# cypher = \
# "MATCH (s:Sample)-[:hasAttribute]->(a:Attribute{type: '{}'})-->(o:OntologyTerm) WITH s,a,o \
# MATCH (eo:EfoOntologyTerm)<--(o)<--(a)-->(av:AttributeValue) \
# WHERE eo.label <> av.name \
# RETURN eo.label AS label, av.name AS attr_value, COUNT(s) AS sample_count \
# ORDER BY sample_count DESC \
# LIMIT {}".format(attr_type,max_words)
#
# try:
# os.makedirs("neo4j-analysis/csv")
# except OSError:
# pass
#
# with open("neo4j-analysis/csv/{}_efo_label_matching.csv".format(attr_type), "w") as outfile:
# csvout = csv.writer(outfile)
#
# for attr in common:
# row = ["{} ({})".format(attr[0], attr[1])]
#
# with db_driver.session() as session2:
# cypher = "MATCH (s:Sample)-[u:hasAttribute]->(a:Attribute)-->(t:AttributeType{name:'"+attr[0]+"'}), \
# (a:Attribute)-->(v:AttributeValue) \
# RETURN v.name AS value, COUNT(u) AS usage_count ORDER BY usage_count DESC LIMIT 10"
# results2 = session2.run(cypher)
# for result2 in results2:
# row.append("{} ({})".format(result2["value"], result2["usage_count"]))
#
# csvout.writerow(row)
if __name__ == "__main__":
print "Welcome to the BioSamples analysis"
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', default="neo4j-server-local")
parser.add_argument('--summary', action='store_true')
parser.add_argument('--wordcloud-entries', type=int, default=1000)
parser.add_argument('--top-attr', type=int, default=0)
parser.add_argument('--attr', action='append')
args = parser.parse_args()
driver = GraphDatabase.driver("bolt://" + args.hostname)
print "Generation of reports started"
# spreadsheet of most common attribute types and values
if args.summary:
generate_summary(args, driver)
attrs = get_most_common_attributes(driver, args.top_attr, force=False)
if args.attr is not None:
for attr in args.attr:
usage_count = get_usage_count(driver, attr)
attrs.append((attr, usage_count))
for attr_type, usage_count in attrs:
# generate_wordcloud_of_attribute(args, driver, attr_type, usage_count)
# attribute_value_mapped(args, driver, attr_type, usage_count)
# attribute_value_mapped_label_match(args, driver, attr_type, usage_count)
# attribute_value_coverage(args, driver, attr_type, usage_count, 0.50, 100)
# attribute_value_coverage(args, driver, attr_type, usage_count, 0.75, 250)
# attribute_value_coverage(args, driver, attr_type, usage_count, 0.95, 500)
# attribute_value_child_of_type(args, driver, attr_type, usage_count)
attribute_values_mapped_to_obsolete_terms(args, driver, attr_type, usage_count)
Update format for the percentage
# pip install neo4j-driver wordcloud matplotlib pillow image
import argparse
import csv
import matplotlib
import numpy as np
import os
import random
import wordcloud
from neo4j.v1 import GraphDatabase, basic_auth
matplotlib.use('Agg')
import matplotlib.pyplot
import re
def grey_color_func(word, font_size, position, orientation, random_state=None, **kwargs):
    """Color callback for wordcloud.recolor: random hue in the 120-160 range.

    NOTE(review): with saturation and lightness both at 100% every word
    renders as pure white regardless of hue — confirm this is intentional.
    """
    hue = random.randint(120, 160)
    return "hsl({}, 100%, 100%)".format(hue)
def get_most_common_attributes(db_driver, n_attributes, force=True):
    """Return the n most used attribute types as (type, usage_count) tuples.

    When *force* is False and a previously generated attr_common.csv exists,
    the answer is read from that cache instead of querying Neo4j.

    :param db_driver: neo4j driver instance.
    :param n_attributes: how many attribute types to return; 0 returns [].
    :param force: when True always query the database, ignoring the cache.
    """
    attr_types = []
    if n_attributes == 0:
        return attr_types
    else:
        if not force and os.path.isfile('neo4j-analysis/csv/attr_common.csv'):
            with open('neo4j-analysis/csv/attr_common.csv', 'r') as f:
                # Each cached first cell looks like "organism (12345)".
                attr_re = re.compile("(?P<type>(?:\w+\s?)+)\s\((?P<usage_count>\d+)\)")
                csv_reader = csv.reader(f)
                n = 0
                # Take the first n_attributes rows (file is already sorted
                # by usage, most common first).
                while n < n_attributes:
                    row = next(csv_reader)
                    value = row[0]
                    result = attr_re.match(value)
                    attr_types.append((str(result.group('type')), int(result.group('usage_count'))))
                    n += 1
        else:
            print 'Querying database for the %d most common attribute types' % n_attributes
            with db_driver.session() as session:
                results = session.run("MATCH (:Sample)-[u:hasAttribute]->(a:Attribute) "
                                      "RETURN a.type AS type, COUNT(u) AS usage_count "
                                      "ORDER BY usage_count DESC "
                                      "LIMIT {n_attributes}", {"n_attributes": n_attributes})
                for result in results:
                    attr_types.append((result["type"], result["usage_count"]))
    return attr_types
def get_usage_count(db_driver, attr_type):
    """Return the total number of times *attr_type* is used on any sample."""
    with db_driver.session() as session:
        results = session.run("MATCH (s:Sample)-[u:hasAttribute]->(:Attribute{type:{attr_type}}) "
                              "RETURN COUNT(u) AS usage_count", {"attr_type": attr_type})
        # Single-row result: return on the first (only) record.
        for result in results:
            return result["usage_count"]
def generate_summary(args, db_driver):
    """Produce all three summary artefacts: CSV, plots and word cloud."""
    print "generating summary of most common attribute types and values"
    generate_summary_spreadsheet(args, db_driver)
    generate_summary_plots(args, db_driver)
    generate_summary_wordcloud(args, db_driver)
    print "generated summary of most common attribute types and values"
def generate_summary_spreadsheet(args, db_driver):
    """Write neo4j-analysis/csv/attr_common.csv: one row per common attribute
    type, first cell "type (usage)", then its top 10 values as "value (n)".
    """
    print "generating summary spreadsheet of most common attribute types and values"
    common = get_most_common_attributes(db_driver, 100, force=True)
    # Ensure the output directory exists; makedirs raises when it already does.
    try:
        os.makedirs("neo4j-analysis/csv")
    except OSError:
        pass
    with open("neo4j-analysis/csv/attr_common.csv", "w") as outfile:
        csvout = csv.writer(outfile)
        for attr in common:
            row = ["{} ({})".format(attr[0], attr[1])]
            with db_driver.session() as session2:
                cypher = "MATCH (s:Sample)-[u:hasAttribute]->(a:Attribute)-->(t:AttributeType{name: {attr_type}}), \
                    (a:Attribute)-->(v:AttributeValue) \
                    RETURN v.name AS value, COUNT(u) AS usage_count ORDER BY usage_count DESC LIMIT 10"
                results2 = session2.run(cypher, {"attr_type": attr[0]})
                for result2 in results2:
                    row.append("{} ({})".format(result2["value"], result2["usage_count"]))
            csvout.writerow(row)
    print "generated summary spreadsheet of most common attribute types and values"
def generate_summary_plots(args, db_driver):
    """Plot the frequency distribution of attributes-per-sample to
    neo4j-analysis/plot/freq-of-number-attrs.png (log-scale y axis).
    """
    print "generating summary plots of most common attribute types and values"
    cypher = "MATCH (s:Sample)-->(a:Attribute) \
        WITH s, COUNT(DISTINCT a) AS attr_count \
        RETURN attr_count, COUNT(s) as samples_count \
        ORDER BY attr_count ASC"
    n_attr = []
    n_samples = []
    with db_driver.session() as session:
        results = session.run(cypher)
        for record in results:
            n_attr.append(record["attr_count"])
            n_samples.append(record["samples_count"])
    fig = matplotlib.pyplot.figure(figsize=(12, 6))
    axis = fig.add_axes((0.0, 0.0, 1.0, 1.0), title="Frequency distribution of number of attributes on each sample")
    axis.bar(n_attr, n_samples)
    # Log scale because a few survey-style samples have very many attributes.
    axis.set_yscale("log")
    axis.set_xlabel("Number of attributes")
    axis.set_ylabel("Frequency")
    try:
        os.makedirs("neo4j-analysis/plot")
    except OSError:
        pass
    fig.savefig("neo4j-analysis/plot/freq-of-number-attrs.png", bbox_inches='tight')
    """
    There are some samples that have many many attributes. Typically, these are survey results
    e.g. SAMEA4394014
    """
    print "generated summary plots of most common attribute types and values"
def generate_summary_wordcloud(args, db_driver):
    """Render a word cloud of the most common attribute types.

    Cloud size is capped by args.wordcloud_entries; values < 1 disable it.
    """
    max_words = args.wordcloud_entries
    if max_words < 1:
        return
    freq = []
    print "generating wordcloud of most common attribute types and values"
    common = get_most_common_attributes(db_driver, max_words)
    for attr in common:
        freq.append((attr[0], attr[1]))
    # NOTE(review): newer wordcloud releases expect a dict for
    # generate_from_frequencies; this list-of-tuples form matches older APIs.
    wc = wordcloud.WordCloud(width=640, height=512, scale=2.0, max_words=max_words).generate_from_frequencies(freq)
    wc.recolor(color_func=grey_color_func, random_state=3)
    try:
        os.makedirs("neo4j-analysis/word_clouds")
    except OSError:
        pass
    wc.to_file("neo4j-analysis/word_clouds/cloud-types.png")
    print "generated wordcloud of most common attribute types and values"
def generate_wordcloud_of_attribute(args, db_driver, attr_type, usage_count):
    """Render a word cloud of the most common values of one attribute type.

    The output filename embeds usage_count zero-padded to 7 digits so the
    files sort by popularity.
    """
    max_words = args.wordcloud_entries
    if max_words < 1:
        return
    freq2 = []
    print "generating wordcloud of values of", attr_type
    with db_driver.session() as session2:
        # The first cypher assignment is immediately overwritten — kept from
        # an older schema where values lived on separate nodes.
        cypher = "MATCH (:Sample)-[u:hasAttribute]->(a:Attribute)-->(t:AttributeType{name:{attr_type}}), (a:Attribute)-->(v:AttributeValue) " \
                 "RETURN v.name AS value, COUNT(u) AS usage_count ORDER BY usage_count DESC LIMIT {max_words}"
        cypher = "MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}}) \
            RETURN a.value AS value, count(u) AS usage_count \
            ORDER BY count(u) DESC \
            LIMIT {max_words}"
        results2 = session2.run(cypher, {"attr_type":attr_type, "max_words":max_words})
        for result2 in results2:
            freq2.append((result2["value"], result2["usage_count"]))
    # NOTE(review): list-of-tuples form matches older wordcloud APIs.
    wc = wordcloud.WordCloud(width=640, height=512, scale=2.0, max_words=max_words).generate_from_frequencies(freq2)
    wc.recolor(color_func=grey_color_func, random_state=3)
    try:
        os.makedirs("neo4j-analysis/word_clouds")
    except OSError:
        pass
    wc.to_file("neo4j-analysis/word_clouds/cloud-values-{:07d}-{}.png".format(usage_count,attr_type))
    print "generated wordcloud of values of", attr_type
def attribute_value_mapped(args, db_driver, attr_type, usage_count):
    """Print and return the proportion of uses of *attr_type* that carry an
    ontology mapping (a.iri present — COUNT skips nulls).
    """
    cypher = "MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}}) " \
             "RETURN COUNT(u) AS usage_count, COUNT(a.iri) AS mapped "
    with db_driver.session() as session:
        result = session.run(cypher, {"attr_type": attr_type})
        # Single-row result: return on the first (only) record.
        for record in result:
            prop = float(record["mapped"]) / float(usage_count)
            print "for type '{:s}' ontologies terms are mapped for {:.0%} of uses".format(attr_type, prop)
            return prop
def attribute_value_mapped_label_match(args, db_driver, attr_type, usage_count):
    """Print and return the proportion of uses whose mapped EFO term has a
    label (or synonym) exactly equal to the attribute value.
    """
    cypher = 'MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}})-->(:OntologyTerm)-->(eo:EfoOntologyTerm) \
        WHERE eo.label = a.value OR a.value IN eo.`synonyms[]`\
        RETURN COUNT(u) AS label_match_count'
    with db_driver.session() as session:
        result = session.run(cypher, {"attr_type": attr_type})
        # Single-row result: return on the first (only) record.
        for record in result:
            prop = float(record["label_match_count"]) / float(usage_count)
            print "for type '{:s}' ontologies terms have the same value for {:.0%} of uses".format(attr_type, prop)
            return prop
def attribute_value_coverage(args, db_driver, attr_type, usage_count, prop, maxcount):
    """Report how many of the most frequent values are needed to cover a
    proportion *prop* of all uses of *attr_type*.

    :param prop: target coverage as a fraction, e.g. 0.95.
    :param maxcount: give up after examining this many values.
    :return: number of values needed, or None when maxcount is insufficient.
    """
    with db_driver.session() as session:
        cypher = "MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}}) \
            RETURN a.value, count(u) AS count_s \
            ORDER BY count(u) DESC \
            LIMIT {maxcount}"
        result = session.run(cypher, {"attr_type":attr_type, "maxcount":maxcount})
        running_total = 0
        i = 0
        # Accumulate value frequencies (most common first) until the target
        # coverage is reached.
        for record in result:
            i += 1
            running_total += record["count_s"]
            if running_total > float(usage_count)*prop:
                print "for type '{:s}' the top {:d} values cover {:.0%} of uses".format(attr_type,i,prop)
                return i
        if i >= maxcount:
            print "for type '{:s}' the top {:d} values do not cover {:.0%} of uses".format(attr_type,maxcount,prop)
        return None
def number_of_values_per_type(args, db_driver):
    """Write num_values_distribution.csv (type, #values, #samples, ratio) for
    the 50 most used attribute types, print simple stats, and plot value
    diversity to neo4j-analysis/plot/value-diversity.png.
    """
    print "generating spreadsheet with number of values for each attribute type"
    # Ensure the output directory exists; makedirs raises when it already does.
    try:
        os.makedirs("neo4j-analysis/csv")
    except OSError:
        pass
    with open("neo4j-analysis/csv/num_values_distribution.csv", "w") as fileout:
        csvout = csv.writer(fileout)
        cypher = "MATCH (s:Sample)-->(a:Attribute)-->(at:AttributeType) \
            WITH at.name AS attr_type, COUNT(DISTINCT a.value) AS n_values, COUNT(s) AS n_samples \
            RETURN attr_type, n_values, n_samples, toFloat(n_values)/toFloat(n_samples) AS ratio \
            ORDER BY n_samples DESC \
            LIMIT 50"
        print "%s, %s, %s, %s" % ("Attribute type", "Number of values", "Number of samples", "Ratio")
        csvout.writerow(["Attribute type", "Number of values", "Number of samples", "Ratio"])
        values = []
        with db_driver.session() as session:
            results = session.run(cypher)
            for record in results:
                record_tuple = (record["attr_type"],
                                record["n_values"],
                                record["n_samples"],
                                record["ratio"])
                # print "%s, %d, %d, %.2f" % record_tuple
                values.append(record_tuple)
                csvout.writerow([x for x in record_tuple])
    attr_types, n_values, n_samples, ratios = map(list, zip(*values))
    counts = np.bincount(n_values)
    # Central-tendency stats over the number of distinct values per type.
    stats = {"mean": np.mean(n_values), "median": np.median(n_values), "mode": np.argmax(counts)}
    print "Mean: %d" % (stats["mean"])
    print "Median: %d" % (stats["median"])
    print "Mode: %d" % (stats["mode"])
    fig = matplotlib.pyplot.figure(figsize=(24, 18))
    # Top panel: absolute number of distinct values per type (log scale).
    ax1 = fig.add_subplot(211)
    ax1.bar(np.arange(len(n_values)), n_values, align="center")
    ax1.set_yscale("log")
    ax1.set_xticks(np.arange(len(n_values)))
    ax1.set_ylabel("Number of attribute values")
    # Bottom panel: values-to-samples ratio ("diversity") per type.
    ax2 = fig.add_subplot(212)
    ax2.bar(np.arange(len(n_values)), ratios, align="center")
    ax2.set_xticks(np.arange(len(n_values)))
    ax2.set_xticklabels(attr_types, rotation=90)
    ax2.set_xlabel("Attribute types")
    ax2.set_ylabel("Diversity of values")
    try:
        os.makedirs("neo4j-analysis/plot")
    except OSError:
        pass
    fig.savefig("neo4j-analysis/plot/value-diversity.png", bbox_inches='tight')
def attribute_value_child_of_type(args, db_driver, attr_type, usage_count):
with db_driver.session() as session:
cypher = \
"MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}}) " \
"OPTIONAL MATCH (a)-[:hasIri]->(:OntologyTerm)-[:inEfo]->(:EfoOntologyTerm)" \
"-[:hasParent*1..]->(eo:EfoOntologyTerm{label:{onto_term}}) " \
"RETURN count(distinct u) as count, eo IS NULL as ontology_missing"
results = session.run(cypher, {"attr_type": attr_type, "onto_term": attr_type.lower()})
values = dict()
for record in results:
if record["ontology_missing"]:
values["missing"] = record["count"]
else:
values["not_missing"] = record["count"]
count = values["not_missing"] if "not_missing" in values else 0
percentage = 100 * (float(count) / usage_count)
print "for type {:s} ontologies terms are mapped to a child term are {:02f}% of uses".format(attr_type, percentage)
def attribute_values_mapped_to_obsolete_terms(args, db_driver, attr_type, usage_count):
    """Print the percentage of uses of *attr_type* whose ontology mapping
    points at an EFO term flagged as obsolete.

    :param args: parsed CLI namespace (unused here, kept for signature parity).
    :param db_driver: neo4j driver.
    :param attr_type: attribute type name, e.g. 'organism'.
    :param usage_count: total number of uses of this attribute type.
    """
    with db_driver.session() as session:
        cypher = \
            "MATCH (:Sample)-[u:hasAttribute]->(a:Attribute{type:{attr_type}})" \
            "-->(o:OntologyTerm)-[inefo:inEfo]->(efo:EfoOntologyTerm{obsolete:'True'}) " \
            "RETURN DISTINCT a.value AS value, COUNT(u) AS count, o.iri AS iri"
        results = session.run(cypher, {"attr_type": attr_type})
        # Sum usage over all distinct (value, iri) rows that hit an obsolete term.
        total = 0
        for record in results:
            total += record["count"]
        percentage = 100 * (float(total) / usage_count)
        print "for type {:s} ontologies terms are mapped to an obsolete ontology term " \
              "are {:02.2f}% of uses".format(attr_type, percentage)
# def attribute_values_matching_efo_label(args, db_driver, attr_type, usage_count ):
# print "generating value matching to efo label spreadsheet"
# max_words = 1000
# with db_driver.session() as session2:
# cypher = \
# "MATCH (s:Sample)-[:hasAttribute]->(a:Attribute{type: '{}'})-->(o:OntologyTerm) WITH s,a,o \
# MATCH (eo:EfoOntologyTerm)<--(o)<--(a)-->(av:AttributeValue) \
# WHERE eo.label <> av.name \
# RETURN eo.label AS label, av.name AS attr_value, COUNT(s) AS sample_count \
# ORDER BY sample_count DESC \
# LIMIT {}".format(attr_type,max_words)
#
# try:
# os.makedirs("neo4j-analysis/csv")
# except OSError:
# pass
#
# with open("neo4j-analysis/csv/{}_efo_label_matching.csv".format(attr_type), "w") as outfile:
# csvout = csv.writer(outfile)
#
# for attr in common:
# row = ["{} ({})".format(attr[0], attr[1])]
#
# with db_driver.session() as session2:
# cypher = "MATCH (s:Sample)-[u:hasAttribute]->(a:Attribute)-->(t:AttributeType{name:'"+attr[0]+"'}), \
# (a:Attribute)-->(v:AttributeValue) \
# RETURN v.name AS value, COUNT(u) AS usage_count ORDER BY usage_count DESC LIMIT 10"
# results2 = session2.run(cypher)
# for result2 in results2:
# row.append("{} ({})".format(result2["value"], result2["usage_count"]))
#
# csvout.writerow(row)
if __name__ == "__main__":
    print "Welcome to the BioSamples analysis"
    # CLI: where to find neo4j, whether to build the summary, and which
    # attribute types to analyse individually.
    parser = argparse.ArgumentParser()
    parser.add_argument('--hostname', default="neo4j-server-local")
    parser.add_argument('--summary', action='store_true')
    parser.add_argument('--wordcloud-entries', type=int, default=1000)
    parser.add_argument('--top-attr', type=int, default=0)
    parser.add_argument('--attr', action='append')
    args = parser.parse_args()
    driver = GraphDatabase.driver("bolt://" + args.hostname)
    print "Generation of reports started"
    # spreadsheet of most common attribute types and values
    if args.summary:
        generate_summary(args, driver)
    # Attributes to analyse: the N most common plus any given via --attr.
    attrs = get_most_common_attributes(driver, args.top_attr, force=False)
    if args.attr is not None:
        for attr in args.attr:
            usage_count = get_usage_count(driver, attr)
            attrs.append((attr, usage_count))
    for attr_type, usage_count in attrs:
        # Other per-attribute reports are currently disabled; re-enable as needed.
        # generate_wordcloud_of_attribute(args, driver, attr_type, usage_count)
        # attribute_value_mapped(args, driver, attr_type, usage_count)
        # attribute_value_mapped_label_match(args, driver, attr_type, usage_count)
        # attribute_value_coverage(args, driver, attr_type, usage_count, 0.50, 100)
        # attribute_value_coverage(args, driver, attr_type, usage_count, 0.75, 250)
        # attribute_value_coverage(args, driver, attr_type, usage_count, 0.95, 500)
        # attribute_value_child_of_type(args, driver, attr_type, usage_count)
        attribute_values_mapped_to_obsolete_terms(args, driver, attr_type, usage_count)
|
"""
Simple module to interact with Fieldbook (fieldbook.com)
Every function needs 3 standard args, an api key, a book id to work with, and a
table to work with. They are the first three args in that order. From there,
each function has different args.
:function get: Get data from Fieldbook
:function update: Change data in Fieldbook
:function delete: Delete data from Fieldbook
:function create: Add new data to Fieldbook
"""
import json
import requests
# Base endpoint and default headers shared by every request in this module.
# A `global` statement at module level is a no-op, so the former module-level
# `global url` / `global headers` declarations were dropped; the names are
# ordinary module attributes either way.
url = 'https://api.fieldbook.com/v1/'
headers = {'content-type': 'application/json', 'accept': 'application/json'}
def get(api_key, book_id, table, row):
    """
    Get data stored in a Fieldbook table or a single row of it.

    :arg api_key: A tuple with authentication, ex ('key1', '7f7d7s738858f7g')
    :arg book_id: A string with the Fieldbook book id, ex '7s87tt466rg86drg8'
    :arg table: A string with the table from the book, ex 'assignments'
    :arg row: Row number to retrieve; any value below 1 fetches the whole
        table instead of one row, ex 1
    :return: Requests object containing request information and json.
    """
    # No 'global' statements needed: url and headers are only read here.
    final_url = url + book_id + '/' + table
    if row >= 1:
        final_url += '/' + str(row)
    r = requests.get(final_url, headers=headers, auth=api_key)
    return r
def update(api_key, book_id, table, row, value):
    """
    Update data stored in Fieldbook table to new values.

    :arg api_key: A tuple with authentication, ex ('key1', '7f7d7s738858f7g')
    :arg book_id: A string with the Fieldbook book id, ex '7s87tt466rg86drg8'
    :arg table: A string with the table from the book, ex 'assignments'
    :arg row: Row number to update, ex 1
    :arg value: Dict containing values to be updated in column:value form,
        ex {'task':'Clean', 'length':'10 minutes'}
    :return: Requests object containing request information and json.
    """
    global url
    global headers
    row_url = ''.join([url, book_id, '/', table, '/', str(row)])
    return requests.patch(row_url, json.dumps(value),
                          headers=headers, auth=api_key)
def delete(api_key, book_id, table, row):
    """
    Delete data stored in Fieldbook table.

    :arg api_key: A tuple with authentication, ex ('key1', '7f7d7s738858f7g')
    :arg book_id: A string with the Fieldbook book id, ex '7s87tt466rg86drg8'
    :arg table: A string with the table from the book, ex 'assignments'
    :arg row: Row number to delete, ex 1
    :return: Requests object containing request information and json.
    """
    global url
    # DELETE carries no request body, so only 'accept' is sent here; this
    # deliberately does not use the module-level headers dict.
    plain_headers = {'accept': 'application/json'}
    row_url = ''.join([url, book_id, '/', table, '/', str(row)])
    return requests.delete(row_url, headers=plain_headers, auth=api_key)
def create(api_key, book_id, table, value):
    """
    Create new row in Fieldbook table.

    :arg api_key: A tuple with authentication, ex ('key1', '7f7d7s738858f7g')
    :arg book_id: A string with the Fieldbook book id, ex '7s87tt466rg86drg8'
    :arg table: A string with the table from the book, ex 'assignments'
    :arg value: Dict containing values for new row in column:value form,
        ex {'task':'Clean', 'length':'10 minutes'}
    :return: Requests object containing request information and json.
    """
    global url
    global headers
    table_url = ''.join([url, book_id, '/', table])
    payload = json.dumps(value)
    return requests.post(table_url, payload, headers=headers, auth=api_key)
Make docs more readable
"""
Simple module to interact with Fieldbook (fieldbook.com)
Every function needs 3 standard args: an api key, a book id to work with, and a
table to work with. They are the first three args in that order. From there,
each function has different args.
:function get: Get data from Fieldbook
:function update: Change data in Fieldbook
:function delete: Delete data from Fieldbook
:function create: Add new data to Fieldbook
"""
import json
import requests
global url
url = 'https://api.fieldbook.com/v1/'
global headers
headers = {'content-type': 'application/json', 'accept':'application/json'}
def get(api_key, book_id, table, row):
    """
    Get data stored in Fieldbook table or row.

    :arg api_key: A tuple with authentication, ex ('key1', '7f7d7s738858f7g')
    :arg book_id: A string with the Fieldbook book id, ex '7s87tt466rg86drg8'
    :arg table: A string with the table from the book, ex 'assignments'
    :arg row: Row number to retrieve; values below 1 fetch the whole table, ex 1
    :return: Requests object containing request information and json.
    """
    global url
    global headers
    endpoint = '{0}{1}/{2}'.format(url, book_id, table)
    if row >= 1:
        endpoint = '{0}/{1}'.format(endpoint, str(row))
    return requests.get(endpoint, headers=headers, auth=api_key)
def update(api_key, book_id, table, row, value):
    """
    Update data stored in Fieldbook table to new values.

    :arg api_key: A tuple with authentication, ex ('key1', '7f7d7s738858f7g')
    :arg book_id: A string with the Fieldbook book id, ex '7s87tt466rg86drg8'
    :arg table: A string with the table from the book, ex 'assignments'
    :arg row: Row number to update, ex 1
    :arg value: Dict containing values to be updated in column:value form,
        ex {'task':'Clean', 'length':'10 minutes'}
    :return: Requests object containing request information and json.
    """
    global url
    global headers
    endpoint = url + book_id + '/' + table + '/' + str(row)
    body = json.dumps(value)
    response = requests.patch(endpoint, body, headers=headers, auth=api_key)
    return response
def delete(api_key, book_id, table, row):
    """
    Delete data stored in Fieldbook table.

    :arg api_key: A tuple with authentication, ex ('key1', '7f7d7s738858f7g')
    :arg book_id: A string with the Fieldbook book id, ex '7s87tt466rg86drg8'
    :arg table: A string with the table from the book, ex 'assignments'
    :arg row: Row number to delete, ex 1
    :return: Requests object containing request information and json.
    """
    global url
    # Intentionally not the module-level headers: DELETE has no body,
    # so no content-type is sent.
    delete_headers = {'accept': 'application/json'}
    endpoint = url + book_id + '/' + table + '/' + str(row)
    return requests.delete(endpoint, headers=delete_headers, auth=api_key)
def create(api_key, book_id, table, value):
    """
    Create new row in Fieldbook table.

    :arg api_key: A tuple with authentication, ex ('key1', '7f7d7s738858f7g')
    :arg book_id: A string with the Fieldbook book id, ex '7s87tt466rg86drg8'
    :arg table: A string with the table from the book, ex 'assignments'
    :arg value: Dict containing values for new row in column:value form,
        ex {'task':'Clean', 'length':'10 minutes'}
    :return: Requests object containing request information and json.
    """
    global url
    global headers
    endpoint = '{0}{1}/{2}'.format(url, book_id, table)
    return requests.post(endpoint, json.dumps(value),
                         headers=headers, auth=api_key)
|
import io
import getopt
import sys
def usage():
    """Print the expected command-line invocation to stdout."""
    print("usage: {0} -d dictionary-file -p postings-file -q file-of-queries -o output-file-of-results".format(sys.argv[0]))
if __name__ == '__main__':
    # Parse the four mandatory options: dictionary, postings, queries, output.
    dict_file = postings_file = query_file = output_file = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:')
    except getopt.GetoptError as err:
        # Tell the user what was actually wrong before showing usage;
        # previously the error detail was silently discarded.
        print(err)
        usage()
        sys.exit(2)
    for o, a in opts:
        if o == '-d':
            dict_file = a
        elif o == '-p':
            postings_file = a
        elif o == '-q':
            query_file = a
        elif o == '-o':
            output_file = a
        else:
            assert False, "unhandled option"
    # PEP 8: comparisons to None use 'is', not '=='.
    if any(option is None
           for option in (dict_file, postings_file, query_file, output_file)):
        usage()
        sys.exit(2)
Implement loading of dictionary and postings list
import io
import getopt
import sys
import pickle
def usage():
    """Show how to invoke the script."""
    message = " ".join([
        "usage:", sys.argv[0],
        "-d dictionary-file -p postings-file",
        "-q file-of-queries -o output-file-of-results",
    ])
    print(message)
if __name__ == '__main__':
    # Parse the four mandatory options, then load the index structures.
    dict_file = postings_file = query_file = output_file = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:')
    except getopt.GetoptError as err:
        usage()
        sys.exit(2)
    for o, a in opts:
        if o == '-d':
            dict_file = a
        elif o == '-p':
            postings_file = a
        elif o == '-q':
            query_file = a
        elif o == '-o':
            output_file = a
        else:
            assert False, "unhandled option"
    if dict_file == None or postings_file == None or query_file == None or output_file == None:
        usage()
        sys.exit(2)
    # Term dictionary pickled by the indexer.
    with io.open(dict_file, 'rb') as f:
        dictionary = pickle.load(f)
    # The postings file holds two consecutive pickles: the postings lists
    # followed by their skip pointers, so the second load continues from
    # where the first one stopped in the same stream.
    with io.open(postings_file, 'rb') as f:
        postings = pickle.load(f)
        skip_pointers = pickle.load(f)
|
# coding=utf-8
"""
The CPUCollector collects CPU utilization metric using /proc/stat.
#### Dependencies
* /proc/stat
"""
import diamond.collector
import os
import time
try:
import psutil
psutil # workaround for pyflakes issue #13
except ImportError:
psutil = None
class CPUCollector(diamond.collector.Collector):
    """Collects CPU utilization metrics from /proc/stat, with a psutil fallback."""
    PROC = '/proc/stat'
    INTERVAL = 1
    # /proc/stat counters are monotonic and wrap at MAX_COUNTER;
    # derivative() uses these values to handle the rollover.
    MAX_VALUES = {
        'user': diamond.collector.MAX_COUNTER,
        'nice': diamond.collector.MAX_COUNTER,
        'system': diamond.collector.MAX_COUNTER,
        'idle': diamond.collector.MAX_COUNTER,
        'iowait': diamond.collector.MAX_COUNTER,
        'irq': diamond.collector.MAX_COUNTER,
        'softirq': diamond.collector.MAX_COUNTER,
        'steal': diamond.collector.MAX_COUNTER,
        'guest': diamond.collector.MAX_COUNTER,
        'guest_nice': diamond.collector.MAX_COUNTER,
    }
    def get_default_config_help(self):
        """Return the help text for this collector's configuration options."""
        config_help = super(CPUCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help
    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(CPUCollector, self).get_default_config()
        config.update({
            'enabled': 'True',
            'path': 'cpu',
            'xenfix': None,
            'simple': 'False',
        })
        return config
    def collect(self):
        """
        Collector cpu stats
        """
        def cpu_time_list():
            """
            get cpu time list
            """
            # user/nice/system/idle jiffies from the aggregate 'cpu' line.
            statFile = open(self.PROC, "r")
            timeList = statFile.readline().split(" ")[2:6]
            for i in range(len(timeList)):
                timeList[i] = int(timeList[i])
            statFile.close()
            return timeList
        def cpu_delta_time(interval):
            """
            Get before and after cpu times for usage calc
            """
            pre_check = cpu_time_list()
            time.sleep(interval)
            post_check = cpu_time_list()
            for i in range(len(pre_check)):
                post_check[i] -= pre_check[i]
            return post_check
        if os.access(self.PROC, os.R_OK):
            # If simple, only return an aggregate CPU% metric.
            if self.config['simple'] == 'True':
                dt = cpu_delta_time(self.INTERVAL)
                cpuPct = 100 - (dt[len(dt) - 1] * 100.00 / sum(dt))
                self.publish('percent', str('%.4f' % cpuPct))
                return True
            results = {}
            # Open file
            file = open(self.PROC)
            for line in file:
                if not line.startswith('cpu'):
                    continue
                elements = line.split()
                cpu = elements[0]
                if cpu == 'cpu':
                    cpu = 'total'
                results[cpu] = {}
                # Each column that is present maps to the next counter name;
                # older kernels expose fewer columns, hence the len() guards.
                if len(elements) >= 2:
                    results[cpu]['user'] = elements[1]
                if len(elements) >= 3:
                    results[cpu]['nice'] = elements[2]
                if len(elements) >= 4:
                    results[cpu]['system'] = elements[3]
                if len(elements) >= 5:
                    results[cpu]['idle'] = elements[4]
                if len(elements) >= 6:
                    results[cpu]['iowait'] = elements[5]
                if len(elements) >= 7:
                    results[cpu]['irq'] = elements[6]
                if len(elements) >= 8:
                    results[cpu]['softirq'] = elements[7]
                if len(elements) >= 9:
                    results[cpu]['steal'] = elements[8]
                if len(elements) >= 10:
                    results[cpu]['guest'] = elements[9]
                if len(elements) >= 11:
                    results[cpu]['guest_nice'] = elements[10]
            # Close File
            file.close()
            metrics = {}
            for cpu in results.keys():
                stats = results[cpu]
                for s in stats.keys():
                    # Get Metric Name
                    metric_name = '.'.join([cpu, s])
                    # Get actual data
                    metrics[metric_name] = self.derivative(metric_name,
                                                           long(stats[s]),
                                                           self.MAX_VALUES[s])
            # Check for a bug in xen where the idle time is doubled for guest
            # See https://bugzilla.redhat.com/show_bug.cgi?id=624756
            if self.config['xenfix'] is None or self.config['xenfix'] == True:
                if os.path.isdir('/proc/xen'):
                    total = 0
                    for metric_name in metrics.keys():
                        if 'cpu0.' in metric_name:
                            total += int(metrics[metric_name])
                    if total > 110:
                        self.config['xenfix'] = True
                        for mname in metrics.keys():
                            if '.idle' in mname:
                                metrics[mname] = float(metrics[mname]) / 2
                    elif total > 0:
                        self.config['xenfix'] = False
                else:
                    self.config['xenfix'] = False
            # Publish Metric Derivative
            for metric_name in metrics.keys():
                self.publish(metric_name,
                             metrics[metric_name])
            return True
        elif psutil:
            # Fallback path for platforms without /proc/stat (e.g. non-Linux).
            cpu_time = psutil.cpu_times(True)
            total_time = psutil.cpu_times()
            for i in range(0, len(cpu_time)):
                metric_name = 'cpu' + str(i)
                self.publish(metric_name + '.user',
                             self.derivative(metric_name + '.user',
                                             cpu_time[i].user,
                                             self.MAX_VALUES['user']))
                self.publish(metric_name + '.nice',
                             self.derivative(metric_name + '.nice',
                                             cpu_time[i].nice,
                                             self.MAX_VALUES['nice']))
                self.publish(metric_name + '.system',
                             self.derivative(metric_name + '.system',
                                             cpu_time[i].system,
                                             self.MAX_VALUES['system']))
                self.publish(metric_name + '.idle',
                             self.derivative(metric_name + '.idle',
                                             cpu_time[i].idle,
                                             self.MAX_VALUES['idle']))
            metric_name = 'total'
            self.publish(metric_name + '.user',
                         self.derivative(metric_name + '.user',
                                         total_time.user,
                                         self.MAX_VALUES['user']))
            self.publish(metric_name + '.nice',
                         self.derivative(metric_name + '.nice',
                                         total_time.nice,
                                         self.MAX_VALUES['nice']))
            self.publish(metric_name + '.system',
                         self.derivative(metric_name + '.system',
                                         total_time.system,
                                         self.MAX_VALUES['system']))
            self.publish(metric_name + '.idle',
                         self.derivative(metric_name + '.idle',
                                         total_time.idle,
                                         self.MAX_VALUES['idle']))
            return True
        return None
Add an option to disable reporting of per-core metrics (keeping only the
total), since on servers with large numbers of cores the per-core numbers
can use a lot of disk space while being of little interest.
# coding=utf-8
"""
The CPUCollector collects CPU utilization metric using /proc/stat.
#### Dependencies
* /proc/stat
"""
import diamond.collector
import os
import time
try:
import psutil
psutil # workaround for pyflakes issue #13
except ImportError:
psutil = None
class CPUCollector(diamond.collector.Collector):
    """
    Collects CPU utilization metrics by parsing /proc/stat, falling back to
    psutil when /proc/stat is not readable (e.g. non-Linux platforms).
    """
    PROC = '/proc/stat'
    INTERVAL = 1
    # /proc/stat counters are monotonic and wrap at MAX_COUNTER;
    # derivative() uses these values to handle the rollover.
    MAX_VALUES = {
        'user': diamond.collector.MAX_COUNTER,
        'nice': diamond.collector.MAX_COUNTER,
        'system': diamond.collector.MAX_COUNTER,
        'idle': diamond.collector.MAX_COUNTER,
        'iowait': diamond.collector.MAX_COUNTER,
        'irq': diamond.collector.MAX_COUNTER,
        'softirq': diamond.collector.MAX_COUNTER,
        'steal': diamond.collector.MAX_COUNTER,
        'guest': diamond.collector.MAX_COUNTER,
        'guest_nice': diamond.collector.MAX_COUNTER,
    }
    # Column order of the per-cpu jiffy counters in /proc/stat.
    STAT_FIELDS = ('user', 'nice', 'system', 'idle', 'iowait', 'irq',
                   'softirq', 'steal', 'guest', 'guest_nice')
    def get_default_config_help(self):
        """Return the help text for this collector's configuration options."""
        config_help = super(CPUCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help
    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(CPUCollector, self).get_default_config()
        config.update({
            'enabled': 'True',
            'path': 'cpu',
            'percore': 'True',   # publish per-core metrics as well as 'total'
            'xenfix': None,      # None = auto-detect the doubled-idle Xen bug
            'simple': 'False',   # 'True' = publish only one aggregate percent
        })
        return config
    def collect(self):
        """
        Collect CPU stats from /proc/stat, or via psutil when unreadable.
        """
        def cpu_time_list():
            # user/nice/system/idle jiffies from the aggregate 'cpu' line.
            # 'with' guarantees the handle is closed even on parse errors.
            with open(self.PROC, "r") as stat_file:
                return [int(value)
                        for value in stat_file.readline().split(" ")[2:6]]
        def cpu_delta_time(interval):
            # Sample twice, `interval` seconds apart, and return the deltas.
            pre_check = cpu_time_list()
            time.sleep(interval)
            post_check = cpu_time_list()
            return [post - pre for pre, post in zip(pre_check, post_check)]
        if os.access(self.PROC, os.R_OK):
            # If simple, only return an aggregate CPU% metric.
            if self.config['simple'] == 'True':
                dt = cpu_delta_time(self.INTERVAL)
                cpuPct = 100 - (dt[-1] * 100.00 / sum(dt))
                self.publish('percent', str('%.4f' % cpuPct))
                return True
            results = {}
            # Don't shadow the Python 2 builtin `file`; the context manager
            # also guarantees the handle is closed on every exit path.
            with open(self.PROC) as stat_file:
                for line in stat_file:
                    if not line.startswith('cpu'):
                        continue
                    elements = line.split()
                    cpu = elements[0]
                    if cpu == 'cpu':
                        cpu = 'total'
                    elif self.config['percore'] == 'False':
                        # Per-core reporting disabled: keep only 'total'.
                        continue
                    # zip() pairs each available column with its field name
                    # and stops at the shorter sequence, exactly like the
                    # old chain of len() guards did for short kernel lines.
                    results[cpu] = dict(zip(self.STAT_FIELDS, elements[1:]))
            metrics = {}
            for cpu, stats in results.items():
                for field, raw_value in stats.items():
                    metric_name = '.'.join([cpu, field])
                    metrics[metric_name] = self.derivative(
                        metric_name, long(raw_value), self.MAX_VALUES[field])
            # Check for a bug in xen where the idle time is doubled for guest
            # See https://bugzilla.redhat.com/show_bug.cgi?id=624756
            if self.config['xenfix'] is None or self.config['xenfix'] == True:
                if os.path.isdir('/proc/xen'):
                    total = 0
                    for metric_name in metrics.keys():
                        if 'cpu0.' in metric_name:
                            total += int(metrics[metric_name])
                    if total > 110:
                        self.config['xenfix'] = True
                        for mname in metrics.keys():
                            if '.idle' in mname:
                                metrics[mname] = float(metrics[mname]) / 2
                    elif total > 0:
                        self.config['xenfix'] = False
                else:
                    self.config['xenfix'] = False
            # Publish the metric derivatives.
            for metric_name, value in metrics.items():
                self.publish(metric_name, value)
            return True
        elif psutil:
            # Fallback path for platforms without /proc/stat.
            # NOTE(review): the 'percore' option is not honoured here,
            # matching previous behaviour of this branch.
            cpu_time = psutil.cpu_times(True)
            total_time = psutil.cpu_times()
            for i in range(0, len(cpu_time)):
                metric_name = 'cpu' + str(i)
                for field in ('user', 'nice', 'system', 'idle'):
                    self.publish(metric_name + '.' + field,
                                 self.derivative(metric_name + '.' + field,
                                                 getattr(cpu_time[i], field),
                                                 self.MAX_VALUES[field]))
            metric_name = 'total'
            for field in ('user', 'nice', 'system', 'idle'):
                self.publish(metric_name + '.' + field,
                             self.derivative(metric_name + '.' + field,
                                             getattr(total_time, field),
                                             self.MAX_VALUES[field]))
            return True
        return None
|
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from chatexchange_extension import Client
from html.parser import HTMLParser
from html import unescape
from hashlib import md5
from configparser import NoOptionError, RawConfigParser
from helpers import environ_or_none, log
import threading
# noinspection PyCompatibility
import regex
import subprocess as sp
from dulwich.repo import Repo
import platform
def git_commit_info():
    """Return short/full id, (name, email) author tuple, and the first
    message line of the commit at HEAD of the current repository."""
    repo = Repo('.')
    head_commit = repo.get_object(repo.head())
    full_id = head_commit.id.decode("utf-8")
    author = regex.findall("(.*?) <(.*?)>", head_commit.author.decode("utf-8"))[0]
    subject = head_commit.message.decode("utf-8").strip('\r\n').split('\n')[0]
    return {'id': full_id[0:7], 'id_full': full_id,
            'author': author,
            'message': subject}
def git_status():
    """
    Return the output of `git status` for the current working directory.

    :raises OSError: if git wrote anything to stderr.
    """
    # Passing the argument vector directly (shell=False) works the same on
    # every platform and avoids shell quoting issues; the original
    # special-cased Windows and invoked a shell in both branches.
    process = sp.Popen(['git', 'status'], cwd=os.getcwd(),
                       stderr=sp.PIPE, stdout=sp.PIPE)
    stdout, stderr = process.communicate()
    if not stderr:
        return stdout.decode('utf-8').strip('\n')
    else:
        raise OSError("Git error!")
def strip_escape_chars(line):
    """
    Strip ANSI escape sequences and stray terminal control characters.

    Needed later on for properly 'stripping' unicode weirdness out of git
    log data; otherwise we can't properly work with git log data.
    """
    # The stdlib `re` module fully supports this pattern; the third-party
    # `regex` package is not needed here.
    import re
    line = str(line)
    ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
    return ansi_escape.sub('', line).strip('=\r\r\x1b>\n"')
# noinspection PyClassHasNoInit,PyDeprecation,PyUnresolvedReferences
class GlobalVars:
    """Process-wide mutable state shared across the bot's modules."""
    false_positives = []
    whitelisted_users = []
    blacklisted_users = []
    blacklisted_usernames = []
    blacklisted_websites = []
    bad_keywords = []
    watched_keywords = {}
    ignored_posts = []
    auto_ignored_posts = []
    # Wall-clock UTC time the process started, used in status reports.
    startup_utc = datetime.utcnow().strftime("%H:%M:%S")
    latest_questions = []
    api_backoff_time = 0
    # Chat room ids (strings); used below as dict keys for per-room state.
    charcoal_room_id = "11540"
    meta_tavern_room_id = "89"
    socvr_room_id = "41570"
    blockedTime = {"all": 0, charcoal_room_id: 0, meta_tavern_room_id: 0, socvr_room_id: 0}
    metasmoke_last_ping_time = datetime.now()
not_privileged_warning = """
You are not a privileged user. Please see
[the privileges wiki page](https://charcoal-se.org/smokey/Privileges) for
information on what privileges are and what is expected of privileged users.
""".strip().replace("\n", " ")
experimental_reasons = [ # Don't widely report these
"potentially bad keyword in answer",
"potentially bad keyword in body",
"potentially bad keyword in title",
"potentially bad keyword in username"
"toxic body detected",
"toxic answer detected"]
non_socvr_reasons = [] # Don't report to SOCVR
non_tavern_reasons = [ # Don't report in the Tavern
"all-caps body",
"all-caps answer",
"repeating characters in body",
"repeating characters in title",
"repeating characters in answer",
"few unique characters in body",
"few unique characters in answer",
"title has only one unique char",
"phone number detected in title",
"offensive body detected",
"no whitespace in body",
"no whitespace in answer",
]
non_tavern_sites = ["stackoverflow.com"]
parser = HTMLParser()
parser.unescape = unescape
wrap = Client("stackexchange.com")
wrapm = Client("meta.stackexchange.com")
wrapso = Client("stackoverflow.com")
privileged_users = {
charcoal_room_id: [
"117490", # Normal Human
"66258", # Andy
"31768", # ManishEarth
"103081", # hichris123
"73046", # Undo
"88521", # ProgramFOX
"59776", # Doorknob
"31465", # Seth
"88577", # Santa Claus
"34124", # Andrew Leach
"54229", # apnorton
"20459", # S.L. Barth
"32436", # tchrist
"30477", # Brock Adams
"58529", # ferrybig
"145208", # Robert Longson
"178825", # Ms Yvette
"171800", # JAL
"64978", # PeterJ
"125141", # Jeffrey Bosboom
"54902", # bummi
"135450", # M.A.R.
"145604", # Quill
"60548", # rene
"121401", # michaelpri
"116218", # JamesENL
"82927", # Braiam
"11606", # bwDraco
"19761", # Ilmari Karonen
"108271", # Andrew T.
"171054", # Magisch
"190011", # Petter Friberg
"165661", # Tunaki
"145086", # Wai Ha Lee
"137665", # ByteCommander
"147884", # wythagoras
"186395", # Åna
"181293", # Ashish Ahuja
"163686", # Gothdo
"145827", # angussidney
"244748", # Supreme Leader SnokeDetector (angussidney's sock)
"121520", # ArtOfCode
"244382", # Lt. A. Code (ArtOfCode's sock to test things with)
"137388", # QPaysTaxes
"212311", # Ryan Bemrose
"172397", # Kyll
"224538", # FrankerZ
"61202", # OldSkool
"56166", # Jan Dvorak
"133966", # DavidPostill
"22839", # djsmiley2k
"97389", # Kaz Wolfe
"144962", # DJMcMayhem
"139423", # NobodyNada
"62118", # tripleee
"130558", # Registered User
"128113", # arda
"164318", # Glorfindel
"175347", # Floern
"180274", # Alexander O'Mara
"158742", # Rob
"207356", # 4castle
"133031", # Mithrandir
"215671", # Locutus of Borg (Mithrandir's Sock)
"169713", # Mego
"126657", # Cerbrus
"10145", # Thomas Ward
"161943", # J F
"195967", # CaffeineAddiction
"5363", # Stijn
"248139", # FelixSFD
"156721", # D-side
"167070", # quartata
"172450", # Hovercraft Full Of Eels
"56200", # Eric Leschinski
"211021", # Henders
"255290", # Gypsy Spellweaver
"64521", # CalvT
"165474", # Hyper Neutrino
"281362", # Hyper Neutrino v2
"169252", # Cai
"155243", # Nisse Engström
"69330", # Sconibulus
"164187", # Okx
"202619", # John Militer
"262693", # suraj
"11287", # Martin Sleziak
"88588", # NVZ
"281674", # paper1111
"279119", # Tetsuya Yamamoto
"307652", # Ajay Brahmakshatriya
],
meta_tavern_room_id: [
"315433", # Normal Human
"244519", # CRABOLO
"244382", # TGMCians
"194047", # Jan Dvorak
"158100", # rene
"178438", # Manishearth
"237685", # hichris123
"215468", # Undo
"229438", # ProgramFOX
"180276", # Doorknob
"161974", # Lynn Crumbling
"186281", # Andy
"266094", # Unihedro
"245167", # Infinite Recursion
"230261", # Jason C
"213575", # Braiam
"241919", # Andrew T.
"203389", # backwards-Seth
"202832", # Mooseman
"160017", # bwDraco
"201151", # bummi
"188558", # Frank
"229166", # Santa Claus
"159034", # Kevin Brown
"203972", # PeterJ
"188673", # Alexis King
"258672", # AstroCB
"227577", # Sam
"255735", # cybermonkey
"279182", # Ixrec
"271104", # James
"220428", # Qantas 94 Heavy
"153355", # tchrist
"238426", # Ed Cottrell
"166899", # Second Rikudo
"287999", # ASCIIThenANSI
"208518", # JNat
"284141", # michaelpri
"260312", # vaultah
"244062", # SouravGhosh
"152859", # Shadow Wizard
"201314", # apnorton
"280934", # M.A.Ramezani
"200235", # durron597
"148310", # Awesome Poodles / Brock Adams
"168333", # S.L. Barth
"257207", # Unikitty
"244282", # DroidDev
"163250", # Cupcake
"298265", # BoomsPlus
"253560", # josilber
"244254", # misterManSam
"188189", # Robert Longson
"174699", # Ilmari Karonen
"202362", # chmod 666 telkitty
"289717", # Quill
"237813", # bjb568
"311345", # Simon Klaver
"171881", # rekire
"260388", # Pandya
"310756", # Ms Yvette
"262399", # Jeffrey Bosboom
"242209", # JAL
"280883", # ByteCommander
"302251", # kos
"262823", # ArtOfCode
"215067", # Ferrybig
"308386", # Magisch
"285368", # angussidney
"158829", # Thomas Ward
"294691", # Mithrandir
"203553", # CalvT
"289971", # Hyper Neutrino
"346854" # DonQuiKong
],
socvr_room_id: [
"1849664", # Undo
"2581872", # hichris123
"1198729", # Manishearth
"3717023", # Normal Human aka 1999
"2619912", # ProgramFOX
"578411", # rene
"1043380", # gunr2171
"2246344", # Sam
"2756409", # TylerH
"1768232", # durron597
"359284", # Kevin Brown
"258400", # easwee
"3622940", # Unihedron
"3204551", # Deduplicator
"4342498", # NathanOliver
"4639281", # Tiny Giant
"3093387", # josilber
"1652962", # cimmanon
"1677912", # Mogsdad
"656243", # Lynn Crumbling
"3933332", # Rizier123
"2422013", # cybermonkey
"3478852", # Nisse Engström
"2302862", # Siguza
"1324", # Paul Roub
"1743880", # Tunaki
"1663001", # DavidG
"2415822", # JAL
"4174897", # Kyll
"5299236", # Kevin Guan
"4050842", # Thaillie
"1816093", # Drew
"874188", # Triplee
"880772", # approxiblue
"1835379", # Cerbrus
"3956566", # JamesENL
"2357233", # Ms Yvette
"3155639", # AlexanderOMara
"462627", # Praveen Kumar
"4490559", # intboolstring
"1364007", # Wai Ha Lee
"1699210", # bummi
"563532", # Rob
"5389107", # Magisch
"4099593", # bhargav-rao
"1542723", # Ferrybig
"2025923", # Tushar
"5292302", # Petter Friberg
"792066", # Braiam
"5666987", # Ian
"3160466", # ArtOfCode
"4688119", # Ashish Ahuja
"3476191", # Nobody Nada
"2227743", # Eric D
"821878", # Ryan Bemrose
"1413395", # Panta Rei
"4875631", # FrankerZ
"2958086", # Compass
"499214", # JanDvorak
"5647260", # Andrew L.
"559745", # Floern
"5743988", # 4castle
"4622463", # angussidney
"603346", # Thomas Ward
"3002139", # Baum mit Augen
"1863564", # QPaysTaxes
"4687348", # FelixSFD
"4751173", # Glorfindel
"2233391", # henders
"4805174", # kayess
"2370483", # Machavity
"1873567", # CalvT
"4826457", # suraj
"8242698", # user0042
"3773011", # Makyen
"2858773" # Ajay Brahmakshatriya
],
'111347': [ # SOBotics
"3160466", # ArtOfCode
"1849664", # Undo
"3002139", # Baum mit Augen
"3476191", # Nobody Nada
"5292302", # Petter Friberg
"4688119", # Ashish Ahuja
"4099593", # Bhargav Rao
"1743880", # Tunaki
"559745", # Floern
"4687348", # FelixSFD
"6375113", # Bugs
"4622463", # angussidney
"563532", # Rob
"4050842", # Thaillie
"1915448" # g00glen00b
]
}
code_privileged_users = None
smokeDetector_user_id = {charcoal_room_id: "120914", meta_tavern_room_id: "266345",
socvr_room_id: "3735529", '111347': '3735529'}
censored_committer_names = {"3f4ed0f38df010ce300dba362fa63a62": "Undo1"}
commit = git_commit_info()
if md5(commit['author'][0].encode('utf-8')).hexdigest() in censored_committer_names:
commit['author'] = censored_committer_names[md5(commit['author'][0].encode('utf-8')).hexdigest()]
commit_with_author = "%s (%s: *%s*)" % (commit['id'],
commit['author'][0] if type(commit['author']) in [list, tuple]
else commit['author'],
commit['message'])
on_master = "HEAD detached" not in git_status()
charcoal_hq = None
tavern_on_the_meta = None
socvr = None
s = ""
s_reverted = ""
specialrooms = []
apiquota = -1
bodyfetcher = None
se_sites = []
users_chatting = {meta_tavern_room_id: [], charcoal_room_id: [], socvr_room_id: [], '111347': []}
why_data = []
why_data_allspam = []
notifications = []
listen_to_these_if_edited = []
multiple_reporters = []
api_calls_per_site = {}
standby_message = ""
standby_mode = False
api_request_lock = threading.Lock()
num_posts_scanned = 0
post_scan_time = 0
posts_scan_stats_lock = threading.Lock()
config = RawConfigParser()
if os.path.isfile('config'):
config.read('config')
else:
config.read('config.ci')
latest_smokedetector_messages = {meta_tavern_room_id: [], charcoal_room_id: [], socvr_room_id: [],
'111347': []}
# environ_or_none defined in helpers.py
bot_name = environ_or_none("SMOKEDETECTOR_NAME") or "SmokeDetector"
bot_repository = environ_or_none("SMOKEDETECTOR_REPO") or "//github.com/Charcoal-SE/SmokeDetector"
chatmessage_prefix = "[{}]({})".format(bot_name, bot_repository)
site_id_dict = {}
post_site_id_to_question = {}
location = config.get("Config", "location")
metasmoke_ws = None
try:
metasmoke_host = config.get("Config", "metasmoke_host")
except NoOptionError:
metasmoke_host = None
log('info', "metasmoke host not found. Set it as metasmoke_host in the config file."
"See https://github.com/Charcoal-SE/metasmoke.")
try:
metasmoke_key = config.get("Config", "metasmoke_key")
except NoOptionError:
metasmoke_key = ""
log('info', "No metasmoke key found, which is okay if both are running on the same host")
try:
metasmoke_ws_host = config.get("Config", "metasmoke_ws_host")
except NoOptionError:
metasmoke_ws_host = ""
log('info', "No metasmoke websocket host found, which is okay if you're anti-websocket")
try:
github_username = config.get("Config", "github_username")
github_password = config.get("Config", "github_password")
except NoOptionError:
github_username = None
github_password = None
try:
perspective_key = config.get("Config", "perspective_key")
except NoOptionError:
perspective_key = None
Fix a missing comma in the experimental_reasons list; without it, two
adjacent string literals were silently concatenated into one entry.
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from chatexchange_extension import Client
from html.parser import HTMLParser
from html import unescape
from hashlib import md5
from configparser import NoOptionError, RawConfigParser
from helpers import environ_or_none, log
import threading
# noinspection PyCompatibility
import regex
import subprocess as sp
from dulwich.repo import Repo
import platform
def git_commit_info():
    """Return short/full commit id, the (name, email) author tuple, and the
    first line of the commit message for HEAD of the current repository."""
    git = Repo('.')
    commit = git.get_object(git.head())
    return {'id': commit.id.decode("utf-8")[0:7], 'id_full': commit.id.decode("utf-8"),
            'author': regex.findall("(.*?) <(.*?)>", commit.author.decode("utf-8"))[0],
            'message': commit.message.decode("utf-8").strip('\r\n').split('\n')[0]}
def git_status():
    """Return `git status` output for the cwd; raise OSError if git wrote
    anything to stderr."""
    # NOTE(review): both branches use shell=True; on non-Windows the whole
    # command is passed as a single string, on Windows as a list -- consider
    # unifying on a plain argument vector with shell=False.
    if 'windows' in platform.platform().lower():
        data = sp.Popen(['git', 'status'], shell=True, cwd=os.getcwd(), stderr=sp.PIPE, stdout=sp.PIPE).communicate()
    else:
        data = sp.Popen(['git status'], shell=True, cwd=os.getcwd(), stderr=sp.PIPE, stdout=sp.PIPE).communicate()
    if not data[1]:
        return data[0].decode('utf-8').strip('\n')
    else:
        raise OSError("Git error!")
# This is needed later on for properly 'stripping' unicode weirdness out of git log data.
# Otherwise, we can't properly work with git log data.
def strip_escape_chars(line):
    """Strip ANSI escape sequences and stray terminal control characters
    from *line* (coerced to str first)."""
    line = str(line)
    # CSI sequences: ESC-[ (or lone 0x9B) followed by parameter,
    # intermediate, and final bytes.
    ansi_escape = regex.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
    return ansi_escape.sub('', line).strip('=\r\r\x1b>\n"')
# noinspection PyClassHasNoInit,PyDeprecation,PyUnresolvedReferences
class GlobalVars:
    """Process-wide shared state for the SmokeDetector bot.

    Holds blacklists and watchlists, chat room ids, privileged user lists,
    API bookkeeping, and settings read from the ``config`` file.  NOTE: the
    class body runs at import time and has side effects — it constructs chat
    clients, reads the config file, and shells out to git.
    """
    # Runtime lists/watchlists, populated elsewhere after startup.
    false_positives = []
    whitelisted_users = []
    blacklisted_users = []
    blacklisted_usernames = []
    blacklisted_websites = []
    bad_keywords = []
    watched_keywords = {}
    ignored_posts = []
    auto_ignored_posts = []
    startup_utc = datetime.utcnow().strftime("%H:%M:%S")
    latest_questions = []
    api_backoff_time = 0
    # Chat room ids the bot reports to.
    charcoal_room_id = "11540"
    meta_tavern_room_id = "89"
    socvr_room_id = "41570"
    blockedTime = {"all": 0, charcoal_room_id: 0, meta_tavern_room_id: 0, socvr_room_id: 0}
    metasmoke_last_ping_time = datetime.now()
    not_privileged_warning = """
    You are not a privileged user. Please see
    [the privileges wiki page](https://charcoal-se.org/smokey/Privileges) for
    information on what privileges are and what is expected of privileged users.
    """.strip().replace("\n", " ")
    experimental_reasons = [ # Don't widely report these
        "potentially bad keyword in answer",
        "potentially bad keyword in body",
        "potentially bad keyword in title",
        "potentially bad keyword in username",
        "toxic body detected",
        "toxic answer detected"]
    non_socvr_reasons = [] # Don't report to SOCVR
    non_tavern_reasons = [ # Don't report in the Tavern
        "all-caps body",
        "all-caps answer",
        "repeating characters in body",
        "repeating characters in title",
        "repeating characters in answer",
        "few unique characters in body",
        "few unique characters in answer",
        "title has only one unique char",
        "phone number detected in title",
        "offensive body detected",
        "no whitespace in body",
        "no whitespace in answer",
    ]
    non_tavern_sites = ["stackoverflow.com"]
    # HTMLParser.unescape was removed/deprecated; patch in html.unescape instead.
    parser = HTMLParser()
    parser.unescape = unescape
    # Chat clients for the three chat hosts.
    wrap = Client("stackexchange.com")
    wrapm = Client("meta.stackexchange.com")
    wrapso = Client("stackoverflow.com")
    # Chat users allowed to run privileged commands, keyed by room id.
    privileged_users = {
        charcoal_room_id: [
            "117490",  # Normal Human
            "66258",  # Andy
            "31768",  # ManishEarth
            "103081",  # hichris123
            "73046",  # Undo
            "88521",  # ProgramFOX
            "59776",  # Doorknob
            "31465",  # Seth
            "88577",  # Santa Claus
            "34124",  # Andrew Leach
            "54229",  # apnorton
            "20459",  # S.L. Barth
            "32436",  # tchrist
            "30477",  # Brock Adams
            "58529",  # ferrybig
            "145208",  # Robert Longson
            "178825",  # Ms Yvette
            "171800",  # JAL
            "64978",  # PeterJ
            "125141",  # Jeffrey Bosboom
            "54902",  # bummi
            "135450",  # M.A.R.
            "145604",  # Quill
            "60548",  # rene
            "121401",  # michaelpri
            "116218",  # JamesENL
            "82927",  # Braiam
            "11606",  # bwDraco
            "19761",  # Ilmari Karonen
            "108271",  # Andrew T.
            "171054",  # Magisch
            "190011",  # Petter Friberg
            "165661",  # Tunaki
            "145086",  # Wai Ha Lee
            "137665",  # ByteCommander
            "147884",  # wythagoras
            "186395",  # Åna
            "181293",  # Ashish Ahuja
            "163686",  # Gothdo
            "145827",  # angussidney
            "244748",  # Supreme Leader SnokeDetector (angussidney's sock)
            "121520",  # ArtOfCode
            "244382",  # Lt. A. Code (ArtOfCode's sock to test things with)
            "137388",  # QPaysTaxes
            "212311",  # Ryan Bemrose
            "172397",  # Kyll
            "224538",  # FrankerZ
            "61202",  # OldSkool
            "56166",  # Jan Dvorak
            "133966",  # DavidPostill
            "22839",  # djsmiley2k
            "97389",  # Kaz Wolfe
            "144962",  # DJMcMayhem
            "139423",  # NobodyNada
            "62118",  # tripleee
            "130558",  # Registered User
            "128113",  # arda
            "164318",  # Glorfindel
            "175347",  # Floern
            "180274",  # Alexander O'Mara
            "158742",  # Rob
            "207356",  # 4castle
            "133031",  # Mithrandir
            "215671",  # Locutus of Borg (Mithrandir's Sock)
            "169713",  # Mego
            "126657",  # Cerbrus
            "10145",  # Thomas Ward
            "161943",  # J F
            "195967",  # CaffeineAddiction
            "5363",  # Stijn
            "248139",  # FelixSFD
            "156721",  # D-side
            "167070",  # quartata
            "172450",  # Hovercraft Full Of Eels
            "56200",  # Eric Leschinski
            "211021",  # Henders
            "255290",  # Gypsy Spellweaver
            "64521",  # CalvT
            "165474",  # Hyper Neutrino
            "281362",  # Hyper Neutrino v2
            "169252",  # Cai
            "155243",  # Nisse Engström
            "69330",  # Sconibulus
            "164187",  # Okx
            "202619",  # John Militer
            "262693",  # suraj
            "11287",  # Martin Sleziak
            "88588",  # NVZ
            "281674",  # paper1111
            "279119",  # Tetsuya Yamamoto
            "307652",  # Ajay Brahmakshatriya
        ],
        meta_tavern_room_id: [
            "315433",  # Normal Human
            "244519",  # CRABOLO
            "244382",  # TGMCians
            "194047",  # Jan Dvorak
            "158100",  # rene
            "178438",  # Manishearth
            "237685",  # hichris123
            "215468",  # Undo
            "229438",  # ProgramFOX
            "180276",  # Doorknob
            "161974",  # Lynn Crumbling
            "186281",  # Andy
            "266094",  # Unihedro
            "245167",  # Infinite Recursion
            "230261",  # Jason C
            "213575",  # Braiam
            "241919",  # Andrew T.
            "203389",  # backwards-Seth
            "202832",  # Mooseman
            "160017",  # bwDraco
            "201151",  # bummi
            "188558",  # Frank
            "229166",  # Santa Claus
            "159034",  # Kevin Brown
            "203972",  # PeterJ
            "188673",  # Alexis King
            "258672",  # AstroCB
            "227577",  # Sam
            "255735",  # cybermonkey
            "279182",  # Ixrec
            "271104",  # James
            "220428",  # Qantas 94 Heavy
            "153355",  # tchrist
            "238426",  # Ed Cottrell
            "166899",  # Second Rikudo
            "287999",  # ASCIIThenANSI
            "208518",  # JNat
            "284141",  # michaelpri
            "260312",  # vaultah
            "244062",  # SouravGhosh
            "152859",  # Shadow Wizard
            "201314",  # apnorton
            "280934",  # M.A.Ramezani
            "200235",  # durron597
            "148310",  # Awesome Poodles / Brock Adams
            "168333",  # S.L. Barth
            "257207",  # Unikitty
            "244282",  # DroidDev
            "163250",  # Cupcake
            "298265",  # BoomsPlus
            "253560",  # josilber
            "244254",  # misterManSam
            "188189",  # Robert Longson
            "174699",  # Ilmari Karonen
            "202362",  # chmod 666 telkitty
            "289717",  # Quill
            "237813",  # bjb568
            "311345",  # Simon Klaver
            "171881",  # rekire
            "260388",  # Pandya
            "310756",  # Ms Yvette
            "262399",  # Jeffrey Bosboom
            "242209",  # JAL
            "280883",  # ByteCommander
            "302251",  # kos
            "262823",  # ArtOfCode
            "215067",  # Ferrybig
            "308386",  # Magisch
            "285368",  # angussidney
            "158829",  # Thomas Ward
            "294691",  # Mithrandir
            "203553",  # CalvT
            "289971",  # Hyper Neutrino
            "346854"  # DonQuiKong
        ],
        socvr_room_id: [
            "1849664",  # Undo
            "2581872",  # hichris123
            "1198729",  # Manishearth
            "3717023",  # Normal Human aka 1999
            "2619912",  # ProgramFOX
            "578411",  # rene
            "1043380",  # gunr2171
            "2246344",  # Sam
            "2756409",  # TylerH
            "1768232",  # durron597
            "359284",  # Kevin Brown
            "258400",  # easwee
            "3622940",  # Unihedron
            "3204551",  # Deduplicator
            "4342498",  # NathanOliver
            "4639281",  # Tiny Giant
            "3093387",  # josilber
            "1652962",  # cimmanon
            "1677912",  # Mogsdad
            "656243",  # Lynn Crumbling
            "3933332",  # Rizier123
            "2422013",  # cybermonkey
            "3478852",  # Nisse Engström
            "2302862",  # Siguza
            "1324",  # Paul Roub
            "1743880",  # Tunaki
            "1663001",  # DavidG
            "2415822",  # JAL
            "4174897",  # Kyll
            "5299236",  # Kevin Guan
            "4050842",  # Thaillie
            "1816093",  # Drew
            "874188",  # Triplee
            "880772",  # approxiblue
            "1835379",  # Cerbrus
            "3956566",  # JamesENL
            "2357233",  # Ms Yvette
            "3155639",  # AlexanderOMara
            "462627",  # Praveen Kumar
            "4490559",  # intboolstring
            "1364007",  # Wai Ha Lee
            "1699210",  # bummi
            "563532",  # Rob
            "5389107",  # Magisch
            "4099593",  # bhargav-rao
            "1542723",  # Ferrybig
            "2025923",  # Tushar
            "5292302",  # Petter Friberg
            "792066",  # Braiam
            "5666987",  # Ian
            "3160466",  # ArtOfCode
            "4688119",  # Ashish Ahuja
            "3476191",  # Nobody Nada
            "2227743",  # Eric D
            "821878",  # Ryan Bemrose
            "1413395",  # Panta Rei
            "4875631",  # FrankerZ
            "2958086",  # Compass
            "499214",  # JanDvorak
            "5647260",  # Andrew L.
            "559745",  # Floern
            "5743988",  # 4castle
            "4622463",  # angussidney
            "603346",  # Thomas Ward
            "3002139",  # Baum mit Augen
            "1863564",  # QPaysTaxes
            "4687348",  # FelixSFD
            "4751173",  # Glorfindel
            "2233391",  # henders
            "4805174",  # kayess
            "2370483",  # Machavity
            "1873567",  # CalvT
            "4826457",  # suraj
            "8242698",  # user0042
            "3773011",  # Makyen
            "2858773"  # Ajay Brahmakshatriya
        ],
        '111347': [  # SOBotics
            "3160466",  # ArtOfCode
            "1849664",  # Undo
            "3002139",  # Baum mit Augen
            "3476191",  # Nobody Nada
            "5292302",  # Petter Friberg
            "4688119",  # Ashish Ahuja
            "4099593",  # Bhargav Rao
            "1743880",  # Tunaki
            "559745",  # Floern
            "4687348",  # FelixSFD
            "6375113",  # Bugs
            "4622463",  # angussidney
            "563532",  # Rob
            "4050842",  # Thaillie
            "1915448"  # g00glen00b
        ]
    }
    code_privileged_users = None
    # The bot's own chat user id per room, used to recognise its own messages.
    smokeDetector_user_id = {charcoal_room_id: "120914", meta_tavern_room_id: "266345",
                             socvr_room_id: "3735529", '111347': '3735529'}
    censored_committer_names = {"3f4ed0f38df010ce300dba362fa63a62": "Undo1"}
    commit = git_commit_info()
    # Replace the author with a pseudonym when the md5 of the name is censored.
    if md5(commit['author'][0].encode('utf-8')).hexdigest() in censored_committer_names:
        commit['author'] = censored_committer_names[md5(commit['author'][0].encode('utf-8')).hexdigest()]
    commit_with_author = "%s (%s: *%s*)" % (commit['id'],
                                            commit['author'][0] if type(commit['author']) in [list, tuple]
                                            else commit['author'],
                                            commit['message'])
    on_master = "HEAD detached" not in git_status()
    # Room handles and mutable runtime bookkeeping, filled in after startup.
    charcoal_hq = None
    tavern_on_the_meta = None
    socvr = None
    s = ""
    s_reverted = ""
    specialrooms = []
    apiquota = -1
    bodyfetcher = None
    se_sites = []
    users_chatting = {meta_tavern_room_id: [], charcoal_room_id: [], socvr_room_id: [], '111347': []}
    why_data = []
    why_data_allspam = []
    notifications = []
    listen_to_these_if_edited = []
    multiple_reporters = []
    api_calls_per_site = {}
    standby_message = ""
    standby_mode = False
    api_request_lock = threading.Lock()
    num_posts_scanned = 0
    post_scan_time = 0
    posts_scan_stats_lock = threading.Lock()
    # Read the local config file; fall back to the CI config when absent.
    config = RawConfigParser()
    if os.path.isfile('config'):
        config.read('config')
    else:
        config.read('config.ci')
    latest_smokedetector_messages = {meta_tavern_room_id: [], charcoal_room_id: [], socvr_room_id: [],
                                     '111347': []}
    # environ_or_none defined in helpers.py
    bot_name = environ_or_none("SMOKEDETECTOR_NAME") or "SmokeDetector"
    bot_repository = environ_or_none("SMOKEDETECTOR_REPO") or "//github.com/Charcoal-SE/SmokeDetector"
    chatmessage_prefix = "[{}]({})".format(bot_name, bot_repository)
    site_id_dict = {}
    post_site_id_to_question = {}
    location = config.get("Config", "location")
    metasmoke_ws = None
    # Optional metasmoke / GitHub / Perspective settings; each one is simply
    # absent (None or empty) when not present in the config file.
    try:
        metasmoke_host = config.get("Config", "metasmoke_host")
    except NoOptionError:
        metasmoke_host = None
        log('info', "metasmoke host not found. Set it as metasmoke_host in the config file."
            "See https://github.com/Charcoal-SE/metasmoke.")
    try:
        metasmoke_key = config.get("Config", "metasmoke_key")
    except NoOptionError:
        metasmoke_key = ""
        log('info', "No metasmoke key found, which is okay if both are running on the same host")
    try:
        metasmoke_ws_host = config.get("Config", "metasmoke_ws_host")
    except NoOptionError:
        metasmoke_ws_host = ""
        log('info', "No metasmoke websocket host found, which is okay if you're anti-websocket")
    try:
        github_username = config.get("Config", "github_username")
        github_password = config.get("Config", "github_password")
    except NoOptionError:
        github_username = None
        github_password = None
    try:
        perspective_key = config.get("Config", "perspective_key")
    except NoOptionError:
        perspective_key = None
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an SQS Queue
"""
import xml.sax
import urlparse
from boto.exception import SQSError
from boto.handler import XmlHandler
from boto.sqs.message import Message
from boto.resultset import ResultSet
class Queue:
def __init__(self, connection=None, url=None, message_class=Message):
self.connection = connection
self.url = url
self.message_class = message_class
self.visibility_timeout = None
def _id(self):
if self.url:
val = urlparse.urlparse(self.url)[2]
else:
val = self.url
return val
id = property(_id)
def _name(self):
if self.url:
val = urlparse.urlparse(self.url)[2].split('/')[2]
else:
val = self.url
return val
name = property(_name)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'QueueUrl':
self.url = value
elif name == 'VisibilityTimeout':
self.visibility_timeout = int(value)
else:
setattr(self, name, value)
def set_message_class(self, message_class):
"""
Set the message class that should be used when instantiating messages read
from the queue. By default, the class boto.sqs.message.Message is used but
this can be overriden with any class that behaves like a message.
:type message_class: Message-like class
:param message_class: The new Message class
"""
self.message_class = message_class
def get_attributes(self, attributes='All'):
"""
Retrieves attributes about this queue object and returns
them in an Attribute instance (subclass of a Dictionary).
:type attributes: string
:param attributes: String containing one of:
ApproximateNumberOfMessages,
ApproximateNumberOfMessagesNotVisible,
VisibilityTimeout,
CreatedTimestamp,
LastModifiedTimestamp,
Policy
:rtype: Attribute object
:return: An Attribute object which is a mapping type holding the
requested name/value pairs
"""
return self.connection.get_queue_attributes(self, attributes)
def set_attribute(self, attribute, value):
"""
Set a new value for an attribute of the Queue.
:type attribute: String
:param attribute: The name of the attribute you want to set. The
only valid value at this time is: VisibilityTimeout
:type value: int
:param value: The new value for the attribute.
For VisibilityTimeout the value must be an
integer number of seconds from 0 to 86400.
:rtype: bool
:return: True if successful, otherwise False.
"""
return self.connection.set_queue_attribute(self, attribute, value)
def get_timeout(self):
"""
Get the visibility timeout for the queue.
:rtype: int
:return: The number of seconds as an integer.
"""
a = self.get_attributes('VisibilityTimeout')
return int(a['VisibilityTimeout'])
def set_timeout(self, visibility_timeout):
"""
Set the visibility timeout for the queue.
:type visibility_timeout: int
:param visibility_timeout: The desired timeout in seconds
"""
retval = self.set_attribute('VisibilityTimeout', visibility_timeout)
if retval:
self.visibility_timeout = visibility_timeout
return retval
def add_permission(self, label, aws_account_id, action_name):
"""
Add a permission to a queue.
:type label: str or unicode
:param label: A unique identification of the permission you are setting.
Maximum of 80 characters ``[0-9a-zA-Z_-]``
Example, AliceSendMessage
:type aws_account_id: str or unicode
:param principal_id: The AWS account number of the principal who will be given
permission. The principal must have an AWS account, but
does not need to be signed up for Amazon SQS. For information
about locating the AWS account identification.
:type action_name: str or unicode
:param action_name: The action. Valid choices are:
\*|SendMessage|ReceiveMessage|DeleteMessage|
ChangeMessageVisibility|GetQueueAttributes
:rtype: bool
:return: True if successful, False otherwise.
"""
return self.connection.add_permission(self, label, aws_account_id, action_name)
def remove_permission(self, label):
"""
Remove a permission from a queue.
:type label: str or unicode
:param label: The unique label associated with the permission being removed.
:rtype: bool
:return: True if successful, False otherwise.
"""
return self.connection.remove_permission(self, label)
def read(self, visibility_timeout=None):
"""
Read a single message from the queue.
:type visibility_timeout: int
:param visibility_timeout: The timeout for this message in seconds
:rtype: :class:`boto.sqs.message.Message`
:return: A single message or None if queue is empty
"""
rs = self.get_messages(1, visibility_timeout)
if len(rs) == 1:
return rs[0]
else:
return None
def write(self, message):
"""
Add a single message to the queue.
:type message: Message
:param message: The message to be written to the queue
:rtype: :class:`boto.sqs.message.Message`
:return: The :class:`boto.sqs.message.Message` object that was written.
"""
new_msg = self.connection.send_message(self, message.get_body_encoded())
message.id = new_msg.id
message.md5 = new_msg.md5
return message
def new_message(self, body=''):
"""
Create new message of appropriate class.
:type body: message body
:param body: The body of the newly created message (optional).
:rtype: :class:`boto.sqs.message.Message`
:return: A new Message object
"""
m = self.message_class(self, body)
m.queue = self
return m
# get a variable number of messages, returns a list of messages
def get_messages(self, num_messages=1, visibility_timeout=None,
attributes=None):
"""
Get a variable number of messages.
:type num_messages: int
:param num_messages: The maximum number of messages to read from the queue.
:type visibility_timeout: int
:param visibility_timeout: The VisibilityTimeout for the messages read.
:type attributes: list of strings
:param attributes: A list of additional attributes that will be returned
with the response. Valid values:
All
SenderId
SentTimestamp
ApproximateReceiveCount
ApproximateFirstReceiveTimestamp
:rtype: list
:return: A list of :class:`boto.sqs.message.Message` objects.
"""
return self.connection.receive_message(self, number_messages=num_messages,
visibility_timeout=visibility_timeout,
attributes=attributes)
def delete_message(self, message):
"""
Delete a message from the queue.
:type message: :class:`boto.sqs.message.Message`
:param message: The :class:`boto.sqs.message.Message` object to delete.
:rtype: bool
:return: True if successful, False otherwise
"""
return self.connection.delete_message(self, message)
def delete(self):
"""
Delete the queue.
"""
return self.connection.delete_queue(self)
def clear(self, page_size=10, vtimeout=10):
"""Utility function to remove all messages from a queue"""
n = 0
l = self.get_messages(page_size, vtimeout)
while l:
for m in l:
self.delete_message(m)
n += 1
l = self.get_messages(page_size, vtimeout)
return n
def count(self, page_size=10, vtimeout=10):
"""
Utility function to count the number of messages in a queue.
Note: This function now calls GetQueueAttributes to obtain
an 'approximate' count of the number of messages in a queue.
"""
a = self.get_attributes('ApproximateNumberOfMessages')
return int(a['ApproximateNumberOfMessages'])
def count_slow(self, page_size=10, vtimeout=10):
"""
Deprecated. This is the old 'count' method that actually counts
the messages by reading them all. This gives an accurate count but
is very slow for queues with non-trivial number of messasges.
Instead, use get_attribute('ApproximateNumberOfMessages') to take
advantage of the new SQS capability. This is retained only for
the unit tests.
"""
n = 0
l = self.get_messages(page_size, vtimeout)
while l:
for m in l:
n += 1
l = self.get_messages(page_size, vtimeout)
return n
def dump_(self, file_name, page_size=10, vtimeout=10, sep='\n'):
"""Utility function to dump the messages in a queue to a file
NOTE: Page size must be < 10 else SQS errors"""
fp = open(file_name, 'wb')
n = 0
l = self.get_messages(page_size, vtimeout)
while l:
for m in l:
fp.write(m.get_body())
if sep:
fp.write(sep)
n += 1
l = self.get_messages(page_size, vtimeout)
fp.close()
return n
def save_to_file(self, fp, sep='\n'):
"""
Read all messages from the queue and persist them to file-like object.
Messages are written to the file and the 'sep' string is written
in between messages. Messages are deleted from the queue after
being written to the file.
Returns the number of messages saved.
"""
n = 0
m = self.read()
while m:
n += 1
fp.write(m.get_body())
if sep:
fp.write(sep)
self.delete_message(m)
m = self.read()
return n
def save_to_filename(self, file_name, sep='\n'):
"""
Read all messages from the queue and persist them to local file.
Messages are written to the file and the 'sep' string is written
in between messages. Messages are deleted from the queue after
being written to the file.
Returns the number of messages saved.
"""
fp = open(file_name, 'wb')
n = self.save_to_file(fp, sep)
fp.close()
return n
# for backwards compatibility
save = save_to_filename
def save_to_s3(self, bucket):
"""
Read all messages from the queue and persist them to S3.
Messages are stored in the S3 bucket using a naming scheme of::
<queue_id>/<message_id>
Messages are deleted from the queue after being saved to S3.
Returns the number of messages saved.
"""
n = 0
m = self.read()
while m:
n += 1
key = bucket.new_key('%s/%s' % (self.id, m.id))
key.set_contents_from_string(m.get_body())
self.delete_message(m)
m = self.read()
return n
def load_from_s3(self, bucket, prefix=None):
"""
Load messages previously saved to S3.
"""
n = 0
if prefix:
prefix = '%s/' % prefix
else:
prefix = '%s/' % self.id
rs = bucket.list(prefix=prefix)
for key in rs:
n += 1
m = self.new_message(key.get_contents_as_string())
self.write(m)
return n
def load_from_file(self, fp, sep='\n'):
"""Utility function to load messages from a file-like object to a queue"""
n = 0
body = ''
l = fp.readline()
while l:
if l == sep:
m = Message(self, body)
self.write(m)
n += 1
print 'writing message %d' % n
body = ''
else:
body = body + l
l = fp.readline()
return n
def load_from_filename(self, file_name, sep='\n'):
"""Utility function to load messages from a local filename to a queue"""
fp = open(file_name, 'rb')
n = self.load_file_file(fp, sep)
fp.close()
return n
# for backward compatibility
load = load_from_filename
Fixing a bug in save_to_s3/load_from_s3. Fixes issue 309.
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an SQS Queue
"""
import xml.sax
import urlparse
from boto.exception import SQSError
from boto.handler import XmlHandler
from boto.sqs.message import Message
from boto.resultset import ResultSet
class Queue:
def __init__(self, connection=None, url=None, message_class=Message):
self.connection = connection
self.url = url
self.message_class = message_class
self.visibility_timeout = None
def _id(self):
if self.url:
val = urlparse.urlparse(self.url)[2]
else:
val = self.url
return val
id = property(_id)
def _name(self):
if self.url:
val = urlparse.urlparse(self.url)[2].split('/')[2]
else:
val = self.url
return val
name = property(_name)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'QueueUrl':
self.url = value
elif name == 'VisibilityTimeout':
self.visibility_timeout = int(value)
else:
setattr(self, name, value)
def set_message_class(self, message_class):
"""
Set the message class that should be used when instantiating messages read
from the queue. By default, the class boto.sqs.message.Message is used but
this can be overriden with any class that behaves like a message.
:type message_class: Message-like class
:param message_class: The new Message class
"""
self.message_class = message_class
def get_attributes(self, attributes='All'):
"""
Retrieves attributes about this queue object and returns
them in an Attribute instance (subclass of a Dictionary).
:type attributes: string
:param attributes: String containing one of:
ApproximateNumberOfMessages,
ApproximateNumberOfMessagesNotVisible,
VisibilityTimeout,
CreatedTimestamp,
LastModifiedTimestamp,
Policy
:rtype: Attribute object
:return: An Attribute object which is a mapping type holding the
requested name/value pairs
"""
return self.connection.get_queue_attributes(self, attributes)
def set_attribute(self, attribute, value):
"""
Set a new value for an attribute of the Queue.
:type attribute: String
:param attribute: The name of the attribute you want to set. The
only valid value at this time is: VisibilityTimeout
:type value: int
:param value: The new value for the attribute.
For VisibilityTimeout the value must be an
integer number of seconds from 0 to 86400.
:rtype: bool
:return: True if successful, otherwise False.
"""
return self.connection.set_queue_attribute(self, attribute, value)
def get_timeout(self):
"""
Get the visibility timeout for the queue.
:rtype: int
:return: The number of seconds as an integer.
"""
a = self.get_attributes('VisibilityTimeout')
return int(a['VisibilityTimeout'])
def set_timeout(self, visibility_timeout):
"""
Set the visibility timeout for the queue.
:type visibility_timeout: int
:param visibility_timeout: The desired timeout in seconds
"""
retval = self.set_attribute('VisibilityTimeout', visibility_timeout)
if retval:
self.visibility_timeout = visibility_timeout
return retval
def add_permission(self, label, aws_account_id, action_name):
"""
Add a permission to a queue.
:type label: str or unicode
:param label: A unique identification of the permission you are setting.
Maximum of 80 characters ``[0-9a-zA-Z_-]``
Example, AliceSendMessage
:type aws_account_id: str or unicode
:param principal_id: The AWS account number of the principal who will be given
permission. The principal must have an AWS account, but
does not need to be signed up for Amazon SQS. For information
about locating the AWS account identification.
:type action_name: str or unicode
:param action_name: The action. Valid choices are:
\*|SendMessage|ReceiveMessage|DeleteMessage|
ChangeMessageVisibility|GetQueueAttributes
:rtype: bool
:return: True if successful, False otherwise.
"""
return self.connection.add_permission(self, label, aws_account_id, action_name)
def remove_permission(self, label):
"""
Remove a permission from a queue.
:type label: str or unicode
:param label: The unique label associated with the permission being removed.
:rtype: bool
:return: True if successful, False otherwise.
"""
return self.connection.remove_permission(self, label)
def read(self, visibility_timeout=None):
"""
Read a single message from the queue.
:type visibility_timeout: int
:param visibility_timeout: The timeout for this message in seconds
:rtype: :class:`boto.sqs.message.Message`
:return: A single message or None if queue is empty
"""
rs = self.get_messages(1, visibility_timeout)
if len(rs) == 1:
return rs[0]
else:
return None
def write(self, message):
"""
Add a single message to the queue.
:type message: Message
:param message: The message to be written to the queue
:rtype: :class:`boto.sqs.message.Message`
:return: The :class:`boto.sqs.message.Message` object that was written.
"""
new_msg = self.connection.send_message(self, message.get_body_encoded())
message.id = new_msg.id
message.md5 = new_msg.md5
return message
def new_message(self, body=''):
"""
Create new message of appropriate class.
:type body: message body
:param body: The body of the newly created message (optional).
:rtype: :class:`boto.sqs.message.Message`
:return: A new Message object
"""
m = self.message_class(self, body)
m.queue = self
return m
# get a variable number of messages, returns a list of messages
def get_messages(self, num_messages=1, visibility_timeout=None,
attributes=None):
"""
Get a variable number of messages.
:type num_messages: int
:param num_messages: The maximum number of messages to read from the queue.
:type visibility_timeout: int
:param visibility_timeout: The VisibilityTimeout for the messages read.
:type attributes: list of strings
:param attributes: A list of additional attributes that will be returned
with the response. Valid values:
All
SenderId
SentTimestamp
ApproximateReceiveCount
ApproximateFirstReceiveTimestamp
:rtype: list
:return: A list of :class:`boto.sqs.message.Message` objects.
"""
return self.connection.receive_message(self, number_messages=num_messages,
visibility_timeout=visibility_timeout,
attributes=attributes)
def delete_message(self, message):
"""
Delete a message from the queue.
:type message: :class:`boto.sqs.message.Message`
:param message: The :class:`boto.sqs.message.Message` object to delete.
:rtype: bool
:return: True if successful, False otherwise
"""
return self.connection.delete_message(self, message)
def delete(self):
"""
Delete the queue.
"""
return self.connection.delete_queue(self)
def clear(self, page_size=10, vtimeout=10):
"""Utility function to remove all messages from a queue"""
n = 0
l = self.get_messages(page_size, vtimeout)
while l:
for m in l:
self.delete_message(m)
n += 1
l = self.get_messages(page_size, vtimeout)
return n
def count(self, page_size=10, vtimeout=10):
"""
Utility function to count the number of messages in a queue.
Note: This function now calls GetQueueAttributes to obtain
an 'approximate' count of the number of messages in a queue.
"""
a = self.get_attributes('ApproximateNumberOfMessages')
return int(a['ApproximateNumberOfMessages'])
def count_slow(self, page_size=10, vtimeout=10):
"""
Deprecated. This is the old 'count' method that actually counts
the messages by reading them all. This gives an accurate count but
is very slow for queues with non-trivial number of messasges.
Instead, use get_attribute('ApproximateNumberOfMessages') to take
advantage of the new SQS capability. This is retained only for
the unit tests.
"""
n = 0
l = self.get_messages(page_size, vtimeout)
while l:
for m in l:
n += 1
l = self.get_messages(page_size, vtimeout)
return n
def dump_(self, file_name, page_size=10, vtimeout=10, sep='\n'):
"""Utility function to dump the messages in a queue to a file
NOTE: Page size must be < 10 else SQS errors"""
fp = open(file_name, 'wb')
n = 0
l = self.get_messages(page_size, vtimeout)
while l:
for m in l:
fp.write(m.get_body())
if sep:
fp.write(sep)
n += 1
l = self.get_messages(page_size, vtimeout)
fp.close()
return n
def save_to_file(self, fp, sep='\n'):
"""
Read all messages from the queue and persist them to file-like object.
Messages are written to the file and the 'sep' string is written
in between messages. Messages are deleted from the queue after
being written to the file.
Returns the number of messages saved.
"""
n = 0
m = self.read()
while m:
n += 1
fp.write(m.get_body())
if sep:
fp.write(sep)
self.delete_message(m)
m = self.read()
return n
def save_to_filename(self, file_name, sep='\n'):
"""
Read all messages from the queue and persist them to local file.
Messages are written to the file and the 'sep' string is written
in between messages. Messages are deleted from the queue after
being written to the file.
Returns the number of messages saved.
"""
fp = open(file_name, 'wb')
n = self.save_to_file(fp, sep)
fp.close()
return n
# for backwards compatibility
save = save_to_filename
def save_to_s3(self, bucket):
"""
Read all messages from the queue and persist them to S3.
Messages are stored in the S3 bucket using a naming scheme of::
<queue_id>/<message_id>
Messages are deleted from the queue after being saved to S3.
Returns the number of messages saved.
"""
n = 0
m = self.read()
while m:
n += 1
key = bucket.new_key('%s/%s' % (self.id, m.id))
key.set_contents_from_string(m.get_body())
self.delete_message(m)
m = self.read()
return n
def load_from_s3(self, bucket, prefix=None):
"""
Load messages previously saved to S3.
"""
n = 0
if prefix:
prefix = '%s/' % prefix
else:
prefix = '%s/' % self.id[1:]
rs = bucket.list(prefix=prefix)
for key in rs:
n += 1
m = self.new_message(key.get_contents_as_string())
self.write(m)
return n
def load_from_file(self, fp, sep='\n'):
    """Utility function to load messages from a file-like object to a queue.

    Lines are accumulated into a message body until a line equal to
    ``sep`` is seen, at which point the body is written to the queue.
    NOTE: any trailing body not followed by a ``sep`` line is dropped,
    matching the format produced by ``save_to_file``.
    Returns the number of messages written.
    """
    n = 0
    body = ''
    l = fp.readline()
    while l:
        if l == sep:
            m = Message(self, body)
            self.write(m)
            n += 1
            # FIX: parenthesized print works on both Python 2 and 3
            # (the original used the py2-only print statement).
            print('writing message %d' % n)
            body = ''
        else:
            body = body + l
        l = fp.readline()
    return n
def load_from_filename(self, file_name, sep='\n'):
    """Utility function to load messages from a local filename to a queue.

    Opens ``file_name`` and delegates to :meth:`load_from_file`.
    Returns the number of messages written.
    """
    # BUG FIX: the original called the nonexistent ``self.load_file_file``,
    # which raised AttributeError on every call.
    with open(file_name, 'rb') as fp:
        n = self.load_from_file(fp, sep)
    return n

# for backward compatibility
load = load_from_filename
|
import os.path
from datetime import datetime
from flask import json
from flaskext.sqlalchemy import SQLAlchemy
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
db = SQLAlchemy()
class User(db.Model):
    """Authenticated user, identified by an OpenID URL."""
    id = db.Column(db.Integer, primary_key=True)
    openid_url = db.Column(db.Text())  # OpenID identity URL (login key)
    name = db.Column(db.Text())
    email = db.Column(db.Text())
class Person(db.Model):
    """A person record; extra attributes live in Property rows."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text())
class Property(db.Model):
    """A name/value attribute attached to a Person."""
    id = db.Column(db.Integer, primary_key=True)
    person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    person = db.relationship('Person',
        backref=db.backref('properties', lazy='dynamic'))
    name = db.Column(db.String(30))
    value = db.Column(db.Text())
def get_persons():
    """Return ``{person_id: {'name': ..., <prop>: <value>, ...}}``
    for every Person, merging in all of their Property rows."""
    results = {}
    for person in Person.query.all():
        data = {'name': person.name}
        for prop in person.properties.all():
            data[prop.name] = prop.value
        results[person.id] = data
    return results
def import_fixture(flush=True):
    """Load ``data/fixture.json`` into the database.

    When ``flush`` is true the schema is dropped and re-created first.
    Each record becomes a Person ('id'/'name' keys) plus one Property
    row per remaining key.
    """
    data_path = os.path.join(os.path.dirname(__file__), 'data')
    fixture_path = os.path.join(data_path, 'fixture.json')
    if flush:
        db.drop_all()
        db.create_all()
    with open(fixture_path, 'rb') as f:
        fixture = json.load(f)
    for person_data in fixture:
        # pop() removes the Person columns, leaving only property keys
        person = Person(id=person_data.pop('id'), name=person_data.pop('name'))
        db.session.add(person)
        for key in person_data:
            prop = Property(person=person, name=key, value=person_data[key])
            db.session.add(prop)
    db.session.commit()
def import_senators():
    """Load ``data/senatori_email.json`` into the database.

    Each record becomes a Person; only the first listed email (if any)
    is stored, as an 'email' Property.
    """
    data_path = os.path.join(os.path.dirname(__file__), 'data')
    senators_path = os.path.join(data_path, 'senatori_email.json')
    with open(senators_path, 'rb') as f:
        senatori = json.load(f)
    for person_data in senatori:
        person = Person(name=person_data['name'])
        db.session.add(person)
        emails = person_data['emails']
        if emails:
            prop = Property(person=person, name='email', value=emails[0])
            db.session.add(prop)
    db.session.commit()
def get_user(openid_url):
    """Return the User with the given OpenID URL, or None if unknown."""
    return User.query.filter_by(openid_url=openid_url).first()
def get_update_user(openid_url, name, email):
    """Return the user for ``openid_url``, creating or refreshing it.

    A missing user is created; name/email are written and committed
    only when they differ from the stored values.
    """
    user = get_user(openid_url)
    if user is None:
        user = User(openid_url=openid_url)
    if (name, email) != (user.name, user.email):
        user.name = name
        user.email = email
        db.session.add(user)
        db.session.commit()
    return user
Add a new ContentVersion table for versioned person content.
import os.path
from datetime import datetime
from flask import json
from flaskext.sqlalchemy import SQLAlchemy
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
db = SQLAlchemy()
class User(db.Model):
    """Authenticated user, identified by an OpenID URL."""
    id = db.Column(db.Integer, primary_key=True)
    openid_url = db.Column(db.Text())  # OpenID identity URL (login key)
    name = db.Column(db.Text())
    email = db.Column(db.Text())
class Person(db.Model):
    """A person record with versioned JSON content (ContentVersion)."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text())

    def get_content(self):
        """Return the most recent ContentVersion decoded from JSON,
        or ``{}`` when no version exists yet."""
        version = self.versions.order_by(ContentVersion.time.desc()).first()
        return {} if version is None else json.loads(version.content)
class ContentVersion(db.Model):
    """One saved snapshot of a Person's JSON content."""
    id = db.Column(db.Integer, primary_key=True)
    person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    person = db.relationship('Person',
        backref=db.backref('versions', lazy='dynamic'))
    content = db.Column(db.LargeBinary)  # JSON-encoded content blob
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship('User')  # author of this version
    time = db.Column(db.DateTime)  # when the version was saved
class Property(db.Model):
    """A name/value attribute attached to a Person."""
    id = db.Column(db.Integer, primary_key=True)
    person_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    person = db.relationship('Person',
        backref=db.backref('properties', lazy='dynamic'))
    name = db.Column(db.String(30))
    value = db.Column(db.Text())
def get_persons():
    """Return ``{person_id: {'name': ..., <prop>: <value>, ...}}``
    for every Person, merging in all of their Property rows."""
    results = {}
    for person in Person.query.all():
        data = {'name': person.name}
        for prop in person.properties.all():
            data[prop.name] = prop.value
        results[person.id] = data
    return results
def import_fixture(flush=True):
    """Load ``data/fixture.json`` into the database.

    When ``flush`` is true the schema is dropped and re-created first.
    Each record becomes a Person ('id'/'name' keys) plus one Property
    row per remaining key.
    """
    data_path = os.path.join(os.path.dirname(__file__), 'data')
    fixture_path = os.path.join(data_path, 'fixture.json')
    if flush:
        db.drop_all()
        db.create_all()
    with open(fixture_path, 'rb') as f:
        fixture = json.load(f)
    for person_data in fixture:
        # pop() removes the Person columns, leaving only property keys
        person = Person(id=person_data.pop('id'), name=person_data.pop('name'))
        db.session.add(person)
        for key in person_data:
            prop = Property(person=person, name=key, value=person_data[key])
            db.session.add(prop)
    db.session.commit()
def import_senators():
    """Load ``data/senatori_email.json`` into the database.

    Each record becomes a Person; only the first listed email (if any)
    is stored, as an 'email' Property.
    """
    data_path = os.path.join(os.path.dirname(__file__), 'data')
    senators_path = os.path.join(data_path, 'senatori_email.json')
    with open(senators_path, 'rb') as f:
        senatori = json.load(f)
    for person_data in senatori:
        person = Person(name=person_data['name'])
        db.session.add(person)
        emails = person_data['emails']
        if emails:
            prop = Property(person=person, name='email', value=emails[0])
            db.session.add(prop)
    db.session.commit()
def get_user(openid_url):
    """Return the User with the given OpenID URL, or None if unknown."""
    return User.query.filter_by(openid_url=openid_url).first()
def get_update_user(openid_url, name, email):
    """Return the user for ``openid_url``, creating or refreshing it.

    A missing user is created; name/email are written and committed
    only when they differ from the stored values.
    """
    user = get_user(openid_url)
    if user is None:
        user = User(openid_url=openid_url)
    if (user.name, user.email) != (name, email):
        user.name = name
        user.email = email
        db.session.add(user)
        db.session.commit()
    return user
|
Do not mix byte strings with the unicode type.
|
# Package version metadata: semantic version and build identifier
# (commit hash plus build date).
__version__ = '1.1.8'
__build__ = '16352a8-2022-05-09'
Bumped version to 1.1.9
- Not planned for release; this is a work-in-progress version.
# Package version metadata: semantic version and build identifier
# (commit hash plus build date).
__version__ = '1.1.9'
__build__ = '1acbdb8-2022-05-09'
|
import numpy as np
import SimPEG
from SimPEG import Utils, Mesh
from . import SrcDC as Src # Pole
from . import RxDC as Rx
from .SurveyDC import Survey, Survey_ky
import matplotlib.pyplot as plt
import matplotlib
class IO(object):
    """Input and Output for DC resistivity data.

    Holds electrode locations (A, B: current; M, N: potential), the
    observed data and derived quantities, and builds SimPEG survey and
    mesh objects from tabular ABMN data.
    """
    # TODO: use properties
    Alocs = None            # (ndata, dim) A-electrode locations
    Blocs = None            # (ndata, dim) B-electrode locations
    Mlocs = None            # (ndata, dim) M-electrode locations
    Nlocs = None            # (ndata, dim) N-electrode locations
    uniqElecLocs = None     # unique electrode locations
    geometry = "SURFACE"    # "SURFACE" or "BOREHOLE"
    dataType = 'volt'  # "volt" and "appResistivity"
    topoFunc = None
    mesh = None
    V = None                # voltages
    appResistivity = None   # apparent resistivities
    dobs = None             # observed data, sorted by source
    grids = None            # pseudo-section plotting locations
    G = None                # geometric factors
    dx = None
    dy = None
    dz = None
    npadx = 5
    npady = 5
    npadz = 5
    padratex = 1.3
    padratey = 1.3
    padratez = 1.3
    ncellperdipole = 4

    def fromABMN_to_survey(self, A, B, M, N, surveyType, dobs=None, dataType="volt", fname=None, dim=2):
        """
        Read ABMN locations and data (V or appResistivity) and return a
        SimPEG survey object.  Rows are re-sorted so data sharing a
        source are contiguous; ``self.sortinds`` records the permutation.
        """
        self.Alocs = A.copy()
        self.Blocs = B.copy()
        self.Mlocs = M.copy()
        self.Nlocs = N.copy()
        self.surveyType = surveyType
        self.dataType = dataType
        self.dim = dim
        uniqSrc = Utils.uniqueRows(np.c_[self.Alocs, self.Blocs])
        uniqElec = SimPEG.Utils.uniqueRows(
            np.vstack((self.Alocs, self.Blocs, self.Mlocs, self.Nlocs))
        )
        self.uniqElecLocs = uniqElec[0]
        nSrc = uniqSrc[0].shape[0]
        ndata = self.Alocs.shape[0]
        if dim == 2:
            srcLists = []
            sortinds = []
            for iSrc in range(nSrc):
                inds = uniqSrc[2] == iSrc
                sortinds.append(np.arange(ndata)[inds])
                locsM = self.Mlocs[inds, :]
                locsN = self.Nlocs[inds, :]
                # receiver type follows the second half of the survey name
                if (surveyType == 'dipole-dipole') or (surveyType == 'pole-dipole'):
                    rx = Rx.Dipole_ky(locsM, locsN)
                elif (surveyType == 'dipole-pole') or (surveyType == 'pole-pole'):
                    rx = Rx.Pole_ky(locsM)
                locA = uniqSrc[0][iSrc, :2]
                locB = uniqSrc[0][iSrc, 2:]
                # source type follows the first half of the survey name
                if (surveyType == 'dipole-dipole') or (surveyType == 'dipole-pole'):
                    src = Src.Dipole([rx], locA, locB)
                elif (surveyType == 'pole-dipole') or (surveyType == 'pole-pole'):
                    src = Src.Pole([rx], locA)
                srcLists.append(src)
            self.sortinds = np.hstack(sortinds)
            survey = Survey_ky(srcLists)
            self.Alocs = self.Alocs[self.sortinds, :]
            self.Blocs = self.Blocs[self.sortinds, :]
            self.Mlocs = self.Mlocs[self.sortinds, :]
            self.Nlocs = self.Nlocs[self.sortinds, :]
            # FIX: dobs is optional; the original indexed it
            # unconditionally and crashed on the default None.
            if dobs is not None:
                self.dobs = dobs[self.sortinds]
                if self.dataType == "volt":
                    self.V = self.dobs.copy()
                    G = self.getGeometricFactor()
                    self.appResistivity = self.V / G
                elif self.dataType == "appResistivity":
                    G = self.getGeometricFactor()
                    self.appResistivity = self.dobs.copy()
                    # FIX: use the freshly computed local G (was self.G)
                    self.V = self.appResistivity * G
                    self.dobs = self.V.copy()
            # Pseudo-section locations: x at the AB/MN midpoint, z at
            # half the midpoint separation (pseudo-depth).
            midAB = (self.Alocs[:, 0] + self.Blocs[:, 0])*0.5
            midMN = (self.Mlocs[:, 0] + self.Nlocs[:, 0])*0.5
            z = abs(midAB-midMN)*0.5
            x = (midAB+midMN)*0.5
            self.grids = np.c_[x, z]
        else:
            raise NotImplementedError()
        return survey

    def getGeometricFactor(self):
        """Compute, store and return the geometric factors ``self.G``."""
        if self.geometry == 'SURFACE':
            if self.dim == 2:
                MA = abs(self.Alocs[:, 0] - self.Mlocs[:, 0])
                MB = abs(self.Blocs[:, 0] - self.Mlocs[:, 0])
                NA = abs(self.Alocs[:, 0] - self.Nlocs[:, 0])
                NB = abs(self.Blocs[:, 0] - self.Nlocs[:, 0])
            elif self.dim == 3:
                MA = np.sqrt(
                    abs(self.Alocs[:, 0] - self.Mlocs[:, 0])**2. +
                    abs(self.Alocs[:, 1] - self.Mlocs[:, 1])**2.
                )
                MB = np.sqrt(
                    abs(self.Blocs[:, 0] - self.Mlocs[:, 0])**2. +
                    abs(self.Blocs[:, 1] - self.Mlocs[:, 1])**2.
                )
                NA = np.sqrt(
                    abs(self.Alocs[:, 0] - self.Nlocs[:, 0])**2. +
                    abs(self.Alocs[:, 1] - self.Nlocs[:, 1])**2.
                )
                NB = np.sqrt(
                    abs(self.Blocs[:, 0] - self.Nlocs[:, 0])**2. +
                    abs(self.Blocs[:, 1] - self.Nlocs[:, 1])**2.
                )
            # BUG FIX: the original referenced the bare name `surveyType`
            # in the elif branches (NameError); use self.surveyType.
            if self.surveyType == 'dipole-dipole':
                self.G = 1./(2*np.pi) * (1./MA - 1./MB + 1./NB - 1./NA)
            elif self.surveyType == 'pole-dipole':
                self.G = 1./(2*np.pi) * (1./MA - 1./NA)
            elif self.surveyType == 'dipole-pole':
                self.G = 1./(2*np.pi) * (1./MA - 1./MB)
            elif self.surveyType == 'pole-pole':
                self.G = 1./(2*np.pi) * (1./MA)
        elif self.geometry == 'BOREHOLE':
            raise NotImplementedError()
        return self.G

    def setMesh(self, topo=None, dx=None, dz=None, nSpacing=None, corezlength=None, npadx=7, npadz=7, padratex=1.3, padratez=1.3, ncellperdipole=4, meshType='TensorMesh', dim=2):
        """
        Design a 2D tensor mesh from the electrode spacing.

        Cell size defaults to 1/``ncellperdipole`` of the smallest
        electrode separation; the core region covers the line plus a
        3-cell buffer, padded outward by ``padratex``/``padratez``.
        Returns ``(mesh, actind)`` with ``actind`` flagging cells below
        the topography.
        """
        if meshType == 'TreeMesh':
            raise NotImplementedError()
        if dim == 2:
            # smallest electrode separation sets the ideal cell size
            a = abs(np.diff(np.sort(self.uniqElecLocs[:, 0]))).min()
            lineLength = abs(self.uniqElecLocs[:, 0].max()-self.uniqElecLocs[:, 0].min())
            dx_ideal = a/ncellperdipole
            if dx is None:
                dx = dx_ideal
            if dz is None:
                dz = dx*0.5
            x0 = self.uniqElecLocs[:, 0].min()
            if topo is None:
                locs = self.uniqElecLocs
            else:
                locs = np.vstack((topo, self.uniqElecLocs))
            zmax = locs[:, 1].max()
            zmin = locs[:, 1].min()
            if dx > dx_ideal:
                print(">>Input dx is greater than expected")
                # FIX: corrected "legnth" typo in the warning message
                print(
                    (": You may need %.1e m cell, that is %i cells per %.1e m dipole length") %
                    (dx_ideal, ncellperdipole, a)
                )
            # TODO: conditional statement for dz?
            # Inject variables into the class
            self.dx = dx
            self.dz = dz
            self.npadx = npadx
            self.npadz = npadz
            self.padratex = padratex
            self.padratez = padratez
            self.ncellperdipole = ncellperdipole
            # 3 cells each for buffer
            corexlength = lineLength + dx * 6
            if corezlength is None:
                corezlength = self.grids[:, 1].max() + zmax - zmin
            ncx = np.floor(corexlength/dx)
            ncz = np.floor(corezlength/dz)
            hx = [(dx, npadx, -padratex), (dx, ncx), (dx, npadx, padratex)]
            hz = [(dz, npadz, -padratez), (dz, ncz)]
            # BUG FIX: the origin used a hard-coded 1.3 padding ratio;
            # use the requested padratex/padratez so non-default ratios
            # keep the mesh aligned with the electrodes.
            x0_mesh = -(
                (dx * padratex ** (np.arange(npadx)+1)).sum() + dx * 3 - x0
            )
            z0_mesh = -((dz * padratez ** (np.arange(npadz)+1)).sum() + dz * ncz) + zmax
            mesh = Mesh.TensorMesh([hx, hz], x0=[x0_mesh, z0_mesh])
            actind = Utils.surface2ind_topo(mesh, locs)
            print(mesh)
        elif dim == 3:
            raise NotImplementedError()
        else:
            raise NotImplementedError()
        return mesh, actind

    def plotPseudoSection(self, dataType="appResistivity", scale="log", dataloc=True, aspect_ratio=2, cmap="jet", ncontour=10, ax=None):
        """Plot a pseudo-section of voltages or apparent resistivities."""
        matplotlib.rcParams['font.size'] = 12
        if self.dim == 2:
            if ax is None:
                # FIX: only allocate a new figure when no axes supplied
                # (the original always created a stray figure).
                fig = plt.figure(figsize=(10, 5))
                ax = plt.subplot(111)
            if dataType == "appResistivity":
                val = self.appResistivity.copy()
                label = r"Apparent Res. ($\Omega$m)"
            elif dataType == "volt":
                val = self.dobs.copy()
                label = "Voltage (V)"
            else:
                raise NotImplementedError()
            if scale == "log":
                fmt = "10$^{%.1f}$"
            elif scale == "linear":
                fmt = "%.1e"
            else:
                raise NotImplementedError()
            out = Utils.plot2Ddata(
                self.grids, val,
                contourOpts={'cmap': cmap},
                ax=ax,
                dataloc=dataloc,
                scale=scale,
                ncontour=ncontour
            )
            ax.invert_yaxis()
            ax.set_xlabel("x (m)")
            ax.set_yticklabels([])
            ax.set_ylabel("n-spacing")
            cb = plt.colorbar(out[0], fraction=0.01, format=fmt)
            cb.set_label(label)
            ax.set_aspect(aspect_ratio)
            plt.tight_layout()
            plt.show()
Change the pseudo-depth factor from 1/2 to 1/3.
import numpy as np
import SimPEG
from SimPEG import Utils, Mesh
from . import SrcDC as Src # Pole
from . import RxDC as Rx
from .SurveyDC import Survey, Survey_ky
import matplotlib.pyplot as plt
import matplotlib
class IO(object):
    """Input and Output for DC resistivity data.

    Holds electrode locations (A, B: current; M, N: potential), the
    observed data and derived quantities, and builds SimPEG survey and
    mesh objects from tabular ABMN data.
    """
    # TODO: use properties
    Alocs = None            # (ndata, dim) A-electrode locations
    Blocs = None            # (ndata, dim) B-electrode locations
    Mlocs = None            # (ndata, dim) M-electrode locations
    Nlocs = None            # (ndata, dim) N-electrode locations
    uniqElecLocs = None     # unique electrode locations
    geometry = "SURFACE"    # "SURFACE" or "BOREHOLE"
    dataType = 'volt'  # "volt" and "appResistivity"
    topoFunc = None
    mesh = None
    V = None                # voltages
    appResistivity = None   # apparent resistivities
    dobs = None             # observed data, sorted by source
    grids = None            # pseudo-section plotting locations
    G = None                # geometric factors
    dx = None
    dy = None
    dz = None
    npadx = 5
    npady = 5
    npadz = 5
    padratex = 1.3
    padratey = 1.3
    padratez = 1.3
    ncellperdipole = 4

    def fromABMN_to_survey(self, A, B, M, N, surveyType, dobs=None, dataType="volt", fname=None, dim=2):
        """
        Read ABMN locations and data (V or appResistivity) and return a
        SimPEG survey object.  Rows are re-sorted so data sharing a
        source are contiguous; ``self.sortinds`` records the permutation.
        """
        self.Alocs = A.copy()
        self.Blocs = B.copy()
        self.Mlocs = M.copy()
        self.Nlocs = N.copy()
        self.surveyType = surveyType
        self.dataType = dataType
        self.dim = dim
        uniqSrc = Utils.uniqueRows(np.c_[self.Alocs, self.Blocs])
        uniqElec = SimPEG.Utils.uniqueRows(
            np.vstack((self.Alocs, self.Blocs, self.Mlocs, self.Nlocs))
        )
        self.uniqElecLocs = uniqElec[0]
        nSrc = uniqSrc[0].shape[0]
        ndata = self.Alocs.shape[0]
        if dim == 2:
            srcLists = []
            sortinds = []
            for iSrc in range(nSrc):
                inds = uniqSrc[2] == iSrc
                sortinds.append(np.arange(ndata)[inds])
                locsM = self.Mlocs[inds, :]
                locsN = self.Nlocs[inds, :]
                # receiver type follows the second half of the survey name
                if (surveyType == 'dipole-dipole') or (surveyType == 'pole-dipole'):
                    rx = Rx.Dipole_ky(locsM, locsN)
                elif (surveyType == 'dipole-pole') or (surveyType == 'pole-pole'):
                    rx = Rx.Pole_ky(locsM)
                locA = uniqSrc[0][iSrc, :2]
                locB = uniqSrc[0][iSrc, 2:]
                # source type follows the first half of the survey name
                if (surveyType == 'dipole-dipole') or (surveyType == 'dipole-pole'):
                    src = Src.Dipole([rx], locA, locB)
                elif (surveyType == 'pole-dipole') or (surveyType == 'pole-pole'):
                    src = Src.Pole([rx], locA)
                srcLists.append(src)
            self.sortinds = np.hstack(sortinds)
            survey = Survey_ky(srcLists)
            self.Alocs = self.Alocs[self.sortinds, :]
            self.Blocs = self.Blocs[self.sortinds, :]
            self.Mlocs = self.Mlocs[self.sortinds, :]
            self.Nlocs = self.Nlocs[self.sortinds, :]
            # FIX: dobs is optional; the original indexed it
            # unconditionally and crashed on the default None.
            if dobs is not None:
                self.dobs = dobs[self.sortinds]
                if self.dataType == "volt":
                    self.V = self.dobs.copy()
                    G = self.getGeometricFactor()
                    self.appResistivity = self.V / G
                elif self.dataType == "appResistivity":
                    G = self.getGeometricFactor()
                    self.appResistivity = self.dobs.copy()
                    # FIX: use the freshly computed local G (was self.G)
                    self.V = self.appResistivity * G
                    self.dobs = self.V.copy()
            # Pseudo-section locations: x at the AB/MN midpoint, z at
            # one third of the midpoint separation (pseudo-depth).
            midAB = (self.Alocs[:, 0] + self.Blocs[:, 0])*0.5
            midMN = (self.Mlocs[:, 0] + self.Nlocs[:, 0])*0.5
            z = abs(midAB-midMN)*1./3.
            x = (midAB+midMN)*0.5
            self.grids = np.c_[x, z]
        else:
            raise NotImplementedError()
        return survey

    def getGeometricFactor(self):
        """Compute, store and return the geometric factors ``self.G``."""
        if self.geometry == 'SURFACE':
            if self.dim == 2:
                MA = abs(self.Alocs[:, 0] - self.Mlocs[:, 0])
                MB = abs(self.Blocs[:, 0] - self.Mlocs[:, 0])
                NA = abs(self.Alocs[:, 0] - self.Nlocs[:, 0])
                NB = abs(self.Blocs[:, 0] - self.Nlocs[:, 0])
            elif self.dim == 3:
                MA = np.sqrt(
                    abs(self.Alocs[:, 0] - self.Mlocs[:, 0])**2. +
                    abs(self.Alocs[:, 1] - self.Mlocs[:, 1])**2.
                )
                MB = np.sqrt(
                    abs(self.Blocs[:, 0] - self.Mlocs[:, 0])**2. +
                    abs(self.Blocs[:, 1] - self.Mlocs[:, 1])**2.
                )
                NA = np.sqrt(
                    abs(self.Alocs[:, 0] - self.Nlocs[:, 0])**2. +
                    abs(self.Alocs[:, 1] - self.Nlocs[:, 1])**2.
                )
                NB = np.sqrt(
                    abs(self.Blocs[:, 0] - self.Nlocs[:, 0])**2. +
                    abs(self.Blocs[:, 1] - self.Nlocs[:, 1])**2.
                )
            # BUG FIX: the original referenced the bare name `surveyType`
            # in the elif branches (NameError); use self.surveyType.
            if self.surveyType == 'dipole-dipole':
                self.G = 1./(2*np.pi) * (1./MA - 1./MB + 1./NB - 1./NA)
            elif self.surveyType == 'pole-dipole':
                self.G = 1./(2*np.pi) * (1./MA - 1./NA)
            elif self.surveyType == 'dipole-pole':
                self.G = 1./(2*np.pi) * (1./MA - 1./MB)
            elif self.surveyType == 'pole-pole':
                self.G = 1./(2*np.pi) * (1./MA)
        elif self.geometry == 'BOREHOLE':
            raise NotImplementedError()
        return self.G

    def setMesh(self, topo=None, dx=None, dz=None, nSpacing=None, corezlength=None, npadx=7, npadz=7, padratex=1.3, padratez=1.3, ncellperdipole=4, meshType='TensorMesh', dim=2):
        """
        Design a 2D tensor mesh from the electrode spacing.

        Cell size defaults to 1/``ncellperdipole`` of the smallest
        electrode separation; the core region covers the line plus a
        3-cell buffer, padded outward by ``padratex``/``padratez``.
        Returns ``(mesh, actind)`` with ``actind`` flagging cells below
        the topography.
        """
        if meshType == 'TreeMesh':
            raise NotImplementedError()
        if dim == 2:
            # smallest electrode separation sets the ideal cell size
            a = abs(np.diff(np.sort(self.uniqElecLocs[:, 0]))).min()
            lineLength = abs(self.uniqElecLocs[:, 0].max()-self.uniqElecLocs[:, 0].min())
            dx_ideal = a/ncellperdipole
            if dx is None:
                dx = dx_ideal
            if dz is None:
                dz = dx*0.5
            x0 = self.uniqElecLocs[:, 0].min()
            if topo is None:
                locs = self.uniqElecLocs
            else:
                locs = np.vstack((topo, self.uniqElecLocs))
            zmax = locs[:, 1].max()
            zmin = locs[:, 1].min()
            if dx > dx_ideal:
                print(">>Input dx is greater than expected")
                # FIX: corrected "legnth" typo in the warning message
                print(
                    (": You may need %.1e m cell, that is %i cells per %.1e m dipole length") %
                    (dx_ideal, ncellperdipole, a)
                )
            # TODO: conditional statement for dz?
            # Inject variables into the class
            self.dx = dx
            self.dz = dz
            self.npadx = npadx
            self.npadz = npadz
            self.padratex = padratex
            self.padratez = padratez
            self.ncellperdipole = ncellperdipole
            # 3 cells each for buffer
            corexlength = lineLength + dx * 6
            if corezlength is None:
                corezlength = self.grids[:, 1].max() + zmax - zmin
            ncx = np.floor(corexlength/dx)
            ncz = np.floor(corezlength/dz)
            hx = [(dx, npadx, -padratex), (dx, ncx), (dx, npadx, padratex)]
            hz = [(dz, npadz, -padratez), (dz, ncz)]
            # BUG FIX: the origin used a hard-coded 1.3 padding ratio;
            # use the requested padratex/padratez so non-default ratios
            # keep the mesh aligned with the electrodes.
            x0_mesh = -(
                (dx * padratex ** (np.arange(npadx)+1)).sum() + dx * 3 - x0
            )
            z0_mesh = -((dz * padratez ** (np.arange(npadz)+1)).sum() + dz * ncz) + zmax
            mesh = Mesh.TensorMesh([hx, hz], x0=[x0_mesh, z0_mesh])
            actind = Utils.surface2ind_topo(mesh, locs)
            print(mesh)
        elif dim == 3:
            raise NotImplementedError()
        else:
            raise NotImplementedError()
        return mesh, actind

    def plotPseudoSection(self, dataType="appResistivity", scale="log", dataloc=True, aspect_ratio=2, cmap="jet", ncontour=10, ax=None):
        """Plot a pseudo-section of voltages or apparent resistivities."""
        matplotlib.rcParams['font.size'] = 12
        if self.dim == 2:
            if ax is None:
                # FIX: only allocate a new figure when no axes supplied
                # (the original always created a stray figure).
                fig = plt.figure(figsize=(10, 5))
                ax = plt.subplot(111)
            if dataType == "appResistivity":
                val = self.appResistivity.copy()
                label = r"Apparent Res. ($\Omega$m)"
            elif dataType == "volt":
                val = self.dobs.copy()
                label = "Voltage (V)"
            else:
                raise NotImplementedError()
            if scale == "log":
                fmt = "10$^{%.1f}$"
            elif scale == "linear":
                fmt = "%.1e"
            else:
                raise NotImplementedError()
            out = Utils.plot2Ddata(
                self.grids, val,
                contourOpts={'cmap': cmap},
                ax=ax,
                dataloc=dataloc,
                scale=scale,
                ncontour=ncontour
            )
            ax.invert_yaxis()
            ax.set_xlabel("x (m)")
            ax.set_yticklabels([])
            ax.set_ylabel("n-spacing")
            cb = plt.colorbar(out[0], fraction=0.01, format=fmt)
            cb.set_label(label)
            ax.set_aspect(aspect_ratio)
            plt.tight_layout()
            plt.show()
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2006 Matthew Good <trac@matt-good.net>
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Matthew Good <trac@matt-good.net>
import math
import re
import sys
import time
from datetime import tzinfo, timedelta, datetime, date
from locale import getlocale, LC_TIME
try:
import babel
except ImportError:
babel = None
def get_known_locales():
return []
else:
from babel import Locale
from babel.core import LOCALE_ALIASES
from babel.dates import (
format_datetime as babel_format_datetime,
format_date as babel_format_date,
format_time as babel_format_time,
get_datetime_format, get_date_format,
get_time_format, get_month_names,
get_period_names, get_day_names
)
try:
from babel.localedata import list as get_known_locales
except ImportError:
from babel.localedata import locale_identifiers as get_known_locales
from trac.core import TracError
from trac.util.text import to_unicode, getpreferredencoding
from trac.util.translation import _, ngettext
# Date/time utilities
# -- conversion
def to_datetime(t, tzinfo=None):
    """Convert ``t`` into a `datetime` object in the ``tzinfo`` timezone.
    If no ``tzinfo`` is given, the local timezone `localtz` will be used.
    ``t`` is converted using the following rules:
    - If ``t`` is already a `datetime` object,
      - if it is timezone-"naive", it is localized to ``tzinfo``
      - if it is already timezone-aware, ``t`` is mapped to the given
        timezone (`datetime.datetime.astimezone`)
    - If ``t`` is None, the current time will be used.
    - If ``t`` is a number, it is interpreted as a timestamp.
    Any other input will trigger a `TypeError`.
    All returned datetime instances are timezone aware and normalized.
    """
    tz = tzinfo or localtz
    if t is None:
        dt = datetime.now(tz)
    elif isinstance(t, datetime):
        if t.tzinfo:
            dt = t.astimezone(tz)
        else:
            # naive datetime: interpret it in the target timezone
            dt = tz.localize(t)
    elif isinstance(t, date):
        # a bare date becomes midnight in the target timezone
        dt = tz.localize(datetime(t.year, t.month, t.day))
    elif isinstance(t, (int, long, float)):
        if not (_min_ts <= t <= _max_ts):
            # Handle microsecond timestamps for 0.11 compatibility
            t *= 0.000001
        if t < 0 and isinstance(t, float):
            # Work around negative fractional times bug in Python 2.4
            # http://bugs.python.org/issue1646728
            frac, integer = math.modf(t)
            dt = datetime.fromtimestamp(integer - 1, tz) + \
                 timedelta(seconds=frac + 1)
        else:
            dt = datetime.fromtimestamp(t, tz)
    # NOTE(review): for an unsupported type `dt` is never bound, so this
    # raises UnboundLocalError rather than the documented TypeError.
    if dt:
        return tz.normalize(dt)
    raise TypeError('expecting datetime, int, long, float, or None; got %s' %
                    type(t))
def to_timestamp(dt):
    """Return the POSIX timestamp (whole seconds since the epoch)
    for `dt`, or 0 when `dt` is falsy."""
    if not dt:
        return 0
    delta = dt - _epoc
    return delta.days * 86400 + delta.seconds
def to_utimestamp(dt):
    """Return a microsecond POSIX timestamp for the given `datetime`,
    or 0 when `dt` is falsy."""
    if not dt:
        return 0
    delta = dt - _epoc
    return (delta.days * 86400000000 + delta.seconds * 1000000
            + delta.microseconds)
def from_utimestamp(ts):
    """Return the `datetime` for the given microsecond POSIX timestamp.

    A falsy ``ts`` (None or 0) yields the epoch itself.
    """
    return _epoc + timedelta(microseconds=ts or 0)
# -- formatting
# (seconds-per-unit, pluralizing formatter) pairs used by
# pretty_timedelta, ordered from the largest unit down to minutes.
_units = (
    (3600*24*365, lambda r: ngettext('%(num)d year', '%(num)d years', r)),
    (3600*24*30, lambda r: ngettext('%(num)d month', '%(num)d months', r)),
    (3600*24*7, lambda r: ngettext('%(num)d week', '%(num)d weeks', r)),
    (3600*24, lambda r: ngettext('%(num)d day', '%(num)d days', r)),
    (3600, lambda r: ngettext('%(num)d hour', '%(num)d hours', r)),
    (60, lambda r: ngettext('%(num)d minute', '%(num)d minutes', r)))
def pretty_timedelta(time1, time2=None, resolution=None):
    """Render the delta between two `datetime` objects as rough prose.

    (the result is somewhat imprecise, only use for prettyprinting).
    If either `time1` or `time2` is None, the current time will be used
    instead.  Returns '' when the delta is below ``resolution`` seconds.
    """
    start = to_datetime(time1)
    end = to_datetime(time2)
    if start > end:
        start, end = end, start
    delta = end - start
    age_s = int(delta.days * 86400 + delta.seconds)
    if resolution and age_s < resolution:
        return ''
    if age_s <= 60 * 1.9:
        return ngettext('%(num)i second', '%(num)i seconds', age_s)
    for unit, format_units in _units:
        ratio = float(age_s) / float(unit)
        if ratio >= 1.9:
            return format_units(int(round(ratio)))
    return ''
# Fallback strftime formats (by hint and width) used when Babel is not
# available, and the reverse map from strftime patterns to hint names.
_BABEL_FORMATS = {
    'datetime': {'short': '%x %H:%M', 'medium': '%x %X', 'long': '%x %X',
                 'full': '%x %X'},
    'date': {'short': '%x', 'medium': '%x', 'long': '%x', 'full': '%x'},
    'time': {'short': '%H:%M', 'medium': '%X', 'long': '%X', 'full': '%X'},
}
_STRFTIME_HINTS = {'%x %X': 'datetime', '%x': 'date', '%X': 'time'}
def _format_datetime_without_babel(t, format):
    """Format `t` with strftime and decode to unicode using the LC_TIME
    locale encoding (falling back to the preferred/default encoding)."""
    text = t.strftime(str(format))
    encoding = getlocale(LC_TIME)[1] or getpreferredencoding() \
               or sys.getdefaultencoding()
    return unicode(text, encoding, 'replace')
def _format_datetime_iso8601(t, format, hint):
    """Render `t` in ISO 8601 at the given width ('short', 'medium',
    'long' or 'full'), trimmed to date or time when `hint` says so."""
    if format != 'full':
        # sub-second precision only for the 'full' width
        t = t.replace(microsecond=0)
    text = t.isoformat()  # YYYY-MM-DDThh:mm:ss.SSSSSS±hh:mm
    if format == 'short':
        text = text[:16]  # YYYY-MM-DDThh:mm
    elif format == 'medium':
        text = text[:19]  # YYYY-MM-DDThh:mm:ss
    elif text.endswith('+00:00'):
        # canonical 'Z' suffix for UTC
        text = text[:-6] + 'Z'
    if hint == 'date':
        text = text.split('T', 1)[0]
    elif hint == 'time':
        text = text.split('T', 1)[1]
    return unicode(text, 'ascii')
def _format_datetime(t, format, tzinfo, locale, hint):
    """Shared implementation behind format_datetime/date/time.

    Dispatch order: explicit iso8601 formats, then the 'iso8601'
    pseudo-locale, then Babel (when installed and a locale is given),
    finally a plain strftime fallback.
    """
    t = to_datetime(t, tzinfo or localtz)

    if format == 'iso8601':
        return _format_datetime_iso8601(t, 'long', hint)
    if format in ('iso8601date', 'iso8601time'):
        # format name carries the hint after the 'iso8601' prefix
        return _format_datetime_iso8601(t, 'long', format[7:])
    if locale == 'iso8601':
        if format is None:
            format = 'long'
        elif format in _STRFTIME_HINTS:
            # a raw strftime pattern implies its hint
            hint = _STRFTIME_HINTS[format]
            format = 'long'
        if format in ('short', 'medium', 'long', 'full'):
            return _format_datetime_iso8601(t, format, hint)
        return _format_datetime_without_babel(t, format)
    if babel and locale:
        if format is None:
            format = 'medium'
        elif format in _STRFTIME_HINTS:
            hint = _STRFTIME_HINTS[format]
            format = 'medium'
        if format in ('short', 'medium', 'long', 'full'):
            if hint == 'datetime':
                return babel_format_datetime(t, format, None, locale)
            if hint == 'date':
                return babel_format_date(t, format, locale)
            if hint == 'time':
                return babel_format_time(t, format, None, locale)
    # no Babel (or custom pattern): map width names to strftime patterns
    format = _BABEL_FORMATS[hint].get(format, format)
    return _format_datetime_without_babel(t, format)
def format_datetime(t=None, format='%x %X', tzinfo=None, locale=None):
    """Format the `datetime` object `t` into an `unicode` string
    If `t` is None, the current time will be used.
    The formatting will be done using the given `format`, which consist
    of conventional `strftime` keys. In addition the format can be 'iso8601'
    to specify the international date format (compliant with RFC 3339).
    `tzinfo` will default to the local timezone if left to `None`.
    """
    return _format_datetime(t, format, tzinfo, locale, 'datetime')
def format_date(t=None, format='%x', tzinfo=None, locale=None):
    """Convenience method for formatting the date part of a `datetime` object.
    See `format_datetime` for more details.
    """
    return _format_datetime(t, format, tzinfo, locale, 'date')
def format_time(t=None, format='%X', tzinfo=None, locale=None):
    """Convenience method for formatting the time part of a `datetime` object.
    See `format_datetime` for more details.
    """
    return _format_datetime(t, format, tzinfo, locale, 'time')
def get_date_format_hint(locale=None):
    """Present the default format used by `format_date` in a human readable
    form.
    This is a format that will be recognized by `parse_date` when reading a
    date.
    """
    if locale == 'iso8601':
        return 'YYYY-MM-DD'
    if babel and locale:
        format = get_date_format('medium', locale=locale)
        return format.pattern
    # Fallback: format a known date (1999-10-29) and substitute each
    # distinctive component with its placeholder.
    t = datetime(1999, 10, 29, tzinfo=utc)
    tmpl = format_date(t, tzinfo=utc)
    return tmpl.replace('1999', 'YYYY', 1).replace('99', 'YY', 1) \
               .replace('10', 'MM', 1).replace('29', 'DD', 1)
def get_datetime_format_hint(locale=None):
    """Present the default format used by `format_datetime` in a human readable
    form.
    This is a format that will be recognized by `parse_date` when reading a
    date.
    """
    if locale == 'iso8601':
        return u'YYYY-MM-DDThh:mm:ss±hh:mm'
    if babel and locale:
        date_pattern = get_date_format('medium', locale=locale).pattern
        time_pattern = get_time_format('medium', locale=locale).pattern
        format = get_datetime_format('medium', locale=locale)
        return format.replace('{0}', time_pattern) \
                     .replace('{1}', date_pattern)
    # Fallback: format a known instant (1999-10-29 23:59:58) and replace
    # each distinctive component with its placeholder; '11' covers the
    # hour on 12-hour clocks and the AM/PM marker becomes 'a'.
    t = datetime(1999, 10, 29, 23, 59, 58, tzinfo=utc)
    tmpl = format_datetime(t, tzinfo=utc)
    ampm = format_time(t, '%p', tzinfo=utc)
    if ampm:
        tmpl = tmpl.replace(ampm, 'a', 1)
    return tmpl.replace('1999', 'YYYY', 1).replace('99', 'YY', 1) \
               .replace('10', 'MM', 1).replace('29', 'DD', 1) \
               .replace('23', 'hh', 1).replace('11', 'hh', 1) \
               .replace('59', 'mm', 1).replace('58', 'ss', 1)
def get_month_names_jquery_ui(req):
    """Get the month names for the jQuery UI datepicker library.

    Uses Babel for the request's locale when available; otherwise falls
    back to hard-coded English names.
    """
    locale = req.lc_time
    if locale == 'iso8601':
        locale = req.locale
    if babel and locale:
        month_names = {}
        for width in ('wide', 'abbreviated'):
            names = get_month_names(width, locale=locale)
            # Babel keys months 1..12; the picker wants a 0-based list
            month_names[width] = [names[i + 1] for i in xrange(12)]
        return month_names
    return {
        'wide': (
            'January', 'February', 'March', 'April', 'May', 'June', 'July',
            'August', 'September', 'October', 'November', 'December'),
        'abbreviated': (
            'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
            'Oct', 'Nov', 'Dec'),
    }
def get_day_names_jquery_ui(req):
    """Get the day names for the jQuery UI datepicker library.

    Uses Babel for the request's locale when available; otherwise falls
    back to hard-coded English names.
    """
    locale = req.lc_time
    if locale == 'iso8601':
        locale = req.locale
    if babel and locale:
        day_names = {}
        for width in ('wide', 'abbreviated', 'narrow'):
            names = get_day_names(width, locale=locale)
            # Babel weeks start on Monday (0); the picker starts on Sunday
            day_names[width] = [names[(i + 6) % 7] for i in xrange(7)]
        return day_names
    return {
        'wide': ('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday',
                 'Friday', 'Saturday'),
        'abbreviated': ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'),
        'narrow': ('Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa'),
    }
def get_date_format_jquery_ui(locale):
    """Get the date format for the jQuery UI datepicker library."""
    if locale == 'iso8601':
        return 'yy-mm-dd'
    if babel and locale:
        # translate CLDR field symbols into datepicker tokens
        values = {'yyyy': 'yy', 'y': 'yy', 'M': 'm', 'MM': 'mm', 'MMM': 'M',
                  'd': 'd', 'dd': 'dd'}
        return get_date_format('medium', locale=locale).format % values
    # Fallback: format a known date and substitute placeholder tokens.
    t = datetime(1999, 10, 29, tzinfo=utc)
    tmpl = format_date(t, tzinfo=utc)
    return tmpl.replace('1999', 'yy', 1).replace('99', 'y', 1) \
               .replace('10', 'mm', 1).replace('29', 'dd', 1)
def get_time_format_jquery_ui(locale):
    """Get the time format for the jQuery UI timepicker addon."""
    if locale == 'iso8601':
        return 'hh:mm:ssz'  # XXX timepicker doesn't support 'ISO_8601'
    if babel and locale:
        # translate CLDR field symbols into timepicker tokens
        values = {'h': 'h', 'hh': 'hh', 'H': 'h', 'HH': 'hh',
                  'm': 'm', 'mm': 'mm', 's': 's', 'ss': 'ss',
                  'a': 'TT'}
        return get_time_format('medium', locale=locale).format % values
    # Fallback: format a known time (23:59:58) and substitute tokens;
    # '11' covers the hour on 12-hour clocks.
    t = datetime(1999, 10, 29, 23, 59, 58, tzinfo=utc)
    tmpl = format_time(t, tzinfo=utc)
    ampm = format_time(t, '%p', tzinfo=utc)
    if ampm:
        tmpl = tmpl.replace(ampm, 'TT', 1)
    return tmpl.replace('23', 'hh', 1).replace('11', 'hh', 1) \
               .replace('59', 'mm', 1).replace('58', 'ss', 1)
def get_timezone_list_jquery_ui(t=None):
    """Get timezone list for jQuery timepicker addon.

    Collects the distinct UTC offsets in effect at time `t` across all
    known timezones, formatted as ±hh:mm (UTC as a 'Z' value entry).
    """
    t = datetime.now(utc) if t is None else utc.localize(t)
    zones = set(t.astimezone(get_timezone(tz)).strftime('%z')
                for tz in all_timezones)
    return [{'value': 'Z', 'label': '+00:00'} \
            if zone == '+0000' else zone[:-2] + ':' + zone[-2:]
            for zone in sorted(zones, key=lambda tz: int(tz))]
def get_first_week_day_jquery_ui(req):
    """Get first week day for jQuery date picker (0=Sunday, 1=Monday...)."""
    locale = req.lc_time
    if locale == 'iso8601':
        return 1  # Monday
    if babel and locale:
        if not locale.territory and locale.language in LOCALE_ALIASES:
            # resolve a bare language to its conventional territory
            locale = Locale.parse(LOCALE_ALIASES[locale.language])
        # Babel counts from Monday (0); the picker counts from Sunday
        return (locale.first_week_day + 1) % 7
    return 0  # Sunday
def is_24_hours(locale):
    """Returns `True` for 24 hour time formats."""
    if locale == 'iso8601':
        return True
    # Format an hour of 23 and see whether it survives as '23'
    sample = datetime(1999, 10, 29, 23, tzinfo=utc)
    return '23' in format_datetime(sample, tzinfo=utc, locale=locale)
def http_date(t=None):
    """Format `datetime` object `t` as a rfc822 timestamp"""
    t = to_datetime(t, utc)
    weekday = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[t.weekday()]
    month = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
             'Oct', 'Nov', 'Dec')[t.month - 1]
    return '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
        weekday, t.day, month, t.year, t.hour, t.minute, t.second)
# -- parsing
_ISO_8601_RE = re.compile(r'''
(\d\d\d\d)(?:-?(\d\d)(?:-?(\d\d))?)? # date
(?:
[T ]
(\d\d)(?::?(\d\d)(?::?(\d\d) # time
(?:[,.](\d{1,6}))?)?)? # microseconds
)?
(Z?(?:([-+])?(\d\d):?(\d\d)?)?)?$ # timezone
''', re.VERBOSE)
def _parse_date_iso8601(text, tzinfo):
match = _ISO_8601_RE.match(text)
if match:
try:
g = match.groups()
years = g[0]
months = g[1] or '01'
days = g[2] or '01'
hours, minutes, seconds, useconds = [x or '00' for x in g[3:7]]
useconds = (useconds + '000000')[:6]
z, tzsign, tzhours, tzminutes = g[7:11]
if z:
tz = timedelta(hours=int(tzhours or '0'),
minutes=int(tzminutes or '0')).seconds / 60
if tz == 0:
tzinfo = utc
else:
tzinfo = FixedOffset(-tz if tzsign == '-' else tz,
'%s%s:%s' %
(tzsign, tzhours, tzminutes))
tm = [int(x) for x in (years, months, days,
hours, minutes, seconds, useconds)]
t = tzinfo.localize(datetime(*tm))
return tzinfo.normalize(t)
except ValueError:
pass
return None
def parse_date(text, tzinfo=None, locale=None, hint='date'):
tzinfo = tzinfo or localtz
text = text.strip()
dt = _parse_date_iso8601(text, tzinfo)
if dt is None and locale != 'iso8601':
if babel and locale:
dt = _i18n_parse_date(text, tzinfo, locale)
else:
for format in ['%x %X', '%x, %X', '%X %x', '%X, %x', '%x', '%c',
'%b %d, %Y']:
try:
tm = time.strptime(text, format)
dt = tzinfo.localize(datetime(*tm[0:6]))
dt = tzinfo.normalize(dt)
break
except ValueError:
continue
if dt is None:
dt = _parse_relative_time(text, tzinfo)
if dt is None:
hint = {'datetime': get_datetime_format_hint,
'date': get_date_format_hint
}.get(hint, lambda(l): hint)(locale)
raise TracError(_('"%(date)s" is an invalid date, or the date format '
'is not known. Try "%(hint)s" instead.',
date=text, hint=hint), _('Invalid Date'))
# Make sure we can convert it to a timestamp and back - fromtimestamp()
# may raise ValueError if larger than platform C localtime() or gmtime()
try:
datetime.utcfromtimestamp(to_timestamp(dt))
except ValueError:
raise TracError(_('The date "%(date)s" is outside valid range. '
'Try a date closer to present time.', date=text),
_('Invalid Date'))
return dt
def _i18n_parse_date_pattern(locale):
format_keys = {
'y': ('y', 'Y'),
'M': ('M',),
'd': ('d',),
'h': ('h', 'H'),
'm': ('m',),
's': ('s',),
}
regexp = [r'[0-9]+']
date_format = get_date_format('medium', locale=locale)
time_format = get_time_format('medium', locale=locale)
datetime_format = get_datetime_format('medium', locale=locale)
formats = (
datetime_format.replace('{0}', time_format.format) \
.replace('{1}', date_format.format),
date_format.format)
orders = []
for format in formats:
order = []
for key, chars in format_keys.iteritems():
for char in chars:
idx = format.find('%(' + char)
if idx != -1:
order.append((idx, key))
break
order.sort()
order = dict((key, idx) for idx, (_, key) in enumerate(order))
orders.append(order)
month_names = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
if formats[0].find('%(MMM)s') != -1:
for width in ('wide', 'abbreviated'):
names = get_month_names(width, locale=locale)
for num, name in names.iteritems():
name = name.lower()
month_names[name] = num
regexp.extend(month_names.iterkeys())
period_names = {'am': 'am', 'pm': 'pm'}
if formats[0].find('%(a)s') != -1:
names = get_period_names(locale=locale)
for period, name in names.iteritems():
name = name.lower()
period_names[name] = period
regexp.extend(period_names.iterkeys())
return {
'orders': orders,
'regexp': re.compile('(%s)' % '|'.join(regexp),
re.IGNORECASE | re.UNICODE),
'month_names': month_names,
'period_names': period_names,
}
_I18N_PARSE_DATE_PATTERNS = dict((l, False) for l in get_known_locales())
def _i18n_parse_date(text, tzinfo, locale):
locale = Locale.parse(locale)
key = str(locale)
pattern = _I18N_PARSE_DATE_PATTERNS.get(key)
if pattern is False:
pattern = _i18n_parse_date_pattern(locale)
_I18N_PARSE_DATE_PATTERNS[key] = pattern
if pattern is None:
return None
regexp = pattern['regexp']
period_names = pattern['period_names']
month_names = pattern['month_names']
text = text.lower()
for order in pattern['orders']:
try:
return _i18n_parse_date_0(text, order, regexp, period_names,
month_names, tzinfo)
except ValueError:
continue
return None
def _i18n_parse_date_0(text, order, regexp, period_names, month_names, tzinfo):
matches = regexp.findall(text)
if not matches:
return None
# remove am/pm markers on ahead
period = None
for idx, match in enumerate(matches):
period = period_names.get(match)
if period is not None:
del matches[idx]
break
# for date+time, use 0 seconds if seconds are missing
if 's' in order and len(matches) == 5:
matches.insert(order['s'], 0)
values = {}
for key, idx in order.iteritems():
if idx < len(matches):
value = matches[idx]
if key == 'y':
if len(value) == 2 and value.isdigit():
value = '20' + value
values[key] = value
if 'y' not in values or 'M' not in values or 'd' not in values:
raise ValueError
for key in ('y', 'M', 'd'):
value = values[key]
value = month_names.get(value)
if value is not None:
if key == 'M':
values[key] = value
else:
values[key], values['M'] = values['M'], value
break
values = dict((key, int(value)) for key, value in values.iteritems())
values.setdefault('h', 0)
values.setdefault('m', 0)
values.setdefault('s', 0)
if period and values['h'] <= 12:
if period == 'am':
values['h'] %= 12
elif period == 'pm':
values['h'] = values['h'] % 12 + 12
t = tzinfo.localize(datetime(*(values[k] for k in 'yMdhms')))
return tzinfo.normalize(t)
_REL_TIME_RE = re.compile(
r'(\d+\.?\d*)\s*'
r'(second|minute|hour|day|week|month|year|[hdwmy])s?\s*'
r'(?:ago)?$')
_time_intervals = dict(
second=lambda v: timedelta(seconds=v),
minute=lambda v: timedelta(minutes=v),
hour=lambda v: timedelta(hours=v),
day=lambda v: timedelta(days=v),
week=lambda v: timedelta(weeks=v),
month=lambda v: timedelta(days=30 * v),
year=lambda v: timedelta(days=365 * v),
h=lambda v: timedelta(hours=v),
d=lambda v: timedelta(days=v),
w=lambda v: timedelta(weeks=v),
m=lambda v: timedelta(days=30 * v),
y=lambda v: timedelta(days=365 * v),
)
_TIME_START_RE = re.compile(r'(this|last)\s*'
r'(second|minute|hour|day|week|month|year)$')
_time_starts = dict(
second=lambda now: datetime(now.year, now.month, now.day, now.hour,
now.minute, now.second),
minute=lambda now: datetime(now.year, now.month, now.day, now.hour,
now.minute),
hour=lambda now: datetime(now.year, now.month, now.day, now.hour),
day=lambda now: datetime(now.year, now.month, now.day),
week=lambda now: datetime(now.year, now.month, now.day) \
- timedelta(days=now.weekday()),
month=lambda now: datetime(now.year, now.month, 1),
year=lambda now: datetime(now.year, 1, 1),
)
def _parse_relative_time(text, tzinfo, now=None):
    """Parse relative date specifications such as "now", "today",
    "yesterday", "3 hours ago" or "last month" into a normalized
    `datetime` in `tzinfo`.

    Returns `None` when `text` matches none of the recognized forms.
    """
    if now is None: # now argument for unit tests
        now = datetime.now(tzinfo)
    if text == 'now':
        return now
    dt = None
    if text == 'today':
        dt = _time_starts['day'](now)
    elif text == 'yesterday':
        dt = _time_starts['day'](now) - timedelta(days=1)
    if dt is None:
        # "<number> <unit>[ ago]", e.g. "3 hours ago"
        match = _REL_TIME_RE.match(text)
        if match:
            (value, interval) = match.groups()
            dt = now - _time_intervals[interval](float(value))
    if dt is None:
        # "this <unit>" / "last <unit>", e.g. "last month"
        match = _TIME_START_RE.match(text)
        if match:
            (which, start) = match.groups()
            dt = _time_starts[start](now)
            if which == 'last':
                # month/year require calendar arithmetic rather than a
                # fixed timedelta
                if start == 'month':
                    if dt.month > 1:
                        dt = dt.replace(month=dt.month - 1)
                    else:
                        dt = dt.replace(year=dt.year - 1, month=12)
                elif start == 'year':
                    dt = dt.replace(year=dt.year - 1)
                else:
                    dt -= _time_intervals[start](1)
    if dt is None:
        return None
    if not dt.tzinfo:
        # the _time_starts factories return naive datetimes
        dt = tzinfo.localize(dt)
    return tzinfo.normalize(dt)
# -- formatting/parsing helper functions
def user_time(req, func, *args, **kwargs):
    """Invoke `func`, defaulting its `tzinfo` and `locale` keyword
    arguments from the given request. It is expected to be used with
    `format_*` and `parse_date` methods in `trac.util.datefmt` package.

    :param req: a instance of `Request`
    :param func: a function which must accept `tzinfo` and `locale` keyword
                 arguments
    :param args: arguments which pass to `func` function
    :param kwargs: keyword arguments which pass to `func` function
    """
    # only fill in defaults the caller did not supply explicitly
    for keyword, attr in (('tzinfo', 'tz'), ('locale', 'lc_time')):
        kwargs.setdefault(keyword, getattr(req, attr, None))
    return func(*args, **kwargs)
# -- timezone utilities
class FixedOffset(tzinfo):
    """A timezone at a constant offset, expressed in minutes east of UTC."""

    def __init__(self, offset, name):
        self._offset = timedelta(minutes=offset)
        self.zone = name

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        # a fixed-offset zone never observes daylight saving time
        return _zero

    def tzname(self, dt):
        return self.zone

    def localize(self, dt, is_dst=False):
        if dt.tzinfo is None:
            return dt.replace(tzinfo=self)
        raise ValueError('Not naive datetime (tzinfo is already set)')

    def normalize(self, dt, is_dst=False):
        if dt.tzinfo is not None:
            return dt
        raise ValueError('Naive time (no tzinfo set)')

    def __str__(self):
        return self.zone

    def __repr__(self):
        return '<FixedOffset "{0}" {1}>'.format(self.zone, self._offset)
class LocalTimezone(tzinfo):
    """A 'local' time zone implementation"""
    # Class-level cache of the platform offsets, filled in once by
    # _initialize() at module load time.
    _std_offset = None
    _dst_offset = None
    _dst_diff = None
    _std_tz = None
    _dst_tz = None
    @classmethod
    def _initialize(cls):
        # time.timezone / time.altzone are seconds *west* of UTC,
        # hence the negation
        cls._std_tz = cls(False)
        cls._std_offset = timedelta(seconds=-time.timezone)
        if time.daylight:
            cls._dst_tz = cls(True)
            cls._dst_offset = timedelta(seconds=-time.altzone)
        else:
            cls._dst_tz = cls._std_tz
            cls._dst_offset = cls._std_offset
        cls._dst_diff = cls._dst_offset - cls._std_offset
    def __init__(self, is_dst=None):
        # is_dst: None = decide per datetime; True/False = pinned variant
        self.is_dst = is_dst
    def __str__(self):
        offset = self.utcoffset(datetime.now())
        secs = offset.days * 3600 * 24 + offset.seconds
        hours, rem = divmod(abs(secs), 3600)
        return 'UTC%c%02d:%02d' % ('-' if secs < 0 else '+', hours, rem / 60)
    def __repr__(self):
        if self.is_dst is None:
            return '<LocalTimezone "%s" %s "%s" %s>' % \
                   (time.tzname[False], self._std_offset,
                    time.tzname[True], self._dst_offset)
        if self.is_dst:
            offset = self._dst_offset
        else:
            offset = self._std_offset
        return '<LocalTimezone "%s" %s>' % (time.tzname[self.is_dst], offset)
    def _is_dst(self, dt, is_dst=False):
        """Return whether DST is in effect at the naive local time `dt`.

        `is_dst` disambiguates local times that are ambiguous (end of
        DST) or non-existent (start of DST); passing `is_dst=None` makes
        those cases raise `ValueError` instead.
        """
        if self.is_dst is not None:
            return self.is_dst
        tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
              dt.weekday(), 0)
        try:
            # round-trip through mktime/localtime with both isdst flags
            std_tt = time.localtime(time.mktime(tt + (0,)))
            dst_tt = time.localtime(time.mktime(tt + (1,)))
        except (ValueError, OverflowError):
            return False
        std_correct = std_tt.tm_isdst == 0
        dst_correct = dst_tt.tm_isdst == 1
        if std_correct is dst_correct:
            # both interpretations round-trip (ambiguous time) or
            # neither does (non-existent time)
            if is_dst is None:
                if std_correct is True:
                    raise ValueError('Ambiguous time "%s"' % dt)
                if std_correct is False:
                    raise ValueError('Non existent time "%s"' % dt)
            return is_dst
        if std_correct:
            return False
        if dst_correct:
            return True
    def utcoffset(self, dt):
        if self._is_dst(dt):
            return self._dst_offset
        else:
            return self._std_offset
    def dst(self, dt):
        if self._is_dst(dt):
            return self._dst_diff
        else:
            return _zero
    def tzname(self, dt):
        return time.tzname[self._is_dst(dt)]
    def localize(self, dt, is_dst=False):
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        if self._is_dst(dt, is_dst):
            tz = self._dst_tz
        else:
            tz = self._std_tz
        return dt.replace(tzinfo=tz)
    def normalize(self, dt, is_dst=False):
        if dt.tzinfo is None:
            raise ValueError('Naive time (no tzinfo set)')
        if dt.tzinfo is localtz: # if not localized, returns without changes
            return dt
        return self.fromutc(dt.replace(tzinfo=self) - dt.utcoffset())
    def fromutc(self, dt):
        if dt.tzinfo is None or dt.tzinfo is not self:
            raise ValueError('fromutc: dt.tzinfo is not self')
        try:
            tt = time.localtime(to_timestamp(dt.replace(tzinfo=utc)))
        except ValueError:
            # timestamp outside the platform localtime() range: fall
            # back to the standard-offset variant
            return dt.replace(tzinfo=self._std_tz) + self._std_offset
        if tt.tm_isdst > 0:
            tz = self._dst_tz
        else:
            tz = self._std_tz
        return datetime(*(tt[:6] + (dt.microsecond, tz)))
# Module-wide timezone singletons and time constants.
utc = FixedOffset(0, 'UTC')
utcmin = datetime.min.replace(tzinfo=utc)
utcmax = datetime.max.replace(tzinfo=utc)
# POSIX epoch, the reference point for the timestamp conversions
_epoc = datetime(1970, 1, 1, tzinfo=utc)
_zero = timedelta(0)
# 32-bit signed timestamp bounds; values outside this range are treated
# as microsecond timestamps by to_datetime()
_min_ts = -(1 << 31)
_max_ts = (1 << 31) - 1
LocalTimezone._initialize()
localtz = LocalTimezone()
STDOFFSET = LocalTimezone._std_offset
DSTOFFSET = LocalTimezone._dst_offset
DSTDIFF = LocalTimezone._dst_diff
# Use a makeshift timezone implementation if pytz is not available.
# This implementation only supports fixed offset time zones.
#
_timezones = [
    FixedOffset(0, 'UTC'),
    FixedOffset(-720, 'GMT -12:00'), FixedOffset(-660, 'GMT -11:00'),
    FixedOffset(-600, 'GMT -10:00'), FixedOffset(-540, 'GMT -9:00'),
    FixedOffset(-480, 'GMT -8:00'), FixedOffset(-420, 'GMT -7:00'),
    FixedOffset(-360, 'GMT -6:00'), FixedOffset(-300, 'GMT -5:00'),
    FixedOffset(-240, 'GMT -4:00'), FixedOffset(-180, 'GMT -3:00'),
    FixedOffset(-120, 'GMT -2:00'), FixedOffset(-60, 'GMT -1:00'),
    FixedOffset(0, 'GMT'), FixedOffset(60, 'GMT +1:00'),
    FixedOffset(120, 'GMT +2:00'), FixedOffset(180, 'GMT +3:00'),
    FixedOffset(240, 'GMT +4:00'), FixedOffset(300, 'GMT +5:00'),
    FixedOffset(360, 'GMT +6:00'), FixedOffset(420, 'GMT +7:00'),
    FixedOffset(480, 'GMT +8:00'), FixedOffset(540, 'GMT +9:00'),
    FixedOffset(600, 'GMT +10:00'), FixedOffset(660, 'GMT +11:00'),
    FixedOffset(720, 'GMT +12:00'), FixedOffset(780, 'GMT +13:00')]
# name -> FixedOffset lookup, and the public list of zone names
_tzmap = dict([(z.zone, z) for z in _timezones])
all_timezones = [z.zone for z in _timezones]
try:
    import pytz
    # map fixed offsets back to the makeshift GMT zones; used below to
    # normalize pytz's deprecated Etc/* aliases
    _tzoffsetmap = dict([(tz.utcoffset(None), tz) for tz in _timezones
                         if tz.zone != 'UTC'])
    def timezone(tzname):
        """Fetch timezone instance by name or raise `KeyError`"""
        tz = get_timezone(tzname)
        if not tz:
            raise KeyError(tzname)
        return tz
    def get_timezone(tzname):
        """Fetch timezone instance by name or return `None`"""
        try:
            # if given unicode parameter, pytz.timezone fails with:
            # "type() argument 1 must be string, not unicode"
            tz = pytz.timezone(to_unicode(tzname).encode('ascii', 'replace'))
        except (KeyError, IOError):
            # unknown to pytz: fall back to the makeshift fixed-offset map
            tz = _tzmap.get(tzname)
        if tz and tzname.startswith('Etc/'):
            tz = _tzoffsetmap.get(tz.utcoffset(None))
        return tz
    _pytz_zones = [tzname for tzname in pytz.common_timezones
                   if not tzname.startswith('Etc/') and
                      not tzname.startswith('GMT')]
    # insert just the GMT timezones into the pytz zones at the right location
    # the pytz zones already include UTC so skip it
    from bisect import bisect
    _gmt_index = bisect(_pytz_zones, 'GMT')
    all_timezones = _pytz_zones[:_gmt_index] + all_timezones[1:] + \
                    _pytz_zones[_gmt_index:]
except ImportError:
    pytz = None
    def timezone(tzname):
        """Fetch timezone instance by name or raise `KeyError`"""
        return _tzmap[tzname]
    def get_timezone(tzname):
        """Fetch timezone instance by name or return `None`"""
        return _tzmap.get(tzname)
1.0.2dev: search locale with territory in preferred languages for "first week day" before using `babel.core.LOCALE_ALIASES`
A part of #10757.
git-svn-id: 50cd48c816abc7a9f40e8e03a4277466cb30a3e4@11776 af82e41b-90c4-0310-8c96-b1721e28e2e2
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2006 Matthew Good <trac@matt-good.net>
# Copyright (C) 2005-2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Matthew Good <trac@matt-good.net>
import math
import re
import sys
import time
from datetime import tzinfo, timedelta, datetime, date
from locale import getlocale, LC_TIME
try:
import babel
except ImportError:
babel = None
def get_known_locales():
return []
else:
from babel import Locale
from babel.core import LOCALE_ALIASES, UnknownLocaleError
from babel.dates import (
format_datetime as babel_format_datetime,
format_date as babel_format_date,
format_time as babel_format_time,
get_datetime_format, get_date_format,
get_time_format, get_month_names,
get_period_names, get_day_names
)
try:
from babel.localedata import list as get_known_locales
except ImportError:
from babel.localedata import locale_identifiers as get_known_locales
from trac.core import TracError
from trac.util.text import to_unicode, getpreferredencoding
from trac.util.translation import _, ngettext
# Date/time utilities
# -- conversion
def to_datetime(t, tzinfo=None):
    """Convert ``t`` into a `datetime` object in the ``tzinfo`` timezone.
    If no ``tzinfo`` is given, the local timezone `localtz` will be used.
    ``t`` is converted using the following rules:
    - If ``t`` is already a `datetime` object,
      - if it is timezone-"naive", it is localized to ``tzinfo``
      - if it is already timezone-aware, ``t`` is mapped to the given
        timezone (`datetime.datetime.astimezone`)
    - If ``t`` is None, the current time will be used.
    - If ``t`` is a number, it is interpreted as a timestamp.
    Any other input will trigger a `TypeError`.
    All returned datetime instances are timezone aware and normalized.
    """
    tz = tzinfo or localtz
    # keep `dt` bound for unsupported types so the intended TypeError
    # below is raised (previously an unsupported type fell through to
    # `if dt:` with `dt` unbound, raising UnboundLocalError instead)
    dt = None
    if t is None:
        dt = datetime.now(tz)
    elif isinstance(t, datetime):
        if t.tzinfo:
            dt = t.astimezone(tz)
        else:
            dt = tz.localize(t)
    elif isinstance(t, date):
        dt = tz.localize(datetime(t.year, t.month, t.day))
    elif isinstance(t, (int, long, float)):
        if not (_min_ts <= t <= _max_ts):
            # Handle microsecond timestamps for 0.11 compatibility
            t *= 0.000001
        if t < 0 and isinstance(t, float):
            # Work around negative fractional times bug in Python 2.4
            # http://bugs.python.org/issue1646728
            frac, integer = math.modf(t)
            dt = datetime.fromtimestamp(integer - 1, tz) + \
                 timedelta(seconds=frac + 1)
        else:
            dt = datetime.fromtimestamp(t, tz)
    # a successfully converted datetime is always truthy
    if dt:
        return tz.normalize(dt)
    raise TypeError('expecting datetime, int, long, float, or None; got %s' %
                    type(t))
def to_timestamp(dt):
    """Return the corresponding POSIX timestamp (whole seconds since the
    epoch); a falsy `dt` such as `None` maps to 0."""
    if not dt:
        return 0
    delta = dt - _epoc
    return delta.days * 86400 + delta.seconds
def to_utimestamp(dt):
    """Return a microsecond POSIX timestamp for the given `datetime`.

    A falsy `dt` such as `None` maps to 0.
    """
    if not dt:
        return 0
    diff = dt - _epoc
    # 86400000000 microseconds per day (Python 2 long literal)
    return (diff.days * 86400000000L + diff.seconds * 1000000
            + diff.microseconds)
def from_utimestamp(ts):
    """Return the `datetime` for the given microsecond POSIX timestamp.

    A falsy `ts` such as `None` maps to the epoch itself.
    """
    micros = ts if ts else 0
    return _epoc + timedelta(microseconds=micros)
# -- formatting
_units = (
(3600*24*365, lambda r: ngettext('%(num)d year', '%(num)d years', r)),
(3600*24*30, lambda r: ngettext('%(num)d month', '%(num)d months', r)),
(3600*24*7, lambda r: ngettext('%(num)d week', '%(num)d weeks', r)),
(3600*24, lambda r: ngettext('%(num)d day', '%(num)d days', r)),
(3600, lambda r: ngettext('%(num)d hour', '%(num)d hours', r)),
(60, lambda r: ngettext('%(num)d minute', '%(num)d minutes', r)))
def pretty_timedelta(time1, time2=None, resolution=None):
    """Calculate time delta between two `datetime` objects.
    (the result is somewhat imprecise, only use for prettyprinting).
    If either `time1` or `time2` is None, the current time will be used
    instead.
    """
    time1 = to_datetime(time1)
    time2 = to_datetime(time2)
    if time1 > time2:
        time2, time1 = time1, time2
    diff = time2 - time1
    age_s = int(diff.days * 86400 + diff.seconds)
    # deltas shorter than `resolution` seconds are not worth reporting
    if resolution and age_s < resolution:
        return ''
    # below roughly two minutes, report seconds
    if age_s <= 60 * 1.9:
        return ngettext('%(num)i second', '%(num)i seconds', age_s)
    # otherwise pick the largest unit whose (rounded) count is at least 2
    for u, format_units in _units:
        r = float(age_s) / float(u)
        if r >= 1.9:
            r = int(round(r))
            return format_units(r)
    return ''
_BABEL_FORMATS = {
'datetime': {'short': '%x %H:%M', 'medium': '%x %X', 'long': '%x %X',
'full': '%x %X'},
'date': {'short': '%x', 'medium': '%x', 'long': '%x', 'full': '%x'},
'time': {'short': '%H:%M', 'medium': '%X', 'long': '%X', 'full': '%X'},
}
_STRFTIME_HINTS = {'%x %X': 'datetime', '%x': 'date', '%X': 'time'}
def _format_datetime_without_babel(t, format):
    """Format `t` with `strftime` and decode the result to `unicode`,
    using the locale's time encoding and falling back to the preferred
    or default encoding."""
    text = t.strftime(str(format))
    encoding = getlocale(LC_TIME)[1] or getpreferredencoding() \
               or sys.getdefaultencoding()
    return unicode(text, encoding, 'replace')
def _format_datetime_iso8601(t, format, hint):
    """Render `t` in ISO 8601 form.

    `format` is one of 'short', 'medium', 'long' or 'full' and controls
    the precision; `hint` ('datetime', 'date' or 'time') selects which
    part of the rendering to return.
    """
    if format != 'full':
        t = t.replace(microsecond=0)
    text = t.isoformat() # YYYY-MM-DDThh:mm:ss.SSSSSS±hh:mm
    if format == 'short':
        text = text[:16] # YYYY-MM-DDThh:mm
    elif format == 'medium':
        text = text[:19] # YYYY-MM-DDThh:mm:ss
    elif text.endswith('+00:00'):
        # RFC 3339 allows "Z" for the zero UTC offset
        text = text[:-6] + 'Z'
    if hint == 'date':
        text = text.split('T', 1)[0]
    elif hint == 'time':
        text = text.split('T', 1)[1]
    return unicode(text, 'ascii')
def _format_datetime(t, format, tzinfo, locale, hint):
    """Shared implementation behind `format_datetime`, `format_date` and
    `format_time`: dispatches between ISO 8601 rendering, Babel
    locale-aware rendering and plain `strftime`."""
    t = to_datetime(t, tzinfo or localtz)
    # an explicit ISO 8601 format wins over the locale
    if format == 'iso8601':
        return _format_datetime_iso8601(t, 'long', hint)
    if format in ('iso8601date', 'iso8601time'):
        return _format_datetime_iso8601(t, 'long', format[7:])
    if locale == 'iso8601':
        if format is None:
            format = 'long'
        elif format in _STRFTIME_HINTS:
            # map the default strftime-style formats to a width + hint
            hint = _STRFTIME_HINTS[format]
            format = 'long'
        if format in ('short', 'medium', 'long', 'full'):
            return _format_datetime_iso8601(t, format, hint)
        return _format_datetime_without_babel(t, format)
    if babel and locale:
        if format is None:
            format = 'medium'
        elif format in _STRFTIME_HINTS:
            hint = _STRFTIME_HINTS[format]
            format = 'medium'
        if format in ('short', 'medium', 'long', 'full'):
            if hint == 'datetime':
                return babel_format_datetime(t, format, None, locale)
            if hint == 'date':
                return babel_format_date(t, format, locale)
            if hint == 'time':
                return babel_format_time(t, format, None, locale)
        # a custom format string: translate to a strftime equivalent
        format = _BABEL_FORMATS[hint].get(format, format)
    return _format_datetime_without_babel(t, format)
# Public formatting entry points: thin wrappers around _format_datetime()
# that differ only in the part of the value they render.
def format_datetime(t=None, format='%x %X', tzinfo=None, locale=None):
    """Format the `datetime` object `t` into an `unicode` string
    If `t` is None, the current time will be used.
    The formatting will be done using the given `format`, which consist
    of conventional `strftime` keys. In addition the format can be 'iso8601'
    to specify the international date format (compliant with RFC 3339).
    `tzinfo` will default to the local timezone if left to `None`.
    """
    return _format_datetime(t, format, tzinfo, locale, 'datetime')
def format_date(t=None, format='%x', tzinfo=None, locale=None):
    """Convenience method for formatting the date part of a `datetime` object.
    See `format_datetime` for more details.
    """
    return _format_datetime(t, format, tzinfo, locale, 'date')
def format_time(t=None, format='%X', tzinfo=None, locale=None):
    """Convenience method for formatting the time part of a `datetime` object.
    See `format_datetime` for more details.
    """
    return _format_datetime(t, format, tzinfo, locale, 'time')
def get_date_format_hint(locale=None):
    """Present the default format used by `format_date` in a human readable
    form.
    This is a format that will be recognized by `parse_date` when reading a
    date.
    """
    if locale == 'iso8601':
        return 'YYYY-MM-DD'
    if babel and locale:
        format = get_date_format('medium', locale=locale)
        return format.pattern
    # derive the pattern by formatting a known sample date and replacing
    # its components with placeholders
    t = datetime(1999, 10, 29, tzinfo=utc)
    tmpl = format_date(t, tzinfo=utc)
    return tmpl.replace('1999', 'YYYY', 1).replace('99', 'YY', 1) \
               .replace('10', 'MM', 1).replace('29', 'DD', 1)
def get_datetime_format_hint(locale=None):
    """Present the default format used by `format_datetime` in a human readable
    form.
    This is a format that will be recognized by `parse_date` when reading a
    date.
    """
    if locale == 'iso8601':
        return u'YYYY-MM-DDThh:mm:ss±hh:mm'
    if babel and locale:
        date_pattern = get_date_format('medium', locale=locale).pattern
        time_pattern = get_time_format('medium', locale=locale).pattern
        format = get_datetime_format('medium', locale=locale)
        return format.replace('{0}', time_pattern) \
                     .replace('{1}', date_pattern)
    # derive the pattern by formatting a known sample datetime and
    # replacing its components with placeholders ('11' covers 12-hour
    # locales where 23:00 renders as 11 PM)
    t = datetime(1999, 10, 29, 23, 59, 58, tzinfo=utc)
    tmpl = format_datetime(t, tzinfo=utc)
    ampm = format_time(t, '%p', tzinfo=utc)
    if ampm:
        tmpl = tmpl.replace(ampm, 'a', 1)
    return tmpl.replace('1999', 'YYYY', 1).replace('99', 'YY', 1) \
               .replace('10', 'MM', 1).replace('29', 'DD', 1) \
               .replace('23', 'hh', 1).replace('11', 'hh', 1) \
               .replace('59', 'mm', 1).replace('58', 'ss', 1)
def get_month_names_jquery_ui(req):
    """Get the month names for the jQuery UI datepicker library"""
    locale = req.lc_time
    if locale == 'iso8601':
        locale = req.locale
    if babel and locale:
        month_names = {}
        for width in ('wide', 'abbreviated'):
            # Babel month names are keyed 1..12
            names = get_month_names(width, locale=locale)
            month_names[width] = [names[i + 1] for i in xrange(12)]
        return month_names
    # English fallback when Babel is unavailable
    return {
        'wide': (
            'January', 'February', 'March', 'April', 'May', 'June', 'July',
            'August', 'September', 'October', 'November', 'December'),
        'abbreviated': (
            'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
            'Oct', 'Nov', 'Dec'),
    }
def get_day_names_jquery_ui(req):
    """Get the day names for the jQuery UI datepicker library"""
    locale = req.lc_time
    if locale == 'iso8601':
        locale = req.locale
    if babel and locale:
        day_names = {}
        for width in ('wide', 'abbreviated', 'narrow'):
            # shift Babel's Monday-first indexing to jQuery UI's
            # Sunday-first indexing
            names = get_day_names(width, locale=locale)
            day_names[width] = [names[(i + 6) % 7] for i in xrange(7)]
        return day_names
    # English fallback when Babel is unavailable
    return {
        'wide': ('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday',
                 'Friday', 'Saturday'),
        'abbreviated': ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'),
        'narrow': ('Su', 'Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa'),
    }
def get_date_format_jquery_ui(locale):
    """Get the date format for the jQuery UI datepicker library."""
    if locale == 'iso8601':
        return 'yy-mm-dd'
    if babel and locale:
        # translate Babel pattern tokens to jQuery UI datepicker tokens
        values = {'yyyy': 'yy', 'y': 'yy', 'M': 'm', 'MM': 'mm', 'MMM': 'M',
                  'd': 'd', 'dd': 'dd'}
        return get_date_format('medium', locale=locale).format % values
    # derive the format from a formatted sample date
    t = datetime(1999, 10, 29, tzinfo=utc)
    tmpl = format_date(t, tzinfo=utc)
    return tmpl.replace('1999', 'yy', 1).replace('99', 'y', 1) \
               .replace('10', 'mm', 1).replace('29', 'dd', 1)
def get_time_format_jquery_ui(locale):
    """Get the time format for the jQuery UI timepicker addon."""
    if locale == 'iso8601':
        return 'hh:mm:ssz' # XXX timepicker doesn't support 'ISO_8601'
    if babel and locale:
        # translate Babel pattern tokens to timepicker tokens
        values = {'h': 'h', 'hh': 'hh', 'H': 'h', 'HH': 'hh',
                  'm': 'm', 'mm': 'mm', 's': 's', 'ss': 'ss',
                  'a': 'TT'}
        return get_time_format('medium', locale=locale).format % values
    # derive the format from a formatted sample time ('11' covers
    # 12-hour locales where 23:59:58 renders as 11:59:58 PM)
    t = datetime(1999, 10, 29, 23, 59, 58, tzinfo=utc)
    tmpl = format_time(t, tzinfo=utc)
    ampm = format_time(t, '%p', tzinfo=utc)
    if ampm:
        tmpl = tmpl.replace(ampm, 'TT', 1)
    return tmpl.replace('23', 'hh', 1).replace('11', 'hh', 1) \
               .replace('59', 'mm', 1).replace('58', 'ss', 1)
def get_timezone_list_jquery_ui(t=None):
    """Get timezone list for jQuery timepicker addon"""
    t = datetime.now(utc) if t is None else utc.localize(t)
    # collect the distinct "+HHMM" offsets in effect at time `t`
    zones = set(t.astimezone(get_timezone(tz)).strftime('%z')
                for tz in all_timezones)
    # "+0000" becomes the special Z/UTC entry; other offsets get a colon
    return [{'value': 'Z', 'label': '+00:00'} \
            if zone == '+0000' else zone[:-2] + ':' + zone[-2:]
            for zone in sorted(zones, key=lambda tz: int(tz))]
def get_first_week_day_jquery_ui(req):
    """Get first week day for jQuery date picker"""
    locale = req.lc_time
    if locale == 'iso8601':
        return 1 # Monday
    if babel and locale:
        if not locale.territory:
            # search first locale which has the same `language` and territory
            # in preferred languages
            for l in req.languages:
                l = l.replace('-', '_').lower()
                if l.startswith(locale.language.lower() + '_'):
                    try:
                        l = Locale.parse(l)
                        if l.territory:
                            locale = l
                            break
                    except UnknownLocaleError:
                        pass
        if not locale.territory and locale.language in LOCALE_ALIASES:
            locale = Locale.parse(LOCALE_ALIASES[locale.language])
        # shift from Babel's Monday-based index to jQuery UI's
        # Sunday-based index
        return (locale.first_week_day + 1) % 7
    return 0 # Sunday
def is_24_hours(locale):
    """Returns `True` for 24 hour time formats."""
    if locale == 'iso8601':
        return True
    # Render a 23:00 sample: a 12-hour locale would show "11" instead.
    sample = datetime(1999, 10, 29, 23, tzinfo=utc)
    return '23' in format_datetime(sample, tzinfo=utc, locale=locale)
def http_date(t=None):
    """Format `datetime` object `t` as a rfc822 timestamp"""
    dt = to_datetime(t, utc)
    weekday = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[dt.weekday()]
    month = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
             'Sep', 'Oct', 'Nov', 'Dec')[dt.month - 1]
    return '%s, %02d %s %04d %02d:%02d:%02d GMT' % (
        weekday, dt.day, month, dt.year, dt.hour, dt.minute, dt.second)
# -- parsing
_ISO_8601_RE = re.compile(r'''
(\d\d\d\d)(?:-?(\d\d)(?:-?(\d\d))?)? # date
(?:
[T ]
(\d\d)(?::?(\d\d)(?::?(\d\d) # time
(?:[,.](\d{1,6}))?)?)? # microseconds
)?
(Z?(?:([-+])?(\d\d):?(\d\d)?)?)?$ # timezone
''', re.VERBOSE)
def _parse_date_iso8601(text, tzinfo):
    """Parse an ISO 8601 date/time string into a normalized `datetime`,
    or return `None` when `text` does not match `_ISO_8601_RE` or holds
    out-of-range components."""
    match = _ISO_8601_RE.match(text)
    if match:
        try:
            g = match.groups()
            years = g[0]
            # missing date/time components default to the earliest value
            months = g[1] or '01'
            days = g[2] or '01'
            hours, minutes, seconds, useconds = [x or '00' for x in g[3:7]]
            useconds = (useconds + '000000')[:6]
            z, tzsign, tzhours, tzminutes = g[7:11]
            if z:
                # offset in minutes east of UTC (py2: `/` is integer
                # division here)
                tz = timedelta(hours=int(tzhours or '0'),
                               minutes=int(tzminutes or '0')).seconds / 60
                if tz == 0:
                    tzinfo = utc
                else:
                    tzinfo = FixedOffset(-tz if tzsign == '-' else tz,
                                         '%s%s:%s' %
                                         (tzsign, tzhours, tzminutes))
            tm = [int(x) for x in (years, months, days,
                                   hours, minutes, seconds, useconds)]
            t = tzinfo.localize(datetime(*tm))
            return tzinfo.normalize(t)
        except ValueError:
            pass
    return None
def parse_date(text, tzinfo=None, locale=None, hint='date'):
    """Parse a date/time string into a normalized, timezone-aware
    `datetime` in `tzinfo` (defaulting to `localtz`).
    ISO 8601, locale-specific and relative ("3 hours ago") forms are
    tried in turn.  `hint` selects the format hint shown in the error
    message.  :raises TracError: if `text` cannot be parsed, or if the
    result is outside the platform timestamp range.
    """
    tzinfo = tzinfo or localtz
    text = text.strip()
    dt = _parse_date_iso8601(text, tzinfo)
    if dt is None and locale != 'iso8601':
        if babel and locale:
            dt = _i18n_parse_date(text, tzinfo, locale)
        else:
            for format in ['%x %X', '%x, %X', '%X %x', '%X, %x', '%x', '%c',
                           '%b %d, %Y']:
                try:
                    tm = time.strptime(text, format)
                    dt = tzinfo.localize(datetime(*tm[0:6]))
                    dt = tzinfo.normalize(dt)
                    break
                except ValueError:
                    continue
    if dt is None:
        dt = _parse_relative_time(text, tzinfo)
    if dt is None:
        # fall back to echoing the raw hint (py2-only parenthesized
        # lambda parameter)
        hint = {'datetime': get_datetime_format_hint,
                'date': get_date_format_hint
                }.get(hint, lambda(l): hint)(locale)
        raise TracError(_('"%(date)s" is an invalid date, or the date format '
                          'is not known. Try "%(hint)s" instead.',
                          date=text, hint=hint), _('Invalid Date'))
    # Make sure we can convert it to a timestamp and back - fromtimestamp()
    # may raise ValueError if larger than platform C localtime() or gmtime()
    try:
        datetime.utcfromtimestamp(to_timestamp(dt))
    except ValueError:
        raise TracError(_('The date "%(date)s" is outside valid range. '
                          'Try a date closer to present time.', date=text),
                        _('Invalid Date'))
    return dt
def _i18n_parse_date_pattern(locale):
    """Build parsing metadata for `locale`: the component order of its
    medium datetime/date patterns, a tokenizing regexp, and lookup
    tables for month and am/pm names."""
    format_keys = {
        'y': ('y', 'Y'),
        'M': ('M',),
        'd': ('d',),
        'h': ('h', 'H'),
        'm': ('m',),
        's': ('s',),
    }
    regexp = [r'[0-9]+']
    date_format = get_date_format('medium', locale=locale)
    time_format = get_time_format('medium', locale=locale)
    datetime_format = get_datetime_format('medium', locale=locale)
    formats = (
        datetime_format.replace('{0}', time_format.format) \
                       .replace('{1}', date_format.format),
        date_format.format)
    orders = []
    for format in formats:
        # determine the relative position of each y/M/d/h/m/s component
        # within this pattern
        order = []
        for key, chars in format_keys.iteritems():
            for char in chars:
                idx = format.find('%(' + char)
                if idx != -1:
                    order.append((idx, key))
                    break
        order.sort()
        order = dict((key, idx) for idx, (_, key) in enumerate(order))
        orders.append(order)
    month_names = {
        'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
        'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
    }
    if formats[0].find('%(MMM)s') != -1:
        # the pattern uses textual month names: accept the locale's names
        for width in ('wide', 'abbreviated'):
            names = get_month_names(width, locale=locale)
            for num, name in names.iteritems():
                name = name.lower()
                month_names[name] = num
    regexp.extend(month_names.iterkeys())
    period_names = {'am': 'am', 'pm': 'pm'}
    if formats[0].find('%(a)s') != -1:
        names = get_period_names(locale=locale)
        for period, name in names.iteritems():
            name = name.lower()
            period_names[name] = period
    regexp.extend(period_names.iterkeys())
    return {
        'orders': orders,
        'regexp': re.compile('(%s)' % '|'.join(regexp),
                             re.IGNORECASE | re.UNICODE),
        'month_names': month_names,
        'period_names': period_names,
    }
_I18N_PARSE_DATE_PATTERNS = dict((l, False) for l in get_known_locales())
def _i18n_parse_date(text, tzinfo, locale):
    """Parse `text` according to the patterns of `locale`; return a
    normalized `datetime` in `tzinfo` or `None`."""
    locale = Locale.parse(locale)
    key = str(locale)
    # parsing metadata is built lazily and cached per known locale
    pattern = _I18N_PARSE_DATE_PATTERNS.get(key)
    if pattern is False:
        pattern = _i18n_parse_date_pattern(locale)
        _I18N_PARSE_DATE_PATTERNS[key] = pattern
    if pattern is None:
        return None
    regexp = pattern['regexp']
    period_names = pattern['period_names']
    month_names = pattern['month_names']
    text = text.lower()
    # try the datetime component order first, then the date-only order
    for order in pattern['orders']:
        try:
            return _i18n_parse_date_0(text, order, regexp, period_names,
                                      month_names, tzinfo)
        except ValueError:
            continue
    return None
def _i18n_parse_date_0(text, order, regexp, period_names, month_names, tzinfo):
    """Try to parse *text* with one candidate field *order*.

    :param text: lower-cased input string
    :param order: mapping of field key ('y', 'M', 'd', 'h', 'm', 's') to its
                  expected position among the regexp matches
    :param regexp: compiled pattern matching numbers, month and am/pm names
    :param period_names: lower-cased period names mapped to 'am'/'pm'
    :param month_names: lower-cased month names mapped to month numbers
    :param tzinfo: timezone used to localize the resulting datetime
    :raises ValueError: when the matches cannot be interpreted with this
                        field order (the caller then tries the next order)
    """
    matches = regexp.findall(text)
    if not matches:
        return None
    # remove am/pm markers on ahead
    period = None
    for idx, match in enumerate(matches):
        period = period_names.get(match)
        if period is not None:
            del matches[idx]
            break
    # for date+time, use 0 seconds if seconds are missing
    if 's' in order and len(matches) == 5:
        matches.insert(order['s'], 0)
    values = {}
    for key, idx in order.iteritems():
        if idx < len(matches):
            value = matches[idx]
            if key == 'y':
                # expand two-digit years, e.g. '09' -> '2009'
                if len(value) == 2 and value.isdigit():
                    value = '20' + value
            values[key] = value
    if 'y' not in values or 'M' not in values or 'd' not in values:
        raise ValueError
    # A month *name* may have landed in any of the y/M/d slots; move it to
    # 'M' and swap the displaced number into the slot the name occupied.
    for key in ('y', 'M', 'd'):
        value = values[key]
        value = month_names.get(value)
        if value is not None:
            if key == 'M':
                values[key] = value
            else:
                values[key], values['M'] = values['M'], value
            break
    # int() raises ValueError for non-numeric leftovers, rejecting the order
    values = dict((key, int(value)) for key, value in values.iteritems())
    values.setdefault('h', 0)
    values.setdefault('m', 0)
    values.setdefault('s', 0)
    # apply the am/pm marker: 12am -> 0h, 12pm -> 12h
    if period and values['h'] <= 12:
        if period == 'am':
            values['h'] %= 12
        elif period == 'pm':
            values['h'] = values['h'] % 12 + 12
    t = tzinfo.localize(datetime(*(values[k] for k in 'yMdhms')))
    return tzinfo.normalize(t)
# Matches relative phrases such as "3 days ago", "1.5h" or "90 minutes".
_REL_TIME_RE = re.compile(
    r'(\d+\.?\d*)\s*'
    r'(second|minute|hour|day|week|month|year|[hdwmy])s?\s*'
    r'(?:ago)?$')
# Interval name (or single-letter shorthand) -> timedelta factory.
# Months and years are approximated as 30 and 365 days respectively.
_time_intervals = dict(
    second=lambda v: timedelta(seconds=v),
    minute=lambda v: timedelta(minutes=v),
    hour=lambda v: timedelta(hours=v),
    day=lambda v: timedelta(days=v),
    week=lambda v: timedelta(weeks=v),
    month=lambda v: timedelta(days=30 * v),
    year=lambda v: timedelta(days=365 * v),
    h=lambda v: timedelta(hours=v),
    d=lambda v: timedelta(days=v),
    w=lambda v: timedelta(weeks=v),
    m=lambda v: timedelta(days=30 * v),
    y=lambda v: timedelta(days=365 * v),
)
# Matches "this week", "last month" and similar phrases.
_TIME_START_RE = re.compile(r'(this|last)\s*'
                            r'(second|minute|hour|day|week|month|year)$')
# Unit -> naive datetime at the start of the current unit
# (weeks start on Monday, per datetime.weekday()).
_time_starts = dict(
    second=lambda now: datetime(now.year, now.month, now.day, now.hour,
                                now.minute, now.second),
    minute=lambda now: datetime(now.year, now.month, now.day, now.hour,
                                now.minute),
    hour=lambda now: datetime(now.year, now.month, now.day, now.hour),
    day=lambda now: datetime(now.year, now.month, now.day),
    week=lambda now: datetime(now.year, now.month, now.day) \
                     - timedelta(days=now.weekday()),
    month=lambda now: datetime(now.year, now.month, 1),
    year=lambda now: datetime(now.year, 1, 1),
)
def _parse_relative_time(text, tzinfo, now=None):
    """Parse a relative time phrase ("now", "3 days ago", "last month", ...)
    into an aware datetime in *tzinfo*, or return ``None`` when *text*
    matches none of the supported forms.
    """
    if now is None:     # now argument for unit tests
        now = datetime.now(tzinfo)
    if text == 'now':
        return now
    dt = None
    if text == 'today':
        dt = _time_starts['day'](now)
    elif text == 'yesterday':
        dt = _time_starts['day'](now) - timedelta(days=1)
    if dt is None:
        # "<number> <unit> [ago]" form
        match = _REL_TIME_RE.match(text)
        if match:
            (value, interval) = match.groups()
            dt = now - _time_intervals[interval](float(value))
    if dt is None:
        # "this <unit>" / "last <unit>" form
        match = _TIME_START_RE.match(text)
        if match:
            (which, start) = match.groups()
            dt = _time_starts[start](now)
            if which == 'last':
                # months and years vary in length, so step back through the
                # calendar fields instead of subtracting a fixed timedelta
                if start == 'month':
                    if dt.month > 1:
                        dt = dt.replace(month=dt.month - 1)
                    else:
                        dt = dt.replace(year=dt.year - 1, month=12)
                elif start == 'year':
                    dt = dt.replace(year=dt.year - 1)
                else:
                    dt -= _time_intervals[start](1)
    if dt is None:
        return None
    if not dt.tzinfo:
        # the _time_starts factories produce naive datetimes
        dt = tzinfo.localize(dt)
    return tzinfo.normalize(dt)
# -- formatting/parsing helper functions
def user_time(req, func, *args, **kwargs):
    """Call *func* with ``tzinfo`` and ``locale`` taken from *req*.

    Fills in the ``tzinfo`` and ``locale`` keyword arguments from the
    request's ``tz`` and ``lc_time`` attributes unless the caller supplied
    them explicitly, then delegates to *func*.  Intended for use with the
    ``format_*`` and ``parse_date`` functions in `trac.util.datefmt`.

    :param req: an instance of `Request` (or any object carrying the
                ``tz`` / ``lc_time`` attributes)
    :param func: a callable accepting ``tzinfo`` and ``locale`` keywords
    :param args: positional arguments forwarded to *func*
    :param kwargs: keyword arguments forwarded to *func*
    """
    kwargs.setdefault('tzinfo', getattr(req, 'tz', None))
    kwargs.setdefault('locale', getattr(req, 'lc_time', None))
    return func(*args, **kwargs)
# -- timezone utilities
class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC."""

    def __init__(self, offset, name):
        # The offset is stored once as a timedelta; ``zone`` doubles as the
        # display name returned by __str__ and tzname().
        self._offset = timedelta(minutes=offset)
        self.zone = name

    def __str__(self):
        return self.zone

    def __repr__(self):
        return '<FixedOffset "%s" %s>' % (self.zone, self._offset)

    def utcoffset(self, dt):
        # Constant offset, independent of the datetime passed in.
        return self._offset

    def tzname(self, dt):
        return self.zone

    def dst(self, dt):
        # Fixed-offset zones never observe daylight saving time.
        return _zero

    def localize(self, dt, is_dst=False):
        """Attach this zone to a naive datetime (pytz-compatible API)."""
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        """No adjustment is needed for a fixed offset; reject naive input."""
        if dt.tzinfo is None:
            raise ValueError('Naive time (no tzinfo set)')
        return dt
class LocalTimezone(tzinfo):
    """A 'local' time zone implementation"""
    # Offsets captured from the platform's `time` module by _initialize().
    _std_offset = None
    _dst_offset = None
    _dst_diff = None
    # Shared pinned-mode instances: _std_tz is fixed to standard time,
    # _dst_tz to daylight saving time.
    _std_tz = None
    _dst_tz = None
    @classmethod
    def _initialize(cls):
        # Capture the platform timezone settings once (called at module load).
        cls._std_tz = cls(False)
        cls._std_offset = timedelta(seconds=-time.timezone)
        if time.daylight:
            cls._dst_tz = cls(True)
            cls._dst_offset = timedelta(seconds=-time.altzone)
        else:
            # zone without DST: both modes share one instance and offset
            cls._dst_tz = cls._std_tz
            cls._dst_offset = cls._std_offset
        cls._dst_diff = cls._dst_offset - cls._std_offset
    def __init__(self, is_dst=None):
        # is_dst: None = decide per datetime, True/False = pinned mode
        self.is_dst = is_dst
    def __str__(self):
        offset = self.utcoffset(datetime.now())
        secs = offset.days * 3600 * 24 + offset.seconds
        hours, rem = divmod(abs(secs), 3600)
        return 'UTC%c%02d:%02d' % ('-' if secs < 0 else '+', hours, rem / 60)
    def __repr__(self):
        if self.is_dst is None:
            return '<LocalTimezone "%s" %s "%s" %s>' % \
                   (time.tzname[False], self._std_offset,
                    time.tzname[True], self._dst_offset)
        if self.is_dst:
            offset = self._dst_offset
        else:
            offset = self._std_offset
        return '<LocalTimezone "%s" %s>' % (time.tzname[self.is_dst], offset)
    def _is_dst(self, dt, is_dst=False):
        """Return whether naive local wall-clock time *dt* falls in DST.

        ``is_dst`` disambiguates wall-clock times around DST transitions;
        passing ``None`` makes ambiguous/non-existent times raise instead.
        """
        if self.is_dst is not None:
            return self.is_dst
        tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
              dt.weekday(), 0)
        try:
            # Round-trip the wall-clock time through mktime once assuming
            # standard time and once assuming DST.
            std_tt = time.localtime(time.mktime(tt + (0,)))
            dst_tt = time.localtime(time.mktime(tt + (1,)))
        except (ValueError, OverflowError):
            # out of the platform's mktime range; assume standard time
            return False
        std_correct = std_tt.tm_isdst == 0
        dst_correct = dst_tt.tm_isdst == 1
        if std_correct is dst_correct:
            # Both interpretations round-trip (ambiguous, fall-back) or
            # neither does (non-existent, spring-forward).
            if is_dst is None:
                if std_correct is True:
                    raise ValueError('Ambiguous time "%s"' % dt)
                if std_correct is False:
                    raise ValueError('Non existent time "%s"' % dt)
            return is_dst
        if std_correct:
            return False
        if dst_correct:
            return True
    def utcoffset(self, dt):
        if self._is_dst(dt):
            return self._dst_offset
        else:
            return self._std_offset
    def dst(self, dt):
        if self._is_dst(dt):
            return self._dst_diff
        else:
            return _zero
    def tzname(self, dt):
        return time.tzname[self._is_dst(dt)]
    def localize(self, dt, is_dst=False):
        """Attach this zone to the naive local datetime *dt*."""
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        # attach a pinned instance so later utcoffset()/dst() calls stay
        # consistent for this datetime
        if self._is_dst(dt, is_dst):
            tz = self._dst_tz
        else:
            tz = self._std_tz
        return dt.replace(tzinfo=tz)
    def normalize(self, dt, is_dst=False):
        """Re-resolve *dt* after arithmetic may have crossed a DST edge."""
        if dt.tzinfo is None:
            raise ValueError('Naive time (no tzinfo set)')
        if dt.tzinfo is localtz: # if not localized, returns without changes
            return dt
        # convert to UTC field values, then map back to local wall clock
        return self.fromutc(dt.replace(tzinfo=self) - dt.utcoffset())
    def fromutc(self, dt):
        """Convert *dt*, whose fields are in UTC, to local wall-clock time."""
        if dt.tzinfo is None or dt.tzinfo is not self:
            raise ValueError('fromutc: dt.tzinfo is not self')
        try:
            tt = time.localtime(to_timestamp(dt.replace(tzinfo=utc)))
        except ValueError:
            # outside the timestamp range; fall back to the standard offset
            return dt.replace(tzinfo=self._std_tz) + self._std_offset
        if tt.tm_isdst > 0:
            tz = self._dst_tz
        else:
            tz = self._std_tz
        return datetime(*(tt[:6] + (dt.microsecond, tz)))
# Canonical UTC instance plus handy aware boundary values.
utc = FixedOffset(0, 'UTC')
utcmin = datetime.min.replace(tzinfo=utc)
utcmax = datetime.max.replace(tzinfo=utc)
# Unix epoch as an aware datetime.
_epoc = datetime(1970, 1, 1, tzinfo=utc)
_zero = timedelta(0)
# 32-bit signed timestamp range.
_min_ts = -(1 << 31)
_max_ts = (1 << 31) - 1
# Capture the platform timezone settings and expose the shared instance.
LocalTimezone._initialize()
localtz = LocalTimezone()
STDOFFSET = LocalTimezone._std_offset
DSTOFFSET = LocalTimezone._dst_offset
DSTDIFF = LocalTimezone._dst_diff
# Use a makeshift timezone implementation if pytz is not available.
# This implementation only supports fixed offset time zones.
#
_timezones = [
    FixedOffset(0, 'UTC'),
    FixedOffset(-720, 'GMT -12:00'), FixedOffset(-660, 'GMT -11:00'),
    FixedOffset(-600, 'GMT -10:00'), FixedOffset(-540, 'GMT -9:00'),
    FixedOffset(-480, 'GMT -8:00'), FixedOffset(-420, 'GMT -7:00'),
    FixedOffset(-360, 'GMT -6:00'), FixedOffset(-300, 'GMT -5:00'),
    FixedOffset(-240, 'GMT -4:00'), FixedOffset(-180, 'GMT -3:00'),
    FixedOffset(-120, 'GMT -2:00'), FixedOffset(-60, 'GMT -1:00'),
    FixedOffset(0, 'GMT'), FixedOffset(60, 'GMT +1:00'),
    FixedOffset(120, 'GMT +2:00'), FixedOffset(180, 'GMT +3:00'),
    FixedOffset(240, 'GMT +4:00'), FixedOffset(300, 'GMT +5:00'),
    FixedOffset(360, 'GMT +6:00'), FixedOffset(420, 'GMT +7:00'),
    FixedOffset(480, 'GMT +8:00'), FixedOffset(540, 'GMT +9:00'),
    FixedOffset(600, 'GMT +10:00'), FixedOffset(660, 'GMT +11:00'),
    FixedOffset(720, 'GMT +12:00'), FixedOffset(780, 'GMT +13:00')]
# Name -> FixedOffset lookup, and the list of selectable zone names
# (extended below when pytz is available).
_tzmap = dict([(z.zone, z) for z in _timezones])
all_timezones = [z.zone for z in _timezones]
try:
    import pytz
    # Offset -> fixed GMT pseudo-zone; used below to replace pytz 'Etc/*'
    # zones with the equivalent entry from our own table.
    _tzoffsetmap = dict([(tz.utcoffset(None), tz) for tz in _timezones
                         if tz.zone != 'UTC'])
    def timezone(tzname):
        """Fetch timezone instance by name or raise `KeyError`"""
        tz = get_timezone(tzname)
        if not tz:
            raise KeyError(tzname)
        return tz
    def get_timezone(tzname):
        """Fetch timezone instance by name or return `None`"""
        try:
            # if given unicode parameter, pytz.timezone fails with:
            # "type() argument 1 must be string, not unicode"
            tz = pytz.timezone(to_unicode(tzname).encode('ascii', 'replace'))
        except (KeyError, IOError):
            # unknown to pytz; fall back to the fixed-offset table
            tz = _tzmap.get(tzname)
        if tz and tzname.startswith('Etc/'):
            # substitute the fixed-offset zone with the matching offset
            tz = _tzoffsetmap.get(tz.utcoffset(None))
        return tz
    _pytz_zones = [tzname for tzname in pytz.common_timezones
                   if not tzname.startswith('Etc/') and
                      not tzname.startswith('GMT')]
    # insert just the GMT timezones into the pytz zones at the right location
    # the pytz zones already include UTC so skip it
    from bisect import bisect
    _gmt_index = bisect(_pytz_zones, 'GMT')
    all_timezones = _pytz_zones[:_gmt_index] + all_timezones[1:] + \
                    _pytz_zones[_gmt_index:]
except ImportError:
    # pytz missing: expose the same two lookup functions backed solely by
    # the fixed-offset table above.
    pytz = None
    def timezone(tzname):
        """Fetch timezone instance by name or raise `KeyError`"""
        return _tzmap[tzname]
    def get_timezone(tzname):
        """Fetch timezone instance by name or return `None`"""
        return _tzmap.get(tzname)
|
import os
from collections import OrderedDict
import torch.nn as nn
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
import torch
import torch.nn.functional as F
from test_tube import HyperOptArgumentParser
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from pytorch_lightning.root_module.root_module import LightningModule
class LightningTemplateModel(LightningModule):
    """
    Sample model to show how to define a template.
    A one-hidden-layer MLP classifier for MNIST, demonstrating the
    LightningModule hooks: forward, training/validation steps, optimizer
    configuration and dataloader construction.
    """
    def __init__(self, hparams):
        """
        Pass in parsed HyperOptArgumentParser to the model
        :param hparams: namespace carrying the fields registered in
            ``add_model_specific_args`` (in_features, hidden_dim, ...)
        """
        # init superclass
        super(LightningTemplateModel, self).__init__(hparams)
        self.batch_size = hparams.batch_size
        # build model
        self.__build_model()
    # ---------------------
    # MODEL SETUP
    # ---------------------
    def __build_model(self):
        """
        Layout model: Linear -> tanh -> BatchNorm1d -> Dropout -> Linear.
        :return:
        """
        self.c_d1 = nn.Linear(in_features=self.hparams.in_features,
                              out_features=self.hparams.hidden_dim)
        self.c_d1_bn = nn.BatchNorm1d(self.hparams.hidden_dim)
        self.c_d1_drop = nn.Dropout(self.hparams.drop_prob)
        self.c_d2 = nn.Linear(in_features=self.hparams.hidden_dim,
                              out_features=self.hparams.out_features)
    # ---------------------
    # TRAINING
    # ---------------------
    def forward(self, x):
        """
        No special modification required for lightning, define as you normally would
        :param x: flattened image batch, shape (batch, in_features)
        :return: per-class log-probabilities, shape (batch, out_features)
        """
        x = self.c_d1(x)
        x = torch.tanh(x)
        x = self.c_d1_bn(x)
        x = self.c_d1_drop(x)
        x = self.c_d2(x)
        logits = F.log_softmax(x, dim=1)
        return logits
    def loss(self, labels, logits):
        """Negative log-likelihood of log-probability *logits* vs *labels*."""
        nll = F.nll_loss(logits, labels)
        return nll
    def training_step(self, data_batch, batch_i):
        """
        Lightning calls this inside the training loop
        :param data_batch: (images, labels) tuple from the train dataloader
        :param batch_i: index of the batch
        :return: dict carrying the 'loss' tensor to optimize
        """
        # forward pass
        x, y = data_batch
        x = x.view(x.size(0), -1)
        y_hat = self.forward(x)
        # calculate loss
        loss_val = self.loss(y, y_hat)
        output = OrderedDict({
            'loss': loss_val
        })
        # BUGFIX: the OrderedDict was built but the bare tensor was
        # returned, leaving the dict as dead code; return the dict.
        return output
    def validation_step(self, data_batch, batch_i):
        """
        Lightning calls this inside the validation loop
        :param data_batch: (images, labels) tuple from the val dataloader
        :param batch_i: index of the batch
        :return: dict with 'val_loss' and 'val_acc' for validation_end
        """
        x, y = data_batch
        x = x.view(x.size(0), -1)
        y_hat = self.forward(x)
        loss_val = self.loss(y, y_hat)
        # acc: fraction of correctly predicted labels in this batch
        labels_hat = torch.argmax(y_hat, dim=1)
        val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
        output = OrderedDict({
            'val_loss': loss_val,
            'val_acc': torch.tensor(val_acc),
        })
        # BUGFIX: previously returned the bare loss tensor, but
        # validation_end indexes each output with ['val_loss']/['val_acc'],
        # which would raise at the end of the validation loop.
        return output
    def validation_end(self, outputs):
        """
        Called at the end of validation to aggregate outputs
        :param outputs: list of individual outputs of each validation step
        :return: dict of averaged metrics shown in the progress bar
        """
        # NOTE(review): assumes outputs is non-empty — confirm the framework
        # never calls this with an empty list.
        val_loss_mean = 0
        val_acc_mean = 0
        for output in outputs:
            val_loss_mean += output['val_loss']
            val_acc_mean += output['val_acc']
        val_loss_mean /= len(outputs)
        val_acc_mean /= len(outputs)
        tqdm_dic = {'val_loss': val_loss_mean.item(), 'val_acc': val_acc_mean.item()}
        return tqdm_dic
    def update_tng_log_metrics(self, logs):
        # no extra training-log metrics; pass through unchanged
        return logs
    # ---------------------
    # MODEL SAVING
    # ---------------------
    def get_save_dict(self):
        # persist only the network weights
        checkpoint = {'state_dict': self.state_dict()}
        return checkpoint
    def load_model_specific(self, checkpoint):
        # restore the weights saved by get_save_dict()
        self.load_state_dict(checkpoint['state_dict'])
    # ---------------------
    # TRAINING SETUP
    # ---------------------
    def configure_optimizers(self):
        """
        return whatever optimizers we want here
        :return: list of optimizers
        """
        optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        return [optimizer]
    def __dataloader(self, train):
        # init data generators
        transform = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize((0.5,), (1.0,))])
        dataset = MNIST(root=self.hparams.data_root, train=train,
                        transform=transform, download=True)
        # when using multi-node we need to add the datasampler
        train_sampler = None
        batch_size = self.hparams.batch_size
        try:
            if self.on_gpu:
                train_sampler = DistributedSampler(dataset, rank=self.trainer.proc_rank)
                batch_size = batch_size // self.trainer.world_size  # scale batch size
        except Exception:
            # best effort: trainer attributes are absent outside DDP runs
            pass
        should_shuffle = train_sampler is None
        loader = DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=should_shuffle,
            sampler=train_sampler
        )
        return loader
    @property
    def tng_dataloader(self):
        # lazily build and cache the training loader
        if self._tng_dataloader is None:
            try:
                self._tng_dataloader = self.__dataloader(train=True)
            except Exception as e:
                print(e)
                raise  # bare raise keeps the original traceback
        return self._tng_dataloader
    @property
    def val_dataloader(self):
        # lazily build and cache the validation loader
        if self._val_dataloader is None:
            try:
                self._val_dataloader = self.__dataloader(train=False)
            except Exception as e:
                print(e)
                raise
        return self._val_dataloader
    @property
    def test_dataloader(self):
        # lazily build and cache the test loader (MNIST test split)
        if self._test_dataloader is None:
            try:
                self._test_dataloader = self.__dataloader(train=False)
            except Exception as e:
                print(e)
                raise
        return self._test_dataloader
    @staticmethod
    def add_model_specific_args(parent_parser, root_dir):
        """
        Parameters you define here will be available to your model through self.hparams
        :param parent_parser: parser to extend
        :param root_dir: base directory for the MNIST download
        :return: the extended parser
        """
        parser = HyperOptArgumentParser(strategy=parent_parser.strategy, parents=[parent_parser])
        # param overwrites
        # parser.set_defaults(gradient_clip=5.0)
        # network params
        parser.opt_list('--drop_prob', default=0.2, options=[0.2, 0.5], type=float, tunable=False)
        parser.add_argument('--in_features', default=28*28, type=int)
        parser.add_argument('--out_features', default=10, type=int)
        parser.add_argument('--hidden_dim', default=50000, type=int)  # use 500 for CPU, 50000 for GPU to see speed difference
        # data
        parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)
        # training params (opt)
        parser.opt_list('--learning_rate', default=0.001*8, type=float, options=[0.0001, 0.0005, 0.001, 0.005],
                        tunable=False)
        parser.opt_list('--optimizer_name', default='adam', type=str, options=['adam'], tunable=False)
        # if using 2 nodes with 4 gpus each the batch size here (256) will be 256 / (2*8) = 16 per gpu
        parser.opt_list('--batch_size', default=256*8, type=int, options=[32, 64, 128, 256], tunable=False,
                        help='batch size will be divided over all the gpus being used across all nodes')
        return parser
Set dp as the default backend.
import os
from collections import OrderedDict
import torch.nn as nn
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
import torch
import torch.nn.functional as F
from test_tube import HyperOptArgumentParser
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from pytorch_lightning.root_module.root_module import LightningModule
class LightningTemplateModel(LightningModule):
    """
    Sample model to show how to define a template.
    A one-hidden-layer MLP classifier for MNIST, demonstrating the
    LightningModule hooks: forward, training/validation steps, optimizer
    configuration and dataloader construction.
    """
    def __init__(self, hparams):
        """
        Pass in parsed HyperOptArgumentParser to the model
        :param hparams: namespace carrying the fields registered in
            ``add_model_specific_args`` (in_features, hidden_dim, ...)
        """
        # init superclass
        super(LightningTemplateModel, self).__init__(hparams)
        self.batch_size = hparams.batch_size
        # build model
        self.__build_model()
    # ---------------------
    # MODEL SETUP
    # ---------------------
    def __build_model(self):
        """
        Layout model: Linear -> tanh -> BatchNorm1d -> Dropout -> Linear.
        :return:
        """
        self.c_d1 = nn.Linear(in_features=self.hparams.in_features,
                              out_features=self.hparams.hidden_dim)
        self.c_d1_bn = nn.BatchNorm1d(self.hparams.hidden_dim)
        self.c_d1_drop = nn.Dropout(self.hparams.drop_prob)
        self.c_d2 = nn.Linear(in_features=self.hparams.hidden_dim,
                              out_features=self.hparams.out_features)
    # ---------------------
    # TRAINING
    # ---------------------
    def forward(self, x):
        """
        No special modification required for lightning, define as you normally would
        :param x: flattened image batch, shape (batch, in_features)
        :return: per-class log-probabilities, shape (batch, out_features)
        """
        x = self.c_d1(x)
        x = torch.tanh(x)
        x = self.c_d1_bn(x)
        x = self.c_d1_drop(x)
        x = self.c_d2(x)
        logits = F.log_softmax(x, dim=1)
        return logits
    def loss(self, labels, logits):
        """Negative log-likelihood of log-probability *logits* vs *labels*."""
        nll = F.nll_loss(logits, labels)
        return nll
    def training_step(self, data_batch, batch_i):
        """
        Lightning calls this inside the training loop
        :param data_batch: (images, labels) tuple from the train dataloader
        :param batch_i: index of the batch
        :return: dict carrying the 'loss' tensor to optimize
        """
        # forward pass
        x, y = data_batch
        x = x.view(x.size(0), -1)
        y_hat = self.forward(x)
        # calculate loss
        loss_val = self.loss(y, y_hat)
        output = OrderedDict({
            'loss': loss_val
        })
        # BUGFIX: the OrderedDict was built but the bare tensor was
        # returned, leaving the dict as dead code; return the dict
        # (consistent with validation_step below).
        return output
    def validation_step(self, data_batch, batch_i):
        """
        Lightning calls this inside the validation loop
        :param data_batch: (images, labels) tuple from the val dataloader
        :param batch_i: index of the batch
        :return: dict with 'val_loss' and 'val_acc' for validation_end
        """
        x, y = data_batch
        x = x.view(x.size(0), -1)
        y_hat = self.forward(x)
        loss_val = self.loss(y, y_hat)
        # acc: fraction of correctly predicted labels in this batch
        labels_hat = torch.argmax(y_hat, dim=1)
        val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
        val_acc = torch.tensor(val_acc)
        # BUGFIX: only move the metric onto the GPU when one is in use;
        # unconditionally calling .cuda(loss_val.device.index) crashes
        # CPU-only runs (no CUDA device / device.index is None).
        if self.on_gpu:
            val_acc = val_acc.cuda(loss_val.device.index)
        output = OrderedDict({
            'val_loss': loss_val,
            'val_acc': val_acc,
        })
        return output
    def validation_end(self, outputs):
        """
        Called at the end of validation to aggregate outputs
        :param outputs: list of individual outputs of each validation step
        :return: dict of averaged metrics shown in the progress bar
        """
        # NOTE(review): assumes outputs is non-empty — confirm the framework
        # never calls this with an empty list.
        val_loss_mean = 0
        val_acc_mean = 0
        for output in outputs:
            val_loss_mean += output['val_loss']
            val_acc_mean += output['val_acc']
        val_loss_mean /= len(outputs)
        val_acc_mean /= len(outputs)
        tqdm_dic = {'val_loss': val_loss_mean.item(), 'val_acc': val_acc_mean.item()}
        return tqdm_dic
    def update_tng_log_metrics(self, logs):
        # no extra training-log metrics; pass through unchanged
        return logs
    # ---------------------
    # MODEL SAVING
    # ---------------------
    def get_save_dict(self):
        # persist only the network weights
        checkpoint = {'state_dict': self.state_dict()}
        return checkpoint
    def load_model_specific(self, checkpoint):
        # restore the weights saved by get_save_dict()
        self.load_state_dict(checkpoint['state_dict'])
    # ---------------------
    # TRAINING SETUP
    # ---------------------
    def configure_optimizers(self):
        """
        return whatever optimizers we want here
        :return: list of optimizers
        """
        optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
        return [optimizer]
    def __dataloader(self, train):
        # init data generators
        transform = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize((0.5,), (1.0,))])
        dataset = MNIST(root=self.hparams.data_root, train=train,
                        transform=transform, download=True)
        # when using multi-node we need to add the datasampler
        train_sampler = None
        batch_size = self.hparams.batch_size
        try:
            if self.on_gpu:
                train_sampler = DistributedSampler(dataset, rank=self.trainer.proc_rank)
                batch_size = batch_size // self.trainer.world_size  # scale batch size
        except Exception:
            # best effort: trainer attributes are absent outside DDP runs
            pass
        should_shuffle = train_sampler is None
        loader = DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=should_shuffle,
            sampler=train_sampler
        )
        return loader
    @property
    def tng_dataloader(self):
        # lazily build and cache the training loader
        if self._tng_dataloader is None:
            try:
                self._tng_dataloader = self.__dataloader(train=True)
            except Exception as e:
                print(e)
                raise  # bare raise keeps the original traceback
        return self._tng_dataloader
    @property
    def val_dataloader(self):
        # lazily build and cache the validation loader
        if self._val_dataloader is None:
            try:
                self._val_dataloader = self.__dataloader(train=False)
            except Exception as e:
                print(e)
                raise
        return self._val_dataloader
    @property
    def test_dataloader(self):
        # lazily build and cache the test loader (MNIST test split)
        if self._test_dataloader is None:
            try:
                self._test_dataloader = self.__dataloader(train=False)
            except Exception as e:
                print(e)
                raise
        return self._test_dataloader
    @staticmethod
    def add_model_specific_args(parent_parser, root_dir):
        """
        Parameters you define here will be available to your model through self.hparams
        :param parent_parser: parser to extend
        :param root_dir: base directory for the MNIST download
        :return: the extended parser
        """
        parser = HyperOptArgumentParser(strategy=parent_parser.strategy, parents=[parent_parser])
        # param overwrites
        # parser.set_defaults(gradient_clip=5.0)
        # network params
        parser.opt_list('--drop_prob', default=0.2, options=[0.2, 0.5], type=float, tunable=False)
        parser.add_argument('--in_features', default=28*28, type=int)
        parser.add_argument('--out_features', default=10, type=int)
        parser.add_argument('--hidden_dim', default=50000, type=int)  # use 500 for CPU, 50000 for GPU to see speed difference
        # data
        parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)
        # training params (opt)
        parser.opt_list('--learning_rate', default=0.001*8, type=float, options=[0.0001, 0.0005, 0.001, 0.005],
                        tunable=False)
        parser.opt_list('--optimizer_name', default='adam', type=str, options=['adam'], tunable=False)
        # if using 2 nodes with 4 gpus each the batch size here (256) will be 256 / (2*8) = 16 per gpu
        parser.opt_list('--batch_size', default=256*8, type=int, options=[32, 64, 128, 256], tunable=False,
                        help='batch size will be divided over all the gpus being used across all nodes')
        return parser
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# NOTE: this file is a Jinja template; the '{{ ... }}' and '{%- ... %}'
# markers are substituted by the SDK generator before it becomes valid
# Python, so it must not be executed as-is.
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "{{package_name}}"
PACKAGE_PPRINT_NAME = "{{package_pprint_name}}"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
# (prefers version.py, falls back to _version.py)
with open(os.path.join(package_folder_path, 'version.py')
          if os.path.exists(os.path.join(package_folder_path, 'version.py'))
          else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)
if not version:
    raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
    readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
    changelog = f.read()
setup(
    name=PACKAGE_NAME,
    version=version,
    description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
    long_description=readme + '\n\n' + changelog,
    long_description_content_type='text/markdown',
    license='MIT License',
    author='Microsoft Corporation',
    author_email='azpysdkhelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python',
    keywords="azure, azure sdk",  # update with search keywords relevant to the azure service / product
    classifiers=[
        '{{classifier}}',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'License :: OSI Approved :: MIT License',
    ],
    zip_safe=False,
    packages=find_packages(exclude=[
        'tests',
        # Exclude packages that will be covered by PEP420 or nspkg
        {%- for nspkg_name in nspkg_names %}
        '{{ nspkg_name }}',
        {%- endfor %}
    ]),
    include_package_data=True,
    package_data={
        # NOTE(review): package_data keys are package names; 'pytyped'
        # looks like it should be the generated package's import name —
        # confirm against the generator's output.
        'pytyped': ['py.typed'],
    },
    install_requires=[
        'msrest>=0.6.21',
        {%- if need_msrestazure %}
        'msrestazure>=0.4.32,<2.0.0',
        {%- endif %}
        'azure-common~=1.1',
        {%- if need_azurecore %}
        'azure-core>=1.6.0,<2.0.0',
        {%- endif %}
        {%- if need_azuremgmtcore %}
        'azure-mgmt-core>=1.3.0,<2.0.0',
        {%- endif %}
    ],
    python_requires=">=3.6"
)
Update setup.py (#24832)
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# NOTE: this file is a Jinja template; the '{{ ... }}' and '{%- ... %}'
# markers are substituted by the SDK generator before it becomes valid
# Python, so it must not be executed as-is.
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "{{package_name}}"
PACKAGE_PPRINT_NAME = "{{package_pprint_name}}"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
# (prefers version.py, falls back to _version.py)
with open(os.path.join(package_folder_path, 'version.py')
          if os.path.exists(os.path.join(package_folder_path, 'version.py'))
          else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)
if not version:
    raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
    readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
    changelog = f.read()
setup(
    name=PACKAGE_NAME,
    version=version,
    description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
    long_description=readme + '\n\n' + changelog,
    long_description_content_type='text/markdown',
    license='MIT License',
    author='Microsoft Corporation',
    author_email='azpysdkhelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python',
    keywords="azure, azure sdk",  # update with search keywords relevant to the azure service / product
    classifiers=[
        '{{classifier}}',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'License :: OSI Approved :: MIT License',
    ],
    zip_safe=False,
    packages=find_packages(exclude=[
        'tests',
        # Exclude packages that will be covered by PEP420 or nspkg
        {%- for nspkg_name in nspkg_names %}
        '{{ nspkg_name }}',
        {%- endfor %}
    ]),
    include_package_data=True,
    package_data={
        # NOTE(review): package_data keys are package names; 'pytyped'
        # looks like it should be the generated package's import name —
        # confirm against the generator's output.
        'pytyped': ['py.typed'],
    },
    install_requires=[
        'msrest>=0.6.21',
        {%- if need_msrestazure %}
        'msrestazure>=0.4.32,<2.0.0',
        {%- endif %}
        'azure-common~=1.1',
        {%- if need_azurecore %}
        'azure-core>=1.23.0,<2.0.0',
        {%- endif %}
        {%- if need_azuremgmtcore %}
        'azure-mgmt-core>=1.3.1,<2.0.0',
        {%- endif %}
    ],
    python_requires=">=3.6"
)
|
from __future__ import print_function
from datetime import datetime
import os
import random
import socket
import sys
from .log import error, debug
def get_valid_seconds(aws_expiration_date, utcnow):
    """Return how many seconds the AWS credentials remain valid.

    :param aws_expiration_date: expiration timestamp string in
        ``%Y-%m-%dT%H:%M:%SZ`` format (UTC)
    :param utcnow: current UTC time as a naive ``datetime``
    :return: remaining lifetime in whole seconds, 0 when already expired,
        or a 3600-second default when the date cannot be parsed
    """
    try:
        credentials_valid_until = datetime.strptime(
            aws_expiration_date, "%Y-%m-%dT%H:%M:%SZ")
        # BUGFIX: use total_seconds() instead of .seconds — .seconds drops
        # the days component and turns negative (expired) deltas into bogus
        # positive values; clamp expired credentials to 0.
        return max(0, int((credentials_valid_until - utcnow).total_seconds()))
    except ValueError:
        default_seconds = 3600
        msg = "Failed to parse expiration date '{0}' for AWS credentials, assuming {1} seconds.".format(
            aws_expiration_date, default_seconds)
        print(msg, file=sys.stderr)
        return default_seconds
def get_default_afp_server():
    """Return the FQDN of the host that is called "afp"

    "afp" is resolved to one or more IPv4 addresses; a random one is
    picked, then reverse-resolved to obtain its fully qualified name.
    """
    try:
        addrinfos = socket.getaddrinfo("afp", 443,
                                       socket.AF_INET, socket.SOCK_STREAM)
    except Exception as exc:
        error("Could not resolve hostname 'afp': %s" % exc)
    # addrinfo tuples are (family, type, proto, canonname, sockaddr);
    # sockaddr[0] is the IP address
    afp_server_ip = random.choice(addrinfos)[4][0]
    try:
        return socket.gethostbyaddr(afp_server_ip)[0]
    except Exception as exc:
        error("DNS reverse lookup failed for IP %s: %s" % (
            afp_server_ip, exc))
def get_first_role(federation_client, account):
    """Return the alphabetically first role available for *account*.

    Delegates failures (unreachable AWS, unknown account, empty role
    list) to ``error()``.
    """
    try:
        account_role_map = federation_client.get_account_and_role_list()
    except Exception as exc:
        error("Failed to get account list from AWS: %s" % exc)
    try:
        return sorted(account_role_map[account])[0]
    except KeyError:
        error("%s is not a valid AWS account" % account)
    except IndexError:
        error("Could not find any role for account %s" % account)
def get_aws_credentials(federation_client, account, role):
    """Obtain temporary AWS credentials for *account*/*role*.

    The mapping returned by the federation client is augmented with the
    bookkeeping entries AWS_VALID_SECONDS, AWS_ACCOUNT_NAME and
    AWS_ASSUMED_ROLE before being returned.
    """
    try:
        credentials = federation_client.get_aws_credentials(account, role)
    except Exception as exc:
        error("Failed to get credentials from AWS: %s" % exc)
    credentials['AWS_VALID_SECONDS'] = get_valid_seconds(
        credentials['AWS_EXPIRATION_DATE'], datetime.utcnow())
    credentials['AWS_ACCOUNT_NAME'] = account
    credentials['AWS_ASSUMED_ROLE'] = role
    return credentials
Fix PEP 8 errors.
from __future__ import print_function
from datetime import datetime
import random
import socket
import sys
from .log import error
def get_valid_seconds(aws_expiration_date, utcnow):
    """Return how many seconds the AWS credentials remain valid.

    :param aws_expiration_date: expiration timestamp string in
        ``%Y-%m-%dT%H:%M:%SZ`` format (UTC)
    :param utcnow: current UTC time as a naive ``datetime``
    :return: remaining lifetime in whole seconds, 0 when already expired,
        or a 3600-second default when the date cannot be parsed
    """
    try:
        credentials_valid_until = datetime.strptime(
            aws_expiration_date, "%Y-%m-%dT%H:%M:%SZ")
        # BUGFIX: use total_seconds() instead of .seconds — .seconds drops
        # the days component and turns negative (expired) deltas into bogus
        # positive values; clamp expired credentials to 0.
        return max(0, int((credentials_valid_until - utcnow).total_seconds()))
    except ValueError:
        default_seconds = 3600
        msg = "Failed to parse expiration date '{0}' for AWS credentials, assuming {1} seconds.".format(
            aws_expiration_date, default_seconds)
        print(msg, file=sys.stderr)
        return default_seconds
def get_default_afp_server():
    """Return the FQDN of the host that is called "afp"

    The name "afp" may resolve to several IPv4 addresses; one of them is
    chosen at random and reverse-resolved to get its fully qualified name.
    """
    try:
        addrinfos = socket.getaddrinfo("afp", 443,
                                       socket.AF_INET, socket.SOCK_STREAM)
    except Exception as exc:
        error("Could not resolve hostname 'afp': %s" % exc)
    # each addrinfo is (family, type, proto, canonname, sockaddr)
    sockaddr = random.choice(addrinfos)[4]
    afp_server_ip = sockaddr[0]
    try:
        return socket.gethostbyaddr(afp_server_ip)[0]
    except Exception as exc:
        error("DNS reverse lookup failed for IP %s: %s" % (
            afp_server_ip, exc))
def get_first_role(federation_client, account):
    """Return the alphabetically first role of *account*.

    Any failure (unreachable AWS, unknown account, no roles) is routed
    through ``error()``.
    """
    try:
        mapping = federation_client.get_account_and_role_list()
    except Exception as exc:
        error("Failed to get account list from AWS: %s" % exc)
    try:
        return sorted(mapping[account])[0]
    except KeyError:
        error("%s is not a valid AWS account" % account)
    except IndexError:
        error("Could not find any role for account %s" % account)
def get_aws_credentials(federation_client, account, role):
    """Obtain temporary credentials for *account*/*role* from the federation
    client and annotate them with their remaining validity and origin.

    Adds the keys AWS_VALID_SECONDS, AWS_ACCOUNT_NAME and AWS_ASSUMED_ROLE.
    A failing client call is routed to error().
    """
    try:
        result = federation_client.get_aws_credentials(account, role)
    except Exception as exc:
        error("Failed to get credentials from AWS: %s" % exc)
    expiration_stamp = result['AWS_EXPIRATION_DATE']
    result['AWS_VALID_SECONDS'] = get_valid_seconds(expiration_stamp,
                                                    datetime.utcnow())
    result['AWS_ACCOUNT_NAME'] = account
    result['AWS_ASSUMED_ROLE'] = role
    return result
|
#! /usr/bin/env python
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com
from __future__ import with_statement
import os
import re
import distutils.version
import sys, string
import subprocess
import time
import getopt
import roslib
import roslib.rospack
import roslib.rosenv
import roslib.stacks
import threading
import math
import parallel_build
from optparse import OptionParser
import rosdep
class RosMakeAll:
    """Driver for building a set of ROS packages with ``make``.

    Resolves the requested packages into a dependency-ordered build list,
    builds them with a pool of parallel_build.CompileThread workers, and
    records per-package results, captured output, and timing.

    NOTE(review): this is Python 2 code (print statements,
    ``except Exc, e`` syntax, xrange); it will not run under Python 3.
    """
    def __init__(self):
        # result/output/profile are keyed first by make target (None for the
        # default target, e.g. "test" or "clean"), then by package name.
        self.result = {}
        # Cache of package name -> package directory (see get_path()).
        self.paths = {}
        self.dependency_tracker = parallel_build.DependencyTracker()
        self.flag_tracker = parallel_build.PackageFlagTracker(self.dependency_tracker)
        self.output = {}
        self.verbose = False
        self.full_verbose = False
        self.profile = {}
        # Passed to make as ROS_PARALLEL_JOBS="-jN".
        self.ros_parallel_jobs = parallel_build.num_cpus()
        # Dependency-ordered list of packages to build.
        self.build_list = []
        self.start_time = time.time()
        self.log_dir = ""
        self.logging_enabled = True
    def num_packages_built(self):
        # NOTE(review): `argument` is not defined in this scope; calling this
        # method raises NameError.  It presumably should take the make target
        # as a parameter -- confirm against callers.
        return len(self.result[argument].keys())
    def get_path(self, package):
        """Return the directory of *package*, memoized in self.paths."""
        if not package in self.paths:
            self.paths[package] = roslib.packages.get_pkg_dir(package)
        return self.paths[package]
    def check_rosdep(self, packages):
        """Check rosdep (system dependency) compliance; return True if clean."""
        r = rosdep.core.Rosdep(packages, robust=True)
        output = r.check()
        if len(output) == 0:
            #print "Rosdep check passed all packages:", packages
            return True
        else:
            print "Rosdep check failed packages:", output
            return False
    def install_rosdeps(self, packages, default_yes):
        """Run rosdep install for *packages*; return True on success."""
        r = rosdep.core.Rosdep(packages, robust=True)
        try:
            r.install(include_duplicates=False, default_yes=default_yes);
            return True
        except rosdep.RosdepException, e:
            print "ERROR: %s"%e
            return False
    def build_or_recurse(self,p):
        """Depth-first append *p* and its dependencies to self.build_list."""
        if p in self.build_list:
            return
        for d in self.dependency_tracker.get_deps_1(p):
            self.build_or_recurse(d)
        try: # append it ot the list only if present
            self.get_path(p)
            self.build_list.append(p)
        except roslib.packages.InvalidROSPkgException, ex:
            if not self.robust_build:
                self.print_all("Exiting due to missing package: %s"%ex)
                sys.exit(-1)
            else:
                self.print_all("!"*20 + " Package %s does not exist. %s"%(p, ex) + "!"*20)
    def parallel_build_pkgs(self, build_queue, argument = None, threads = 1):
        """Drain *build_queue* with *threads* workers; return overall success."""
        self.profile[argument] = {}
        self.output[argument] = {}
        self.result[argument] = {}
        cts = []
        for i in xrange(0, threads):
            ct = parallel_build.CompileThread(str(i), build_queue, self, argument)
            ct.start()
            cts.append(ct)
        for ct in cts:
            try:
                ct.join()
                #print "naturally ended thread", ct
            except KeyboardInterrupt:
                # Ctrl-C: tell the queue to stop handing out work, then wait
                # for this worker to wind down before joining the rest.
                self.print_all( "Caught KeyboardInterrupt. Stopping build.")
                build_queue.stop()
                ct.join()
                pass
        all_pkgs_passed = True
        for v in self.result[argument].values():
            all_pkgs_passed = v and all_pkgs_passed
        build_passed = build_queue.succeeded() and all_pkgs_passed
        return build_passed
    def build_package(self, package, argument=None):
        """Run ``make [argument]`` in *package*'s directory.

        Returns (returncode, combined stdout+stderr text).
        """
        local_env = os.environ.copy()
        local_env['ROS_PARALLEL_JOBS'] = "-j%d" % self.ros_parallel_jobs
        local_env['SVN_CMDLINE'] = "svn --non-interactive"
        cmd = ["bash", "-c", "cd %s && make "%self.get_path(package) ]
        if argument:
            cmd[-1] += argument
        self.print_full_verbose (cmd)
        command_line = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=local_env)
        (pstd_out, pstd_err) = command_line.communicate() # pstd_err should be None due to pipe above
        return (command_line.returncode, pstd_out)
    def build(self, p, argument = None, robust_build=False):
        """Build one package *p*, honoring skip flags and recording results.

        Returns (success, status_string).  Updates self.result, self.output
        and self.profile under the *argument* target key.
        """
        return_string = ""
        try:
            if p == "rospack":
                return_string = ("[SKIP] rosmake uses rospack. If building it is already built, if cleaning it will be cleaned at the end.")
                return (True, return_string) # This will be caught later
            # warn if ROS_BUILD_BLACKLIST encountered if applicable
            if not self.skip_blacklist and self.flag_tracker.is_blacklisted(p):
                self.print_all ("!"*20 + " Building package %s. ROS_BUILD_BLACKLIST ENCOUNTERED in package(s): %s --- TRYING TO BUILD ANYWAY"%(p, self.flag_tracker.is_blacklisted(p)) + "!"*20)
            if self.skip_blacklist and self.flag_tracker.is_blacklisted(p):
                self.result[argument][p] = True
                return_string = ("[SKIP] due to ROS_BUILD_BLACKLIST in %s"%self.flag_tracker.is_blacklisted(p))
                self.output[argument][p] = "ROS_BUILD_BLACKLIST"
            elif self.skip_blacklist_osx and self.flag_tracker.is_blacklisted_osx(p):
                self.result[argument][p] = True
                return_string = ("[SKIP] due to ROS_BUILD_BLACKLIST_OSX")
                self.output[argument][p] = "ROS_BUILD_BLACKLIST_OSX"
            elif self.flag_tracker.has_nobuild(p):
                self.result[argument][p] = True
                return_string = ("[SKIP] due to ROS_NOBUILD")
                self.output[argument][p] = "ROS_NOBUILD"
            elif not self.flag_tracker.has_makefile(p):
                self.result[argument][p] = True
                return_string = ("[SKIP] due do to no Makefile")
                self.output[argument][p] = "No Makefile Present"
            else:
                start_time = time.time()
                (returncode, pstd_out) = self.build_package(p, argument)
                self.profile[argument][p] = time.time() - start_time
                self.output[argument][p] = pstd_out
                if argument:
                    log_type = "build_%s"%argument
                else:
                    log_type = "build"
                if not returncode:
                    self.print_full_verbose( pstd_out)
                    self.result[argument][p] = True
                    num_warnings = len(re.findall("warning:", pstd_out))
                    if num_warnings > 0:
                        return_string = ("[PASS] [ %.2f seconds ] -- WARNING: %d compiler warnings"%(self.profile[argument][p], num_warnings))
                    else:
                        return_string = ("[PASS] [ %.2f seconds ]"%( self.profile[argument][p]))
                    self.output_to_file(p, log_type, pstd_out, num_warnings > 0)
                else:
                    # Classify the failure from make's output text.
                    no_target = len(re.findall("No rule to make target", pstd_out)) > 0
                    interrupt = len(re.findall("Interrupt", pstd_out)) > 0
                    if no_target:
                        return_string = ( "[SKIP] No rule to make target %s"%( argument))
                    elif interrupt:
                        return_string = ("[Interrupted]" )
                    else:
                        return_string = ( "[FAIL] [ %.2f seconds ]"%( self.profile[argument][p]))
                    # A missing target counts as success (nothing to do).
                    self.result[argument][p] = no_target
                    if self.robust_build or interrupt:
                        self.print_verbose( pstd_out)
                    else:
                        self.print_tail( pstd_out)
                    self.output_to_file(p, log_type, pstd_out, always_print= not (no_target or interrupt))
                    return (False, return_string)
            return (True, return_string) # this means that we didn't error in any case above
        except roslib.packages.InvalidROSPkgException, ex:
            self.result[argument][p] = False
            self.print_verbose ("[SKIP] Package not found\n")
            self.output[argument][p] = "Package not found %s"%ex
            return (False, return_string)
    def output_to_file(self, package, log_type, stdout, always_print= False):
        """Write captured build output to <log_dir>/<package>/<log_type>_output.log."""
        if not self.logging_enabled:
            return
        package_log_dir = os.path.join(self.log_dir, package)
        std_out_filename = os.path.join(package_log_dir, log_type + "_output.log")
        if not os.path.exists (package_log_dir):
            os.makedirs (package_log_dir)
        with open(std_out_filename, 'w') as stdout_file:
            stdout_file.write(stdout)
        print_string = "Output from build of package %s written to:\n[ rosmake ] %s"%(package, std_out_filename)
        if always_print:
            self.print_all(print_string)
        else:
            self.print_full_verbose(print_string)
    def generate_summary_output(self, log_dir):
        """Write buildfailures/testfailures/profile summary files into *log_dir*."""
        if not self.logging_enabled:
            return
        self.print_all("Summary output to directory")
        self.print_all("%s"%self.log_dir)
        if None in self.result.keys():
            if len(self.result[None].keys()) > 0:
                buildfail_filename = os.path.join(log_dir, "buildfailures.txt")
                with open(buildfail_filename, 'w') as bf:
                    bf.write("Build failures:\n")
                    for key in self.build_list:
                        if key in self.result[None].keys() and self.result[None][key] == False:
                            bf.write("%s\n"%key)
                if None in self.output.keys():
                    buildfail_context_filename = os.path.join(log_dir, "buildfailures-with-context.txt")
                    with open(buildfail_context_filename, 'w') as bfwc:
                        bfwc.write("Build failures with context:\n")
                        for key in self.build_list:
                            if key in self.result[None].keys() and self.result[None][key] == False:
                                bfwc.write("---------------------\n")
                                bfwc.write("%s\n"%key)
                                if key in self.output[None]:
                                    bfwc.write(self.output[None][key])
        if "test" in self.result.keys():
            if len(self.result["test"].keys()) > 0:
                testfail_filename = os.path.join(log_dir, "testfailures.txt")
                with open(testfail_filename, 'w') as btwc:
                    btwc.write("Test failures:\n")
                    for key in self.build_list:
                        if key in self.result["test"].keys() and self.result["test"][key] == False:
                            btwc.write("%s\n"%key)
            if "test" in self.output.keys():
                testfail_filename = os.path.join(log_dir, "testfailures-with-context.txt")
                with open(testfail_filename, 'w') as btwc:
                    btwc.write("Test failures with context:\n")
                    for key in self.build_list:
                        if key in self.result["test"].keys() and self.result["test"][key] == False:
                            btwc.write("%s\n"%key)
                            if key in self.output["test"]:
                                btwc.write(self.output["test"][key])
        profile_filename = os.path.join(log_dir, "profile.txt")
        with open(profile_filename, 'w') as pf:
            pf.write(self.get_profile_string())
    def get_profile_string(self):
        """Return a formatted per-package build/test timing and status report."""
        output = '--------------\nProfile\n--------------\n'
        total = 0.0
        count = 1
        for key in self.build_list:
            build_results = ["[Not Built ]", "[ Built ]", "[Build Fail]"];
            test_results = ["[Untested ]", "[Test Pass]", "[Test Fail]"];
            build_result = 0
            test_result = 0
            test_time = 0.0
            build_time = 0.0
            if None in self.result.keys():
                if key in self.result[None].keys():
                    if self.result[None][key] == True:
                        build_result = 1
                    else:
                        build_result = 2
            if "test" in self.profile.keys():
                if key in self.result["test"].keys():
                    if self.result["test"][key] == True:
                        test_result = 1
                    else:
                        test_result = 2
            if None in self.profile.keys():
                if key in self.profile[None].keys():
                    build_time = self.profile[None][key]
            if "test" in self.profile.keys():
                if key in self.profile["test"].keys():
                    test_time = self.profile["test"][key]
            output = output + "%3d: %s in %d:%.2f %s in %.2f --- %s\n"% (count, build_results[build_result], math.floor(build_time/60), build_time%60 , test_results[test_result], test_time, key)
            total = total + build_time
            count = count + 1
        elapsed_time = self.finish_time - self.start_time
        output = output + "----------------\n" + "%.2f Cumulative, %.2f Elapsed, %.2f Speedup \n"%(total, elapsed_time, float(total) / float(elapsed_time))
        return output
    def print_all(self, s, newline = True, thread_name=None):
        """Print *s* with a rosmake (or per-thread) prefix; flush when no newline."""
        if thread_name == None:
            if newline:
                print "[ rosmake ]", s
            else:
                print "[ rosmake ]", s,
                sys.stdout.flush()
        else:
            if newline:
                print "[rosmake-%s]"%thread_name, s
            else:
                print "[rosmake-%s]"%thread_name, s
                sys.stdout.flush()
    def print_verbose(self, s, thread_name=None):
        """Print *s* only when -v or -V was given."""
        if self.verbose or self.full_verbose:
            if thread_name:
                self.print_all(s, thread_name=thread_name)
            else:
                print "[ rosmake ]", s
    def print_full_verbose(self, s):
        """Print *s* only when -V (full verbose) was given."""
        if self.full_verbose:
            print "[ rosmake ] ", s
    def print_tail(self, s, tail_lines=40):
        """Print the last *tail_lines* lines of *s* inside a ruled box."""
        lines = s.splitlines()
        num_lines = min(len(lines), tail_lines)
        if num_lines == tail_lines:
            print "[ rosmake ] Last %d lines"%num_lines
        else:
            print "[ rosmake ] All %d lines"%num_lines
        print "{" + "-"*79
        for l in xrange(-num_lines, -1):
            print " %s"%lines[l]
        print "-"*79 + "}"
    def assert_rospack_built(self):
        """Build rospack and gtest unless rospack is marked ROS_NOBUILD."""
        if self.flag_tracker.has_nobuild("rospack"):
            return True
        ret_val = subprocess.call(["bash", "-c", "cd %s && make "%os.path.join(os.environ["ROS_ROOT"], "tools/rospack")])
        ret_val2 = subprocess.call(["bash", "-c", "cd %s && make "%os.path.join(os.environ["ROS_ROOT"], "3rdparty/gtest")])
        return ret_val and ret_val2
        # The check for presence doesn't check for updates
        #if os.path.exists(os.path.join(os.environ["ROS_ROOT"], "bin/rospack")):
        #    return True
        #else:
        #    print "Rosmake detected that rospack was not built. Building it for you because it is required."
        #    return subprocess.call(["make", "-C", os.path.join(os.environ["ROS_ROOT"], "tools/rospack")])
    def is_rosout_built(self):
        """Return True if the rosout binary exists in its package directory."""
        return os.path.exists(os.path.join(roslib.packages.get_pkg_dir("rosout"), "rosout"))
    def main(self):
        """Entry point: parse options, resolve packages, build and/or test.

        Returns True when the build (and tests, if requested) succeeded.
        """
        parser = OptionParser(usage="usage: %prog [options] COMMAND PACKAGE LIST", prog='rosmake')
        parser.add_option("--test-only", dest="test_only", default=False,
                          action="store_true", help="only run tests")
        parser.add_option("-t", dest="test", default=False,
                          action="store_true", help="build and test packages")
        parser.add_option("-a", "--all", dest="build_all", default=False,
                          action="store_true", help="select all packages")
        parser.add_option("-i", "--mark-installed", dest="mark_installed", default=False,
                          action="store_true", help="On successful build, mark packages as installed with ROS_NOBUILD")
        parser.add_option("-u", "--unmark-installed", dest="unmark_installed", default=False,
                          action="store_true", help="Remove ROS_NOBUILD from the specified packages. This will not build anything.")
        parser.add_option("-v", dest="verbose", default=False,
                          action="store_true", help="display errored builds")
        parser.add_option("-r","-k", "--robust", dest="robust", default=False,
                          action="store_true", help="do not stop build on error")
        parser.add_option("-V", dest="full_verbose", default=False,
                          action="store_true", help="display all builds")
        parser.add_option("-s", "--specified-only", dest="specified_only", default=False,
                          action="store_true", help="only build packages specified on the command line")
        parser.add_option("--buildtest", dest="buildtest",
                          action="append", help="package to buildtest")
        parser.add_option("--buildtest1", dest="buildtest1",
                          action="append", help="package to buildtest1")
        parser.add_option("--output", dest="output_dir",
                          action="store", help="where to output results")
        parser.add_option("--pre-clean", dest="pre_clean",
                          action="store_true", help="run make clean first")
        parser.add_option("--disable-logging", dest="logging_enabled", default=True,
                          action="store_false", help="turn off all logs")
        parser.add_option("--target", dest="target",
                          action="store", help="run make with this target")
        parser.add_option("--pjobs", dest="ros_parallel_jobs", type="int",
                          action="store", help="run make with this N jobs '-j=N'")
        parser.add_option("--threads", dest="threads", type="int", default = parallel_build.num_cpus(),
                          action="store", help="Build up to N packages in parallel")
        parser.add_option("--profile", dest="print_profile", default=False,
                          action="store_true", help="print time profile after build")
        parser.add_option("--skip-blacklist", dest="skip_blacklist",
                          default=False, action="store_true",
                          help="skip packages containing a file called ROS_BUILD_BLACKLIST (Default behavior will ignore the presence of ROS_BUILD_BLACKLIST)")
        parser.add_option("--skip-blacklist-osx", dest="skip_blacklist_osx",
                          default=False, action="store_true",
                          help="skip packages containing a file called ROS_BUILD_BLACKLIST_OSX (Default behavior will ignore the presence of ROS_BUILD_BLACKLIST_OSX)")
        parser.add_option("--rosdep-install", dest="rosdep_install",
                          action="store_true", help="call rosdep install before running")
        parser.add_option("--rosdep-yes", dest="rosdep_yes",
                          action="store_true", help="call rosdep install with default yes argument")
        options, args = parser.parse_args()
        testing = False
        building = True
        if options.test_only:
            testing = True
            building = False
        elif options.test:
            testing = True
        if options.ros_parallel_jobs:
            self.ros_parallel_jobs = options.ros_parallel_jobs
        self.robust_build = options.robust
        self.threads = options.threads
        self.skip_blacklist = options.skip_blacklist
        self.skip_blacklist_osx = options.skip_blacklist_osx
        self.logging_enabled = options.logging_enabled
        # pass through verbosity options
        self.full_verbose = options.full_verbose
        self.verbose = options.verbose
        packages = []
        #load packages from arguments
        if options.build_all:
            packages = roslib.packages.list_pkgs()
            self.print_all( "Building all packages")
        else: # no need to extend if all already selected
            if options.buildtest:
                for p in options.buildtest:
                    packages.extend(roslib.rospack.rospack_depends_on(p))
                    self.print_all( "buildtest requested for package %s adding it and all dependent packages: "%p)
            if options.buildtest1:
                for p in options.buildtest1:
                    packages.extend(roslib.rospack.rospack_depends_on_1(p))
                    self.print_all( "buildtest1 requested for package %s adding it and all depends-on1 packages: "%p)
        if len(packages) == 0 and len(args) == 0:
            # No explicit selection: fall back to the package or stack that
            # owns the current working directory.
            p = os.path.basename(os.path.abspath('.'))
            try:
                if (os.path.samefile(roslib.packages.get_pkg_dir(p), '.')):
                    packages = [p]
                    self.print_all( "No package specified. Building %s"%packages)
                else:
                    self.print_all("No package selected and the current directory is not the correct path for package '%s'."%p)
            except roslib.packages.InvalidROSPkgException, ex:
                try:
                    if (roslib.stacks.get_stack_dir(p) == os.path.abspath('.')):
                        packages = [p]
                        self.print_all( "No package specified. Building stack %s"%packages)
                    else:
                        self.print_all("No stack selected and the current directory is not the correct path for stack '%s'."%p)
                except roslib.stacks.InvalidROSStackException, ex2:
                    self.print_all("No package or stack specified. And current directory '%s' is not a package name or stack name."%p)
                    #sys.exit(-1)
        else:
            packages.extend(args)
        if not self.is_rosout_built():
            packages.append("rosout")
            self.print_all("Detected rosout not built, adding it to the build")
        self.print_all( "Packages requested are: %s"%packages)
        # Setup logging
        if self.logging_enabled:
            date_time_stamp = "rosmake_output-" + time.strftime("%Y%m%d-%H%M%S")
            if options.output_dir:
                #self.log_dir = os.path.join(os.getcwd(), options.output_dir, date_time_stamp);
                self.log_dir = os.path.abspath(options.output_dir)
            else:
                self.log_dir = os.path.join(roslib.rosenv.get_ros_home(), "rosmake", date_time_stamp);
            self.print_all("Logging to directory")
            self.print_all("%s"%self.log_dir)
            if os.path.exists (self.log_dir) and not os.path.isdir(self.log_dir):
                self.print_all( "Log destination %s is a file; please remove it or choose a new destination"%self.log_dir)
                sys.exit(1)
            if not os.path.exists (self.log_dir):
                os.makedirs (self.log_dir)
        (specified_packages, rejected_packages) = roslib.stacks.expand_to_packages(packages)
        self.print_all("Expanded args %s to:\n%s"%(packages, specified_packages))
        if rejected_packages:
            self.print_all("WARNING: The following args could not be parsed as stacks or packages: %s"%rejected_packages)
        if len(specified_packages) == 0:
            self.print_all("ERROR: No arguments could be parsed into valid package or stack names.")
            return False
        # make sure all dependencies are satisfied and if not warn
        if options.rosdep_install:
            self.print_all("Generating Install Script using rosdep")
            self.install_rosdeps(specified_packages, options.rosdep_yes)
        else:
            self.print_all("Checking rosdeps compliance for packages")
            self.check_rosdep(specified_packages)
        if options.unmark_installed:
            for p in specified_packages:
                if self.flag_tracker.remove_nobuild(p):
                    self.print_all("Removed ROS_NOBUILD from %s"%p)
            return True
        required_packages = specified_packages[:]
        # these packages are not in the dependency tree but are needed they only cost 0.01 seconds to build
        if "paramiko" not in specified_packages:
            required_packages.append("paramiko")
        if "pycrypto" not in specified_packages:
            required_packages.append("pycrypto")
        #generate the list of packages necessary to build(in order of dependencies)
        counter = 0
        for p in required_packages:
            counter = counter + 1
            self.print_verbose( "Processing %s and all dependencies(%d of %d requested)"%(p, counter, len(packages)))
            self.build_or_recurse(p)
        # remove extra packages if specified-only flag is set
        if options.specified_only:
            new_list = []
            for pkg in self.build_list:
                if pkg in specified_packages:
                    new_list.append(pkg)
            self.dependency_tracker = parallel_build.DependencyTracker(specified_packages) # this will make the tracker only respond to packages in the list
            self.print_all("specified-only option was used, only building packages %s"%new_list)
            self.build_list = new_list
        if options.pre_clean:
            build_queue = parallel_build.BuildQueue(self.build_list, self.dependency_tracker, robust_build = True)
            self.parallel_build_pkgs(build_queue, "clean", threads = options.threads)
            if "rospack" in self.build_list:
                self.print_all( "Rosmake detected that rospack was requested to be cleaned. Cleaning it for it was skipped earlier.")
                subprocess.check_call(["make", "-C", os.path.join(os.environ["ROS_ROOT"], "tools/rospack"), "clean"])
        if building:
            self.assert_rospack_built()
        build_passed = True
        if building:
            self.print_verbose ("Building packages %s"% self.build_list)
            build_queue = parallel_build.BuildQueue(self.build_list, self.dependency_tracker, robust_build = options.robust)
            build_passed = self.parallel_build_pkgs(build_queue, options.target, threads = options.threads)
            if "rospack" in self.build_list and options.target == "clean":
                self.print_all( "Rosmake detected that rospack was requested to be cleaned. Cleaning it, because it was skipped earlier.")
                subprocess.check_call(["make", "-C", os.path.join(os.environ["ROS_ROOT"], "tools/rospack"), "clean"])
        tests_passed = True
        if build_passed and testing:
            self.print_verbose ("Testing packages %s"% packages)
            build_queue = parallel_build.BuildQueue(specified_packages, parallel_build.DependencyTracker(specified_packages), robust_build = True)
            tests_passed = self.parallel_build_pkgs(build_queue, "test", threads = 1)
        if options.mark_installed:
            if build_passed and tests_passed:
                for p in specified_packages:
                    if self.flag_tracker.add_nobuild(p):
                        self.print_all("Marking %s as installed with a ROS_NOBUILD file"%p)
            else:
                self.print_all("All builds and tests did not pass cannot mark packages as installed. ")
        self.finish_time = time.time() #note: before profiling
        self.generate_summary_output(self.log_dir)
        if options.print_profile:
            self.print_all (self.get_profile_string())
        return build_passed and tests_passed
Add a no-rosdep argument to turn off rosdep checking.
#! /usr/bin/env python
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com
from __future__ import with_statement
import os
import re
import distutils.version
import sys, string
import subprocess
import time
import getopt
import roslib
import roslib.rospack
import roslib.rosenv
import roslib.stacks
import threading
import math
import parallel_build
from optparse import OptionParser
import rosdep
class RosMakeAll:
def __init__(self):
    # result/output/profile are keyed first by make target (None for the
    # default target, e.g. "test" or "clean"), then by package name.
    self.result = {}
    # Cache of package name -> package directory (see get_path()).
    self.paths = {}
    self.dependency_tracker = parallel_build.DependencyTracker()
    self.flag_tracker = parallel_build.PackageFlagTracker(self.dependency_tracker)
    # Captured make output per target/package.
    self.output = {}
    self.verbose = False
    self.full_verbose = False
    # Build durations in seconds per target/package.
    self.profile = {}
    # Passed to make as ROS_PARALLEL_JOBS="-jN".
    self.ros_parallel_jobs = parallel_build.num_cpus()
    # Dependency-ordered list of packages to build.
    self.build_list = []
    self.start_time = time.time()
    self.log_dir = ""
    self.logging_enabled = True
def num_packages_built(self, argument=None):
    """Return how many packages have a recorded result for make target *argument*.

    *argument* is the make target key used in self.result (None means the
    default build target).  The original body referenced an undefined
    global ``argument`` and raised NameError on every call; it is now an
    optional parameter, and a target with no recorded results counts as 0
    instead of raising KeyError.
    """
    return len(self.result.get(argument, {}))
def get_path(self, package):
    """Return the directory of *package*, memoized in self.paths."""
    if package not in self.paths:
        self.paths[package] = roslib.packages.get_pkg_dir(package)
    return self.paths[package]
def check_rosdep(self, packages):
    """Check system-dependency (rosdep) compliance for *packages*.

    Returns True when every rosdep is satisfied; otherwise prints the
    offending rosdep output and returns False.
    """
    # Original code passed a format string containing %s with no arguments,
    # so the message printed a literal "%s"; interpolate the package list.
    self.print_all("Checking rosdeps compliance for packages %s. This may take a minute." % packages)
    r = rosdep.core.Rosdep(packages, robust=True)
    output = r.check()
    if len(output) == 0:
        self.print_all("Rosdep check passed all packages")
        return True
    else:
        self.print_all("Rosdep check failed packages: %s" % output)
        return False
def install_rosdeps(self, packages, default_yes):
    """Run rosdep install for *packages*; return True on success.

    May prompt the user for sudo permissions.  NOTE: Python 2 except
    syntax below -- this module predates Python 3.
    """
    self.print_all("Generating Install Script using rosdep then executing. This may take a minute, you will be prompted for permissions. . .")
    r = rosdep.core.Rosdep(packages, robust=True)
    try:
        r.install(include_duplicates=False, default_yes=default_yes);
        self.print_all("Rosdep successfully installed all packages")
        return True
    except rosdep.RosdepException, e:
        self.print_all( "ERROR: %s"%e)
        return False
def build_or_recurse(self,p):
    """Depth-first append *p* and its dependencies to self.build_list.

    Dependencies are appended before *p* so the list is build-ordered.
    A missing package aborts the run unless robust_build is set.
    """
    if p in self.build_list:
        return
    for d in self.dependency_tracker.get_deps_1(p):
        self.build_or_recurse(d)
    try: # append it ot the list only if present
        self.get_path(p)
        self.build_list.append(p)
    except roslib.packages.InvalidROSPkgException, ex:
        if not self.robust_build:
            self.print_all("Exiting due to missing package: %s"%ex)
            sys.exit(-1)
        else:
            self.print_all("!"*20 + " Package %s does not exist. %s"%(p, ex) + "!"*20)
def parallel_build_pkgs(self, build_queue, argument = None, threads = 1):
    """Drain *build_queue* using *threads* CompileThread workers.

    Resets the per-target bookkeeping for *argument* and returns True
    only if the queue finished successfully and every package passed.
    """
    self.profile[argument] = {}
    self.output[argument] = {}
    self.result[argument] = {}
    cts = []
    for i in xrange(0, threads):
        ct = parallel_build.CompileThread(str(i), build_queue, self, argument)
        ct.start()
        cts.append(ct)
    for ct in cts:
        try:
            ct.join()
            #print "naturally ended thread", ct
        except KeyboardInterrupt:
            # Ctrl-C: stop the queue from handing out more work, then wait
            # for this worker to wind down.
            self.print_all( "Caught KeyboardInterrupt. Stopping build.")
            build_queue.stop()
            ct.join()
            pass
    all_pkgs_passed = True
    for v in self.result[argument].values():
        all_pkgs_passed = v and all_pkgs_passed
    build_passed = build_queue.succeeded() and all_pkgs_passed
    return build_passed
def build_package(self, package, argument=None):
    """Run ``make [argument]`` inside *package*'s directory.

    Returns (returncode, combined stdout+stderr text).
    """
    env = os.environ.copy()
    env['ROS_PARALLEL_JOBS'] = "-j%d" % self.ros_parallel_jobs
    env['SVN_CMDLINE'] = "svn --non-interactive"
    shell_command = "cd %s && make " % self.get_path(package)
    if argument:
        shell_command += argument
    cmd = ["bash", "-c", shell_command]
    self.print_full_verbose(cmd)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, env=env)
    # stderr is merged into stdout by the redirect above, so the second
    # element of communicate() is always None.
    pstd_out, _unused_err = proc.communicate()
    return (proc.returncode, pstd_out)
def build(self, p, argument = None, robust_build=False):
    """Build one package *p*, honoring skip flags and recording results.

    Returns (success, status_string) and updates self.result, self.output
    and self.profile under the *argument* target key.
    """
    return_string = ""
    try:
        if p == "rospack":
            return_string = ("[SKIP] rosmake uses rospack. If building it is already built, if cleaning it will be cleaned at the end.")
            return (True, return_string) # This will be caught later
        # warn if ROS_BUILD_BLACKLIST encountered if applicable
        if not self.skip_blacklist and self.flag_tracker.is_blacklisted(p):
            self.print_all ("!"*20 + " Building package %s. ROS_BUILD_BLACKLIST ENCOUNTERED in package(s): %s --- TRYING TO BUILD ANYWAY"%(p, self.flag_tracker.is_blacklisted(p)) + "!"*20)
        if self.skip_blacklist and self.flag_tracker.is_blacklisted(p):
            self.result[argument][p] = True
            return_string = ("[SKIP] due to ROS_BUILD_BLACKLIST in %s"%self.flag_tracker.is_blacklisted(p))
            self.output[argument][p] = "ROS_BUILD_BLACKLIST"
        elif self.skip_blacklist_osx and self.flag_tracker.is_blacklisted_osx(p):
            self.result[argument][p] = True
            return_string = ("[SKIP] due to ROS_BUILD_BLACKLIST_OSX")
            self.output[argument][p] = "ROS_BUILD_BLACKLIST_OSX"
        elif self.flag_tracker.has_nobuild(p):
            self.result[argument][p] = True
            return_string = ("[SKIP] due to ROS_NOBUILD")
            self.output[argument][p] = "ROS_NOBUILD"
        elif not self.flag_tracker.has_makefile(p):
            self.result[argument][p] = True
            return_string = ("[SKIP] due do to no Makefile")
            self.output[argument][p] = "No Makefile Present"
        else:
            start_time = time.time()
            (returncode, pstd_out) = self.build_package(p, argument)
            self.profile[argument][p] = time.time() - start_time
            self.output[argument][p] = pstd_out
            if argument:
                log_type = "build_%s"%argument
            else:
                log_type = "build"
            if not returncode:
                self.print_full_verbose( pstd_out)
                self.result[argument][p] = True
                num_warnings = len(re.findall("warning:", pstd_out))
                if num_warnings > 0:
                    return_string = ("[PASS] [ %.2f seconds ] -- WARNING: %d compiler warnings"%(self.profile[argument][p], num_warnings))
                else:
                    return_string = ("[PASS] [ %.2f seconds ]"%( self.profile[argument][p]))
                self.output_to_file(p, log_type, pstd_out, num_warnings > 0)
            else:
                # Classify the failure from make's output text.
                no_target = len(re.findall("No rule to make target", pstd_out)) > 0
                interrupt = len(re.findall("Interrupt", pstd_out)) > 0
                if no_target:
                    return_string = ( "[SKIP] No rule to make target %s"%( argument))
                elif interrupt:
                    return_string = ("[Interrupted]" )
                else:
                    return_string = ( "[FAIL] [ %.2f seconds ]"%( self.profile[argument][p]))
                # A missing make target counts as success (nothing to do).
                self.result[argument][p] = no_target
                if self.robust_build or interrupt:
                    self.print_verbose( pstd_out)
                else:
                    self.print_tail( pstd_out)
                self.output_to_file(p, log_type, pstd_out, always_print= not (no_target or interrupt))
                return (False, return_string)
        return (True, return_string) # this means that we didn't error in any case above
    except roslib.packages.InvalidROSPkgException, ex:
        self.result[argument][p] = False
        self.print_verbose ("[SKIP] Package not found\n")
        self.output[argument][p] = "Package not found %s"%ex
        return (False, return_string)
def output_to_file(self, package, log_type, stdout, always_print= False):
    """Write captured build output to <log_dir>/<package>/<log_type>_output.log.

    No-op when logging is disabled.  The destination path is announced
    loudly when *always_print* is set, otherwise only in full-verbose mode.
    """
    if not self.logging_enabled:
        return
    package_log_dir = os.path.join(self.log_dir, package)
    if not os.path.exists(package_log_dir):
        os.makedirs(package_log_dir)
    log_path = os.path.join(package_log_dir, log_type + "_output.log")
    with open(log_path, 'w') as log_file:
        log_file.write(stdout)
    announcement = "Output from build of package %s written to:\n[ rosmake ] %s" % (
        package, log_path)
    if always_print:
        self.print_all(announcement)
    else:
        self.print_full_verbose(announcement)
    def generate_summary_output(self, log_dir):
        """Write summary files (build/test failures, profile) into log_dir.

        No-op when logging is disabled.  The results dict is keyed by build
        phase: the None key holds plain build results, "test" holds test
        results; failure summaries are only emitted for phases that ran.
        """
        if not self.logging_enabled:
            return
        self.print_all("Summary output to directory")
        self.print_all("%s"%self.log_dir)
        # Build-phase failures (phase key None).
        if None in self.result.keys():
            if len(self.result[None].keys()) > 0:
                buildfail_filename = os.path.join(log_dir, "buildfailures.txt")
                with open(buildfail_filename, 'w') as bf:
                    bf.write("Build failures:\n")
                    for key in self.build_list:
                        if key in self.result[None].keys() and self.result[None][key] == False:
                            bf.write("%s\n"%key)
                if None in self.output.keys():
                    # Same failures, but with the captured build output appended.
                    buildfail_context_filename = os.path.join(log_dir, "buildfailures-with-context.txt")
                    with open(buildfail_context_filename, 'w') as bfwc:
                        bfwc.write("Build failures with context:\n")
                        for key in self.build_list:
                            if key in self.result[None].keys() and self.result[None][key] == False:
                                bfwc.write("---------------------\n")
                                bfwc.write("%s\n"%key)
                                if key in self.output[None]:
                                    bfwc.write(self.output[None][key])
        # Test-phase failures.
        if "test" in self.result.keys():
            if len(self.result["test"].keys()) > 0:
                testfail_filename = os.path.join(log_dir, "testfailures.txt")
                with open(testfail_filename, 'w') as btwc:
                    btwc.write("Test failures:\n")
                    for key in self.build_list:
                        if key in self.result["test"].keys() and self.result["test"][key] == False:
                            btwc.write("%s\n"%key)
                if "test" in self.output.keys():
                    testfail_filename = os.path.join(log_dir, "testfailures-with-context.txt")
                    with open(testfail_filename, 'w') as btwc:
                        btwc.write("Test failures with context:\n")
                        for key in self.build_list:
                            if key in self.result["test"].keys() and self.result["test"][key] == False:
                                btwc.write("%s\n"%key)
                                if key in self.output["test"]:
                                    btwc.write(self.output["test"][key])
        # Always write the timing profile.
        profile_filename = os.path.join(log_dir, "profile.txt")
        with open(profile_filename, 'w') as pf:
            pf.write(self.get_profile_string())
    def get_profile_string(self):
        """Render a per-package build/test status and timing table.

        For each package in build order, shows build outcome, build time,
        test outcome and test time, followed by cumulative/elapsed totals
        and the resulting parallel speedup factor.
        """
        output = '--------------\nProfile\n--------------\n'
        total = 0.0
        count = 1
        for key in self.build_list:
            # Index 0 = never ran, 1 = passed, 2 = failed.
            build_results = ["[Not Built ]", "[ Built ]", "[Build Fail]"];
            test_results = ["[Untested ]", "[Test Pass]", "[Test Fail]"];
            build_result = 0
            test_result = 0
            test_time = 0.0
            build_time = 0.0
            if None in self.result.keys():
                if key in self.result[None].keys():
                    if self.result[None][key] == True:
                        build_result = 1
                    else:
                        build_result = 2
            if "test" in self.profile.keys():
                if key in self.result["test"].keys():
                    if self.result["test"][key] == True:
                        test_result = 1
                    else:
                        test_result = 2
            if None in self.profile.keys():
                if key in self.profile[None].keys():
                    build_time = self.profile[None][key]
            if "test" in self.profile.keys():
                if key in self.profile["test"].keys():
                    test_time = self.profile["test"][key]
            # Build time is shown as minutes:seconds; test time as raw seconds.
            output = output + "%3d: %s in %d:%.2f %s in %.2f --- %s\n"% (count, build_results[build_result], math.floor(build_time/60), build_time%60 , test_results[test_result], test_time, key)
            total = total + build_time
            count = count + 1
        elapsed_time = self.finish_time - self.start_time
        output = output + "----------------\n" + "%.2f Cumulative, %.2f Elapsed, %.2f Speedup \n"%(total, elapsed_time, float(total) / float(elapsed_time))
        return output
def print_all(self, s, newline = True, thread_name=None):
if thread_name == None:
if newline:
print "[ rosmake ]", s
else:
print "[ rosmake ]", s,
sys.stdout.flush()
else:
if newline:
print "[rosmake-%s]"%thread_name, s
else:
print "[rosmake-%s]"%thread_name, s
sys.stdout.flush()
    def print_verbose(self, s, thread_name=None):
        """Print s only when verbose (or full-verbose) mode is enabled.

        NOTE(review): when thread_name is falsy this prints directly rather
        than delegating to print_all, so stdout is not flushed here.
        """
        if self.verbose or self.full_verbose:
            if thread_name:
                self.print_all(s, thread_name=thread_name)
            else:
                print "[ rosmake ]", s
def print_full_verbose(self, s):
if self.full_verbose:
print "[ rosmake ] ", s
def print_tail(self, s, tail_lines=40):
lines = s.splitlines()
num_lines = min(len(lines), tail_lines)
if num_lines == tail_lines:
print "[ rosmake ] Last %d lines"%num_lines
else:
print "[ rosmake ] All %d lines"%num_lines
print "{" + "-"*79
for l in xrange(-num_lines, -1):
print " %s"%lines[l]
print "-"*79 + "}"
def assert_rospack_built(self):
if self.flag_tracker.has_nobuild("rospack"):
return True
ret_val = subprocess.call(["bash", "-c", "cd %s && make "%os.path.join(os.environ["ROS_ROOT"], "tools/rospack")])
ret_val2 = subprocess.call(["bash", "-c", "cd %s && make "%os.path.join(os.environ["ROS_ROOT"], "3rdparty/gtest")])
return ret_val and ret_val2
# The check for presence doesn't check for updates
#if os.path.exists(os.path.join(os.environ["ROS_ROOT"], "bin/rospack")):
# return True
#else:
# print "Rosmake detected that rospack was not built. Building it for you because it is required."
# return subprocess.call(["make", "-C", os.path.join(os.environ["ROS_ROOT"], "tools/rospack")])
def is_rosout_built(self):
return os.path.exists(os.path.join(roslib.packages.get_pkg_dir("rosout"), "rosout"))
    def main(self):
        """Entry point: parse CLI options, resolve the package set, build and
        optionally test it, then write logs and summaries.

        Returns True when every requested build (and test, if enabled)
        passed, False otherwise.
        """
        # --- Option parsing -------------------------------------------------
        parser = OptionParser(usage="usage: %prog [options] COMMAND PACKAGE LIST", prog='rosmake')
        parser.add_option("--test-only", dest="test_only", default=False,
                          action="store_true", help="only run tests")
        parser.add_option("-t", dest="test", default=False,
                          action="store_true", help="build and test packages")
        parser.add_option("-a", "--all", dest="build_all", default=False,
                          action="store_true", help="select all packages")
        parser.add_option("-i", "--mark-installed", dest="mark_installed", default=False,
                          action="store_true", help="On successful build, mark packages as installed with ROS_NOBUILD")
        parser.add_option("-u", "--unmark-installed", dest="unmark_installed", default=False,
                          action="store_true", help="Remove ROS_NOBUILD from the specified packages. This will not build anything.")
        parser.add_option("-v", dest="verbose", default=False,
                          action="store_true", help="display errored builds")
        parser.add_option("-r","-k", "--robust", dest="robust", default=False,
                          action="store_true", help="do not stop build on error")
        parser.add_option("-V", dest="full_verbose", default=False,
                          action="store_true", help="display all builds")
        parser.add_option("-s", "--specified-only", dest="specified_only", default=False,
                          action="store_true", help="only build packages specified on the command line")
        parser.add_option("--buildtest", dest="buildtest",
                          action="append", help="package to buildtest")
        parser.add_option("--buildtest1", dest="buildtest1",
                          action="append", help="package to buildtest1")
        parser.add_option("--output", dest="output_dir",
                          action="store", help="where to output results")
        parser.add_option("--pre-clean", dest="pre_clean",
                          action="store_true", help="run make clean first")
        parser.add_option("--disable-logging", dest="logging_enabled", default=True,
                          action="store_false", help="turn off all logs")
        parser.add_option("--target", dest="target",
                          action="store", help="run make with this target")
        parser.add_option("--pjobs", dest="ros_parallel_jobs", type="int",
                          action="store", help="run make with this N jobs '-j=N'")
        parser.add_option("--threads", dest="threads", type="int", default = parallel_build.num_cpus(),
                          action="store", help="Build up to N packages in parallel")
        parser.add_option("--profile", dest="print_profile", default=False,
                          action="store_true", help="print time profile after build")
        parser.add_option("--skip-blacklist", dest="skip_blacklist",
                          default=False, action="store_true",
                          help="skip packages containing a file called ROS_BUILD_BLACKLIST (Default behavior will ignore the presence of ROS_BUILD_BLACKLIST)")
        parser.add_option("--skip-blacklist-osx", dest="skip_blacklist_osx",
                          default=False, action="store_true",
                          help="skip packages containing a file called ROS_BUILD_BLACKLIST_OSX (Default behavior will ignore the presence of ROS_BUILD_BLACKLIST_OSX)")
        parser.add_option("--rosdep-install", dest="rosdep_install",
                          action="store_true", help="call rosdep install before running")
        parser.add_option("--rosdep-yes", dest="rosdep_yes",
                          action="store_true", help="call rosdep install with default yes argument")
        parser.add_option("--no-rosdep", dest="rosdep_disabled",
                          action="store_true", help="disable the default check of rosdep")
        options, args = parser.parse_args()
        # --test-only runs tests without building; -t does both.
        testing = False
        building = True
        if options.test_only:
            testing = True
            building = False
        elif options.test:
            testing = True
        if options.ros_parallel_jobs:
            self.ros_parallel_jobs = options.ros_parallel_jobs
        self.robust_build = options.robust
        self.threads = options.threads
        self.skip_blacklist = options.skip_blacklist
        self.skip_blacklist_osx = options.skip_blacklist_osx
        self.logging_enabled = options.logging_enabled
        # pass through verbosity options
        self.full_verbose = options.full_verbose
        self.verbose = options.verbose
        # --- Resolve which packages to operate on ---------------------------
        packages = []
        #load packages from arguments
        if options.build_all:
            packages = roslib.packages.list_pkgs()
            self.print_all( "Building all packages")
        else: # no need to extend if all already selected
            if options.buildtest:
                for p in options.buildtest:
                    packages.extend(roslib.rospack.rospack_depends_on(p))
                    self.print_all( "buildtest requested for package %s adding it and all dependent packages: "%p)
            if options.buildtest1:
                for p in options.buildtest1:
                    packages.extend(roslib.rospack.rospack_depends_on_1(p))
                    self.print_all( "buildtest1 requested for package %s adding it and all depends-on1 packages: "%p)
            if len(packages) == 0 and len(args) == 0:
                # No explicit selection: fall back to the package or stack
                # that owns the current working directory, if any.
                p = os.path.basename(os.path.abspath('.'))
                try:
                    if (os.path.samefile(roslib.packages.get_pkg_dir(p), '.')):
                        packages = [p]
                        self.print_all( "No package specified. Building %s"%packages)
                    else:
                        self.print_all("No package selected and the current directory is not the correct path for package '%s'."%p)
                except roslib.packages.InvalidROSPkgException, ex:
                    try:
                        if (roslib.stacks.get_stack_dir(p) == os.path.abspath('.')):
                            packages = [p]
                            self.print_all( "No package specified. Building stack %s"%packages)
                        else:
                            self.print_all("No stack selected and the current directory is not the correct path for stack '%s'."%p)
                    except roslib.stacks.InvalidROSStackException, ex2:
                        self.print_all("No package or stack specified. And current directory '%s' is not a package name or stack name."%p)
                        #sys.exit(-1)
            else:
                packages.extend(args)
        # rosout is required infrastructure; build it if missing.
        if not self.is_rosout_built():
            packages.append("rosout")
            self.print_all("Detected rosout not built, adding it to the build")
        self.print_all( "Packages requested are: %s"%packages)
        # Setup logging
        if self.logging_enabled:
            date_time_stamp = "rosmake_output-" + time.strftime("%Y%m%d-%H%M%S")
            if options.output_dir:
                #self.log_dir = os.path.join(os.getcwd(), options.output_dir, date_time_stamp);
                self.log_dir = os.path.abspath(options.output_dir)
            else:
                self.log_dir = os.path.join(roslib.rosenv.get_ros_home(), "rosmake", date_time_stamp);
            self.print_all("Logging to directory")
            self.print_all("%s"%self.log_dir)
            if os.path.exists (self.log_dir) and not os.path.isdir(self.log_dir):
                self.print_all( "Log destination %s is a file; please remove it or choose a new destination"%self.log_dir)
                sys.exit(1)
            if not os.path.exists (self.log_dir):
                os.makedirs (self.log_dir)
        # Expand stack names into their member packages.
        (specified_packages, rejected_packages) = roslib.stacks.expand_to_packages(packages)
        self.print_all("Expanded args %s to:\n%s"%(packages, specified_packages))
        if rejected_packages:
            self.print_all("WARNING: The following args could not be parsed as stacks or packages: %s"%rejected_packages)
        if len(specified_packages) == 0:
            self.print_all("ERROR: No arguments could be parsed into valid package or stack names.")
            return False
        # make sure all dependencies are satisfied and if not warn
        if options.rosdep_install:
            self.install_rosdeps(specified_packages, options.rosdep_yes)
        elif not options.rosdep_disabled:
            self.check_rosdep(specified_packages)
        # -u only removes ROS_NOBUILD markers and exits.
        if options.unmark_installed:
            for p in specified_packages:
                if self.flag_tracker.remove_nobuild(p):
                    self.print_all("Removed ROS_NOBUILD from %s"%p)
            return True
        required_packages = specified_packages[:]
        # these packages are not in the dependency tree but are needed they only cost 0.01 seconds to build
        if "paramiko" not in specified_packages:
            required_packages.append("paramiko")
        if "pycrypto" not in specified_packages:
            required_packages.append("pycrypto")
        #generate the list of packages necessary to build(in order of dependencies)
        counter = 0
        for p in required_packages:
            counter = counter + 1
            self.print_verbose( "Processing %s and all dependencies(%d of %d requested)"%(p, counter, len(packages)))
            self.build_or_recurse(p)
        # remove extra packages if specified-only flag is set
        if options.specified_only:
            new_list = []
            for pkg in self.build_list:
                if pkg in specified_packages:
                    new_list.append(pkg)
                    self.dependency_tracker = parallel_build.DependencyTracker(specified_packages) # this will make the tracker only respond to packages in the list
            self.print_all("specified-only option was used, only building packages %s"%new_list)
            self.build_list = new_list
        # Optional pre-clean pass over the whole build list.
        if options.pre_clean:
            build_queue = parallel_build.BuildQueue(self.build_list, self.dependency_tracker, robust_build = True)
            self.parallel_build_pkgs(build_queue, "clean", threads = options.threads)
            if "rospack" in self.build_list:
                self.print_all( "Rosmake detected that rospack was requested to be cleaned. Cleaning it for it was skipped earlier.")
                subprocess.check_call(["make", "-C", os.path.join(os.environ["ROS_ROOT"], "tools/rospack"), "clean"])
        if building:
            self.assert_rospack_built()
        build_passed = True
        if building:
            self.print_verbose ("Building packages %s"% self.build_list)
            build_queue = parallel_build.BuildQueue(self.build_list, self.dependency_tracker, robust_build = options.robust)
            build_passed = self.parallel_build_pkgs(build_queue, options.target, threads = options.threads)
            if "rospack" in self.build_list and options.target == "clean":
                self.print_all( "Rosmake detected that rospack was requested to be cleaned. Cleaning it, because it was skipped earlier.")
                subprocess.check_call(["make", "-C", os.path.join(os.environ["ROS_ROOT"], "tools/rospack"), "clean"])
        # Tests run single-threaded, and only if the build succeeded.
        tests_passed = True
        if build_passed and testing:
            self.print_verbose ("Testing packages %s"% packages)
            build_queue = parallel_build.BuildQueue(specified_packages, parallel_build.DependencyTracker(specified_packages), robust_build = True)
            tests_passed = self.parallel_build_pkgs(build_queue, "test", threads = 1)
        if options.mark_installed:
            if build_passed and tests_passed:
                for p in specified_packages:
                    if self.flag_tracker.add_nobuild(p):
                        self.print_all("Marking %s as installed with a ROS_NOBUILD file"%p)
            else:
                self.print_all("All builds and tests did not pass cannot mark packages as installed. ")
        self.finish_time = time.time() #note: before profiling
        self.generate_summary_output(self.log_dir)
        if options.print_profile:
            self.print_all (self.get_profile_string())
        return build_passed and tests_passed
|
# -*- coding: utf-8 -*-
__author__ = 'XESS Corporation'
__email__ = 'info@xess.com'
from .digikey import get_digikey_price_tiers, get_digikey_part_num, get_digikey_qty_avail, get_digikey_part_html_tree
Delete __init__.py
|
#!/usr/bin/env python
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# A script to checkout a core ansible project and it's roles with ongoing reviews
# in an environment without zuul-cloner :(
import os
import sh
GERRIT_URL = "https://review.gerrithub.io"
# Jenkins/Gerrit environment: workspace root and the change under review.
WORKSPACE = os.getenv('WORKSPACE')
GERRIT_PROJECT = os.getenv('GERRIT_PROJECT')
GERRIT_REFSPEC = os.getenv('GERRIT_REFSPEC')
GERRIT_CHANGE_NUMBER = os.getenv('GERRIT_CHANGE_NUMBER')
# Core project(s) to clone, mapped to their checkout path in the workspace.
CORE_MAP = {
    'redhat-openstack/weirdo': 'weirdo'
}
# Ansible roles cloned into the core project's playbook role directories.
ROLE_MAP = {
    'redhat-openstack/ansible-role-ci-centos': 'weirdo/playbooks/roles/ci-centos',
    'redhat-openstack/ansible-role-weirdo-common': 'weirdo/playbooks/roles/common',
    'redhat-openstack/ansible-role-weirdo-kolla': 'weirdo/playbooks/roles/kolla',
    'redhat-openstack/ansible-role-weirdo-packstack': 'weirdo/playbooks/roles/packstack',
    'redhat-openstack/ansible-role-weirdo-puppet-openstack': 'weirdo/playbooks/roles/puppet-openstack'
}
def clone_project(project, path):
    """Clone a Gerrit-hosted project into the workspace at the given path."""
    source = "{0}/{1}".format(GERRIT_URL, project)
    destination = "{0}/{1}".format(WORKSPACE, path)
    print(sh.git.clone(source, destination))
def checkout_review(project, path):
    """Fetch GERRIT_REFSPEC for the project and check out FETCH_HEAD in place."""
    source = "{0}/{1}".format(GERRIT_URL, project)
    destination = "{0}/{1}".format(WORKSPACE, path)
    git_dir_arg = "--git-dir={0}/.git".format(destination)
    work_tree_arg = "--work-tree={0}".format(destination)
    print("Doing a fetch and checkout of {0}/{1}".format(GERRIT_URL, GERRIT_CHANGE_NUMBER))
    print(sh.git(git_dir_arg, "fetch", source, GERRIT_REFSPEC))
    print(sh.git(git_dir_arg, work_tree_arg, "checkout", "FETCH_HEAD"))
if __name__ == '__main__':
    # Clone core(s), role(s) and checkout review if need be.
    # Fix: dict.items() returns a view in Python 3 and views don't support
    # "+"; materialize as lists so this works under both Python 2 and 3.
    for project, path in list(CORE_MAP.items()) + list(ROLE_MAP.items()):
        clone_project(project, path)
        if project == GERRIT_PROJECT:
            checkout_review(project, path)
No longer clone the ci-centos role for weirdo
Change-Id: I7983b5aaad5f1162a203db0e6b2845555f78cf47
#!/usr/bin/env python
# Copyright Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# A script to checkout a core ansible project and it's roles with ongoing reviews
# in an environment without zuul-cloner :(
import os
import sh
GERRIT_URL = "https://review.gerrithub.io"
# Jenkins/Gerrit environment: workspace root and the change under review.
WORKSPACE = os.getenv('WORKSPACE')
GERRIT_PROJECT = os.getenv('GERRIT_PROJECT')
GERRIT_REFSPEC = os.getenv('GERRIT_REFSPEC')
GERRIT_CHANGE_NUMBER = os.getenv('GERRIT_CHANGE_NUMBER')
# Core project(s) to clone, mapped to their checkout path in the workspace.
CORE_MAP = {
    'redhat-openstack/weirdo': 'weirdo'
}
# Ansible roles cloned into the core project's playbook role directories.
ROLE_MAP = {
    'redhat-openstack/ansible-role-weirdo-common': 'weirdo/playbooks/roles/common',
    'redhat-openstack/ansible-role-weirdo-kolla': 'weirdo/playbooks/roles/kolla',
    'redhat-openstack/ansible-role-weirdo-packstack': 'weirdo/playbooks/roles/packstack',
    'redhat-openstack/ansible-role-weirdo-puppet-openstack': 'weirdo/playbooks/roles/puppet-openstack'
}
def clone_project(project, path):
    """Clone a Gerrit-hosted project into the workspace at the given path."""
    source = "{0}/{1}".format(GERRIT_URL, project)
    destination = "{0}/{1}".format(WORKSPACE, path)
    print(sh.git.clone(source, destination))
def checkout_review(project, path):
    """Fetch GERRIT_REFSPEC for the project and check out FETCH_HEAD in place."""
    source = "{0}/{1}".format(GERRIT_URL, project)
    destination = "{0}/{1}".format(WORKSPACE, path)
    git_dir_arg = "--git-dir={0}/.git".format(destination)
    work_tree_arg = "--work-tree={0}".format(destination)
    print("Doing a fetch and checkout of {0}/{1}".format(GERRIT_URL, GERRIT_CHANGE_NUMBER))
    print(sh.git(git_dir_arg, "fetch", source, GERRIT_REFSPEC))
    print(sh.git(git_dir_arg, work_tree_arg, "checkout", "FETCH_HEAD"))
if __name__ == '__main__':
    # Clone core(s), role(s) and checkout review if need be.
    # Fix: dict.items() returns a view in Python 3 and views don't support
    # "+"; materialize as lists so this works under both Python 2 and 3.
    for project, path in list(CORE_MAP.items()) + list(ROLE_MAP.items()):
        clone_project(project, path)
        if project == GERRIT_PROJECT:
            checkout_review(project, path)
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from corehq.apps.data_interfaces.models import (
AutomaticUpdateRule,
MatchPropertyDefinition,
CreateScheduleInstanceActionDefinition,
)
from corehq.apps.domain.models import Domain
from corehq.apps.reminders.models import (
CaseReminder,
CaseReminderHandler,
REMINDER_TYPE_DEFAULT,
REMINDER_TYPE_ONE_TIME,
REMINDER_TYPE_KEYWORD_INITIATED,
REMINDER_TYPE_SURVEY_MANAGEMENT,
UI_SIMPLE_FIXED,
EVENT_AS_OFFSET,
METHOD_SMS,
METHOD_SMS_CALLBACK,
METHOD_SMS_SURVEY,
METHOD_IVR_SURVEY,
METHOD_EMAIL,
RECIPIENT_USER,
RECIPIENT_OWNER,
RECIPIENT_CASE,
RECIPIENT_SURVEY_SAMPLE,
RECIPIENT_PARENT_CASE,
RECIPIENT_SUBCASE,
RECIPIENT_USER_GROUP,
RECIPIENT_LOCATION,
CASE_CRITERIA,
MATCH_EXACT,
MATCH_REGEX,
MATCH_ANY_VALUE,
DAY_ANY,
)
from corehq.messaging.scheduling.models import (
AlertSchedule,
AlertEvent,
TimedSchedule,
SMSContent,
EmailContent,
)
from corehq.messaging.scheduling.scheduling_partitioned.models import (
CaseScheduleInstanceMixin,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
)
from corehq.messaging.tasks import initiate_messaging_rule_run
from corehq.sql_db.util import run_query_across_partitioned_databases
from corehq.toggles import REMINDERS_MIGRATION_IN_PROGRESS
from datetime import time
from django.db import transaction
from django.db.models import Q
from django.core.management.base import BaseCommand
from six import moves
from time import sleep
def log(message):
    """Print message and append it (with a newline) to the migration log file.

    Bug fix: the file was previously opened with 'r+', which raises IOError
    when the file does not exist and, because each call reopens the file at
    offset 0, silently overwrote earlier log entries.  Append mode creates
    the file on first use and preserves prior content.
    """
    print(message)
    with open('new_reminders_migration.log', 'a') as f:
        f.write(message)
        f.write('\n')
class BaseMigrator(object):
    """Interface for migrating one legacy reminder definition to the new
    messaging framework."""
    def migrate(self):
        # Create the new-framework schedule/rule objects for this definition.
        raise NotImplementedError
    def migrate_schedule_instances(self):
        # Copy the per-case run state over to the new instance models.
        raise NotImplementedError
class CaseReminderHandlerMigrator(BaseMigrator):
    """Migrates one CaseReminderHandler (couch) to an AutomaticUpdateRule
    plus schedule (SQL), including its per-case reminder instances."""

    def __init__(self, handler, rule_migration_function, schedule_migration_function):
        self.handler = handler
        self.rule_migration_function = rule_migration_function
        self.schedule_migration_function = schedule_migration_function
        # Number of source CaseReminders sharing a case_id with an earlier one.
        self.source_duplicate_count = 0

    def migrate(self):
        """Create the new schedule and rule atomically; sets self.schedule
        and self.rule for later steps."""
        with transaction.atomic():
            self.schedule = self.schedule_migration_function(self.handler)
            self.rule = self.rule_migration_function(self.handler, self.schedule)

    def migrate_schedule_instances(self):
        """Copy each source CaseReminder into a CaseAlertScheduleInstance.

        Only AlertSchedules are supported here.  Duplicate case_ids in the
        source are skipped (first one wins) and counted.
        """
        if not isinstance(self.schedule, AlertSchedule):
            raise TypeError("Expected AlertSchedule")
        seen_case_ids = set()
        # The rule has a single action with a single (type, id) recipient.
        recipient = self.rule.memoized_actions[0].definition.recipients[0]
        for reminder in self.get_source_instances():
            if reminder.case_id in seen_case_ids:
                self.source_duplicate_count += 1
                continue
            seen_case_ids.add(reminder.case_id)
            instance = CaseAlertScheduleInstance(
                domain=self.rule.domain,
                recipient_type=recipient[0],
                recipient_id=recipient[1],
                current_event_num=reminder.current_event_sequence_num,
                schedule_iteration_num=reminder.schedule_iteration_num,
                next_event_due=reminder.next_fire,
                active=reminder.active,
                alert_schedule_id=self.schedule.schedule_id,
                case_id=reminder.case_id,
                rule_id=self.rule.pk,
            )
            # Errored-but-active reminders are fast-forwarded past stale events.
            if reminder.active and reminder.error:
                self.schedule.move_to_next_event_not_in_the_past(instance)
            instance.save(force_insert=True)

    def get_source_instances(self):
        """Return all legacy CaseReminders for this handler (couch view)."""
        return list(CaseReminder.view(
            'reminders/by_domain_handler_case',
            startkey=[self.handler.domain, self.handler._id],
            endkey=[self.handler.domain, self.handler._id, {}],
            include_docs=True
        ).all())

    def get_target_instances(self):
        """Return the migrated instances across all partitioned databases."""
        if isinstance(self.schedule, AlertSchedule):
            return list(run_query_across_partitioned_databases(
                CaseAlertScheduleInstance,
                Q(alert_schedule_id=self.schedule.schedule_id),
            ))
        elif isinstance(self.schedule, TimedSchedule):
            return list(run_query_across_partitioned_databases(
                CaseTimedScheduleInstance,
                Q(timed_schedule_id=self.schedule.schedule_id),
            ))
        else:
            raise TypeError("Expected AlertSchedule or TimedSchedule")

    def print_status(self):
        """Log source/target instance counts; also caches the current target
        instance ids on self.target_instance_ids for later comparison."""
        source_instances = self.get_source_instances()
        target_instances = self.get_target_instances()
        source_instance_count = len(source_instances)
        active_source_instance_count = len([i for i in source_instances if i.active])
        target_instance_count = len(target_instances)
        active_target_instance_count = len([i for i in target_instances if i.active])
        self.target_instance_ids = set([i.schedule_instance_id for i in target_instances])
        log("\n")
        log("--- CaseReminderHandler %s to AutomaticUpdateRule %s ---" % (self.handler._id, self.rule.pk))
        log("Duplicates: %s" % self.source_duplicate_count)
        log("Source Count: %s" % source_instance_count)
        log("Target Count: %s" % target_instance_count)
        log("Source Active Count: %s" % active_source_instance_count)
        log("Target Active Count: %s" % active_target_instance_count)
class BroadcastMigrator(BaseMigrator):
    """Migrator for one-time broadcast reminders.

    NOTE(review): only __init__ is visible here; migrate() and
    migrate_schedule_instances() appear to still inherit the
    NotImplementedError stubs from BaseMigrator -- confirm before use.
    """
    def __init__(self, handler, broadcast_migration_function, schedule_migration_function):
        self.handler = handler
        self.broadcast_migration_function = broadcast_migration_function
        self.schedule_migration_function = schedule_migration_function
def get_extra_scheduling_options(handler):
    """Build the extra_options dict passed to schedule creation.

    Raises ValueError for a default-type reminder with
    include_child_locations set, which the migration does not expect.
    """
    include_children = handler.include_child_locations
    if include_children and handler.reminder_type == REMINDER_TYPE_DEFAULT:
        raise ValueError("Unexpected value for include_child_locations for %s" % handler._id)
    options = {}
    options['active'] = handler.active
    options['default_language_code'] = handler.default_lang
    options['include_descendant_locations'] = include_children
    return options
def check_days_until(message_dict):
    """Raise ValueError if any translated message references the unsupported
    .days_until template helper."""
    if any('.days_until' in text for text in message_dict.values()):
        raise ValueError(".days_until is not supported")
def get_content(handler, event):
    """Return the new-framework content model for one reminder event.

    Only plain SMS and email reminders are supported; messages using the
    .days_until helper are rejected by check_days_until.
    """
    method = handler.method
    if method == METHOD_SMS:
        check_days_until(event.message)
        return SMSContent(message=event.message)
    if method == METHOD_EMAIL:
        check_days_until(event.subject)
        check_days_until(event.message)
        return EmailContent(subject=event.subject, message=event.message)
    raise ValueError("Unexpected method '%s'" % method)
def get_event(handler, event):
    """Translate one offset-interpreted legacy event into an AlertEvent.

    The wait is the event's day offset plus its fire time, in minutes.
    """
    if handler.event_interpretation != EVENT_AS_OFFSET:
        raise ValueError("Unexpected event_interpretation '%s'" % handler.event_interpretation)
    fire_time = event.fire_time
    total_minutes = (event.day_num * 1440) + (fire_time.hour * 60) + fire_time.minute
    return AlertEvent(minutes_to_wait=total_minutes)
def get_recipients(handler):
    """Map the legacy recipient constant to a list with one
    (recipient_type, recipient_id) pair for the new framework."""
    legacy_recipient = handler.recipient
    if legacy_recipient == RECIPIENT_CASE:
        return [(CaseScheduleInstanceMixin.RECIPIENT_TYPE_SELF, None)]
    if legacy_recipient == RECIPIENT_OWNER:
        return [(CaseScheduleInstanceMixin.RECIPIENT_TYPE_CASE_OWNER, None)]
    raise ValueError("Unexpected recipient: '%s'" % legacy_recipient)
def migrate_rule(handler, schedule):
    """Create the AutomaticUpdateRule that drives the given new schedule.

    Translates the handler's start criteria into rule criteria and attaches
    a CreateScheduleInstanceActionDefinition pointing at the schedule.
    Returns the saved rule.
    """
    rule = AutomaticUpdateRule.objects.create(
        domain=handler.domain,
        name=handler.nickname,
        case_type=handler.case_type,
        active=True,
        deleted=False,
        filter_on_server_modified=False,
        server_modified_boundary=None,
        migrated=True,
        workflow=AutomaticUpdateRule.WORKFLOW_SCHEDULING,
    )
    if not handler.start_property:
        raise ValueError("Expected start_property")
    # "_id matches any value" is true for every case, so no criteria needed.
    if not (handler.start_property == '_id' and handler.start_match_type == MATCH_ANY_VALUE):
        if handler.start_match_type == MATCH_ANY_VALUE:
            rule.add_criteria(
                MatchPropertyDefinition,
                property_name=handler.start_property,
                match_type=MatchPropertyDefinition.MATCH_HAS_VALUE,
            )
        elif handler.start_match_type == MATCH_EXACT:
            if not handler.start_value:
                raise ValueError("Expected start_value")
            rule.add_criteria(
                MatchPropertyDefinition,
                property_name=handler.start_property,
                property_value=handler.start_value,
                match_type=MatchPropertyDefinition.MATCH_EQUAL,
            )
        else:
            raise ValueError("Unexpected start_match_type '%s'" % handler.start_match_type)
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        alert_schedule_id=schedule.schedule_id if isinstance(schedule, AlertSchedule) else None,
        timed_schedule_id=schedule.schedule_id if isinstance(schedule, TimedSchedule) else None,
        recipients=get_recipients(handler),
    )
    return rule
def migrate_simple_alert_schedule(handler):
    """Create a single-event AlertSchedule from the handler's only event."""
    content = get_content(handler, handler.events[0])
    extra = get_extra_scheduling_options(handler)
    return AlertSchedule.create_simple_alert(handler.domain, content, extra_options=extra)
def migrate_custom_alert_schedule(handler):
    """Create a multi-event AlertSchedule, one (event, content) pair per
    legacy event."""
    event_content_pairs = []
    for event in handler.events:
        event_content_pairs.append((get_event(handler, event), get_content(handler, event)))
    return AlertSchedule.create_custom_alert(
        handler.domain,
        event_content_pairs,
        extra_options=get_extra_scheduling_options(handler),
    )
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument(
"--check",
action="store_true",
dest="check",
default=False,
help="Check if the migration can proceed but don't make changes",
)
def get_rule_migration_function(self, handler):
if handler.start_condition_type != CASE_CRITERIA:
return None
if handler.start_match_type in (MATCH_EXACT, MATCH_REGEX) and not handler.start_value:
return None
if handler.start_match_type not in (MATCH_EXACT, MATCH_ANY_VALUE):
return None
if not handler.start_property or '/' in handler.start_property:
return None
if handler.start_date:
return None
if handler.until:
return None
return migrate_rule
    def get_rule_schedule_migration_function(self, handler):
        """Return the schedule-migration function suited to this handler,
        or None when the handler's schedule shape is not migratable.

        Only immediate (offset-interpreted, single-iteration) SMS/email
        alerts to the case or its owner are supported.
        """
        if handler.start_condition_type != CASE_CRITERIA:
            return None
        if handler.method not in (METHOD_SMS, METHOD_EMAIL):
            return None
        if handler.include_child_locations:
            return None
        if handler.custom_content_handler:
            return None
        if handler.recipient not in (
            RECIPIENT_OWNER,
            RECIPIENT_CASE,
        ):
            return None
        if handler.user_data_filter:
            return None
        if (
            handler.event_interpretation == EVENT_AS_OFFSET and
            handler.start_date is None and
            handler.start_offset == 0 and
            handler.start_day_of_week == DAY_ANY and
            handler.max_iteration_count == 1
        ):
            # A single event firing at (or within a minute of) midnight of
            # day 0 is effectively an immediate, simple alert.
            if (
                len(handler.events) == 1 and
                handler.events[0].day_num == 0 and
                handler.events[0].fire_time in (time(0, 0), time(0, 1))
            ):
                return migrate_simple_alert_schedule
            else:
                return migrate_custom_alert_schedule
        return None
    def get_migrator(self, handler):
        """Return a migrator for this handler, or None if it can't be migrated.

        NOTE(review): reminder types other than DEFAULT and ONE_TIME fall
        through and implicitly return None as well.
        """
        if handler.locked:
            return None
        if handler.use_today_if_start_date_is_blank and handler.active and handler.start_date:
            return None
        # Sub-minute fire times have no equivalent in the new framework.
        for event in handler.events:
            if event.fire_time and event.fire_time.second != 0:
                return None
        if handler.reminder_type == REMINDER_TYPE_DEFAULT:
            rule_migration_function = self.get_rule_migration_function(handler)
            schedule_migration_function = self.get_rule_schedule_migration_function(handler)
            if rule_migration_function and schedule_migration_function:
                return CaseReminderHandlerMigrator(handler, rule_migration_function, schedule_migration_function)
            return None
        elif handler.reminder_type == REMINDER_TYPE_ONE_TIME:
            return None
def should_skip(self, handler):
return handler.reminder_type in (REMINDER_TYPE_KEYWORD_INITIATED, REMINDER_TYPE_SURVEY_MANAGEMENT)
def migration_already_done(self, domain_obj):
if domain_obj.uses_new_reminders:
log("'%s' already uses new reminders, nothing to do" % domain_obj.name)
return True
return False
    def ensure_migration_flag_enabled(self, domain):
        """Block (interactively) until the REMINDERS_MIGRATION_IN_PROGRESS
        toggle reads as enabled for the domain."""
        while not REMINDERS_MIGRATION_IN_PROGRESS.enabled(domain):
            moves.input("Please enable REMINDERS_MIGRATION_IN_PROGRESS for '%s' and hit enter..." % domain)
        log("REMINDERS_MIGRATION_IN_PROGRESS enabled for %s" % domain)
    def ensure_migration_flag_disabled(self, domain):
        """Block (interactively) until the REMINDERS_MIGRATION_IN_PROGRESS
        toggle reads as disabled for the domain."""
        while REMINDERS_MIGRATION_IN_PROGRESS.enabled(domain):
            moves.input("Please disable REMINDERS_MIGRATION_IN_PROGRESS for '%s' and hit enter..." % domain)
        log("REMINDERS_MIGRATION_IN_PROGRESS disabled for %s" % domain)
def get_handlers_to_migrate(self, domain):
handlers = CaseReminderHandler.view(
'reminders/handlers_by_domain_case_type',
startkey=[domain],
endkey=[domain, {}],
include_docs=True
).all()
return [handler for handler in handlers if not self.should_skip(handler)]
def get_migrators(self, handlers):
migrators = []
cannot_be_migrated = []
for handler in handlers:
migrator = self.get_migrator(handler)
if migrator:
migrators.append(migrator)
else:
cannot_be_migrated.append(handler)
if cannot_be_migrated:
log("The following configurations can't be migrated:")
for handler in cannot_be_migrated:
log("%s %s" % (handler._id, handler.reminder_type))
return migrators, cannot_be_migrated
def migrate_handlers(self, migrators):
for migrator in migrators:
migrator.migrate()
migrator.migrate_schedule_instances()
migrator.print_status()
def confirm(message):
while True:
answer = moves.input(message).lower()
if answer == 'y':
return True
elif answer == 'n':
return False
def get_locked_count(self, domain):
return AutomaticUpdateRule.objects.filter(
domain=domain,
workflow=AutomaticUpdateRule.WORKFLOW_SCHEDULING,
deleted=False,
locked_for_editing=True,
).count()
def refresh_instances(self, domain, migrators):
    """Re-run each migrated rule and report schedule-instance churn.

    Kicks off a messaging rule run per migrator, waits until no rules in
    *domain* remain locked, then prints which instance ids were created or
    deleted by the refresh.
    """
    log("\n")
    moves.input("Hit enter when ready to refresh instances...")
    log("Refreshing instances...")
    for migrator in migrators:
        initiate_messaging_rule_run(migrator.rule.domain, migrator.rule.pk)
    # Rules stay locked while their run is in progress; poll until all done.
    while self.get_locked_count(domain) > 0:
        sleep(5)
    log("Refresh completed.")
    for migrator in migrators:
        # print_status() recomputes target_instance_ids as a side effect,
        # so capture the pre-refresh set before calling it.
        current_target_instance_ids = migrator.target_instance_ids
        migrator.print_status()
        new_target_instance_ids = migrator.target_instance_ids
        created_instance_ids = new_target_instance_ids - current_target_instance_ids
        deleted_instance_ids = current_target_instance_ids - new_target_instance_ids
        if created_instance_ids or deleted_instance_ids:
            log("Created instance ids: %s" % created_instance_ids)
            log("Deleted instance ids: %s" % deleted_instance_ids)
        else:
            log("No instances created or deleted during refresh.")
def switch_on_new_reminders(self, domain, migrators):
    """Flip the domain to the new reminders framework and deactivate old handlers."""
    domain_obj = Domain.get_by_name(domain)
    domain_obj.uses_new_reminders = True
    domain_obj.save()
    for migrator in migrators:
        if migrator.handler.active:
            log("%s is active, deactivating..." % migrator.handler._id)
            migrator.handler.active = False
            migrator.handler.save()
        else:
            log("%s is already inactive" % migrator.handler._id)
    # Saving a handler locks it while background processing runs; wait for
    # every handler to unlock before declaring the switch complete.
    while any([handler.locked for handler in self.get_handlers_to_migrate(domain)]):
        sleep(5)
def handle(self, domain, **options):
    """Run the reminders migration for *domain*, or only validate it with --check."""
    check_only = options['check']
    domain_obj = Domain.get_by_name(domain)
    if self.migration_already_done(domain_obj):
        return

    if not check_only:
        self.ensure_migration_flag_enabled(domain)

    handlers = self.get_handlers_to_migrate(domain)
    migrators, cannot_be_migrated = self.get_migrators(handlers)
    if cannot_be_migrated:
        # get_migrators() already logged the blockers.
        return

    log("Migration can proceed")
    if check_only:
        return

    if not self.confirm("Are you sure you want to start the migration? y/n "):
        # typo fix: message previously read "Migrated halted"
        log("Migration halted")
        return

    self.migrate_handlers(migrators)
    self.refresh_instances(domain, migrators)

    if not self.confirm("Ok to switch on new reminders? y/n "):
        log("Migration halted")
        return

    self.switch_on_new_reminders(domain, migrators)
    # BUG FIX: ensure_migration_flag_disabled requires the domain argument;
    # calling it with no args raised TypeError at the very end of a migration.
    self.ensure_migration_flag_disabled(domain)
    log("Migration completed.")
Update handle() to log the migration start (domain and --check flag)
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from corehq.apps.data_interfaces.models import (
AutomaticUpdateRule,
MatchPropertyDefinition,
CreateScheduleInstanceActionDefinition,
)
from corehq.apps.domain.models import Domain
from corehq.apps.reminders.models import (
CaseReminder,
CaseReminderHandler,
REMINDER_TYPE_DEFAULT,
REMINDER_TYPE_ONE_TIME,
REMINDER_TYPE_KEYWORD_INITIATED,
REMINDER_TYPE_SURVEY_MANAGEMENT,
UI_SIMPLE_FIXED,
EVENT_AS_OFFSET,
METHOD_SMS,
METHOD_SMS_CALLBACK,
METHOD_SMS_SURVEY,
METHOD_IVR_SURVEY,
METHOD_EMAIL,
RECIPIENT_USER,
RECIPIENT_OWNER,
RECIPIENT_CASE,
RECIPIENT_SURVEY_SAMPLE,
RECIPIENT_PARENT_CASE,
RECIPIENT_SUBCASE,
RECIPIENT_USER_GROUP,
RECIPIENT_LOCATION,
CASE_CRITERIA,
MATCH_EXACT,
MATCH_REGEX,
MATCH_ANY_VALUE,
DAY_ANY,
)
from corehq.messaging.scheduling.models import (
AlertSchedule,
AlertEvent,
TimedSchedule,
SMSContent,
EmailContent,
)
from corehq.messaging.scheduling.scheduling_partitioned.models import (
CaseScheduleInstanceMixin,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
)
from corehq.messaging.tasks import initiate_messaging_rule_run
from corehq.sql_db.util import run_query_across_partitioned_databases
from corehq.toggles import REMINDERS_MIGRATION_IN_PROGRESS
from datetime import time
from django.db import transaction
from django.db.models import Q
from django.core.management.base import BaseCommand
from six import moves
from time import sleep
def log(message):
    """Echo *message* to stdout and append it to the migration log file."""
    print(message)
    with open('new_reminders_migration.log', 'a') as log_file:
        log_file.write(message + '\n')
class BaseMigrator(object):
    """Abstract base for objects that migrate one reminder configuration."""

    def migrate(self):
        """Create the new-framework rule/schedule. Subclasses must override."""
        raise NotImplementedError

    def migrate_schedule_instances(self):
        """Copy per-case reminder instances. Subclasses must override."""
        raise NotImplementedError
class CaseReminderHandlerMigrator(BaseMigrator):
    """Migrates one case-criteria CaseReminderHandler to an AutomaticUpdateRule
    plus schedule in the new reminders framework."""

    def __init__(self, handler, rule_migration_function, schedule_migration_function):
        # handler: the old-framework CaseReminderHandler being migrated
        # *_migration_function: strategy callables selected for this handler's
        # shape by the management command
        self.handler = handler
        self.rule_migration_function = rule_migration_function
        self.schedule_migration_function = schedule_migration_function
        # number of duplicate source reminders (same case) skipped during copy
        self.source_duplicate_count = 0

    def migrate(self):
        """Create the new schedule and its rule in a single transaction."""
        with transaction.atomic():
            self.schedule = self.schedule_migration_function(self.handler)
            self.rule = self.rule_migration_function(self.handler, self.schedule)

    def migrate_schedule_instances(self):
        """Copy each open CaseReminder into a CaseAlertScheduleInstance.

        Must run after migrate(). Only alert schedules are supported here;
        duplicate reminders for the same case are counted and skipped.
        """
        if not isinstance(self.schedule, AlertSchedule):
            raise TypeError("Expected AlertSchedule")
        seen_case_ids = set()
        # The migrated rule carries exactly one action with one recipient.
        recipient = self.rule.memoized_actions[0].definition.recipients[0]
        for reminder in self.get_source_instances():
            if reminder.case_id in seen_case_ids:
                self.source_duplicate_count += 1
                continue
            seen_case_ids.add(reminder.case_id)
            instance = CaseAlertScheduleInstance(
                domain=self.rule.domain,
                recipient_type=recipient[0],
                recipient_id=recipient[1],
                current_event_num=reminder.current_event_sequence_num,
                schedule_iteration_num=reminder.schedule_iteration_num,
                next_event_due=reminder.next_fire,
                active=reminder.active,
                alert_schedule_id=self.schedule.schedule_id,
                case_id=reminder.case_id,
                rule_id=self.rule.pk,
            )
            # Reminders stuck in an error state are fast-forwarded so they do
            # not fire a backlog of past events right after migration.
            if reminder.active and reminder.error:
                self.schedule.move_to_next_event_not_in_the_past(instance)
            instance.save(force_insert=True)

    def get_source_instances(self):
        """All old-framework CaseReminders belonging to this handler."""
        return list(CaseReminder.view(
            'reminders/by_domain_handler_case',
            startkey=[self.handler.domain, self.handler._id],
            endkey=[self.handler.domain, self.handler._id, {}],
            include_docs=True
        ).all())

    def get_target_instances(self):
        """All new-framework schedule instances created for this migrator's schedule."""
        if isinstance(self.schedule, AlertSchedule):
            return list(run_query_across_partitioned_databases(
                CaseAlertScheduleInstance,
                Q(alert_schedule_id=self.schedule.schedule_id),
            ))
        elif isinstance(self.schedule, TimedSchedule):
            return list(run_query_across_partitioned_databases(
                CaseTimedScheduleInstance,
                Q(timed_schedule_id=self.schedule.schedule_id),
            ))
        else:
            raise TypeError("Expected AlertSchedule or TimedSchedule")

    def print_status(self):
        """Log source/target instance counts; refreshes `target_instance_ids`
        as a side effect (relied upon by the refresh step)."""
        source_instances = self.get_source_instances()
        target_instances = self.get_target_instances()
        source_instance_count = len(source_instances)
        active_source_instance_count = len([i for i in source_instances if i.active])
        target_instance_count = len(target_instances)
        active_target_instance_count = len([i for i in target_instances if i.active])
        self.target_instance_ids = set([i.schedule_instance_id for i in target_instances])
        log("\n")
        log("--- CaseReminderHandler %s to AutomaticUpdateRule %s ---" % (self.handler._id, self.rule.pk))
        log("Duplicates: %s" % self.source_duplicate_count)
        log("Source Count: %s" % source_instance_count)
        log("Target Count: %s" % target_instance_count)
        log("Source Active Count: %s" % active_source_instance_count)
        log("Target Active Count: %s" % active_target_instance_count)
class BroadcastMigrator(BaseMigrator):
    """Migrator for one-time (broadcast) reminder handlers.

    NOTE(review): only __init__ is defined here, so the inherited migrate()
    and migrate_schedule_instances() still raise NotImplementedError —
    confirm whether the concrete implementation lives elsewhere.
    """

    def __init__(self, handler, broadcast_migration_function, schedule_migration_function):
        self.handler = handler
        self.broadcast_migration_function = broadcast_migration_function
        self.schedule_migration_function = schedule_migration_function
def get_extra_scheduling_options(handler):
    """Build the schedule options shared by every migrated handler."""
    is_default = handler.reminder_type == REMINDER_TYPE_DEFAULT
    if is_default and handler.include_child_locations:
        raise ValueError("Unexpected value for include_child_locations for %s" % handler._id)

    return {
        'active': handler.active,
        'default_language_code': handler.default_lang,
        'include_descendant_locations': handler.include_child_locations,
    }
def check_days_until(message_dict):
    """Raise ValueError if any translation uses the unsupported `.days_until` helper.

    Args:
        message_dict: mapping of language code -> message template.
    """
    # Only the message text matters; iterate values() instead of items()
    # (the language key was previously bound but never used).
    for message in message_dict.values():
        if '.days_until' in message:
            raise ValueError(".days_until is not supported")
def get_content(handler, event):
    """Build the new-framework content object for one event of *handler*."""
    method = handler.method
    if method == METHOD_SMS:
        check_days_until(event.message)
        return SMSContent(message=event.message)
    if method == METHOD_EMAIL:
        check_days_until(event.subject)
        check_days_until(event.message)
        return EmailContent(subject=event.subject, message=event.message)
    raise ValueError("Unexpected method '%s'" % handler.method)
def get_event(handler, event):
    """Convert an old-framework event to an AlertEvent (offset interpretation only)."""
    if handler.event_interpretation != EVENT_AS_OFFSET:
        raise ValueError("Unexpected event_interpretation '%s'" % handler.event_interpretation)

    # Collapse day number and fire time into a single minute offset.
    minutes = event.day_num * 1440
    minutes += event.fire_time.hour * 60
    minutes += event.fire_time.minute
    return AlertEvent(minutes_to_wait=minutes)
def get_recipients(handler):
    """Translate the old recipient constant into new-framework (type, id) pairs."""
    recipient_type_map = {
        RECIPIENT_CASE: CaseScheduleInstanceMixin.RECIPIENT_TYPE_SELF,
        RECIPIENT_OWNER: CaseScheduleInstanceMixin.RECIPIENT_TYPE_CASE_OWNER,
    }
    if handler.recipient not in recipient_type_map:
        raise ValueError("Unexpected recipient: '%s'" % handler.recipient)
    return [(recipient_type_map[handler.recipient], None)]
def migrate_rule(handler, schedule):
    """Create the AutomaticUpdateRule that replaces *handler*.

    Adds a case-property criterion mirroring the handler's start condition
    (unless the condition matches every case) and a single action that
    attaches *schedule* with the translated recipients.
    """
    rule = AutomaticUpdateRule.objects.create(
        domain=handler.domain,
        name=handler.nickname,
        case_type=handler.case_type,
        active=True,
        deleted=False,
        filter_on_server_modified=False,
        server_modified_boundary=None,
        migrated=True,
        workflow=AutomaticUpdateRule.WORKFLOW_SCHEDULING,
    )
    if not handler.start_property:
        raise ValueError("Expected start_property")
    # `_id` with MATCH_ANY_VALUE matches every case, so no criterion is needed.
    if not (handler.start_property == '_id' and handler.start_match_type == MATCH_ANY_VALUE):
        if handler.start_match_type == MATCH_ANY_VALUE:
            rule.add_criteria(
                MatchPropertyDefinition,
                property_name=handler.start_property,
                match_type=MatchPropertyDefinition.MATCH_HAS_VALUE,
            )
        elif handler.start_match_type == MATCH_EXACT:
            if not handler.start_value:
                raise ValueError("Expected start_value")
            rule.add_criteria(
                MatchPropertyDefinition,
                property_name=handler.start_property,
                property_value=handler.start_value,
                match_type=MatchPropertyDefinition.MATCH_EQUAL,
            )
        else:
            raise ValueError("Unexpected start_match_type '%s'" % handler.start_match_type)
    # Exactly one of alert_schedule_id / timed_schedule_id is set depending
    # on the concrete schedule type.
    rule.add_action(
        CreateScheduleInstanceActionDefinition,
        alert_schedule_id=schedule.schedule_id if isinstance(schedule, AlertSchedule) else None,
        timed_schedule_id=schedule.schedule_id if isinstance(schedule, TimedSchedule) else None,
        recipients=get_recipients(handler),
    )
    return rule
def migrate_simple_alert_schedule(handler):
    """Create a one-event AlertSchedule from *handler*'s single immediate event."""
    return AlertSchedule.create_simple_alert(
        handler.domain,
        get_content(handler, handler.events[0]),
        extra_options=get_extra_scheduling_options(handler),
    )
def migrate_custom_alert_schedule(handler):
    """Create a multi-event AlertSchedule mirroring all of *handler*'s events."""
    return AlertSchedule.create_custom_alert(
        handler.domain,
        [(get_event(handler, event), get_content(handler, event)) for event in handler.events],
        extra_options=get_extra_scheduling_options(handler),
    )
class Command(BaseCommand):
    """One-off command migrating a domain's reminders from the old
    CaseReminderHandler framework to the new scheduling framework."""

    def add_arguments(self, parser):
        parser.add_argument('domain')
        parser.add_argument(
            "--check",
            action="store_true",
            dest="check",
            default=False,
            help="Check if the migration can proceed but don't make changes",
        )

    def get_rule_migration_function(self, handler):
        """Return the rule-migration strategy for *handler*, or None if its
        start condition can't be represented as an AutomaticUpdateRule."""
        if handler.start_condition_type != CASE_CRITERIA:
            return None
        if handler.start_match_type in (MATCH_EXACT, MATCH_REGEX) and not handler.start_value:
            return None
        if handler.start_match_type not in (MATCH_EXACT, MATCH_ANY_VALUE):
            return None
        # Case property paths (parent/host references) aren't supported.
        if not handler.start_property or '/' in handler.start_property:
            return None
        if handler.start_date:
            return None
        if handler.until:
            return None
        return migrate_rule

    def get_rule_schedule_migration_function(self, handler):
        """Return the schedule-migration strategy for *handler*, or None if
        its schedule shape isn't supported by the migration yet."""
        if handler.start_condition_type != CASE_CRITERIA:
            return None
        if handler.method not in (METHOD_SMS, METHOD_EMAIL):
            return None
        if handler.include_child_locations:
            return None
        if handler.custom_content_handler:
            return None
        if handler.recipient not in (
            RECIPIENT_OWNER,
            RECIPIENT_CASE,
        ):
            return None
        if handler.user_data_filter:
            return None
        # Only single-iteration, offset-based schedules map onto alerts.
        if (
            handler.event_interpretation == EVENT_AS_OFFSET and
            handler.start_date is None and
            handler.start_offset == 0 and
            handler.start_day_of_week == DAY_ANY and
            handler.max_iteration_count == 1
        ):
            if (
                len(handler.events) == 1 and
                handler.events[0].day_num == 0 and
                handler.events[0].fire_time in (time(0, 0), time(0, 1))
            ):
                return migrate_simple_alert_schedule
            else:
                return migrate_custom_alert_schedule
        return None

    def get_migrator(self, handler):
        """Build a migrator for *handler*, or None when it can't be migrated."""
        if handler.locked:
            return None
        if handler.use_today_if_start_date_is_blank and handler.active and handler.start_date:
            return None
        # The new framework only fires on whole minutes.
        for event in handler.events:
            if event.fire_time and event.fire_time.second != 0:
                return None
        if handler.reminder_type == REMINDER_TYPE_DEFAULT:
            rule_migration_function = self.get_rule_migration_function(handler)
            schedule_migration_function = self.get_rule_schedule_migration_function(handler)
            if rule_migration_function and schedule_migration_function:
                return CaseReminderHandlerMigrator(handler, rule_migration_function, schedule_migration_function)
            return None
        elif handler.reminder_type == REMINDER_TYPE_ONE_TIME:
            # Broadcast migration is not implemented here yet.
            return None

    def should_skip(self, handler):
        """Keyword and survey-management reminders are out of scope entirely."""
        return handler.reminder_type in (REMINDER_TYPE_KEYWORD_INITIATED, REMINDER_TYPE_SURVEY_MANAGEMENT)

    def migration_already_done(self, domain_obj):
        """Return True (and log it) when the domain already uses new reminders."""
        if domain_obj.uses_new_reminders:
            log("'%s' already uses new reminders, nothing to do" % domain_obj.name)
            return True
        return False

    def ensure_migration_flag_enabled(self, domain):
        """Block until the REMINDERS_MIGRATION_IN_PROGRESS toggle is on for *domain*."""
        while not REMINDERS_MIGRATION_IN_PROGRESS.enabled(domain):
            moves.input("Please enable REMINDERS_MIGRATION_IN_PROGRESS for '%s' and hit enter..." % domain)
        log("REMINDERS_MIGRATION_IN_PROGRESS enabled for %s" % domain)

    def ensure_migration_flag_disabled(self, domain):
        """Block until the REMINDERS_MIGRATION_IN_PROGRESS toggle is off for *domain*."""
        while REMINDERS_MIGRATION_IN_PROGRESS.enabled(domain):
            moves.input("Please disable REMINDERS_MIGRATION_IN_PROGRESS for '%s' and hit enter..." % domain)
        log("REMINDERS_MIGRATION_IN_PROGRESS disabled for %s" % domain)

    def get_handlers_to_migrate(self, domain):
        """Fetch all of *domain*'s reminder handlers, minus the skipped types."""
        handlers = CaseReminderHandler.view(
            'reminders/handlers_by_domain_case_type',
            startkey=[domain],
            endkey=[domain, {}],
            include_docs=True
        ).all()
        return [handler for handler in handlers if not self.should_skip(handler)]

    def get_migrators(self, handlers):
        """Split *handlers* into (migrators, handlers that can't be migrated)."""
        migrators = []
        cannot_be_migrated = []
        for handler in handlers:
            migrator = self.get_migrator(handler)
            if migrator:
                migrators.append(migrator)
            else:
                cannot_be_migrated.append(handler)
        if cannot_be_migrated:
            log("The following configurations can't be migrated:")
            for handler in cannot_be_migrated:
                log("%s %s" % (handler._id, handler.reminder_type))
        return migrators, cannot_be_migrated

    def migrate_handlers(self, migrators):
        """Run the full migration for each migrator, printing status as it goes."""
        for migrator in migrators:
            migrator.migrate()
            migrator.migrate_schedule_instances()
            migrator.print_status()

    # BUG FIX: `self` was missing, so self.confirm(...) raised TypeError.
    def confirm(self, message):
        """Prompt until the user answers 'y' or 'n'; return True for 'y'."""
        while True:
            answer = moves.input(message).lower()
            if answer == 'y':
                return True
            elif answer == 'n':
                return False

    def get_locked_count(self, domain):
        """Count scheduling rules in *domain* currently locked for editing."""
        return AutomaticUpdateRule.objects.filter(
            domain=domain,
            workflow=AutomaticUpdateRule.WORKFLOW_SCHEDULING,
            deleted=False,
            locked_for_editing=True,
        ).count()

    def refresh_instances(self, domain, migrators):
        """Re-run each migrated rule and report schedule-instance churn."""
        log("\n")
        moves.input("Hit enter when ready to refresh instances...")
        log("Refreshing instances...")
        for migrator in migrators:
            initiate_messaging_rule_run(migrator.rule.domain, migrator.rule.pk)
        # Rules stay locked while their run is in progress; poll until done.
        while self.get_locked_count(domain) > 0:
            sleep(5)
        log("Refresh completed.")
        for migrator in migrators:
            # print_status() refreshes target_instance_ids as a side effect,
            # so capture the pre-refresh set first.
            current_target_instance_ids = migrator.target_instance_ids
            migrator.print_status()
            new_target_instance_ids = migrator.target_instance_ids
            created_instance_ids = new_target_instance_ids - current_target_instance_ids
            deleted_instance_ids = current_target_instance_ids - new_target_instance_ids
            if created_instance_ids or deleted_instance_ids:
                log("Created instance ids: %s" % created_instance_ids)
                log("Deleted instance ids: %s" % deleted_instance_ids)
            else:
                log("No instances created or deleted during refresh.")

    def switch_on_new_reminders(self, domain, migrators):
        """Flip the domain to new reminders and deactivate the old handlers."""
        domain_obj = Domain.get_by_name(domain)
        domain_obj.uses_new_reminders = True
        domain_obj.save()
        for migrator in migrators:
            if migrator.handler.active:
                log("%s is active, deactivating..." % migrator.handler._id)
                migrator.handler.active = False
                migrator.handler.save()
            else:
                log("%s is already inactive" % migrator.handler._id)
        # Saving a handler locks it during background processing; wait for all
        # handlers to unlock before declaring the switch complete.
        while any([handler.locked for handler in self.get_handlers_to_migrate(domain)]):
            sleep(5)

    def handle(self, domain, **options):
        """Run the reminders migration for *domain*, or only validate with --check."""
        check_only = options['check']
        log("Handling new reminders migration for %s, --check option is %s" % (domain, check_only))
        domain_obj = Domain.get_by_name(domain)
        if self.migration_already_done(domain_obj):
            return
        if not check_only:
            self.ensure_migration_flag_enabled(domain)
        handlers = self.get_handlers_to_migrate(domain)
        migrators, cannot_be_migrated = self.get_migrators(handlers)
        if cannot_be_migrated:
            return
        log("Migration can proceed")
        if check_only:
            return
        if not self.confirm("Are you sure you want to start the migration? y/n "):
            # typo fix: message previously read "Migrated halted"
            log("Migration halted")
            return
        self.migrate_handlers(migrators)
        self.refresh_instances(domain, migrators)
        if not self.confirm("Ok to switch on new reminders? y/n "):
            log("Migration halted")
            return
        self.switch_on_new_reminders(domain, migrators)
        # BUG FIX: was called without the required `domain` argument,
        # which raised TypeError at the end of every migration.
        self.ensure_migration_flag_disabled(domain)
        log("Migration completed.")
|
import logging
from typing import List, Dict, Text, Optional, Any, Set, TYPE_CHECKING
from tqdm import tqdm
import numpy as np
import json
from rasa.shared.constants import DOCS_URL_RULES
import rasa.shared.utils.io
from rasa.shared.core.events import FormValidation, UserUttered, ActionExecuted
from rasa.core.featurizers.tracker_featurizers import TrackerFeaturizer
from rasa.shared.nlu.interpreter import NaturalLanguageInterpreter
from rasa.core.policies.memoization import MemoizationPolicy
from rasa.core.policies.policy import SupportedData
from rasa.shared.core.trackers import (
DialogueStateTracker,
get_active_loop_name,
is_prev_action_listen_in_state,
)
from rasa.shared.core.generator import TrackerWithCachedStates
from rasa.core.constants import FORM_POLICY_PRIORITY
from rasa.shared.core.constants import (
USER_INTENT_RESTART,
USER_INTENT_BACK,
USER_INTENT_SESSION_START,
ACTION_LISTEN_NAME,
ACTION_RESTART_NAME,
ACTION_SESSION_START_NAME,
ACTION_DEFAULT_FALLBACK_NAME,
ACTION_BACK_NAME,
RULE_SNIPPET_ACTION_NAME,
SHOULD_NOT_BE_SET,
PREVIOUS_ACTION,
LOOP_REJECTED,
)
from rasa.shared.core.domain import InvalidDomain, State, Domain
from rasa.shared.nlu.constants import ACTION_NAME, INTENT_NAME_KEY
import rasa.core.test
if TYPE_CHECKING:
from rasa.core.policies.ensemble import PolicyEnsemble # pytype: disable=pyi-error
logger = logging.getLogger(__name__)
# These are Rasa Open Source default actions and overrule everything at any time.
DEFAULT_ACTION_MAPPINGS = {
    USER_INTENT_RESTART: ACTION_RESTART_NAME,
    USER_INTENT_BACK: ACTION_BACK_NAME,
    USER_INTENT_SESSION_START: ACTION_SESSION_START_NAME,
}

# Keys of the RulePolicy lookup table.
RULES = "rules"
RULES_FOR_LOOP_UNHAPPY_PATH = "rules_for_loop_unhappy_path"
# Markers stored in the loop-unhappy-path lookup instead of an action name.
DO_NOT_VALIDATE_LOOP = "do_not_validate_loop"
DO_NOT_PREDICT_LOOP_ACTION = "do_not_predict_loop_action"
class InvalidRule(Exception):
    """Exception that can be raised when rule training data is not valid."""

    def __init__(self, message: Text) -> None:
        # Append a pointer to the rules documentation to every message.
        self.message = message + f"\nMore info at {DOCS_URL_RULES}"

    def __str__(self) -> Text:
        # return message in error colours
        return rasa.shared.utils.io.wrap_with_color(
            self.message, color=rasa.shared.utils.io.bcolors.FAIL
        )
class RulePolicy(MemoizationPolicy):
    """Policy which handles all the rules"""

    # rules use explicit json strings (no compression), so lookup keys stay
    # human-readable and can be parsed back into states
    ENABLE_FEATURE_STRING_COMPRESSION = False

    # number of user inputs that is allowed in case rules are restricted
    ALLOWED_NUMBER_OF_USER_INPUTS = 1
@staticmethod
def supported_data() -> SupportedData:
    """The type of data supported by this policy.

    Returns:
        The data type supported by this policy (rule data).
    """
    # Rules are trained on rule data, but contradiction checks also use stories.
    return SupportedData.ML_AND_RULE_DATA
def __init__(
    self,
    featurizer: Optional[TrackerFeaturizer] = None,
    priority: int = FORM_POLICY_PRIORITY,
    lookup: Optional[Dict] = None,
    core_fallback_threshold: float = 0.3,
    core_fallback_action_name: Text = ACTION_DEFAULT_FALLBACK_NAME,
    enable_fallback_prediction: bool = True,
    restrict_rules: bool = True,
    check_rules_with_stories: bool = True,
) -> None:
    """Create a `RulePolicy` object.

    Args:
        featurizer: `Featurizer` which is used to convert conversation states to
            features.
        priority: Priority of the policy which is used if multiple policies predict
            actions with the same confidence.
        lookup: Lookup table which is used to pick matching rules for a conversation
            state.
        core_fallback_threshold: Confidence of the prediction if no rule matched
            and de-facto threshold for a core fallback.
        core_fallback_action_name: Name of the action which should be predicted
            if no rule matched.
        enable_fallback_prediction: If `True` `core_fallback_action_name` is
            predicted in case no rule matched.
        restrict_rules: If `True`, training rejects rules with more than
            `ALLOWED_NUMBER_OF_USER_INPUTS` user inputs.
        check_rules_with_stories: If `True`, training checks that rules and
            stories do not contradict each other.
    """
    self._core_fallback_threshold = core_fallback_threshold
    self._fallback_action_name = core_fallback_action_name
    self._enable_fallback_prediction = enable_fallback_prediction
    self._restrict_rules = restrict_rules
    self._check_rules_with_stories = check_rules_with_stories

    # during training we run `predict_action_probabilities` to check for
    # contradicting rules, silent prediction debug to avoid too many logs
    self._silent_prediction = False

    # max history is set to `None` in order to capture any lengths of rule stories
    super().__init__(
        featurizer=featurizer, priority=priority, max_history=None, lookup=lookup
    )
@classmethod
def validate_against_domain(
    cls, ensemble: Optional["PolicyEnsemble"], domain: Optional[Domain]
) -> None:
    """Ensure the configured fallback action exists in the domain.

    Raises:
        InvalidDomain: If a `RulePolicy` with fallback prediction enabled is
            in the ensemble but its fallback action is not in the domain.
    """
    if ensemble is None:
        return

    # Only the first RulePolicy in the ensemble is checked.
    rule_policy = next(
        (p for p in ensemble.policies if isinstance(p, RulePolicy)), None
    )
    if not rule_policy or not rule_policy._enable_fallback_prediction:
        return

    if (
        domain is None
        or rule_policy._fallback_action_name not in domain.action_names
    ):
        raise InvalidDomain(
            f"The fallback action '{rule_policy._fallback_action_name}' which was "
            f"configured for the {RulePolicy.__name__} must be present in the "
            f"domain."
        )
@staticmethod
def _is_rule_snippet_state(state: State) -> bool:
    """Whether the previous action in `state` is the rule-snippet marker."""
    previous_action = state.get(PREVIOUS_ACTION, {})
    return previous_action.get(ACTION_NAME) == RULE_SNIPPET_ACTION_NAME
def _create_feature_key(self, states: List[State]) -> Optional[Text]:
    """Serialize the states after the last rule snippet into a lookup key.

    Returns None when no state remains after trimming.
    """
    relevant: List[State] = []
    for state in reversed(states):
        # Everything at or before a RULE_SNIPPET_ACTION_NAME turn is ignored.
        if self._is_rule_snippet_state(state):
            break
        relevant.append(state)
    if not relevant:
        return None
    relevant.reverse()
    # Sorted keys guarantee identical dicts yield identical json strings.
    return json.dumps(relevant, sort_keys=True)
@staticmethod
def _states_for_unhappy_loop_predictions(states: List[State]) -> List[State]:
    """Modifies the states to create feature keys for loop unhappy path conditions.

    Args:
        states: a representation of a tracker
            as a list of dictionaries containing features

    Returns:
        modified states
    """
    # Keep at most the last 2 dialogue turns:
    # - the previous meaningful action before action_listen
    # - but never the previous intent
    last_state = states[-1]
    if len(states) == 1:
        return [last_state]

    previous_action = states[-2].get(PREVIOUS_ACTION)
    if not previous_action:
        return [last_state]

    return [{PREVIOUS_ACTION: previous_action}, last_state]
@staticmethod
def _remove_rule_snippet_predictions(lookup: Dict[Text, Text]) -> Dict[Text, Text]:
    """Drop lookup entries that would predict the rule-snippet placeholder action."""
    filtered = {}
    for feature_key, action in lookup.items():
        if action == RULE_SNIPPET_ACTION_NAME:
            continue
        filtered[feature_key] = action
    return filtered
def _create_loop_unhappy_lookup_from_states(
    self,
    trackers_as_states: List[List[State]],
    trackers_as_actions: List[List[Text]],
) -> Dict[Text, Text]:
    """Creates lookup dictionary from the tracker represented as states.

    Args:
        trackers_as_states: representation of the trackers as a list of states
        trackers_as_actions: representation of the trackers as a list of actions

    Returns:
        lookup dictionary mapping feature keys to the loop-unhappy-path
        markers DO_NOT_VALIDATE_LOOP / DO_NOT_PREDICT_LOOP_ACTION
    """
    lookup = {}
    for states, actions in zip(trackers_as_states, trackers_as_actions):
        action = actions[0]
        active_loop = get_active_loop_name(states[-1])
        # even if there are two identical feature keys
        # their loop will be the same
        if not active_loop:
            continue

        states = self._states_for_unhappy_loop_predictions(states)
        feature_key = self._create_feature_key(states)
        if not feature_key:
            continue

        # Since rule snippets and stories inside the loop contain
        # only unhappy paths, notify the loop that
        # it was predicted after an answer to a different question and
        # therefore it should not validate user input
        if (
            # loop is predicted after action_listen in unhappy path,
            # therefore no validation is needed
            is_prev_action_listen_in_state(states[-1])
            and action == active_loop
        ):
            lookup[feature_key] = DO_NOT_VALIDATE_LOOP
        elif (
            # some action other than active_loop is predicted in unhappy path,
            # therefore active_loop shouldn't be predicted by the rule
            not is_prev_action_listen_in_state(states[-1])
            and action != active_loop
        ):
            lookup[feature_key] = DO_NOT_PREDICT_LOOP_ACTION
    return lookup
def _check_rule_restriction(
    self, rule_trackers: List[TrackerWithCachedStates]
) -> None:
    """Raise `InvalidRule` if any rule contains too many user utterances.

    Rules are limited to `ALLOWED_NUMBER_OF_USER_INPUTS` user inputs each.
    """
    rules_exceeding_max_user_turns = []
    for tracker in rule_trackers:
        number_of_user_uttered = sum(
            isinstance(event, UserUttered) for event in tracker.events
        )
        if number_of_user_uttered > self.ALLOWED_NUMBER_OF_USER_INPUTS:
            rules_exceeding_max_user_turns.append(tracker.sender_id)

    if rules_exceeding_max_user_turns:
        raise InvalidRule(
            f"Found rules '{', '.join(rules_exceeding_max_user_turns)}' "
            f"that contain more than {self.ALLOWED_NUMBER_OF_USER_INPUTS} "
            f"user inputs. Rules are not meant to hardcode a state machine. "
            f"Please use stories for these cases."
        )
def _predict_next_action(
    self,
    tracker: TrackerWithCachedStates,
    domain: Domain,
    interpreter: NaturalLanguageInterpreter,
) -> Optional[Text]:
    """Return the action this policy predicts for `tracker`, or None if it abstains."""
    probabilities = self.predict_action_probabilities(tracker, domain, interpreter)
    # do not raise an error if RulePolicy didn't predict anything for stories;
    # however for rules RulePolicy should always predict an action
    if (
        probabilities != self._default_predictions(domain)
        or tracker.is_rule_tracker
    ):
        return domain.action_names[np.argmax(probabilities)]
    # Implicitly returns None when the policy abstained on a story tracker.
def _check_prediction(
    self,
    tracker: TrackerWithCachedStates,
    domain: Domain,
    interpreter: NaturalLanguageInterpreter,
    gold_action_name: Text,
) -> Optional[Text]:
    """Return an error message if the policy contradicts `gold_action_name`, else None."""
    predicted_action_name = self._predict_next_action(tracker, domain, interpreter)
    if not predicted_action_name or predicted_action_name == gold_action_name:
        return

    # RulePolicy will always predict active_loop first,
    # but inside loop unhappy path there might be another action;
    # simulate the loop rejecting execution and re-predict once.
    if predicted_action_name == tracker.active_loop_name:
        rasa.core.test.emulate_form_rejection(tracker)
        predicted_action_name = self._predict_next_action(
            tracker, domain, interpreter
        )
        if not predicted_action_name or predicted_action_name == gold_action_name:
            return

    tracker_type = "rule" if tracker.is_rule_tracker else "story"
    return (
        f"Action '{gold_action_name}' in {tracker_type} "
        f"'{tracker.sender_id}' was predicted incorrectly by "
        f"the {self.__class__.__name__} as action "
        f"'{predicted_action_name}'."
    )
def _find_contradicting_rules(
    self,
    trackers: List[TrackerWithCachedStates],
    domain: Domain,
    interpreter: NaturalLanguageInterpreter,
) -> None:
    """Replay all trackers through the policy and collect contradictions.

    Raises:
        InvalidRule: If any rule/story action was predicted incorrectly.
    """
    logger.debug("Started checking rules and stories for contradictions.")
    # during training we run `predict_action_probabilities` to check for
    # contradicting rules; suppress per-prediction debug output
    self._silent_prediction = True
    error_messages = []
    pbar = tqdm(
        trackers,
        desc="Processed trackers",
        disable=rasa.shared.utils.io.is_logging_disabled(),
    )
    for tracker in pbar:
        running_tracker = tracker.init_copy()
        running_tracker.sender_id = tracker.sender_id
        # the first action is always unpredictable
        next_action_is_unpredictable = True
        for event in tracker.applied_events():
            # do not run prediction on anything but executed actions
            if not isinstance(event, ActionExecuted):
                running_tracker.update(event)
                continue

            if event.action_name == RULE_SNIPPET_ACTION_NAME:
                # notify that we shouldn't check that the action after
                # RULE_SNIPPET_ACTION_NAME is unpredictable
                next_action_is_unpredictable = True
                # do not add RULE_SNIPPET_ACTION_NAME event
                continue

            if next_action_is_unpredictable or event.unpredictable:
                next_action_is_unpredictable = False  # reset unpredictability
                running_tracker.update(event)
                continue

            gold_action_name = event.action_name or event.action_text
            error_message = self._check_prediction(
                running_tracker, domain, interpreter, gold_action_name
            )
            if error_message:
                error_messages.append(error_message)

            running_tracker.update(event)

    self._silent_prediction = False  # turn off silent logging
    if error_messages:
        error_messages.append(
            "Please update your stories and rules so that "
            "they don't contradict each other."
        )
        raise InvalidRule("\n".join(error_messages))

    logger.debug("Found no contradictions between rules and stories")
def train(
    self,
    training_trackers: List[TrackerWithCachedStates],
    domain: Domain,
    interpreter: NaturalLanguageInterpreter,
    **kwargs: Any,
) -> None:
    """Train the policy: build rule lookups and optionally verify them.

    Builds the main RULES lookup from rule trackers, the loop-unhappy-path
    lookup from all trackers, and (if configured) checks that rules don't
    contradict stories.
    """
    # only consider original trackers (no augmented ones)
    training_trackers = [
        t
        for t in training_trackers
        if not hasattr(t, "is_augmented") or not t.is_augmented
    ]

    # only use trackers from rule-based training data
    rule_trackers = [t for t in training_trackers if t.is_rule_tracker]
    (
        rule_trackers_as_states,
        rule_trackers_as_actions,
    ) = self.featurizer.training_states_and_actions(rule_trackers, domain)

    if self._restrict_rules:
        self._check_rule_restriction(rule_trackers)

    rules_lookup = self._create_lookup_from_states(
        rule_trackers_as_states, rule_trackers_as_actions
    )
    self.lookup[RULES] = self._remove_rule_snippet_predictions(rules_lookup)

    story_trackers = [t for t in training_trackers if not t.is_rule_tracker]
    (
        story_trackers_as_states,
        story_trackers_as_actions,
    ) = self.featurizer.training_states_and_actions(story_trackers, domain)

    # use all trackers to find negative rules in unhappy paths
    trackers_as_states = rule_trackers_as_states + story_trackers_as_states
    trackers_as_actions = rule_trackers_as_actions + story_trackers_as_actions

    # negative rules are not anti-rules, they are auxiliary to actual rules
    self.lookup[
        RULES_FOR_LOOP_UNHAPPY_PATH
    ] = self._create_loop_unhappy_lookup_from_states(
        trackers_as_states, trackers_as_actions
    )

    # make this configurable because checking might take a lot of time
    if self._check_rules_with_stories:
        # using trackers here might not be the most efficient way, however
        # it allows us to directly test `predict_action_probabilities` method
        self._find_contradicting_rules(training_trackers, domain, interpreter)

    logger.debug(f"Memorized '{len(self.lookup[RULES])}' unique rules.")
def _output_debug_logs(self, message: Text) -> None:
    """Debug-log `message` unless predictions run silently (training-time checks)."""
    if not self._silent_prediction:
        logger.debug(message)
@staticmethod
def _does_rule_match_state(rule_state: State, conversation_state: State) -> bool:
    """Whether every feature required by `rule_state` holds in `conversation_state`.

    A rule value of SHOULD_NOT_BE_SET means the feature must be absent;
    any other truthy value must match the conversation exactly.
    """
    for state_type, rule_sub_state in rule_state.items():
        conversation_sub_state = conversation_state.get(state_type, {})
        for key, value in rule_sub_state.items():
            if isinstance(value, list):
                # json dumps and loads tuples as lists,
                # so we need to convert them back
                value = tuple(value)

            if (
                # value should be set, therefore
                # check whether it is the same as in the state
                value
                and value != SHOULD_NOT_BE_SET
                and conversation_sub_state.get(key) != value
            ) or (
                # value shouldn't be set, therefore
                # it should be None or non existent in the state
                value == SHOULD_NOT_BE_SET
                and conversation_sub_state.get(key)
                # this is needed to test on rule snippets
                and conversation_sub_state.get(key) != SHOULD_NOT_BE_SET
            ):
                return False

    return True
@staticmethod
def _rule_key_to_state(rule_key: Text) -> List[State]:
    """Deserialize a lookup key (json string) back into its list of states."""
    return json.loads(rule_key)
def _is_rule_applicable(
    self, rule_key: Text, turn_index: int, conversation_state: State
) -> bool:
    """Check if rule is satisfied with current state at turn.

    Args:
        rule_key: serialized rule states (lookup key).
        turn_index: how many turns back from the most recent turn.
        conversation_state: the conversation state at that turn.
    """
    # turn_index goes back in time
    reversed_rule_states = list(reversed(self._rule_key_to_state(rule_key)))

    return bool(
        # rule is shorter than current turn index
        turn_index >= len(reversed_rule_states)
        # current rule and state turns are empty
        or (not reversed_rule_states[turn_index] and not conversation_state)
        # check that current rule turn features are present in current state turn
        or (
            reversed_rule_states[turn_index]
            and conversation_state
            and self._does_rule_match_state(
                reversed_rule_states[turn_index], conversation_state
            )
        )
    )
def _get_possible_keys(
    self, lookup: Dict[Text, Text], states: List[State]
) -> Set[Text]:
    """Narrow `lookup`'s keys to the rules consistent with every turn of `states`."""
    candidates = set(lookup.keys())
    # Turn index 0 is the most recent turn; walk backwards through time.
    for turn_index, state in enumerate(reversed(states)):
        candidates = {
            key
            for key in candidates
            if self._is_rule_applicable(key, turn_index, state)
        }
    return candidates
    def _find_action_from_default_actions(
        self, tracker: DialogueStateTracker,
    ) -> Optional[Text]:
        """Check whether the latest user intent maps to a default action.

        Default actions (restart / back / session start) overrule any rule.

        Returns:
            The name of the mapped default action, or `None` if the last
            action was not `action_listen` or the intent has no mapping.
        """
        if (
            not tracker.latest_action_name == ACTION_LISTEN_NAME
            or not tracker.latest_message
        ):
            return None
        default_action_name = DEFAULT_ACTION_MAPPINGS.get(
            tracker.latest_message.intent.get(INTENT_NAME_KEY)
        )
        if default_action_name:
            self._output_debug_logs(
                f"Predicted default action '{default_action_name}'."
            )
        return default_action_name
    def _find_action_from_loop_happy_path(
        self, tracker: DialogueStateTracker,
    ) -> Optional[Text]:
        """Predict the active loop (or listening after it) on its happy path.

        Returns:
            The active loop's name if the loop should run next,
            `ACTION_LISTEN_NAME` if the loop just ran successfully,
            or `None` if no loop is active or the loop was rejected.
        """
        active_loop_name = tracker.active_loop_name
        active_loop_rejected = tracker.active_loop.get(LOOP_REJECTED)
        # run the loop if it is active, wasn't rejected and didn't just run
        should_predict_loop = (
            active_loop_name
            and not active_loop_rejected
            and tracker.latest_action.get(ACTION_NAME) != active_loop_name
        )
        should_predict_listen = (
            active_loop_name
            and not active_loop_rejected
            and tracker.latest_action_name == active_loop_name
        )
        if should_predict_loop:
            self._output_debug_logs(f"Predicted loop '{active_loop_name}'.")
            return active_loop_name
        # predict `action_listen` if loop action was run successfully
        if should_predict_listen:
            self._output_debug_logs(
                f"Predicted '{ACTION_LISTEN_NAME}' after loop '{active_loop_name}'."
            )
            return ACTION_LISTEN_NAME
    def _find_action_from_rules(
        self, tracker: DialogueStateTracker, domain: Domain
    ) -> Optional[Text]:
        """Predict the next action from the trained rule lookup.

        Picks the longest applicable rule (longer keys are more specific),
        then applies the loop "unhappy path" corrections.

        Returns:
            The predicted action name, or `None` if no rule applies.
        """
        tracker_as_states = self.featurizer.prediction_states([tracker], domain)
        states = tracker_as_states[0]
        self._output_debug_logs(f"Current tracker state: {states}")
        rule_keys = self._get_possible_keys(self.lookup[RULES], states)
        predicted_action_name = None
        best_rule_key = ""
        if rule_keys:
            # if there are several rules,
            # it should mean that some rule is a subset of another rule
            # therefore we pick a rule of maximum length
            best_rule_key = max(rule_keys, key=len)
            predicted_action_name = self.lookup[RULES].get(best_rule_key)
        active_loop_name = tracker.active_loop_name
        if active_loop_name:
            # find rules for unhappy path of the loop
            loop_unhappy_keys = self._get_possible_keys(
                self.lookup[RULES_FOR_LOOP_UNHAPPY_PATH], states
            )
            # there could be several unhappy path conditions
            unhappy_path_conditions = [
                self.lookup[RULES_FOR_LOOP_UNHAPPY_PATH].get(key)
                for key in loop_unhappy_keys
            ]
            # Check if a rule that predicted action_listen
            # was applied inside the loop.
            # Rules might not explicitly switch back to the loop.
            # Hence, we have to take care of that.
            predicted_listen_from_general_rule = (
                predicted_action_name == ACTION_LISTEN_NAME
                and not get_active_loop_name(self._rule_key_to_state(best_rule_key)[-1])
            )
            if predicted_listen_from_general_rule:
                if DO_NOT_PREDICT_LOOP_ACTION not in unhappy_path_conditions:
                    # negative rules don't contain a key that corresponds to
                    # the fact that active_loop shouldn't be predicted
                    self._output_debug_logs(
                        f"Predicted loop '{active_loop_name}' by overwriting "
                        f"'{ACTION_LISTEN_NAME}' predicted by general rule."
                    )
                    return active_loop_name
                # do not predict anything
                predicted_action_name = None
            if DO_NOT_VALIDATE_LOOP in unhappy_path_conditions:
                # NOTE(review): this mutates the tracker during prediction —
                # presumably intentional so the loop skips validation; confirm.
                self._output_debug_logs("Added `FormValidation(False)` event.")
                tracker.update(FormValidation(False))
        if predicted_action_name is not None:
            self._output_debug_logs(
                f"There is a rule for the next action '{predicted_action_name}'."
            )
        else:
            self._output_debug_logs("There is no applicable rule.")
        return predicted_action_name
    def predict_action_probabilities(
        self,
        tracker: DialogueStateTracker,
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
        **kwargs: Any,
    ) -> List[float]:
        """Predict probabilities for the next action.

        Prediction sources are tried in priority order: default actions,
        the active loop's happy path, then the trained rules; if none
        applies, the (possibly fallback-seeded) default prediction is used.

        Returns:
            A list of probabilities, one entry per action in the domain.
        """
        result = self._default_predictions(domain)
        # Rasa Open Source default actions overrule anything. If users want to achieve
        # the same, they need to write a rule or make sure that their loop rejects
        # accordingly.
        default_action_name = self._find_action_from_default_actions(tracker)
        if default_action_name:
            return self._prediction_result(default_action_name, tracker, domain)
        # A loop has priority over any other rule.
        # The rules or any other prediction will be applied only if a loop was rejected.
        # If we are in a loop, and the loop didn't run previously or rejected, we can
        # simply force predict the loop.
        loop_happy_path_action_name = self._find_action_from_loop_happy_path(tracker)
        if loop_happy_path_action_name:
            return self._prediction_result(loop_happy_path_action_name, tracker, domain)
        rules_action_name = self._find_action_from_rules(tracker, domain)
        if rules_action_name:
            return self._prediction_result(rules_action_name, tracker, domain)
        return result
    def _default_predictions(self, domain: Domain) -> List[float]:
        """Return baseline probabilities, optionally seeding the fallback.

        If fallback prediction is enabled, the configured fallback action
        gets `core_fallback_threshold` confidence so it wins when no rule,
        loop or default action applies.
        """
        result = super()._default_predictions(domain)
        if self._enable_fallback_prediction:
            result[
                domain.index_for_action(self._fallback_action_name)
            ] = self._core_fallback_threshold
        return result
Update rasa/core/policies/rule_policy.py
Co-authored-by: Tobias Wochinger <5ab5657415213e16a8561b9cb7fb27e3a8c539ed@rasa.com>
import logging
from typing import List, Dict, Text, Optional, Any, Set, TYPE_CHECKING
from tqdm import tqdm
import numpy as np
import json
from rasa.shared.constants import DOCS_URL_RULES
import rasa.shared.utils.io
from rasa.shared.core.events import FormValidation, UserUttered, ActionExecuted
from rasa.core.featurizers.tracker_featurizers import TrackerFeaturizer
from rasa.shared.nlu.interpreter import NaturalLanguageInterpreter
from rasa.core.policies.memoization import MemoizationPolicy
from rasa.core.policies.policy import SupportedData
from rasa.shared.core.trackers import (
DialogueStateTracker,
get_active_loop_name,
is_prev_action_listen_in_state,
)
from rasa.shared.core.generator import TrackerWithCachedStates
from rasa.core.constants import FORM_POLICY_PRIORITY
from rasa.shared.core.constants import (
USER_INTENT_RESTART,
USER_INTENT_BACK,
USER_INTENT_SESSION_START,
ACTION_LISTEN_NAME,
ACTION_RESTART_NAME,
ACTION_SESSION_START_NAME,
ACTION_DEFAULT_FALLBACK_NAME,
ACTION_BACK_NAME,
RULE_SNIPPET_ACTION_NAME,
SHOULD_NOT_BE_SET,
PREVIOUS_ACTION,
LOOP_REJECTED,
)
from rasa.shared.core.domain import InvalidDomain, State, Domain
from rasa.shared.nlu.constants import ACTION_NAME, INTENT_NAME_KEY
import rasa.core.test
if TYPE_CHECKING:
from rasa.core.policies.ensemble import PolicyEnsemble # pytype: disable=pyi-error
logger = logging.getLogger(__name__)

# These are Rasa Open Source default actions and overrule everything at any time.
DEFAULT_ACTION_MAPPINGS = {
    USER_INTENT_RESTART: ACTION_RESTART_NAME,
    USER_INTENT_BACK: ACTION_BACK_NAME,
    USER_INTENT_SESSION_START: ACTION_SESSION_START_NAME,
}

# Keys of the policy's `self.lookup` dictionary.
RULES = "rules"
RULES_FOR_LOOP_UNHAPPY_PATH = "rules_for_loop_unhappy_path"

# Values stored in the loop-unhappy-path lookup: "negative" conditions
# telling the policy not to validate the loop / not to predict the loop action.
DO_NOT_VALIDATE_LOOP = "do_not_validate_loop"
DO_NOT_PREDICT_LOOP_ACTION = "do_not_predict_loop_action"
class InvalidRule(Exception):
    """Exception that can be raised when rules are not valid (e.g. they
    contradict each other or the stories)."""

    def __init__(self, message: Text) -> None:
        # append a pointer to the rules documentation to every message
        self.message = message + f"\nMore info at {DOCS_URL_RULES}"

    def __str__(self) -> Text:
        # return message in error colours
        return rasa.shared.utils.io.wrap_with_color(
            self.message, color=rasa.shared.utils.io.bcolors.FAIL
        )
class RulePolicy(MemoizationPolicy):
"""Policy which handles all the rules"""
# rules use explicit json strings
ENABLE_FEATURE_STRING_COMPRESSION = False
# number of user inputs that is allowed in case rules are restricted
ALLOWED_NUMBER_OF_USER_INPUTS = 1
    @staticmethod
    def supported_data() -> SupportedData:
        """The type of data supported by this policy.

        Returns:
            The data type supported by this policy (both ML and rule data).
        """
        return SupportedData.ML_AND_RULE_DATA
def __init__(
self,
featurizer: Optional[TrackerFeaturizer] = None,
priority: int = FORM_POLICY_PRIORITY,
lookup: Optional[Dict] = None,
core_fallback_threshold: float = 0.3,
core_fallback_action_name: Text = ACTION_DEFAULT_FALLBACK_NAME,
enable_fallback_prediction: bool = True,
restrict_rules: bool = True,
check_for_contradictions: bool = True,
) -> None:
"""Create a `RulePolicy` object.
Args:
featurizer: `Featurizer` which is used to convert conversation states to
features.
priority: Priority of the policy which is used if multiple policies predict
actions with the same confidence.
lookup: Lookup table which is used to pick matching rules for a conversation
state.
core_fallback_threshold: Confidence of the prediction if no rule matched
and de-facto threshold for a core fallback.
core_fallback_action_name: Name of the action which should be predicted
if no rule matched.
enable_fallback_prediction: If `True` `core_fallback_action_name` is
predicted in case no rule matched.
"""
self._core_fallback_threshold = core_fallback_threshold
self._fallback_action_name = core_fallback_action_name
self._enable_fallback_prediction = enable_fallback_prediction
self._restrict_rules = restrict_rules
self._check_rules_with_stories = check_rules_with_stories
# during training we run `predict_action_probabilities` to check for
# contradicting rules, silent prediction debug to avoid too many logs
self._silent_prediction = False
# max history is set to `None` in order to capture any lengths of rule stories
super().__init__(
featurizer=featurizer, priority=priority, max_history=None, lookup=lookup
)
    @classmethod
    def validate_against_domain(
        cls, ensemble: Optional["PolicyEnsemble"], domain: Optional[Domain]
    ) -> None:
        """Ensure the configured fallback action exists in the domain.

        Only relevant when the ensemble contains a `RulePolicy` with
        fallback prediction enabled.

        Raises:
            InvalidDomain: if the fallback action is missing from the domain.
        """
        if ensemble is None:
            return
        rule_policy = next(
            (p for p in ensemble.policies if isinstance(p, RulePolicy)), None
        )
        if not rule_policy or not rule_policy._enable_fallback_prediction:
            return
        if (
            domain is None
            or rule_policy._fallback_action_name not in domain.action_names
        ):
            raise InvalidDomain(
                f"The fallback action '{rule_policy._fallback_action_name}' which was "
                f"configured for the {RulePolicy.__name__} must be present in the "
                f"domain."
            )
    @staticmethod
    def _is_rule_snippet_state(state: State) -> bool:
        """Check whether the state's previous action is the rule snippet placeholder."""
        prev_action_name = state.get(PREVIOUS_ACTION, {}).get(ACTION_NAME)
        return prev_action_name == RULE_SNIPPET_ACTION_NAME
    def _create_feature_key(self, states: List[State]) -> Optional[Text]:
        """Serialize tracker states into a deterministic lookup key.

        States before a rule snippet (`...`) are irrelevant for rule
        matching and are dropped.

        Returns:
            A JSON string of the remaining states, or `None` (implicit)
            if nothing is left after truncation.
        """
        new_states = []
        for state in reversed(states):
            if self._is_rule_snippet_state(state):
                # remove all states before RULE_SNIPPET_ACTION_NAME
                break
            new_states.insert(0, state)
        if not new_states:
            return
        # we sort keys to make sure that the same states
        # represented as dictionaries have the same json strings
        return json.dumps(new_states, sort_keys=True)
@staticmethod
def _states_for_unhappy_loop_predictions(states: List[State]) -> List[State]:
"""Modifies the states to create feature keys for loop unhappy path conditions.
Args:
states: a representation of a tracker
as a list of dictionaries containing features
Returns:
modified states
"""
# leave only last 2 dialogue turns to
# - capture previous meaningful action before action_listen
# - ignore previous intent
if len(states) == 1 or not states[-2].get(PREVIOUS_ACTION):
return [states[-1]]
else:
return [{PREVIOUS_ACTION: states[-2][PREVIOUS_ACTION]}, states[-1]]
@staticmethod
def _remove_rule_snippet_predictions(lookup: Dict[Text, Text]) -> Dict[Text, Text]:
# Delete rules if it would predict the RULE_SNIPPET_ACTION_NAME action
return {
feature_key: action
for feature_key, action in lookup.items()
if action != RULE_SNIPPET_ACTION_NAME
}
    def _create_loop_unhappy_lookup_from_states(
        self,
        trackers_as_states: List[List[State]],
        trackers_as_actions: List[List[Text]],
    ) -> Dict[Text, Text]:
        """Creates lookup dictionary from the tracker represented as states.

        Maps feature keys to the "negative" conditions
        (`DO_NOT_VALIDATE_LOOP` / `DO_NOT_PREDICT_LOOP_ACTION`) that adjust
        predictions on the unhappy paths of loops.

        Args:
            trackers_as_states: representation of the trackers as a list of states
            trackers_as_actions: representation of the trackers as a list of actions

        Returns:
            lookup dictionary
        """
        lookup = {}
        for states, actions in zip(trackers_as_states, trackers_as_actions):
            action = actions[0]
            active_loop = get_active_loop_name(states[-1])
            # even if there are two identical feature keys
            # their loop will be the same
            if not active_loop:
                continue
            states = self._states_for_unhappy_loop_predictions(states)
            feature_key = self._create_feature_key(states)
            if not feature_key:
                continue
            # Since rule snippets and stories inside the loop contain
            # only unhappy paths, notify the loop that
            # it was predicted after an answer to a different question and
            # therefore it should not validate user input
            if (
                # loop is predicted after action_listen in unhappy path,
                # therefore no validation is needed
                is_prev_action_listen_in_state(states[-1])
                and action == active_loop
            ):
                lookup[feature_key] = DO_NOT_VALIDATE_LOOP
            elif (
                # some action other than active_loop is predicted in unhappy path,
                # therefore active_loop shouldn't be predicted by the rule
                not is_prev_action_listen_in_state(states[-1])
                and action != active_loop
            ):
                lookup[feature_key] = DO_NOT_PREDICT_LOOP_ACTION
        return lookup
    def _check_rule_restriction(
        self, rule_trackers: List[TrackerWithCachedStates]
    ) -> None:
        """Raise `InvalidRule` if any rule contains too many user messages.

        Rules are restricted to `ALLOWED_NUMBER_OF_USER_INPUTS` user turns
        to discourage hard-coding state machines with rules.

        Raises:
            InvalidRule: if at least one rule tracker exceeds the limit.
        """
        rules_exceeding_max_user_turns = []
        for tracker in rule_trackers:
            number_of_user_uttered = sum(
                isinstance(event, UserUttered) for event in tracker.events
            )
            if number_of_user_uttered > self.ALLOWED_NUMBER_OF_USER_INPUTS:
                rules_exceeding_max_user_turns.append(tracker.sender_id)
        if rules_exceeding_max_user_turns:
            raise InvalidRule(
                f"Found rules '{', '.join(rules_exceeding_max_user_turns)}' "
                f"that contain more than {self.ALLOWED_NUMBER_OF_USER_INPUTS} "
                f"user inputs. Rules are not meant to hardcode a state machine. "
                f"Please use stories for these cases."
            )
    def _predict_next_action(
        self,
        tracker: TrackerWithCachedStates,
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
    ) -> Optional[Text]:
        """Return the action this policy would predict for `tracker`, or `None`.

        Used during training to check rules and stories for contradictions.
        """
        probabilities = self.predict_action_probabilities(tracker, domain, interpreter)
        # do not raise an error if RulePolicy didn't predict anything for stories;
        # however for rules RulePolicy should always predict an action
        if (
            probabilities != self._default_predictions(domain)
            or tracker.is_rule_tracker
        ):
            return domain.action_names[np.argmax(probabilities)]
    def _check_prediction(
        self,
        tracker: TrackerWithCachedStates,
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
        gold_action_name: Text,
    ) -> Optional[Text]:
        """Compare the policy's prediction against the expected action.

        Returns:
            An error message if the prediction contradicts
            `gold_action_name`, otherwise `None`.
        """
        predicted_action_name = self._predict_next_action(tracker, domain, interpreter)
        if not predicted_action_name or predicted_action_name == gold_action_name:
            return
        # RulePolicy will always predict active_loop first,
        # but inside loop unhappy path there might be another action
        if predicted_action_name == tracker.active_loop_name:
            rasa.core.test.emulate_form_rejection(tracker)
            # re-predict with the loop rejected
            predicted_action_name = self._predict_next_action(
                tracker, domain, interpreter
            )
            if not predicted_action_name or predicted_action_name == gold_action_name:
                return
        tracker_type = "rule" if tracker.is_rule_tracker else "story"
        return (
            f"Action '{gold_action_name}' in {tracker_type} "
            f"'{tracker.sender_id}' was predicted incorrectly by "
            f"the {self.__class__.__name__} as action "
            f"'{predicted_action_name}'."
        )
    def _find_contradicting_rules(
        self,
        trackers: List[TrackerWithCachedStates],
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
    ) -> None:
        """Replay all trackers and verify rules don't contradict stories.

        Raises:
            InvalidRule: if any predictable action is predicted differently
                from what its rule/story expects.
        """
        logger.debug("Started checking rules and stories for contradictions.")
        # logging during the many internal predictions would be overwhelming
        self._silent_prediction = True
        error_messages = []
        pbar = tqdm(
            trackers,
            desc="Processed trackers",
            disable=rasa.shared.utils.io.is_logging_disabled(),
        )
        for tracker in pbar:
            running_tracker = tracker.init_copy()
            running_tracker.sender_id = tracker.sender_id
            # the first action is always unpredictable
            next_action_is_unpredictable = True
            for event in tracker.applied_events():
                # do not run prediction on unpredictable actions
                if not isinstance(event, ActionExecuted):
                    running_tracker.update(event)
                    continue
                if event.action_name == RULE_SNIPPET_ACTION_NAME:
                    # notify that we shouldn't check that the action after
                    # RULE_SNIPPET_ACTION_NAME is unpredictable
                    next_action_is_unpredictable = True
                    # do not add RULE_SNIPPET_ACTION_NAME event
                    continue
                if next_action_is_unpredictable or event.unpredictable:
                    next_action_is_unpredictable = False  # reset unpredictability
                    running_tracker.update(event)
                    continue
                gold_action_name = event.action_name or event.action_text
                error_message = self._check_prediction(
                    running_tracker, domain, interpreter, gold_action_name
                )
                if error_message:
                    error_messages.append(error_message)
                running_tracker.update(event)
        self._silent_prediction = False  # turn off silent logging
        if error_messages:
            error_messages.append(
                "Please update your stories and rules so that "
                "they don't contradict each other."
            )
            raise InvalidRule("\n".join(error_messages))
        logger.debug("Found no contradictions between rules and stories")
    def train(
        self,
        training_trackers: List[TrackerWithCachedStates],
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
        **kwargs: Any,
    ) -> None:
        """Build the rule lookup tables from rule and story trackers.

        Fills `self.lookup[RULES]` from rule trackers and
        `self.lookup[RULES_FOR_LOOP_UNHAPPY_PATH]` from all trackers, then
        optionally checks rules against stories for contradictions.
        """
        # only consider original trackers (no augmented ones)
        training_trackers = [
            t
            for t in training_trackers
            if not hasattr(t, "is_augmented") or not t.is_augmented
        ]
        # only use trackers from rule-based training data
        rule_trackers = [t for t in training_trackers if t.is_rule_tracker]
        (
            rule_trackers_as_states,
            rule_trackers_as_actions,
        ) = self.featurizer.training_states_and_actions(rule_trackers, domain)
        if self._restrict_rules:
            self._check_rule_restriction(rule_trackers)
        rules_lookup = self._create_lookup_from_states(
            rule_trackers_as_states, rule_trackers_as_actions
        )
        self.lookup[RULES] = self._remove_rule_snippet_predictions(rules_lookup)
        story_trackers = [t for t in training_trackers if not t.is_rule_tracker]
        (
            story_trackers_as_states,
            story_trackers_as_actions,
        ) = self.featurizer.training_states_and_actions(story_trackers, domain)
        # use all trackers to find negative rules in unhappy paths
        trackers_as_states = rule_trackers_as_states + story_trackers_as_states
        trackers_as_actions = rule_trackers_as_actions + story_trackers_as_actions
        # negative rules are not anti-rules, they are auxiliary to actual rules
        self.lookup[
            RULES_FOR_LOOP_UNHAPPY_PATH
        ] = self._create_loop_unhappy_lookup_from_states(
            trackers_as_states, trackers_as_actions
        )
        # make this configurable because checking might take a lot of time
        if self._check_rules_with_stories:
            # using trackers here might not be the most efficient way, however
            # it allows us to directly test `predict_action_probabilities` method
            self._find_contradicting_rules(training_trackers, domain, interpreter)
        logger.debug(f"Memorized '{len(self.lookup[RULES])}' unique rules.")
    def _output_debug_logs(self, message: Text) -> None:
        """Log `message` unless silent mode is on (used during training checks)."""
        if not self._silent_prediction:
            logger.debug(message)
@staticmethod
def _does_rule_match_state(rule_state: State, conversation_state: State) -> bool:
for state_type, rule_sub_state in rule_state.items():
conversation_sub_state = conversation_state.get(state_type, {})
for key, value in rule_sub_state.items():
if isinstance(value, list):
# json dumps and loads tuples as lists,
# so we need to convert them back
value = tuple(value)
if (
# value should be set, therefore
# check whether it is the same as in the state
value
and value != SHOULD_NOT_BE_SET
and conversation_sub_state.get(key) != value
) or (
# value shouldn't be set, therefore
# it should be None or non existent in the state
value == SHOULD_NOT_BE_SET
and conversation_sub_state.get(key)
# this is needed to test on rule snippets
and conversation_sub_state.get(key) != SHOULD_NOT_BE_SET
):
return False
return True
@staticmethod
def _rule_key_to_state(rule_key: Text) -> List[State]:
return json.loads(rule_key)
def _is_rule_applicable(
self, rule_key: Text, turn_index: int, conversation_state: State
) -> bool:
"""Check if rule is satisfied with current state at turn."""
# turn_index goes back in time
reversed_rule_states = list(reversed(self._rule_key_to_state(rule_key)))
return bool(
# rule is shorter than current turn index
turn_index >= len(reversed_rule_states)
# current rule and state turns are empty
or (not reversed_rule_states[turn_index] and not conversation_state)
# check that current rule turn features are present in current state turn
or (
reversed_rule_states[turn_index]
and conversation_state
and self._does_rule_match_state(
reversed_rule_states[turn_index], conversation_state
)
)
)
def _get_possible_keys(
self, lookup: Dict[Text, Text], states: List[State]
) -> Set[Text]:
possible_keys = set(lookup.keys())
for i, state in enumerate(reversed(states)):
# find rule keys that correspond to current state
possible_keys = set(
filter(
lambda _key: self._is_rule_applicable(_key, i, state), possible_keys
)
)
return possible_keys
def _find_action_from_default_actions(
self, tracker: DialogueStateTracker,
) -> Optional[Text]:
if (
not tracker.latest_action_name == ACTION_LISTEN_NAME
or not tracker.latest_message
):
return None
default_action_name = DEFAULT_ACTION_MAPPINGS.get(
tracker.latest_message.intent.get(INTENT_NAME_KEY)
)
if default_action_name:
self._output_debug_logs(
f"Predicted default action '{default_action_name}'."
)
return default_action_name
def _find_action_from_loop_happy_path(
self, tracker: DialogueStateTracker,
) -> Optional[Text]:
active_loop_name = tracker.active_loop_name
active_loop_rejected = tracker.active_loop.get(LOOP_REJECTED)
should_predict_loop = (
active_loop_name
and not active_loop_rejected
and tracker.latest_action.get(ACTION_NAME) != active_loop_name
)
should_predict_listen = (
active_loop_name
and not active_loop_rejected
and tracker.latest_action_name == active_loop_name
)
if should_predict_loop:
self._output_debug_logs(f"Predicted loop '{active_loop_name}'.")
return active_loop_name
# predict `action_listen` if loop action was run successfully
if should_predict_listen:
self._output_debug_logs(
f"Predicted '{ACTION_LISTEN_NAME}' after loop '{active_loop_name}'."
)
return ACTION_LISTEN_NAME
def _find_action_from_rules(
self, tracker: DialogueStateTracker, domain: Domain
) -> Optional[Text]:
tracker_as_states = self.featurizer.prediction_states([tracker], domain)
states = tracker_as_states[0]
self._output_debug_logs(f"Current tracker state: {states}")
rule_keys = self._get_possible_keys(self.lookup[RULES], states)
predicted_action_name = None
best_rule_key = ""
if rule_keys:
# if there are several rules,
# it should mean that some rule is a subset of another rule
# therefore we pick a rule of maximum length
best_rule_key = max(rule_keys, key=len)
predicted_action_name = self.lookup[RULES].get(best_rule_key)
active_loop_name = tracker.active_loop_name
if active_loop_name:
# find rules for unhappy path of the loop
loop_unhappy_keys = self._get_possible_keys(
self.lookup[RULES_FOR_LOOP_UNHAPPY_PATH], states
)
# there could be several unhappy path conditions
unhappy_path_conditions = [
self.lookup[RULES_FOR_LOOP_UNHAPPY_PATH].get(key)
for key in loop_unhappy_keys
]
# Check if a rule that predicted action_listen
# was applied inside the loop.
# Rules might not explicitly switch back to the loop.
# Hence, we have to take care of that.
predicted_listen_from_general_rule = (
predicted_action_name == ACTION_LISTEN_NAME
and not get_active_loop_name(self._rule_key_to_state(best_rule_key)[-1])
)
if predicted_listen_from_general_rule:
if DO_NOT_PREDICT_LOOP_ACTION not in unhappy_path_conditions:
# negative rules don't contain a key that corresponds to
# the fact that active_loop shouldn't be predicted
self._output_debug_logs(
f"Predicted loop '{active_loop_name}' by overwriting "
f"'{ACTION_LISTEN_NAME}' predicted by general rule."
)
return active_loop_name
# do not predict anything
predicted_action_name = None
if DO_NOT_VALIDATE_LOOP in unhappy_path_conditions:
self._output_debug_logs("Added `FormValidation(False)` event.")
tracker.update(FormValidation(False))
if predicted_action_name is not None:
self._output_debug_logs(
f"There is a rule for the next action '{predicted_action_name}'."
)
else:
self._output_debug_logs("There is no applicable rule.")
return predicted_action_name
def predict_action_probabilities(
self,
tracker: DialogueStateTracker,
domain: Domain,
interpreter: NaturalLanguageInterpreter,
**kwargs: Any,
) -> List[float]:
result = self._default_predictions(domain)
# Rasa Open Source default actions overrule anything. If users want to achieve
# the same, they need to write a rule or make sure that their loop rejects
# accordingly.
default_action_name = self._find_action_from_default_actions(tracker)
if default_action_name:
return self._prediction_result(default_action_name, tracker, domain)
# A loop has priority over any other rule.
# The rules or any other prediction will be applied only if a loop was rejected.
# If we are in a loop, and the loop didn't run previously or rejected, we can
# simply force predict the loop.
loop_happy_path_action_name = self._find_action_from_loop_happy_path(tracker)
if loop_happy_path_action_name:
return self._prediction_result(loop_happy_path_action_name, tracker, domain)
rules_action_name = self._find_action_from_rules(tracker, domain)
if rules_action_name:
return self._prediction_result(rules_action_name, tracker, domain)
return result
def _default_predictions(self, domain: Domain) -> List[float]:
result = super()._default_predictions(domain)
if self._enable_fallback_prediction:
result[
domain.index_for_action(self._fallback_action_name)
] = self._core_fallback_threshold
return result
|
from abc import ABCMeta, abstractmethod
from gusto.transport_equation import EmbeddedDGAdvection
from gusto.advection import SSPRK3, Recoverer
from firedrake import Interpolator, conditional, Function, \
min_value, max_value, as_vector, BrokenElement, FunctionSpace
from gusto import thermodynamics
__all__ = ["Condensation", "Fallout"]
class Physics(object, metaclass=ABCMeta):
    """
    Base class for physics processes for Gusto.

    A physics process adjusts prognostic fields outside of the dynamical
    core (e.g. condensation, rainfall).

    :arg state: :class:`.State` object.
    """

    def __init__(self, state):
        self.state = state

    @abstractmethod
    def apply(self):
        """
        Function computes the value of specific
        fields at the next time step.

        Subclasses must implement this; it is called once per time step
        and updates the relevant fields in place.
        """
        pass
class Condensation(Physics):
    """
    The process of condensation of water vapour
    into liquid water and evaporation of liquid
    water into water vapour, with the associated
    latent heat changes.

    :arg state: :class:`.State.` object.
    """

    def __init__(self, state):
        super(Condensation, self).__init__(state)

        # obtain our fields
        self.theta = state.fields('theta')
        self.water_v = state.fields('water_v')
        self.water_c = state.fields('water_c')
        rho = state.fields('rho')

        # declare function space
        Vt = self.theta.function_space()

        # make rho variables
        # we recover rho into theta space
        rho_averaged = Function(Vt)
        self.rho_broken = Function(FunctionSpace(state.mesh, BrokenElement(Vt.ufl_element())))
        self.rho_interpolator = Interpolator(rho, self.rho_broken.function_space())
        self.rho_recoverer = Recoverer(self.rho_broken, rho_averaged)

        # define some parameters as attributes
        dt = state.timestepping.dt
        R_d = state.parameters.R_d
        cp = state.parameters.cp
        cv = state.parameters.cv
        c_pv = state.parameters.c_pv
        c_pl = state.parameters.c_pl
        c_vv = state.parameters.c_vv
        R_v = state.parameters.R_v

        # make useful fields
        Pi = thermodynamics.pi(state.parameters, rho_averaged, self.theta)
        T = thermodynamics.T(state.parameters, self.theta, Pi, r_v=self.water_v)
        p = thermodynamics.p(state.parameters, Pi)
        L_v = thermodynamics.Lv(state.parameters, T)
        R_m = R_d + R_v * self.water_v
        c_pml = cp + c_pv * self.water_v + c_pl * self.water_c
        c_vml = cv + c_vv * self.water_v + c_pl * self.water_c

        # use Teten's formula to calculate w_sat
        w_sat = thermodynamics.r_sat(state.parameters, T, p)

        # make appropriate condensation rate
        # (implicit formulation, limited by the latent-heat feedback on T)
        # BUG FIX: the original immediately overwrote dot_r_cond with the bare
        # supersaturation (self.water_v - w_sat), discarding the implicit rate
        # below; that dead store has been removed.
        dot_r_cond = ((self.water_v - w_sat) /
                      (dt * (1.0 + ((L_v ** 2.0 * w_sat) /
                                    (cp * R_v * T ** 2.0)))))

        # make cond_rate function, that needs to be the same for all updates in one time step
        # (registered as a state field so it can be diagnosed/output; the
        # original also created a throwaway Function(Vt) first, now removed)
        self.cond_rate = state.fields('cond_rate', Vt)

        # adjust cond rate so negative concentrations don't occur
        self.lim_cond_rate = Interpolator(conditional(dot_r_cond < 0,
                                                      max_value(dot_r_cond, - self.water_c / dt),
                                                      min_value(dot_r_cond, self.water_v / dt)), self.cond_rate)

        # tell the prognostic fields what to update to
        self.water_v_new = Interpolator(self.water_v - dt * self.cond_rate, Vt)
        self.water_c_new = Interpolator(self.water_c + dt * self.cond_rate, Vt)
        self.theta_new = Interpolator(self.theta *
                                      (1.0 + dt * self.cond_rate *
                                       (cv * L_v / (c_vml * cp * T) -
                                        R_v * cv * c_pml / (R_m * cp * c_vml))), Vt)

    def apply(self):
        """Recover rho, limit the condensation rate and update the fields."""
        self.rho_broken.assign(self.rho_interpolator.interpolate())
        self.rho_recoverer.project()
        self.lim_cond_rate.interpolate()
        self.theta.assign(self.theta_new.interpolate())
        self.water_v.assign(self.water_v_new.interpolate())
        self.water_c.assign(self.water_c_new.interpolate())
class Fallout(Physics):
    """
    The fallout process of hydrometeors.

    Rain is advected downwards at a constant terminal velocity using a
    full advection scheme.

    :arg state :class: `.State.` object.
    """

    def __init__(self, state):
        super(Fallout, self).__init__(state)

        self.state = state
        self.rain = state.fields('rain')

        # function spaces
        Vt = self.rain.function_space()
        Vu = state.fields('u').function_space()

        # introduce sedimentation rate
        # for now assume all rain falls at terminal velocity
        terminal_velocity = 10  # in m/s
        # NOTE(review): the fall-velocity vector is 2D — this assumes a
        # vertical-slice mesh; confirm before using on 3D domains.
        self.v = state.fields("rainfall_velocity", Vu)
        self.v.project(as_vector([0, -terminal_velocity]))

        # sedimentation will happen using a full advection method
        advection_equation = EmbeddedDGAdvection(state, Vt, equation_form="advective", outflow=True)
        self.advection_method = SSPRK3(state, self.rain, advection_equation)

    def apply(self):
        # advect the rain field with the constant fall velocity, once per
        # outer iteration of the timestepping loop
        for k in range(self.state.timestepping.maxk):
            self.advection_method.update_ubar(self.v, self.v, 0)
            self.advection_method.apply(self.rain, self.rain)
Add liquid water to the condensation scheme and start adding an option for sedimentation
from abc import ABCMeta, abstractmethod
from gusto.transport_equation import EmbeddedDGAdvection
from gusto.advection import SSPRK3, Recoverer
from firedrake import Interpolator, Projector, conditional, Constant, Function, \
    min_value, max_value, as_vector, BrokenElement, FunctionSpace
from gusto import thermodynamics
__all__ = ["Condensation", "Fallout"]
class Physics(object, metaclass=ABCMeta):
    """
    Base class for physics processes for Gusto.

    A physics process adjusts prognostic fields outside of the dynamical
    core (e.g. condensation, rainfall).

    :arg state: :class:`.State` object.
    """

    def __init__(self, state):
        self.state = state

    @abstractmethod
    def apply(self):
        """
        Function computes the value of specific
        fields at the next time step.

        Subclasses must implement this; it is called once per time step
        and updates the relevant fields in place.
        """
        pass
class Condensation(Physics):
"""
The process of condensation of water vapour
into liquid water and evaporation of liquid
water into water vapour, with the associated
latent heat changes.
:arg state: :class:`.State.` object.
"""
def __init__(self, state):
super(Condensation, self).__init__(state)
# obtain our fields
self.theta = state.fields('theta')
self.water_v = state.fields('water_v')
self.water_c = state.fields('water_c')
rho = state.fields('rho')
try:
rain = state.fields('rain')
water_l = self.water_c + rain
except:
water_l = self.water_c
# declare function space
Vt = self.theta.function_space()
# make rho variables
# we recover rho into theta space
rho_averaged = Function(Vt)
self.rho_broken = Function(FunctionSpace(state.mesh, BrokenElement(Vt.ufl_element())))
self.rho_interpolator = Interpolator(rho, self.rho_broken.function_space())
self.rho_recoverer = Recoverer(self.rho_broken, rho_averaged)
# define some parameters as attributes
dt = state.timestepping.dt
R_d = state.parameters.R_d
cp = state.parameters.cp
cv = state.parameters.cv
c_pv = state.parameters.c_pv
c_pl = state.parameters.c_pl
c_vv = state.parameters.c_vv
R_v = state.parameters.R_v
# make useful fields
Pi = thermodynamics.pi(state.parameters, rho_averaged, self.theta)
T = thermodynamics.T(state.parameters, self.theta, Pi, r_v=self.water_v)
p = thermodynamics.p(state.parameters, Pi)
L_v = thermodynamics.Lv(state.parameters, T)
R_m = R_d + R_v * self.water_v
c_pml = cp + c_pv * self.water_v + c_pl * water_l
c_vml = cv + c_vv * self.water_v + c_pl * water_l
# use Teten's formula to calculate w_sat
w_sat = thermodynamics.r_sat(state.parameters, T, p)
# make appropriate condensation rate
dot_r_cond = ((self.water_v - w_sat) /
(dt * (1.0 + ((L_v ** 2.0 * w_sat) /
(cp * R_v * T ** 2.0)))))
# make cond_rate function, that needs to be the same for all updates in one time step
cond_rate = Function(Vt)
# adjust cond rate so negative concentrations don't occur
self.lim_cond_rate = Interpolator(conditional(dot_r_cond < 0,
max_value(dot_r_cond, - self.water_c / dt),
min_value(dot_r_cond, self.water_v / dt)), cond_rate)
# tell the prognostic fields what to update to
self.water_v_new = Interpolator(self.water_v - dt * cond_rate, Vt)
self.water_c_new = Interpolator(self.water_c + dt * cond_rate, Vt)
self.theta_new = Interpolator(self.theta *
(1.0 + dt * cond_rate *
(cv * L_v / (c_vml * cp * T) -
R_v * cv * c_pml / (R_m * cp * c_vml))), Vt)
def apply(self):
    """Apply one step of the saturation adjustment.

    Recovers rho into the theta space, re-evaluates the limited
    condensation rate, then updates theta, water_v and water_c in place
    using the interpolators built in __init__.
    """
    # copy rho into the broken space, then recover it into the theta space
    self.rho_broken.assign(self.rho_interpolator.interpolate())
    self.rho_recoverer.project()
    # evaluate the condensation rate, limited so that negative
    # concentrations of water_v / water_c cannot occur
    self.lim_cond_rate.interpolate()
    # update the prognostic fields from the precomputed expressions
    self.theta.assign(self.theta_new.interpolate())
    self.water_v.assign(self.water_v_new.interpolate())
    self.water_c.assign(self.water_c_new.interpolate())
class Fallout(Physics):
    """
    The fallout process of hydrometeors.

    :arg state: :class:`.State.` object.
    :arg moments: an integer denoting which rainfall scheme to use.
        Corresponds to the number of moments of the raindrop
        distribution to be advected. Valid options:
        0 -- rainfall all at terminal velocity 5 m/s.
        1 -- droplet size depends upon density. Advect the mean
        of the droplet size distribution.
    """

    def __init__(self, state, moments=0):
        # bug fix: ``moments`` was referenced throughout this method (and
        # stored on self) but was not a parameter of __init__, so the class
        # could never be constructed without a NameError. It is now a
        # keyword argument defaulting to the simplest scheme (moments=0).
        # It was also incorrectly forwarded to Physics.__init__.
        super(Fallout, self).__init__(state)
        self.state = state
        self.moments = moments
        self.rain = state.fields('rain')
        # sedimentation velocity field, in the same space as the wind
        self.v = Function(state.fields('u').function_space())

        # function space of the rain field
        Vt = self.rain.function_space()

        # introduce sedimentation rate
        # for now assume all rain falls at terminal velocity
        if moments == 0:
            # all rain falls at terminal velocity
            terminal_velocity = Constant(5)  # in m/s
            self.v.project(as_vector([0, -terminal_velocity]))
        elif moments == 1:
            rho = state.fields('rho')
            v_expression = rho
            raise NotImplementedError('sorry!')
        else:
            raise NotImplementedError('Currently we only have implementations for 0th and 1st moments of rainfall')

        if moments > 1:
            # NOTE(review): currently unreachable -- every moments > 0 path
            # above raises NotImplementedError before this point
            self.determine_v = Projector(as_vector([0, -v_expression]), self.v)

        # sedimentation will happen using a full advection method
        advection_equation = EmbeddedDGAdvection(state, Vt, equation_form="advective", outflow=True)
        self.advection_method = SSPRK3(state, self.rain, advection_equation)

    def apply(self):
        """Advect the rain field downwards with the sedimentation velocity."""
        if self.moments > 0:
            self.determine_v.project()
        self.advection_method.update_ubar(self.v, self.v, 0)
        self.advection_method.apply(self.rain, self.rain)
|
""" Karr Lab build utilities
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2016-08-02
:Copyright: 2016, Karr Lab
:License: MIT
"""
from datetime import datetime
from jinja2 import Template
from pylint import epylint
from sphinx.cmd.build import main as sphinx_build
from sphinx.apidoc import main as sphinx_apidoc
from mock import patch
from six.moves import configparser
from xml.dom import minidom
import abduct
import attrdict
import capturer
import click
import coverage
import coveralls
import dateutil.parser
import email
import email.header
import email.message
import email.utils
import enum
import fnmatch
import ftputil
import github
import glob
import graphviz
# import instrumental.api
import json
import karr_lab_build_utils
import logging
import mock
import natsort
import networkx
import nose
import os
import pip
import pip_check_reqs
import pip_check_reqs.find_extra_reqs
import pip_check_reqs.find_missing_reqs
# import pkg_utils
# pkg_utils is not imported globally so that we can use karr_lab_build_utils to properly calculate its coverage
# :todo: figure out how to fix this
import pkg_resources
import pytest
import re
import requests
import shutil
import six
import smtplib
import subprocess
import sys
import tempfile
import time
import twine.commands.upload
import yaml
import warnings
import whichcraft
class CoverageType(enum.Enum):
    """ Types of coverage """
    statement = 0
    branch = 1
    multiple_condition = 2
    # NOTE: ``decision`` reuses value 2, so enum aliasing makes it the very
    # same member as ``multiple_condition``
    # (CoverageType.decision is CoverageType.multiple_condition)
    decision = 2
class Environment(enum.Enum):
    """ Environments to run tests """
    local = 0     # directly on the local machine
    docker = 1    # inside a Docker container
    circleci = 2  # on CircleCI
class BuildHelper(object):
""" Utility class to help build projects:
* Run tests
* Archive reports to test history server, Coveralls, and Code Climate
Attributes:
test_runner (:obj:`str`): name of test runner {pytest, nose}
repo_name (:obj:`str`): repository name
repo_owner (:obj:`str`): name of the repository owner
repo_branch (:obj:`str`): repository branch name
repo_revision (:obj:`str`): sha of repository revision
build_num (:obj:`int`): CircleCI build number
proj_tests_dir (:obj:`str`): local directory with test code
proj_tests_xml_dir (:obj:`str`): local directory to store latest XML test report
proj_tests_xml_latest_filename (:obj:`str`): file name to store latest XML test report
proj_docs_dir (:obj:`str`): local directory with Sphinx configuration
proj_docs_static_dir (:obj:`str`): local directory of static documentation files
proj_docs_source_dir (:obj:`str`): local directory of source documentation files created by sphinx-apidoc
proj_docs_build_doctrees_dir (:obj:`str`): local directory where doc trees should be saved
proj_docs_build_html_dir (:obj:`str`): local directory where generated HTML documentation should be saved
proj_docs_build_spelling_dir (:obj:`str`): local directory where spell check results should be saved
build_image (:obj:`str`): Docker image to use to run tests
passwords_repo_url (:obj:`str`): URL to Git repository with passwords
passwords_repo_username (:obj:`str`): username for Git repository with passwords
passwords_repo_password (:obj:`str`): password for Git repository with passwords
passwords_repo_path (:obj:`str`): path to clone Git repository with passwords
coveralls_token (:obj:`str`): Coveralls token
code_climate_token (:obj:`str`): Code Climate token
github_username (obj:`str`): GitHub username
github_password (obj:`str`): GitHub password
circleci_api_token (:obj:`str`): CircleCI API token
test_server_token (:obj:`str`): test history report server token
email_password (:obj:`obj:`str`): password for karr.lab.daemon@gmail.com
INITIAL_PACKAGE_VERSION (:obj:`str`): initial package version
DEFAULT_BUILD_IMAGE_VERSION (:obj:`str`): default build image version
DEFAULT_TEST_RUNNER (:obj:`str`): default test runner {pytest, nose}
DEFAULT_PROJ_TESTS_DIR (:obj:`str`): default local directory with test code
DEFAULT_PROJ_TESTS_XML_DIR (:obj:`str`): default local directory where the test reports generated should be saved
DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME (:obj:`str`): default file name to store latest XML test report
DEFAULT_PROJ_DOCS_DIR (:obj:`str`): default local directory with Sphinx configuration
DEFAULT_PROJ_DOCS_STATIC_DIR (:obj:`str`): default local directory of static documentation files
DEFAULT_PROJ_DOCS_SOURCE_DIR (:obj:`str`): default local directory of source documentation files created by sphinx-apidoc
DEFAULT_PROJ_DOCS_SPELLING_DIR (:obj:`str`): default local directory where spell check results should be saved
DEFAULT_PROJ_DOCS_BUILD_HTML_DIR (:obj:`str`): default local directory where generated HTML documentation should be saved
DEFAULT_BUILD_IMAGE (:obj:`str`): default Docker image to use to run tests
GITHUB_API_ENDPOINT (:obj:`str`): GitHub API endpoint
CIRCLE_API_ENDPOINT (:obj:`str`): CircleCI API endpoint
COVERALLS_ENABLED (:obj:`bool`): if :obj:`True`, upload coverage reports to Coveralls
CODE_CLIMATE_ENABLED (:obj:`bool`): if :obj:`True`, upload coverage reports to Code Climate
"""
# Default values for the settings documented in the class docstring
INITIAL_PACKAGE_VERSION = '0.0.1'
DEFAULT_BUILD_IMAGE_VERSION = '0.0.22'
DEFAULT_TEST_RUNNER = 'pytest'

# default locations for tests, test reports, and documentation
DEFAULT_PROJ_TESTS_DIR = 'tests'
DEFAULT_PROJ_TESTS_XML_DIR = 'tests/reports'
DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME = 'latest'
DEFAULT_PROJ_DOCS_DIR = 'docs'
DEFAULT_PROJ_DOCS_STATIC_DIR = 'docs/_static'
DEFAULT_PROJ_DOCS_SOURCE_DIR = 'docs/source'
DEFAULT_PROJ_DOCS_BUILD_DOCTREES_DIR = 'docs/_build/doctrees'
DEFAULT_PROJ_DOCS_BUILD_HTML_DIR = 'docs/_build/html'
DEFAULT_PROJ_DOCS_BUILD_SPELLING_DIR = 'docs/_build/spelling'
DEFAULT_BUILD_IMAGE = 'karrlab/build:latest'

# external service API endpoints
GITHUB_API_ENDPOINT = 'https://api.github.com'
CIRCLE_API_ENDPOINT = 'https://circleci.com/api/v1.1'

# feature flags: upload coverage reports to the corresponding service
COVERALLS_ENABLED = True
CODE_CLIMATE_ENABLED = True
def __init__(self):
    """ Construct build helper

    Reads configuration from CircleCI environment variables, then loads
    tokens and passwords from the passwords repository.

    Raises:
        :obj:`BuildHelperError`: if ``TEST_RUNNER`` is neither ``pytest`` nor ``nose``
    """
    # get settings from environment variables
    self.test_runner = os.getenv('TEST_RUNNER', self.DEFAULT_TEST_RUNNER)
    if self.test_runner not in ['pytest', 'nose']:
        raise BuildHelperError('Unsupported test runner {}'.format(self.test_runner))

    self.repo_type = 'github'
    self.repo_name = os.getenv('CIRCLE_PROJECT_REPONAME')
    self.repo_owner = os.getenv('CIRCLE_PROJECT_USERNAME') or 'KarrLab'
    self.repo_branch = os.getenv('CIRCLE_BRANCH')
    self.repo_revision = os.getenv('CIRCLE_SHA1')
    # CIRCLE_BUILD_NUM may be unset (TypeError from float(None)) or
    # non-numeric (ValueError); float() first also tolerates values like '12.0'
    try:
        self.build_num = int(float(os.getenv('CIRCLE_BUILD_NUM')))
    except (TypeError, ValueError, ):
        self.build_num = 0

    # project layout defaults (see class constants)
    self.proj_tests_dir = self.DEFAULT_PROJ_TESTS_DIR
    self.proj_tests_xml_dir = self.DEFAULT_PROJ_TESTS_XML_DIR
    self.proj_tests_xml_latest_filename = self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME
    self.proj_docs_dir = self.DEFAULT_PROJ_DOCS_DIR
    self.proj_docs_static_dir = self.DEFAULT_PROJ_DOCS_STATIC_DIR
    self.proj_docs_source_dir = self.DEFAULT_PROJ_DOCS_SOURCE_DIR
    self.proj_docs_build_doctrees_dir = self.DEFAULT_PROJ_DOCS_BUILD_DOCTREES_DIR
    self.proj_docs_build_html_dir = self.DEFAULT_PROJ_DOCS_BUILD_HTML_DIR
    self.proj_docs_build_spelling_dir = self.DEFAULT_PROJ_DOCS_BUILD_SPELLING_DIR
    self.build_image = self.DEFAULT_BUILD_IMAGE

    # passwords repository configuration
    self.passwords_repo_url = 'git@github.com:KarrLab/karr_lab_passwords.git'
    self.passwords_repo_username = 'karr-lab-daemon-public'
    self.passwords_repo_password = os.getenv('PASSWORDS_REPO_PASSWORD')
    self.passwords_repo_path = os.path.expanduser(os.path.join('~', '.wc', 'karr_lab_passwords'))
    # NOTE(review): side effect in the constructor -- presumably clones or
    # pulls the passwords repository (network + filesystem); verify
    # download_passwords' behavior before reusing this class offline
    self.download_passwords(pull=True)

    # service tokens, partly from the environment and partly from the
    # downloaded passwords
    self.coveralls_token = os.getenv('COVERALLS_REPO_TOKEN')
    self.code_climate_token = os.getenv('CODECLIMATE_REPO_TOKEN')
    self.github_username = 'karr-lab-daemon'
    self.github_password = self.get_passwords().get('GITHUB_PASSWORD', None)
    self.circleci_api_token = self.get_passwords().get('CIRCLECI_API_TOKEN', None)
    self.test_server_token = self.get_passwords().get('TEST_SERVER_TOKEN', None)
    self.email_password = self.get_passwords().get('EMAIL_PASSWORD', None)

    # code.karrlab.org FTP configuration
    self.code_server_hostname = 'code.karrlab.org'
    self.code_server_username = 'karrlab_code'
    self.code_server_password = self.get_passwords().get('CODE_SERVER_PASSWORD', None)
    self.code_server_directory = '/code.karrlab.org/repo'
#####################
# Create a package
#####################
def create_package(self):
    """ Create a package

    * Create a local Git repository
    * Create a remote GitHub repository
    * Add the repository to Code Climate
    * Add the repository to Coveralls
    * Add the repository to CircleCI project (by following the GitHub repository)
    * Add environment variable for tokens for code.karrlab.org, Coveralls, Code Climate, and CircleCI
    * Add environment variable for password for karr.lab.daemon@gmail.com
    * Generate API token for status badge
    * If the repository is not private, add the repository to Read the Docs
    * Add the package to code.karrlab.org
      * Add JSON-formatted file to ``ssh://code.karrlab.org:/home/karrlab_code/code.karrlab.org/repo/{{ name }}.json``
    * Add badges for Code Climate, Coveralls, CircleCI, and Read the Docs to README.md
    * Add package name to ``downstream_dependencies`` key in ``.karr_lab_build_utils.yml``

    Note: largely interactive -- the user is walked through manual web-UI
    steps and asked to paste the resulting tokens.
    """
    # print introductory message
    print('This program will guide you through creating a new package.')
    click.confirm('Continue?', default=True, abort=True)

    # gather basic information
    name = click.prompt('Enter the name of the new package', type=str)
    description = click.prompt('Enter a brief description of the new package', type=str)
    keywords = click.prompt('Enter a comma-separated list of keywords for the new package', type=str, default=' ')
    keywords = [kw.strip() for kw in keywords.strip().split(',') if kw.strip()]
    dependencies = click.prompt(
        'Enter a comma-separated list of Karr Lab packages that the new package depends on', type=str, default=' ')
    dependencies = [dep.strip() for dep in dependencies.strip().split(',') if dep.strip()]
    private = click.confirm('Should the repository be private?', default=True)
    dirname = click.prompt('Enter the directory for the new package', type=str, default=os.path.join('.', name))
    build_image_version = click.prompt('Enter the build image version to test the package',
                                       type=str, default=self.DEFAULT_BUILD_IMAGE_VERSION)
    github_username = click.prompt('Enter your GitHub username', type=str, default=self.github_username)
    # a row of '*' of the same length as the saved password means
    # "keep the saved password"
    github_password = click.prompt('Enter your GitHub password', type=str, hide_input=True,
                                   default='*' * len(self.github_password or ''))
    if github_password == '*' * len(self.github_password or ''):
        github_password = self.github_password

    # create local and GitHub Git repositories
    print('Creating {} remote Git repository "{}/{}" on GitHub and cloning this repository to "{}"'.format(
        'private' if private else 'public', self.repo_owner, name, dirname))
    self.create_repository(name, description=description, private=private, dirname=dirname,
                           github_username=github_username, github_password=github_password)

    # Code Climate
    # :todo: programmatically add repo to Code Climate and generate tokens
    print('Visit "https://codeclimate.com/dashboard" and click on the "{}" organization.'.format(
        self.repo_owner if private else 'Open source'))
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Sync now" button')
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Add a repository" button')
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Add repo" button for the "{}" repository'.format(name))
    click.confirm('Continue?', default=True, abort=True)
    # NOTE(review): the next format string has no '{}' placeholder, so
    # ``.format(name)`` is a no-op here
    print('Click the "settings" link'.format(name))
    click.confirm('Continue?', default=True, abort=True)
    # NOTE(review): 'Cick' (here and below) is a typo in the displayed text
    print('Cick the "Test coverage" menu item')
    click.confirm('Continue?', default=True, abort=True)
    code_climate_repo_token = click.prompt('Enter the "test reporter id"')
    print('Cick the "Badges" menu item')
    click.confirm('Continue?', default=True, abort=True)
    code_climate_repo_id = click.prompt('Enter the repository ID (ID in the URL https://codeclimate.com/repos/<id>/maintainability)')
    code_climate_repo_badge_token = click.prompt(
        'Enter the badge token (token in the URL https://api.codeclimate.com/v1/badges/<token>/maintainability)')

    # Coveralls
    # :todo: programmatically add repo to Coveralls and generate tokens
    print('Visit "https://coveralls.io/repos/new"')
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "SYNC REPOS" button')
    click.confirm('Continue?', default=True, abort=True)
    print('Search for the "{}/{}" repository and click its "OFF" button'.format(self.repo_owner, name))
    click.confirm('Continue?', default=True, abort=True)
    print('Click the details button for the "{}/{}" repository'.format(self.repo_owner, name))
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Settings" menu item')
    click.confirm('Continue?', default=True, abort=True)
    coveralls_repo_token = click.prompt('Enter the "REPO TOKEN"')
    print('Click the "README BADGE" EMBED" button')
    click.confirm('Continue?', default=True, abort=True)
    coveralls_repo_badge_token = click.prompt(
        'Enter the badge token (token in the URL https://coveralls.io/repos/github/KarrLab/test_a/badge.svg?t=<token>')

    # CircleCI
    # :todo: programmatically create CircleCI build
    # :todo: programmatically create CircleCI token for status badges
    # a checkout SSH key is only needed when a dependency's GitHub
    # repository is private
    has_private_dependencies = False
    g = github.Github(github_username, github_password)
    org = g.get_organization('KarrLab')
    for dependency in dependencies:
        try:
            repo = org.get_repo(dependency)
            has_private_dependencies = has_private_dependencies or repo.private
        except github.UnknownObjectException:
            # dependency has no KarrLab GitHub repository; skip it
            pass

    print('Visit "https://circleci.com/add-projects/gh/KarrLab"')
    click.confirm('Continue?', default=True, abort=True)
    print('Search for the "{}" repository and click its "Follow project" button'.format(name))
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Project settings" icon')
    click.confirm('Continue?', default=True, abort=True)
    if has_private_dependencies:
        print('Click the "Checkout SSH keys" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Authorize with GitHub" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Create and add ... user key" button')
        click.confirm('Continue?', default=True, abort=True)
    print('Click the "API permissions" menu item')
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Create Token" button')
    click.confirm('Continue?', default=True, abort=True)
    print('Select "All", enter a label, and click the "Add Token" button')
    click.confirm('Continue?', default=True, abort=True)
    circleci_repo_token = click.prompt('Enter the new token')

    vars = {
        'COVERALLS_REPO_TOKEN': coveralls_repo_token,
        'CODECLIMATE_REPO_TOKEN': code_climate_repo_token,
        'PASSWORDS_REPO_PASSWORD': self.passwords_repo_password,
    }
    self.set_circleci_environment_variables(vars, repo_name=name)

    # Read the Docs
    if not private:
        # :todo: programmatically add repo to Read the Docs
        print('Visit "https://readthedocs.org/dashboard/import/?"')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "refresh" icon')
        click.confirm('Continue?', default=True, abort=True)
        print('Find the "{}" repository and click its "+" button'.format(name))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Next" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Admin" menu item')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Advanced settings" menu item')
        click.confirm('Continue?', default=True, abort=True)
        print('Set the "Requirements file" to "docs/requirements.txt"')
        click.confirm('Continue?', default=True, abort=True)
        print('Set the "Python configuration file" to "docs/conf.py"')
        click.confirm('Continue?', default=True, abort=True)
        print('Set the "Python interpreter" to "CPython 3.x"')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Maintainers" menu item')
        click.confirm('Continue?', default=True, abort=True)
        print('Add "jonrkarr" to the maintainers')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Notifications" menu item')
        click.confirm('Continue?', default=True, abort=True)
        print('Add your email address and click submit')
        click.confirm('Continue?', default=True, abort=True)
        print('Add "jonrkarr@gmail.com" and click submit')
        click.confirm('Continue?', default=True, abort=True)

    # add package to code.karrlab.org
    with open(pkg_resources.resource_filename('karr_lab_build_utils',
                                              os.path.join('templates', 'code_server', '_package_.json')), 'r') as file:
        template = Template(file.read())
    # render the JSON metadata to a temporary file, upload it by FTP, clean up
    fid, local_filename = tempfile.mkstemp()
    os.close(fid)
    context = {
        'name': name,
        'description': description,
        'private': private,
        'circleci_repo_token': circleci_repo_token,
        'coveralls_repo_token': coveralls_repo_token,
        'code_climate_repo_id': code_climate_repo_id,
    }
    template.stream(**context).dump(local_filename)
    with ftputil.FTPHost(self.code_server_hostname, self.code_server_username, self.code_server_password) as ftp:
        remote_filename = ftp.path.join(self.code_server_directory, '{}.json'.format(name))
        ftp.upload(local_filename, remote_filename)
    os.remove(local_filename)

    # setup repository
    self.setup_repository(name, description=description, keywords=keywords, dependencies=dependencies,
                          private=private, build_image_version=build_image_version, dirname=dirname,
                          circleci_repo_token=circleci_repo_token, coveralls_repo_badge_token=coveralls_repo_badge_token,
                          code_climate_repo_id=code_climate_repo_id, code_climate_repo_badge_token=code_climate_repo_badge_token)

    # append package to downstream dependencies of dependencies
    parent_dirname = os.path.dirname(dirname)
    for dependency in dependencies:
        config_filename = os.path.join(parent_dirname, dependency, '.karr_lab_build_utils.yml')
        if os.path.isfile(config_filename):
            with open(config_filename, 'r') as file:
                # NOTE(review): yaml.load without an explicit Loader is
                # deprecated and unsafe on untrusted input; consider
                # yaml.safe_load for this config file
                config = yaml.load(file)
            if 'downstream_dependencies' not in config:
                config['downstream_dependencies'] = []
            config['downstream_dependencies'].append(name)
            with open(config_filename, 'w') as file:
                yaml.dump(config, file, default_flow_style=False)
        else:
            warnings.warn(('Unable to append package to downstream dependency {} because the '
                           'downstream dependency is not available').format(dependency),
                          UserWarning)
def create_repository(self, name, description='', private=True, dirname=None, github_username=None, github_password=None):
    """ Create a Git repository with the default directory structure

    Args:
        name (:obj`str`): package name
        description (:obj:`str`, optional): package description
        private (:obj:`bool`, optional): if :obj:`False`, make the GitHub repository public and set
            up documentation generation with Read the Docs
        dirname (:obj:`str`, optional): directory name for repository
        github_username (:obj:`str`, optional): GitHub username
        github_password (:obj:`str`, optional): GitHub password

    Raises:
        :obj:`BuildHelperError`: if the repository name is not a valid package name
    """
    # process arguments
    if not re.match('^[a-z][a-z0-9_]*$', name):
        raise BuildHelperError("'{}' not valid: Repository names should start with a letter and only include lower "
                               "case letters, numbers, and underscores".format(name))

    dirname = dirname or os.path.join('.', name)

    if github_username is None:
        github_username = self.github_username
    if github_password is None:
        github_password = self.github_password

    # create GitHub repository
    g = github.Github(github_username, github_password)
    org = g.get_organization('KarrLab')
    org.create_repo(name=name, description=description, private=private, auto_init=True)

    # temporarily hide ~/.gitconfig so the clone uses only the explicit
    # credentials; a try/finally guarantees it is restored even if the clone
    # fails (bug fix: the original leaked the renamed file on error)
    gitconfig_filename = os.path.expanduser('~/.gitconfig')
    has_gitconfig = os.path.isfile(gitconfig_filename)
    if has_gitconfig:
        os.rename(gitconfig_filename, gitconfig_filename + '.ignore')
    try:
        import pygit2
        credentials = pygit2.UserPass(github_username, github_password)
        callbacks = pygit2.RemoteCallbacks(credentials=credentials)
        pygit2.clone_repository('https://github.com/KarrLab/{}.git'.format(name), dirname, callbacks=callbacks)
    finally:
        if has_gitconfig:
            os.rename(gitconfig_filename + '.ignore', gitconfig_filename)
def setup_repository(self, name, description='', keywords=None, dependencies=None, private=True, build_image_version=None,
                     dirname=None, circleci_repo_token=None, coveralls_repo_badge_token=None, code_climate_repo_id=None,
                     code_climate_repo_badge_token=None):
    """ Set up a Git repository with the default directory structure

    Renders the file templates shipped with ``karr_lab_build_utils`` into
    ``dirname``, renames the ``_package_`` template directory to ``name``,
    and generates the documentation skeleton.

    Args:
        name (:obj`str`): package name
        description (:obj:`str`, optional): package description
        keywords (:obj:`list` of :obj:`str`, optional): list of keywords
        dependencies (:obj:`list` of :obj:`str`, optional): list of Karr Lab packages that the package depends on
        private (:obj:`bool`, optional): if :obj:`False`, make the GitHub repository public and set
            up documentation generation with Read the Docs
        build_image_version (:obj:`str`, optional): build image version
        dirname (:obj:`str`, optional): directory name
        circleci_repo_token (:obj:`str`, optional): CircleCI API token (e.g. for badges) for the repository
        coveralls_repo_badge_token (:obj:`str`, optional): Coveralls badge token for the repository
        code_climate_repo_id (:obj:`str`, optional): Code Climate ID for the repository
        code_climate_repo_badge_token (:obj:`str`, optional): Code Climate badge token for the repository

    Raises:
        :obj:`BuildHelperError`: if the package name is not valid
    """
    if re.match('^[a-z][a-z0-9_]*$', name) is None:
        raise BuildHelperError(
            "'{}' not valid: Repository names should start with a letter and only include lower "
            "case letters, numbers, and underscores".format(name))

    keywords = keywords or []
    dependencies = dependencies or []
    build_image_version = build_image_version or self.DEFAULT_BUILD_IMAGE_VERSION
    dirname = dirname or os.path.join('.', name)

    # make sure the target directory exists
    if not os.path.isdir(dirname):
        os.makedirs(dirname)

    # templates to render, relative to the package root
    filenames = (
        '.gitignore',
        'LICENSE',
        'MANIFEST.in',
        'README.md',
        'requirements.txt',
        'requirements.optional.txt',
        'setup.py',
        'setup.cfg',
        'tests/requirements.txt',
        'tests/test_core.py',
        'tests/test_main.py',
        '.circleci/config.yml',
        '.readthedocs.yml',
        '.karr_lab_build_utils.yml',
        '_package_/__init__.py',
        '_package_/VERSION',
        '_package_/core.py',
        '_package_/__main__.py',
    )

    now = datetime.now()
    context = {
        'name': name,
        'description': description,
        'keywords': keywords,
        'version': self.INITIAL_PACKAGE_VERSION,
        'year': now.year,
        'date': '{0.year}-{0.month}-{0.day}'.format(now),
        'dependencies': dependencies,
        'build_image_version': build_image_version,
        'private': private,
        'circleci_repo_token': circleci_repo_token,
        'coveralls_repo_badge_token': coveralls_repo_badge_token,
        'code_climate_repo_id': code_climate_repo_id,
        'code_climate_repo_badge_token': code_climate_repo_badge_token,
    }

    for rel_filename in filenames:
        # create the subdirectory for the file, if needed
        subdir = os.path.dirname(rel_filename)
        if subdir and not os.path.isdir(os.path.join(dirname, subdir)):
            os.makedirs(os.path.join(dirname, subdir))

        # render the Jinja template into the package directory
        template_filename = pkg_resources.resource_filename(
            'karr_lab_build_utils', os.path.join('templates', rel_filename))
        with open(template_filename, 'r') as stream:
            Template(stream.read()).stream(**context).dump(os.path.join(dirname, rel_filename))

    os.rename(os.path.join(dirname, '_package_'), os.path.join(dirname, name))

    self.create_documentation_template(dirname)
###########################
# Register repo on CircleCI
###########################
def follow_circleci_build(self, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None,
                          has_private_dependencies=False):
    """ Follow CircleCI build for a repository

    Args:
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token
        has_private_dependencies (:obj:`bool`, optional): if :obj:`True`, add a GitHub SSH key for the Karr Lab machine user to the build

    Raises:
        :obj:`ValueError`: if a CircleCI build wasn't followed and didn't already exist
    """
    # fall back to the helper's configured repository/credentials
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    # ask CircleCI to follow the repository
    result = self.run_circleci_api('/follow',
                                   method='post', repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
                                   circleci_api_token=circleci_api_token)
    followed = 'following' in result and result['following']
    if not followed:
        raise ValueError(
            'Unable to follow CircleCI build for repository {}/{}'.format(repo_owner, repo_name))

    # add checkout key
    if has_private_dependencies:
        # :todo: add a GitHub SSH key for the Karr Lab machine user to the build
        pass  # pragma: no cover
def get_circleci_environment_variables(self, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
    """ Get the CircleCI environment variables for a repository and their partial values

    Args:
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token

    Returns:
        :obj:`dict`: dictionary of environment variables and their partial values
    """
    # fall back to the helper's configured repository/credentials
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    env_vars = self.run_circleci_api('/envvar',
                                     repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
                                     circleci_api_token=circleci_api_token)
    values = {}
    for env_var in env_vars:
        values[env_var['name']] = env_var['value']
    return values
def set_circleci_environment_variables(self, vars, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
    """ Set the CircleCI environment variables for a repository

    Variables that are about to be overwritten are deleted first, then each
    variable is (re)created through the CircleCI API.

    Args:
        vars (:obj:`dict`): dictionary of environment variables to set
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token
    """
    # fall back to the helper's configured repository/credentials
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    # names of the variables currently defined for the repository
    existing_vars = self.get_circleci_environment_variables(
        repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
        circleci_api_token=circleci_api_token)

    for var_name, var_value in vars.items():
        # delete any existing variable that we want to overwrite
        if var_name in existing_vars:
            self.delete_circleci_environment_variable(var_name,
                                                      repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
                                                      circleci_api_token=circleci_api_token)

        # add the environment variable
        self.run_circleci_api('/envvar',
                              method='post', repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
                              circleci_api_token=circleci_api_token, data={'name': var_name, 'value': var_value})
def delete_circleci_environment_variable(self, var, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
    """ Delete a CircleCI environment variable for a repository

    Args:
        var (:obj:`str`): name of variable to delete
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token
    """
    # fall back to the helper's configured repository/credentials
    repo_type = repo_type if repo_type is not None else self.repo_type
    repo_owner = repo_owner if repo_owner is not None else self.repo_owner
    repo_name = repo_name if repo_name is not None else self.repo_name
    circleci_api_token = circleci_api_token if circleci_api_token is not None else self.circleci_api_token

    endpoint = '/envvar/{}'.format(var)
    self.run_circleci_api(endpoint,
                          method='delete', repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
                          circleci_api_token=circleci_api_token)
def create_code_climate_github_webhook(self, repo_type=None, repo_owner=None, repo_name=None,
                                       github_username=None, github_password=None):
    """ Create GitHub webhook for Code Climate

    Args:
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        github_username (:obj:`str`, optional): GitHub username
        github_password (:obj:`str`, optional): GitHub password

    Raises:
        :obj:`ValueError`: if webhook wasn't created and didn't already exist
    """
    if repo_type is None:
        repo_type = self.repo_type
    if repo_owner is None:
        repo_owner = self.repo_owner
    if repo_name is None:
        repo_name = self.repo_name
    if github_username is None:
        github_username = self.github_username
    if github_password is None:
        github_password = self.github_password

    url = '{}/repos/{}/{}/hooks'.format(self.GITHUB_API_ENDPOINT, repo_owner, repo_name)

    response = requests.post(url, auth=(github_username, github_password), json={
        'name': 'web',
        'config': {
            'url': 'https://codeclimate.com/webhooks',
            'content_type': 'form',
        },
        'events': [
            'push',
            'pull_request'
        ],
        'active': True,
    })

    # 201 == created; anything else is an error worth reporting
    if response.status_code != 201:
        # parse the response body once instead of re-parsing it on each access
        body = response.json()
        if 'errors' in body:
            msg = '\n '.join(err['message'] for err in body['errors'])
            raise ValueError('Unable to create webhook for {}/{}:\n {}'.format(repo_owner, repo_name, msg))
        else:
            msg = body['message']
            raise ValueError('Unable to create webhook for {}/{}: {}'.format(repo_owner, repo_name, msg))
#########################
# Installing dependencies
#########################
def install_requirements(self):
    """ Install requirements

    Upgrades pip and setuptools, installs the package's required and
    optional dependencies plus the test and documentation requirements,
    and updates the CircleCI CLI when both ``docker`` and ``circleci``
    executables are available.
    """
    # upgrade pip, setuptools
    self.run_method_and_capture_stderr(pip.main, ['install', '-U', 'setuptools'])
    self.run_method_and_capture_stderr(pip.main, ['install', '-U', 'pip'])

    # requirements for package
    self._install_requirements_helper('requirements.txt')
    # option headings (e.g. ``[...]`` lines) are stripped from the optional requirements
    self._install_requirements_helper('requirements.optional.txt', ignore_options=True)
    self._install_requirements_helper(os.path.join(self.proj_tests_dir, 'requirements.txt'))
    self._install_requirements_helper(os.path.join(self.proj_docs_dir, 'requirements.txt'))

    # upgrade CircleCI
    if whichcraft.which('docker') and whichcraft.which('circleci'):
        subprocess.check_call(['circleci', 'update'])
def _install_requirements_helper(self, filename, ignore_options=False):
""" Install the packages in a requirements.txt file, including all optional dependencies
Args:
filename (:obj:`str`): path to requirements file
ignore_options (:obj:`bool`, optional): if :obj:`True`, ignore option headings
(e.g. for requirements.optional.txt)
"""
if not os.path.isfile(filename):
return
# create a temporary file that has the optional markings removed
if ignore_options:
sanitized_file, sanitized_filename = tempfile.mkstemp(suffix='.txt')
os.close(sanitized_file)
with open(filename, 'r') as file:
with open(sanitized_filename, 'w') as sanitized_file:
for line in file:
line = line.strip()
if line and line[0] == '[':
continue
sanitized_file.write(line + '\n')
filename = sanitized_filename
self.run_method_and_capture_stderr(pip.main, ['install', '-U', '--process-dependency-links', '-r', filename])
# cleanup temporary file
if ignore_options:
os.remove(sanitized_filename)
    def upgrade_requirements(self):
        """ Upgrade requirements from the Karr Lab's GitHub organization

        Returns:
            :obj:`list` of :obj:`str`: upgraded requirements from the Karr Lab's GitHub organization
        """
        # get the names of the installed PyPI packages; skip editable ("-e") installs
        lines = self.run_method_and_capture_stdout(pip.main, ['freeze'])
        pkgs = []
        for line in lines.split('\n'):
            if not line.startswith('-e') and '==' in line:
                pkgs.append(line.partition('==')[0])
        # find the installed packages that are hosted in the Karr Lab GitHub
        # organization and build pip requirement specifiers for their repositories
        infos = self.run_method_and_capture_stdout(pip.main, ['show'] + pkgs)
        reqs = []
        for info in infos.split('---\n'):
            if 'github.com/KarrLab/' in info:
                # pip reports names with '-'; the egg fragment needs '_'
                name = info.partition('Name: ')[2].partition('\n')[0].replace('-', '_')
                url = info.partition('Home-page: ')[2].partition('\n')[0]
                reqs.append('git+{}.git#egg={}[all]'.format(url, name))
        # upgrade PyPI requirements
        self.run_method_and_capture_stderr(pip.main, ['install', '-U', '--process-dependency-links'] + reqs)
        # upgrade CircleCI
        if whichcraft.which('docker') and whichcraft.which('circleci'):
            subprocess.check_call(['circleci', 'update'])
        return reqs
########################
# Running tests
########################
def run_tests(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
coverage_type=CoverageType.branch, environment=Environment.local, exit_on_failure=True,
ssh_key_filename='~/.ssh/id_rsa'):
""" Run unit tests located at `test_path`.
Optionally, generate a coverage report.
Optionally, save the results to a file
To configure coverage, place a .coveragerc configuration file in the root directory
of the repository - the same directory that holds .coverage. Documentation of coverage
configuration is in https://coverage.readthedocs.io/en/coverage-4.2/config.html
Args:
dirname (:obj:`str`, optional): path to package that should be tested
test_path (:obj:`str`, optional): path to tests that should be run
verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
with_xunit (:obj:`bool`, optional): whether or not to save test results
with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
coverage_dirname (:obj:`str`, optional): directory to save coverage data
coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
environment (:obj:`str`, optional): environment to run tests (local, docker, or circleci-local-executor)
exit_on_failure (:obj:`bool`, optional): whether or not to exit on test failure
ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key; needed for Docker environment
Raises:
:obj:`BuildHelperError`: If the environment is not supported or the package directory not set
"""
if environment == Environment.local:
self._run_tests_local(dirname=dirname, test_path=test_path, verbose=verbose, with_xunit=with_xunit,
with_coverage=with_coverage, coverage_dirname=coverage_dirname,
coverage_type=coverage_type, exit_on_failure=exit_on_failure)
elif environment == Environment.docker:
self._run_tests_docker(dirname=dirname, test_path=test_path, verbose=verbose, with_xunit=with_xunit,
with_coverage=with_coverage, coverage_dirname=coverage_dirname,
coverage_type=coverage_type, ssh_key_filename=ssh_key_filename)
elif environment == Environment.circleci:
self._run_tests_circleci(dirname=dirname, test_path=test_path, verbose=verbose, ssh_key_filename=ssh_key_filename)
else:
raise BuildHelperError('Unsupported environment: {}'.format(environment))
    def _run_tests_local(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
                         coverage_type=CoverageType.branch, exit_on_failure=True):
        """ Run unit tests located at `test_path` locally

        Optionally, generate a coverage report.
        Optionally, save the results to a file

        To configure coverage, place a .coveragerc configuration file in the root directory
        of the repository - the same directory that holds .coverage. Documentation of coverage
        configuration is in https://coverage.readthedocs.io/en/coverage-4.2/config.html

        Args:
            dirname (:obj:`str`, optional): path to package that should be tested
            test_path (:obj:`str`, optional): path to tests that should be run
            verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
            with_xunit (:obj:`bool`, optional): whether or not to save test results
            with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
            coverage_dirname (:obj:`str`, optional): directory to save coverage data
            coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
            exit_on_failure (:obj:`bool`, optional): whether or not to exit on test failure

        Raises:
            :obj:`BuildHelperError`: If the package directory not set
        """
        # expose secrets from the passwords repository as environment variables for the tests
        self.set_env_vars_from_passwords()
        # XML report file name embeds the running Python version, e.g. <name>.3.6.4.xml
        py_v = self.get_python_version()
        abs_xml_latest_filename = os.path.join(
            self.proj_tests_xml_dir, '{0}.{1}.xml'.format(self.proj_tests_xml_latest_filename, py_v))
        if with_coverage:
            # start coverage before the test runner imports the package under test
            if coverage_type == CoverageType.statement:
                cov = coverage.Coverage(data_file=os.path.join(coverage_dirname, '.coverage'),
                                        data_suffix=py_v, config_file=True)
                cov.start()
            elif coverage_type == CoverageType.branch:
                cov = coverage.Coverage(data_file=os.path.join(coverage_dirname, '.coverage'),
                                        data_suffix=py_v, config_file=True, branch=True)
                cov.start()
            # elif coverage_type == CoverageType.multiple_condition:
            #    # :todo: support instrumental once its dependency astkit is updated for Python 3
            #    parser = configparser.ConfigParser()
            #    parser.read(os.path.join(dirname, 'setup.cfg'))
            #    targets = parser.get('coverage:run', 'source').strip().split('\n')
            #    targets = [target.strip() for target in targets]
            #
            #    opts = attrdict.AttrDict({
            #        'file': os.path.join(coverage_dirname, '.coverage.' + py_v),
            #        'report': False,
            #        'label': False,
            #        'summary': False,
            #        'statements': False,
            #        'xml': False,
            #        'html': False,
            #        'all': False,
            #        'targets': targets,
            #        'ignores': [],
            #        'report_conditions_with_literals': False,
            #        'instrument_assertions': True,
            #        'use_metadata_cache': False,
            #        'instrument_comparisons': True,
            #    })
            #    cov = instrumental.api.Coverage(opts, os.getcwd())
            #    cov.start(opts.targets, opts.ignores)
            else:
                raise BuildHelperError('Unsupported coverage type: {}'.format(coverage_type))
        if with_xunit and not os.path.isdir(self.proj_tests_xml_dir):
            os.makedirs(self.proj_tests_xml_dir)
        if self.test_runner == 'pytest':
            # convert nose-style test paths (module:Class.method) to pytest style (module::Class::method)
            test_path = test_path.replace(':', '::')
            test_path = re.sub('::(.+?)(\.)', r'::\1::', test_path)
            argv = [test_path]
            if verbose:
                argv.append('--capture=no')
            if with_xunit:
                argv.append('--junitxml=' + abs_xml_latest_filename)
            result = pytest.main(argv)
        elif self.test_runner == 'nose':
            # convert pytest-style test paths (module::Class::method) back to nose style (module:Class.method)
            test_path = test_path.replace('::', ':', 1)
            test_path = test_path.replace('::', '.', 1)
            argv = ['nosetests', test_path]
            if verbose:
                argv.append('--nocapture')
            if with_xunit:
                argv += ['--with-xunit', '--xunit-file', abs_xml_latest_filename]
            # nose.run returns True on success; convert to a process-style exit code
            result = int(not nose.run(argv=argv))
        else:
            raise BuildHelperError('Unsupported test runner {}'.format(self.test_runner))
        if with_coverage:
            cov.stop()  # pragma: no cover # this line can't be covered
            cov.save()
        if exit_on_failure and result != 0:
            sys.exit(1)
    def _run_tests_docker(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
                          coverage_type=CoverageType.branch, ssh_key_filename='~/.ssh/id_rsa'):
        """ Run unit tests located at `test_path` using a Docker image:

        #. Create a container based on the build image (e.g, karrlab/build:latest)
        #. Copy your GitHub SSH key to the container
        #. Remove Python cache directories (``__pycache__``) from the package
        #. Copy the package to the container at ``/root/projects``
        #. Install the Karr Lab build utilities into the container
        #. Install the requirements for the package in the container
        #. Run the tests inside the container using the same version of Python that called this method
        #. Delete the container

        Args:
            dirname (:obj:`str`, optional): path to package that should be tested
            test_path (:obj:`str`, optional): path to tests that should be run
            verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
            with_xunit (:obj:`bool`, optional): whether or not to save test results
            with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
            coverage_dirname (:obj:`str`, optional): directory to save coverage data
            coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
            ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key
        """
        ssh_key_filename = os.path.expanduser(ssh_key_filename)
        # pick container name: unique per package and per second
        basename = os.path.basename(os.path.abspath(dirname))
        now = datetime.now()
        container = 'build-{0}-{1.year}-{1.month}-{1.day}-{1.hour}-{1.minute}-{1.second}'.format(basename, now)
        # get Python version of the calling interpreter; the same major.minor is used in the container
        py_v = '{}.{}'.format(sys.version_info[0], sys.version_info[1])
        # create container
        print('\n\n')
        print('=====================================')
        print('== Creating container')
        print('=====================================')
        self._run_docker_command(['run', '-it', '-d', '--name', container, self.build_image, 'bash'])
        # copy GitHub SSH key to container so it can clone Karr Lab repositories
        print('\n\n')
        print('=====================================')
        print('== Copying SSH key to container')
        print('=====================================')
        self._run_docker_command(['cp', ssh_key_filename, container + ':/root/.ssh/'])
        # delete __pycache__ directories so stale host bytecode is not copied into the container
        print('\n\n')
        print('=====================================')
        print('== Deleting __pycache__ directories')
        print('=====================================')
        for root, rel_dirnames, rel_filenames in os.walk(dirname):
            for rel_dirname in fnmatch.filter(rel_dirnames, '__pycache__'):
                shutil.rmtree(os.path.join(root, rel_dirname))
        # copy package to container
        print('\n\n')
        print('=====================================')
        print('== Copying package to container')
        print('=====================================')
        self._run_docker_command(['cp', os.path.abspath(dirname), container + ':/root/project'])
        # install pkg_utils
        print('\n\n')
        print('=====================================')
        print('== Install pkg_utils')
        print('=====================================')
        build_utils_uri = 'git+https://github.com/KarrLab/pkg_utils.git#egg=pkg_utils'
        self._run_docker_command(['exec', container, 'bash', '-c',
                                  'pip{} install -U --process-dependency-links {}'.format(py_v, build_utils_uri)])
        # install Karr Lab build utils
        print('\n\n')
        print('=====================================')
        print('== Install karr_lab_build_utils')
        print('=====================================')
        build_utils_uri = 'git+https://github.com/KarrLab/karr_lab_build_utils.git#egg=karr_lab_build_utils'
        self._run_docker_command(['exec', container, 'bash', '-c',
                                  'pip{} install -U --process-dependency-links {}'.format(py_v, build_utils_uri)])
        # install package in editable mode inside the container
        print('\n\n')
        print('=====================================')
        print('== Install package')
        print('=====================================')
        self._run_docker_command(['exec', container, 'bash', '-c',
                                  'cd /root/project && pip{} install --process-dependency-links -e .'.format(py_v)])
        # install dependencies
        print('\n\n')
        print('=====================================')
        print('== Install dependencies')
        print('=====================================')
        self._run_docker_command(['exec', container, 'bash', '-c',
                                  'cd /root/project && karr_lab_build_utils{} upgrade-requirements'.format(py_v)])
        # test package in container; the container runs this same tool's run-tests command
        print('\n\n')
        print('=====================================')
        print('== Running tests')
        print('=====================================')
        options = []
        options += ['--test-path', test_path]
        if with_coverage:
            options += ['--with-coverage', '--coverage-type', coverage_type.name]
        if with_xunit:
            options.append('--with-xunit')
        if verbose:
            options.append('--verbose')
        # raise_error=False: test failures are reported via the copied-back reports, not an exception here
        self._run_docker_command(['exec',
                                  '--env', 'PASSWORDS_REPO_PASSWORD={}'.format(self.passwords_repo_password),
                                  container,
                                  'bash', '-c',
                                  'cd /root/project && karr_lab_build_utils{} run-tests {}'.format(py_v, ' '.join(options))],
                                 raise_error=False)
        if with_coverage:
            # copy the coverage data file produced inside the container back to the host
            # NOTE(review): assumes the `ls` output contains a matching path — `match` is not checked for None
            out = self._run_docker_command(['exec', container, 'bash', '-c', 'ls -la ' +
                                            os.path.join('/root', 'project', '.coverage.{}.*'.format(py_v))])
            match = re.search('/root/project/(\.coverage\.\d+\.\d+\.\d+)', out)
            self._run_docker_command(['cp', container + ':' + match.group(0), os.path.join(coverage_dirname, match.group(1))])
        if with_xunit:
            # copy the XML test report produced inside the container back to the host
            out = self._run_docker_command(['exec', container, 'bash', '-c', 'ls -la ' +
                                            os.path.join('/root', 'project', self.DEFAULT_PROJ_TESTS_XML_DIR,
                                                         '{}.{}.*.xml'.format(self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME, py_v))])
            match = re.search('/root/project/{}/({}\.\d+\.\d+\.\d+.xml)'.format(self.DEFAULT_PROJ_TESTS_XML_DIR,
                                                                                self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME), out)
            self._run_docker_command(['cp', container + ':' + match.group(0), os.path.join(self.proj_tests_xml_dir, match.group(1))])
        # stop and remove container
        print('\n\n')
        print('=====================================')
        print('== Removing container')
        print('=====================================')
        self._run_docker_command(['rm', '-f', container])
    def _run_docker_command(self, cmd, cwd=None, raise_error=True):
        """ Run a docker command

        Args:
            cmd (:obj:`list`): docker command to run
            cwd (:obj:`str`, optional): directory from which to run :obj:`cmd`
            raise_error (:obj:`bool`, optional): if true, raise errors

        Returns:
            :obj:`str`: standard output

        Raises:
            :obj:`BuildHelperError`: if the docker command fails
        """
        # capture the subprocess's output; poll rather than wait so capturer
        # can collect output while the command runs
        with capturer.CaptureOutput() as captured:
            process = subprocess.Popen(['docker'] + cmd, cwd=cwd)
            while process.poll() is None:
                time.sleep(0.5)
            out = captured.get_text()
        if process.returncode != 0 and raise_error:
            raise BuildHelperError(out)
        return out
    def _run_tests_circleci(self, dirname='.', test_path='tests', verbose=False, ssh_key_filename='~/.ssh/id_rsa'):
        """ Run unit tests located at `test_path` using the CircleCI local executor. This will run the same commands defined in
        ``.circle/config.yml`` as the cloud version of CircleCI.

        Args:
            dirname (:obj:`str`, optional): path to package that should be tested
            test_path (:obj:`str`, optional): path to tests that should be run
            verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
            ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key

        Raises:
            :obj:`BuildHelperError`: if the tests fail
        """
        ssh_key_filename = os.path.expanduser(ssh_key_filename)
        karr_lab_build_utils_dirname = os.path.expanduser('~/Documents/karr_lab_build_utils')
        # delete __pycache__ directories so stale bytecode is not used inside the executor
        for root, rel_dirnames, rel_filenames in os.walk(dirname):
            for rel_dirname in fnmatch.filter(rel_dirnames, '__pycache__'):
                shutil.rmtree(os.path.join(root, rel_dirname))
        # update CircleCI config to use a build image variant that contains the SSH key;
        # the original config is backed up and restored below
        circleci_config_filename = os.path.join(dirname, '.circleci', 'config.yml')
        backup_circleci_config_filename = os.path.join(dirname, '.circleci', 'config.yml.save')
        with open(circleci_config_filename, 'r') as file:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
            # input — presumably the local config is trusted; confirm
            config = yaml.load(file)
        image_name = config['jobs']['build']['docker'][0]['image']
        if image_name.endswith('.with_ssh_key'):
            # config already points at the SSH-key variant; recover the base image name
            image_with_ssh_key_name = image_name
            image_name = image_name[:-13]
        else:
            image_with_ssh_key_name = image_name + '.with_ssh_key'
        shutil.copyfile(circleci_config_filename, backup_circleci_config_filename)
        config['jobs']['build']['docker'][0]['image'] = image_with_ssh_key_name
        with open(circleci_config_filename, 'w') as file:
            yaml.dump(config, file, default_flow_style=False)
        # Build docker image with SSH key baked in
        circleci_context_dirname = os.path.join(karr_lab_build_utils_dirname, 'circleci_docker_context')
        if not os.path.isdir(circleci_context_dirname):
            os.makedirs(circleci_context_dirname)
        shutil.copy(ssh_key_filename, os.path.join(circleci_context_dirname, 'GITHUB_SSH_KEY'))
        dockerfile_filename = os.path.join(circleci_context_dirname, 'Dockerfile_Circleci')
        with open(dockerfile_filename, 'w') as file:
            file.write('FROM {}\n'.format(image_name))
            file.write('COPY circleci_docker_context/GITHUB_SSH_KEY /root/.ssh/id_rsa\n')
            file.write('ENV TEST_SERVER_TOKEN={}\n'.format(self.test_server_token or ''))
            file.write('RUN eval `ssh-agent` && ssh-add /root/.ssh/id_rsa\n')
            file.write('CMD bash\n')
        self._run_docker_command(['build',
                                  '--tag', image_with_ssh_key_name,
                                  '-f', os.path.join('circleci_docker_context', 'Dockerfile_Circleci'),
                                  '.'],
                                 cwd=karr_lab_build_utils_dirname)
        # test package with the CircleCI local executor; poll so capturer collects output
        with capturer.CaptureOutput() as captured:
            process = subprocess.Popen(['circleci',
                                        '--env', 'test_path={}'.format(test_path),
                                        '--env', 'verbose={:d}'.format(verbose),
                                        '--env', 'dry_run=1',
                                        '--env', 'PASSWORDS_REPO_PASSWORD={}'.format(self.passwords_repo_password),
                                        'build'], cwd=dirname)
            while process.poll() is None:
                time.sleep(0.5)
            out = captured.get_text()
        # revert CircleCI config file
        os.remove(circleci_config_filename)
        shutil.move(backup_circleci_config_filename, circleci_config_filename)
        # delete docker image
        self._run_docker_command(['rmi', image_with_ssh_key_name], raise_error=False)
        # cleanup circleci context
        shutil.rmtree(circleci_context_dirname)
        # raise error if tests didn't pass; the local executor may exit 0 even when
        # a task fails, so the captured output is also checked
        if process.returncode != 0 or 'Task failed' in out:
            raise BuildHelperError(out.encode('utf-8'))
def get_test_results(self):
""" Load test results from a set of XML files
Results:
:obj:`TestResults`: test results
"""
test_results = TestResults()
filename_pattern = os.path.join(self.proj_tests_xml_dir,
'{0}.*.xml'.format(self.proj_tests_xml_latest_filename))
for filename in glob.glob(filename_pattern):
match = re.match('^{}\.(.*?)\.xml$'.format(self.proj_tests_xml_latest_filename), os.path.basename(filename))
python_version = match.group(1)
doc = minidom.parse(filename)
suite = doc.getElementsByTagName('testsuite')[0]
for case in suite.getElementsByTagName('testcase'):
case_result = TestCaseResult()
case_result.classname = case.getAttribute('classname')
case_result.name = case.getAttribute('name')
case_result.python_version = python_version
case_result.time = float(case.getAttribute('time'))
if case.hasAttribute('file'):
case_result.file = case.getAttribute('file')
if case.hasAttribute('line'):
case_result.line = int(float(case.getAttribute('line')))
stdout = case.getElementsByTagName('system-out')
if stdout:
case_result.stdout = ''.join([child.nodeValue for child in stdout[0].childNodes])
stderr = case.getElementsByTagName('system-err')
if stderr:
case_result.stderr = ''.join([child.nodeValue for child in stderr[0].childNodes])
skip = case.getElementsByTagName('skipped')
error = case.getElementsByTagName('error')
failure = case.getElementsByTagName('failure')
if skip:
case_result.type = TestCaseResultType.skipped
elif error:
case_result.type = TestCaseResultType.error
elif failure:
case_result.type = TestCaseResultType.failure
else:
case_result.type = TestCaseResultType.passed
not_pass = skip or error or failure
if not_pass:
case_result.subtype = not_pass[0].getAttribute('type')
case_result.message = not_pass[0].getAttribute('message')
case_result.details = ''.join([child.nodeValue for child in not_pass[0].childNodes])
test_results.cases.append(case_result)
return test_results
def get_test_results_status(self, test_results, installation_error, tests_error, other_error, dry_run=False):
""" Get the status of a set of results
* Old err
* New error
* Fixed error
* New downstream error
Args:
test_results (:obj:`TestResults`): test results
installation_error (:obj:`bool`): :obj:`True` if there were other errors during the installation
tests_error (:obj:`bool`): obj:`False` if the tests passes
other_error (:obj:`bool`): :obj:`True` if there were other errors during the build such as in generating and/or
archiving the reports
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
Returns:
:obj:`dict`: status of a set of results
"""
if dry_run:
return {
'is_fixed': False,
'is_old_error': False,
'is_new_error': False,
'is_other_error': False,
'is_new_downstream_error': False,
}
# determine if there is an error
if (installation_error or tests_error or other_error) and test_results.get_num_tests() == 0:
is_other_error = True
is_new_error = False
is_old_error = False
is_fixed = False
else:
is_other_error = False
passed = test_results.get_num_errors() == 0 and test_results.get_num_failures() == 0
# determine if error is new
if self.build_num <= 1:
if passed:
is_old_error = False
is_new_error = False
is_fixed = True
else:
is_old_error = False
is_new_error = True
is_fixed = False
else:
prev_result = self.run_circleci_api('/' + str(self.build_num - 1))
if passed:
is_old_error = False
is_new_error = False
is_fixed = prev_result['status'] not in ['success', 'fixed']
else:
is_old_error = prev_result['status'] not in ['success', 'fixed']
is_new_error = prev_result['status'] in ['success', 'fixed']
is_fixed = False
# determine if build was triggered by an upstream package
upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
upstream_build_num = int(os.getenv('UPSTREAM_BUILD_NUM', '0'))
if upstream_repo_name and is_new_error and self.build_num > 1 and not is_other_error:
is_new_downstream_error = True
else:
is_new_downstream_error = False
return {
'is_fixed': is_fixed,
'is_old_error': is_old_error,
'is_new_error': is_new_error,
'is_other_error': is_other_error,
'is_new_downstream_error': is_new_downstream_error,
}
def do_post_test_tasks(self, installation_error, tests_error, dry_run=False):
""" Do all post-test tasks for CircleCI
* Make test and coverage reports
* Compile documentation
* Archive test and coverage reports to the Karr Lab test history server, Coveralls, and Code Climate
* Trigger tests of downstream dependencies
* Notify authors of new failures in downstream packages
Args:
installation_error (:obj:`bool`): :obj:`True` if there were other errors during the installation
tests_error (:obj:`bool`): obj:`False` if the tests passes
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
Returns:
:obj:`list` of :obj:`str`: names of triggered packages
:obj:`dict`: status of a set of results
"""
try:
static_analyses = self.make_and_archive_reports(dry_run=dry_run)
other_error = False
except Exception as exception:
static_analyses = {'missing_requirements': [], 'unused_requirements': []}
other_error = True
triggered_packages = self.trigger_tests_of_downstream_dependencies(dry_run=dry_run)
status = self.send_email_notifications(installation_error, tests_error, other_error, static_analyses, dry_run=dry_run)
return (triggered_packages, status)
    def send_email_notifications(self, installation_error, tests_error, other_error, static_analyses, dry_run=False):
        """ Send email notifications of failures, fixes, and downstream failures

        Args:
            installation_error (:obj:`bool`): :obj:`True` if there were errors during the installation
            tests_error (:obj:`bool`): :obj:`True` if the tests did not pass
            other_error (:obj:`bool`): :obj:`True` if there were other errors during the build such as in generating and/or
                archiving the reports
            static_analyses (:obj:`dict`): analyses of missing and unused requirements
            dry_run (:obj:`bool`, optional): if true, don't send any notifications

        Returns:
            :obj:`dict`: status of a set of results
        """
        test_results = self.get_test_results()
        status = self.get_test_results_status(test_results, installation_error, tests_error, other_error, dry_run=dry_run)
        # stop if this is a dry run
        if dry_run:
            return status
        # build context for email from this build's CircleCI metadata
        result = self.run_circleci_api('/' + str(self.build_num))
        context = {
            'repo_name': self.repo_name,
            'commit': result['all_commit_details'][0]['commit'],
            'committer_name': result['all_commit_details'][0]['committer_name'],
            'committer_email': result['all_commit_details'][0]['committer_email'],
            'commit_subject': result['all_commit_details'][0]['subject'],
            'commit_url': result['all_commit_details'][0]['commit_url'],
            'build_num': self.build_num,
            'build_url': result['build_url'],
            'test_results': test_results,
            'static_analyses': static_analyses,
        }
        if status['is_new_downstream_error']:
            # also describe the upstream build that triggered this one
            upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
            upstream_build_num = int(os.getenv('UPSTREAM_BUILD_NUM', '0'))
            result = self.run_circleci_api('/' + str(upstream_build_num), repo_name=upstream_repo_name)
            context['upstream'] = {
                'repo_name': upstream_repo_name,
                'commit': result['all_commit_details'][0]['commit'],
                'committer_name': result['all_commit_details'][0]['committer_name'],
                'committer_email': result['all_commit_details'][0]['committer_email'],
                'commit_subject': result['all_commit_details'][0]['subject'],
                'commit_url': result['all_commit_details'][0]['commit_url'],
                'build_num': upstream_build_num,
                'build_url': result['build_url'],
            }
        # recipients come from the build configuration
        config = self.get_build_config()
        recipients = config.get('email_notifications', [])
        # send notifications; exactly one of the four status templates is used
        if status['is_fixed']:
            subject = '[Builds] [{0}] {0} is fixed!'.format(context['repo_name'])
            self._send_notification_email(recipients, subject, 'fixed.html', context)
        elif status['is_old_error']:
            subject = '[Builds] [{0}] {0} is still broken!'.format(context['repo_name'])
            self._send_notification_email(recipients, subject, 'old_error.html', context)
        elif status['is_new_error']:
            subject = '[Builds] [{0}] {0} has been broken!'.format(context['repo_name'])
            self._send_notification_email(recipients, subject, 'new_error.html', context)
        elif status['is_other_error']:
            subject = '[Builds] [{0}] {0} is broken!'.format(context['repo_name'])
            self._send_notification_email(recipients, subject, 'other_error.html', context)
        # downstream errors are additionally reported to the developers mailing list
        if status['is_new_downstream_error']:
            recipients.append('wholecell-developers@googlegroups.com')
            subject = '[Builds] [{1}] commit {0} to {1} may have broken {2}'.format(
                context['upstream']['commit'], context['upstream']['repo_name'], context['repo_name'])
            self._send_notification_email(recipients, subject, 'new_downstream_error.html', context)
        return status
def _send_notification_email(self, recipients, subject, template_filename, context, dry_run=False):
""" Send an email notification of test results
Args:
recipients (:obj:`list` of :obj:`str`): recipient email addresses
subject (:obj:`str`): subject
template_filename (obj:`str`): path to template
context (obj:`dict`): context for template
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
"""
full_template_filename = pkg_resources.resource_filename(
'karr_lab_build_utils', os.path.join('templates', 'email_notifications', template_filename))
with open(full_template_filename, 'r') as file:
template = Template(file.read())
body = template.render(**context)
msg = email.message.Message()
msg['From'] = email.utils.formataddr((str(email.header.Header('Karr Lab Build System', 'utf-8')), 'noreply@karrlab.org'))
tos = []
for recipient in recipients:
tos.append(email.utils.formataddr((None, recipient)))
msg['To'] = ', '.join(tos)
msg['Subject'] = subject
msg.add_header('Content-Type', 'text/html')
msg.set_payload(body)
if not dry_run:
smtp = smtplib.SMTP('smtp.gmail.com:587')
smtp.ehlo()
smtp.starttls()
smtp.login('karr.lab.daemon', self.email_password)
smtp.sendmail('noreply@karrlab.org', recipients, msg.as_string())
smtp.quit()
def make_and_archive_reports(self, coverage_dirname='.', dry_run=False):
""" Make and archive reports:
* Upload test report to history server
* Upload coverage report to Coveralls and Code Climate
Args:
coverage_dirname (:obj:`str`, optional): directory to merge coverage files
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
Returns:
:obj:`dict`: analyses of missing and unused requirements
"""
errors = []
""" test reports """
# Upload test report to history server
self.archive_test_report()
""" coverage """
# Merge coverage reports
# Generate HTML report
# Upload coverage report to Coveralls and Code Climate
self.combine_coverage_reports(coverage_dirname=coverage_dirname)
self.archive_coverage_report(coverage_dirname=coverage_dirname, dry_run=dry_run)
""" static analysis """
config = self.get_build_config()
ignore_files = config.get('static_analyses', {}).get('ignore_files', [])
missing_reqs = self.find_missing_requirements(self.repo_name, ignore_files=ignore_files)
if missing_reqs:
errors.append('The following requirements are missing:\n {}'.format(
'\n '.join(missing_req[0] for missing_req in missing_reqs)))
unused_reqs = self.find_unused_requirements(self.repo_name, ignore_files=ignore_files)
if unused_reqs:
msg = 'The following requirements appear to be unused:\n {}'.format('\n '.join(unused_reqs))
warnings.warn(msg, UserWarning)
""" documentation """
self.make_documentation()
""" Throw error """
if errors:
raise BuildHelperError('\n\n'.join(errors))
return {
'missing_requirements': missing_reqs,
'unused_requirements': unused_reqs,
}
########################
# Test reports
########################
def archive_test_report(self):
""" Upload test report to history server
Raises:
:obj:`BuildHelperError`: if there is an error uploading the report to the test history server
"""
if not self.test_server_token or \
self.repo_name is None or \
self.repo_owner is None or \
self.repo_branch is None or \
self.repo_revision is None:
return
abs_xml_latest_filename_pattern = os.path.join(
self.proj_tests_xml_dir, '{0}.*.xml'.format(self.proj_tests_xml_latest_filename))
for abs_xml_latest_filename in glob.glob(abs_xml_latest_filename_pattern):
match = re.match('^.*?\.(\d+\.\d+\.\d+)\.xml$', abs_xml_latest_filename)
pyv = match.group(1)
r = requests.post('http://tests.karrlab.org/rest/submit_report',
data={
'token': self.test_server_token,
'repo_name': self.repo_name,
'repo_owner': self.repo_owner,
'repo_branch': self.repo_branch,
'repo_revision': self.repo_revision,
'build_num': self.build_num,
'report_name': pyv,
},
files={
'report': open(abs_xml_latest_filename, 'rb'),
})
r.raise_for_status()
r_json = r.json()
if 'success' not in r_json or not r_json['success']:
raise BuildHelperError('Error uploading report to test history server: {}'.format(r_json['message']))
########################
# Coverage reports
########################
def combine_coverage_reports(self, coverage_dirname='.'):
"""
Args:
coverage_dirname (:obj:`str`, optional): directory to merge coverage files
"""
data_paths = []
for name in glob.glob(os.path.join(coverage_dirname, '.coverage.*')):
data_path = tempfile.mktemp()
shutil.copyfile(name, data_path)
data_paths.append(data_path)
# stop if there are no files to combine
if not data_paths:
warnings.warn('No coverage files exist to combine', UserWarning)
return
coverage_doc = coverage.Coverage(data_file=os.path.join(coverage_dirname, '.coverage'))
coverage_doc.combine(data_paths=data_paths)
coverage_doc.save()
def archive_coverage_report(self, coverage_dirname='.', dry_run=False):
""" Archive coverage report:
* Upload report to Coveralls
* Upload report to Code Climate
Args:
coverage_dirname (:obj:`str`, optional): directory to save coverage data
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
"""
# upload to Coveralls
if self.COVERALLS_ENABLED:
self.upload_coverage_report_to_coveralls(coverage_dirname=coverage_dirname, dry_run=dry_run)
# upload to Code Climate
if self.CODE_CLIMATE_ENABLED:
self.upload_coverage_report_to_code_climate(coverage_dirname=coverage_dirname, dry_run=dry_run)
    def upload_coverage_report_to_coveralls(self, coverage_dirname='.', dry_run=False):
        """ Upload coverage report to Coveralls

        Args:
            coverage_dirname (:obj:`str`, optional): directory to save coverage data
            dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls server
        """
        # don't upload if there is no coverage file
        if not os.path.isfile(os.path.join(coverage_dirname, '.coverage')):
            warnings.warn('No coverage file exists to upload to Coveralls', UserWarning)
            return

        # only attempt the upload when a Coveralls repo token is configured
        if self.coveralls_token:
            runner = coveralls.Coveralls(True, repo_token=self.coveralls_token,
                                         service_name='circle-ci', service_job_id=self.build_num)

            def get_coverage():
                # load the .coverage file from `coverage_dirname` and build the
                # report structure that the coveralls package submits
                workman = coverage.Coverage(data_file=os.path.join(coverage_dirname, '.coverage'))
                workman.load()
                workman.get_data()
                return coveralls.reporter.CoverallReporter(workman, workman.config).report()

            # patch Coveralls.get_coverage so the runner submits the report built above
            # (from `coverage_dirname`) rather than reading its default location;
            # note the report is computed eagerly via return_value=get_coverage()
            with patch.object(coveralls.Coveralls, 'get_coverage', return_value=get_coverage()):
                runner.wear(dry_run=dry_run)
    def upload_coverage_report_to_code_climate(self, coverage_dirname='.', dry_run=False):
        """ Upload coverage report to Code Climate

        Args:
            coverage_dirname (:obj:`str`, optional): directory to save coverage data
            dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls server

        Raises:
            :obj:`BuildHelperError`: If error uploading code coverage to Code Climate
        """
        # don't upload if there is no coverage file
        if not os.path.isfile(os.path.join(coverage_dirname, '.coverage')):
            warnings.warn('No coverage file exists to upload to Code Climate', UserWarning)
            return

        # save coverage data to xml
        # NOTE(review): the XML report is written to the current working directory,
        # not `coverage_dirname` — confirm this is intentional
        xml_cov_filename = 'coverage.xml'
        workman = coverage.Coverage(data_file=os.path.join(coverage_dirname, '.coverage'))
        workman.load()
        workman.get_data()
        workman.xml_report(outfile=xml_cov_filename)

        # download the Code Climate test reporter binary into the home directory
        # (this happens even on dry runs)
        response = requests.get('https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64')
        response.raise_for_status()
        cc_path = os.path.expanduser('~/cc-test-reporter')
        with open(cc_path, 'wb') as file:
            file.write(response.content)
        # make the downloaded binary executable
        os.chmod(cc_path, 0o755)

        # run the reporter; check_call raises CalledProcessError on failure
        if not dry_run:
            subprocess.check_call([cc_path, 'before-build'])
            subprocess.check_call([cc_path, 'after-build',
                                   '-t', 'coverage.py',
                                   '-r', self.code_climate_token,
                                   ])
########################
# Documentation
########################
    def create_documentation_template(self, dirname='.'):
        """ Create Sphinx documentation template for a package

        Renders the Jinja documentation templates shipped with
        karr_lab_build_utils into the package's docs directory.

        Args:
            dirname (:obj:`str`, optional): path to package

        Raises:
            :obj:`ValueError`: if no package or more than one package is specified
        """
        # read the package name(s) from the [sphinx-apidocs] section of setup.cfg
        parser = configparser.ConfigParser()
        parser.read(os.path.join(dirname, 'setup.cfg'))
        packages = parser.get('sphinx-apidocs', 'packages').strip().split('\n')
        if len(packages) != 1:
            raise ValueError('Sphinx configuration auto-generation only supports 1 package')

        # create the docs directory if it doesn't exist yet
        if not os.path.isdir(os.path.join(dirname, self.proj_docs_dir)):
            os.mkdir(os.path.join(dirname, self.proj_docs_dir))

        for package in packages:
            # template files bundled under karr_lab_build_utils/templates/docs
            filenames = [
                'conf.py',
                'requirements.txt',
                'conda.environment.yml',
                'spelling_wordlist.txt',
                'index.rst',
                'overview.rst',
                'installation.rst',
                'about.rst',
                'references.rst',
                'references.bib',
            ]
            # Jinja rendering context shared by all templates
            context = {
                "package": package,
                'version': self.INITIAL_PACKAGE_VERSION,
                'year': datetime.now().year,
                # used to underline the package name in reStructuredText titles
                'package_underline': '=' * len(package),
            }
            for filename in filenames:
                template_filename = pkg_resources.resource_filename('karr_lab_build_utils', os.path.join('templates', 'docs', filename))
                with open(template_filename, 'r') as file:
                    template = Template(file.read())
                # render the template and write it into the docs directory
                template.stream(**context).dump(os.path.join(dirname, self.proj_docs_dir, filename))
def make_documentation(self, spell_check=False):
""" Make HTML documentation using Sphinx for one or more packages. Save documentation to `proj_docs_build_html_dir`
Args:
spell_check (:obj:`bool`): if :obj:`True`, run spell checking
Raises:
:obj:`BuildHelperError`: If project name not set
"""
# create `proj_docs_static_dir`, if necessary
if not os.path.isdir(self.proj_docs_static_dir):
os.mkdir(self.proj_docs_static_dir)
# build HTML documentation
self.run_method_and_capture_stderr(sphinx_build, [self.proj_docs_dir, self.proj_docs_build_html_dir])
# run spell check
if spell_check:
self.run_method_and_capture_stderr(sphinx_build, [
'-b', 'spelling',
'-d', self.proj_docs_build_doctrees_dir,
self.proj_docs_dir,
self.proj_docs_build_spelling_dir,
])
def compile_downstream_dependencies(self, dirname='.', packages_parent_dir='..', config_filename=None):
""" Compile the downstream dependencies of a package and save them to :obj:`config_filename`
Args:
dirname (:obj:`str`, optional): path to package
packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages
config_filename (:obj:`str`, optional): path to save configuration with list of downstream dependencies
in YAML format
Returns:
:obj:`list` of :obj:`str`: downstream dependencies
Raises:
:obj:`BuildHelperError`: if a package has more than one module
"""
import pkg_utils
# pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
# :todo: figure out how to fix this
packages_parent_dir = os.path.abspath(packages_parent_dir)
# get the name of the current package
parser = configparser.ConfigParser()
parser.read(os.path.join(dirname, 'setup.cfg'))
tmp = parser.get('coverage:run', 'source').strip().split('\n')
if len(tmp) != 1:
raise BuildHelperError('Package should have only one module')
this_pkg_name = tmp[0]
# collect the downstream dependencies by analyzing the requirements files of other packages
# :todo: support branches
downstream_dependencies = []
for dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
if os.path.isdir(dirname) and os.path.isfile(os.path.join(dirname, '.circleci/config.yml')):
other_pkg_name = dirname[len(packages_parent_dir) + 1:]
install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
dirname, include_extras=False, include_specs=False, include_markers=False)
if this_pkg_name in install_requires or this_pkg_name in extras_require['all']:
downstream_dependencies.append(other_pkg_name)
# save the downstream dependencies to a file
if config_filename:
config = {}
if os.path.isfile(config_filename):
with open(config_filename, 'r') as file:
config = yaml.load(file)
config['downstream_dependencies'] = downstream_dependencies
with open(config_filename, 'w') as file:
yaml.dump(config, file, default_flow_style=False)
# return the downstream dependencies
return downstream_dependencies
def are_package_dependencies_acyclic(self, packages_parent_dir='..'):
""" Check if the package dependencies are acyclic so they are supported by CircleCI
Args:
packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages
Returns:
:obj:`bool`: :obj:`True` if the package dependencies are acyclic
"""
graph = networkx.DiGraph()
for dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
if os.path.isdir(dirname) and os.path.isfile(os.path.join(dirname, '.circleci/config.yml')):
# get package name
pkg = dirname[len(packages_parent_dir) + 1:]
# create node for package
graph.add_node(pkg)
# create edges for dependencies
config_filename = os.path.join(dirname, '.karr_lab_build_utils.yml')
if os.path.isfile(config_filename):
with open(config_filename, 'r') as file:
config = yaml.load(file)
deps = config.get('downstream_dependencies', [])
for other_pkg in deps:
graph.add_edge(pkg, other_pkg)
try:
networkx.algorithms.cycles.find_cycle(graph)
return False
except networkx.NetworkXNoCycle:
return True
def visualize_package_dependencies(self, packages_parent_dir='..', out_filename='../package_dependencies.pdf'):
""" Visualize downstream package dependencies as a graph
Args:
packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages
out_filename (:obj:`str`, optional): path to save visualization
"""
basename, format = os.path.splitext(out_filename)
dot = graphviz.Digraph(format=format[1:])
for dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
if os.path.isdir(dirname) and os.path.isfile(os.path.join(dirname, '.circleci/config.yml')):
# get package name
pkg = dirname[len(packages_parent_dir) + 1:]
# create node for package
dot.node(pkg, pkg)
# create edges for dependencies
config_filename = os.path.join(dirname, '.karr_lab_build_utils.yml')
if os.path.isfile(config_filename):
with open(config_filename, 'r') as file:
config = yaml.load(file)
deps = config.get('downstream_dependencies', [])
for other_pkg in deps:
dot.edge(pkg, other_pkg)
dot.render(filename=basename, cleanup=True)
def trigger_tests_of_downstream_dependencies(self, config_filename='.karr_lab_build_utils.yml',
dry_run=False):
""" Trigger CircleCI to test downstream dependencies listed in :obj:`config_filename`
Args:
config_filename (:obj:`str`, optional): path to YAML configuration file which contains a list of
downstream dependencies
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
Returns:
:obj:`list` of :obj:`str`: names of triggered packages
:todo: support branches
"""
# stop if this is a dry run
if dry_run:
return []
# stop if the tests didn't pass
test_results = self.get_test_results()
if test_results.get_num_errors() > 0 or test_results.get_num_failures() > 0:
return []
# read downstream dependencies
with open(config_filename, 'r') as file:
config = yaml.load(file)
packages = config.get('downstream_dependencies', [])
# stop if there are no downstream dependencies
if not packages:
return []
upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
upstream_build_num = os.getenv('UPSTREAM_BUILD_NUM', '0')
if not upstream_repo_name:
upstream_repo_name = self.repo_name
upstream_build_num = str(self.build_num)
result = self.run_circleci_api('/' + str(upstream_build_num), repo_name=upstream_repo_name)
upstream_build_time = dateutil.parser.parse(result['all_commit_details'][0]['committer_date'])
triggered_packages = []
for package in packages:
branch = 'master'
# get summary of recent builds
builds = self.run_circleci_api('', repo_name=package)
# don't trigger build if a build has already been triggered from the same upstream build
# this prevents building the same project multiple times, including infinite looping
already_queued = False
for build in builds:
# don'trigger a build if this is the same package which triggered the cascade
if package == upstream_repo_name and \
str(build['build_num']) == upstream_build_num and \
build['build_num'] != self.build_num:
already_queued = True
break
# don't trigger a build if the package already been triggered from the same upstream commit
build_parameters = build['build_parameters']
if build_parameters and 'UPSTREAM_REPONAME' in build_parameters and \
build_parameters['UPSTREAM_REPONAME'] == upstream_repo_name and \
build_parameters['UPSTREAM_BUILD_NUM'] == upstream_build_num:
already_queued = True
break
# don't trigger a build if the package has already been more recently tested than the commit time
build_start_time = build['start_time']
if build_start_time is None or dateutil.parser.parse(build['start_time']) > upstream_build_time:
already_queued = True
break
if already_queued:
continue
# trigger build
self.run_circleci_api('/tree/{}'.format(branch), method='post', repo_name=package, data={
'build_parameters': {
'UPSTREAM_REPONAME': upstream_repo_name,
'UPSTREAM_BUILD_NUM': upstream_build_num,
}
})
triggered_packages.append(package)
return triggered_packages
def get_version(self):
""" Get the version of this package
Returns:
:obj:`str`: the version
"""
return '{0:s} (Python {1[0]:d}.{1[1]:d}.{1[2]:d})'.format(karr_lab_build_utils.__version__, sys.version_info)
@staticmethod
def get_python_version():
""" Get the Python version
Returns:
:obj:`str`: the Python version
"""
return '{0[0]:d}.{0[1]:d}.{0[2]:d}'.format(sys.version_info)
def run_method_and_capture_stdout(self, func, *args, **kwargs):
""" Run a method that returns a numerical error value, and exit if the return value is non-zero
Args:
func (:obj:`function`): function to run
*args (:obj:`list`): arguments to :obj:`func`
**kwargs (:obj:`dict`): keyword arguments to obj:`func`
Returns:
:obj:`str`: stdout
"""
with abduct.captured(abduct.out(), abduct.err()) as (stdout, stderr):
result = func(*args, **kwargs)
out_msg = stdout.getvalue()
err_msg = stderr.getvalue()
if result != 0:
sys.stderr.write(err_msg)
sys.stderr.flush()
sys.exit(1)
return out_msg
def run_method_and_capture_stderr(self, func, *args, **kwargs):
""" Run a method that returns a numerical error value, and exit if the return value is non-zero
Args:
func (:obj:`function`): function to run
*args (:obj:`list`): arguments to :obj:`func`
**kwargs (:obj:`dict`): keyword arguments to obj:`func`
"""
with abduct.captured(abduct.err()) as stderr:
result = func(*args, **kwargs)
err_msg = stderr.getvalue()
if result != 0:
sys.stderr.write(err_msg)
sys.stderr.flush()
sys.exit(1)
def analyze_package(self, package_name, messages=None):
""" Perform static analyses of a package using Pylint.
The default options will identify the following issues:
* Unused imported modules, classes, functions, and variables
* Reimported modules, classes, functions, and variables
* Wild card imports outside of __init__.py
* Duplicate arguments and keys
* Missing requirements
Args:
package_name (:obj:`str`): name of the package to analyze
messages (:obj:`list` of :obj:`str`): list of Pylint checks to perform
"""
if messages is None:
messages = [
# variables
'W0611', # unused-import
'W0614', # unused-wildcard-import
'W0613', # unused-argument
'W0612', # unused-variable
# imports
'W0404', # reimported
'W0401', # wildcard-import
# similarities
'E0108', # duplicate-argument-name
'W0109', # duplicate-key
]
msg_opts = [
'--disable=all',
'--enable=' + ','.join(messages),
]
report_opts = [
'--reports=n',
'--score=n',
]
# TODO: debug, does not work:
epylint.lint(package_name, msg_opts + report_opts)
    def find_missing_requirements(self, package_name, dirname='.', ignore_files=None):
        """ Finding missing requirements

        Args:
            package_name (:obj:`str`): name of the package to analyze
            dirname (:obj:`str`, optional): path to package
            ignore_files (:obj:`list`, optional): files to ignore

        Returns:
            :obj:`list`: list of missing dependencies and their occurrences in the code
        """
        import pkg_utils
        # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
        # :todo: figure out how to fix this

        # build the option object expected by pip_check_reqs
        options = attrdict.AttrDict()
        options.paths = [package_name]
        options.ignore_files = pip_check_reqs.common.ignorer(ignore_files or [])
        options.ignore_mods = pip_check_reqs.common.ignorer([])
        options.verbose = False
        options.debug = False
        options.version = False

        # quiet pip_check_reqs' logging, then scan for imports with no matching requirement
        pip_check_reqs.find_missing_reqs.log.setLevel(logging.ERROR)
        missing = pip_check_reqs.find_missing_reqs.find_missing_reqs(options)

        # filter out dependencies that are declared as extras
        # (except the 'all', 'tests', and 'docs' groups)
        install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
            dirname, include_extras=False, include_specs=False, include_markers=False)
        all_deps = install_requires
        for option, opt_deps in extras_require.items():
            if option not in ['all', 'tests', 'docs']:
                all_deps += opt_deps
        # compare using underscore-normalized names
        missing = list(filter(lambda m: m[0].replace('-', '_') not in all_deps, missing))

        # sort missing case-insensitively by package name
        missing.sort(key=natsort.natsort_keygen(key=lambda m: m[0], alg=natsort.IGNORECASE))

        return missing
    def find_unused_requirements(self, package_name, dirname='.', ignore_files=None):
        """ Finding unused_requirements

        Args:
            package_name (:obj:`str`): name of the package to analyze
            dirname (:obj:`str`, optional): path to package
            ignore_files (:obj:`list`, optional): files to ignore

        Returns:
            :obj:`list`: name of the unused dependencies
        """
        import pkg_utils
        # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
        # :todo: figure out how to fix this

        # build the option object expected by pip_check_reqs
        options = attrdict.AttrDict()
        options.paths = [package_name]
        options.ignore_files = pip_check_reqs.common.ignorer(ignore_files or [])
        options.ignore_mods = pip_check_reqs.common.ignorer([])
        options.ignore_reqs = pip_check_reqs.common.ignorer([])
        options.verbose = False
        options.debug = False
        options.version = False
        pip_check_reqs.find_extra_reqs.log.setLevel(logging.ERROR)

        # get all requirements: install_requires plus all extras
        # except the 'all', 'tests', and 'docs' groups
        install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
            dirname, include_extras=False, include_specs=False, include_markers=False)
        all_deps = set(install_requires)
        for option, opt_deps in extras_require.items():
            if option not in ['all', 'tests', 'docs']:
                all_deps = all_deps | set(opt_deps)
        # pip uses hyphenated names
        all_deps = [dep.replace('_', '-') for dep in all_deps]

        # find unused requirements; patch find_required_modules so the
        # declared dependencies above are treated as the requirements list
        with mock.patch('pip_check_reqs.common.find_required_modules', return_value=all_deps):
            unuseds = pip_check_reqs.find_extra_reqs.find_extra_reqs(options)

        # correct for editablly-installed packages: drop any requirement whose
        # top-level module is actually imported
        useds = pip_check_reqs.common.find_imported_modules(options).keys()
        useds = [used.partition('.')[0].replace('_', '-') for used in useds]
        unuseds = list(set(unuseds).difference(set(useds)))

        # return canonical (underscore) names
        unuseds = [unused.replace('-', '_') for unused in unuseds]

        # sort unuseds case-insensitively
        unuseds.sort(key=natsort.natsort_keygen(alg=natsort.IGNORECASE))

        return unuseds
def upload_package_to_pypi(self, dirname='.', repository='pypi', pypi_config_filename='~/.pypirc'):
""" Upload a package to PyPI
Args:
dirname (:obj:`str`, optional): path to package to upload
repository (:obj:`str`, optional): repository to upload code to (section in .pypirc or a full URL)
pypi_config_filename (:obj:`str`, optional): path to .pypirc
"""
# cleanup
if os.path.isdir(os.path.join(dirname, 'build')):
shutil.rmtree(os.path.join(dirname, 'build'))
if os.path.isdir(os.path.join(dirname, 'dist')):
shutil.rmtree(os.path.join(dirname, 'dist'))
# package code
subprocess.check_call([sys.executable, os.path.join(os.path.abspath(dirname), 'setup.py'), 'sdist', 'bdist_wheel'],
cwd=dirname)
# upload
options = []
if repository:
options += ['--repository', repository]
if pypi_config_filename:
options += ['--config-file', os.path.abspath(os.path.expanduser(pypi_config_filename))]
uploads = []
for path in glob.glob(os.path.join(dirname, 'dist', '*')):
uploads.append(path)
twine.commands.upload.main(options + uploads)
# cleanup
shutil.rmtree(os.path.join(dirname, 'build'))
shutil.rmtree(os.path.join(dirname, 'dist'))
def run_circleci_api(self, command, method='get', repo_type=None, repo_owner=None, repo_name=None,
data=None, circleci_api_token=None):
""" Run the CircleCI API
Args:
command (:obj:`str`): API command
method (:obj:`str`): type of HTTP request (get, post, delete)
repo_type (:obj:`str`, optional): repository type (e.g., github)
repo_owner (:obj:`str`, optional): repository owner
repo_name (:obj:`str`, optional): repository name
data (:obj:`str`, optional): data
circleci_api_token (:obj:`str`, optional): CircleCI API token
Returns:
:obj:`dict`: CircleCI result
Raises:
:obj:`requests.exceptions.HTTPError`: if the HTTP request to CircleCI does not succeed
"""
if not repo_type:
repo_type = self.repo_type
if not repo_owner:
repo_owner = self.repo_owner
if not repo_name:
repo_name = self.repo_name
if not circleci_api_token:
circleci_api_token = self.circleci_api_token
url = '{}/project/{}/{}/{}{}?circle-token={}'.format(
self.CIRCLE_API_ENDPOINT, repo_type, repo_owner, repo_name, command, circleci_api_token)
request_method = getattr(requests, method)
response = request_method(url, json=data)
response.raise_for_status()
return response.json()
def get_build_config(self):
""" Get build configuration
Returns:
:obj:`dict`: build configuration
"""
with open('.karr_lab_build_utils.yml', 'r') as file:
return yaml.load(file)
def download_passwords(self, pull=False):
""" Download passwords repository
Args:
pull (:obj:`bool`, optional): if :obj:`True`, pull the passwords
"""
if six.PY3:
devnull = subprocess.DEVNULL
else:
devnull = open(os.devnull, 'wb')
if os.path.isdir(self.passwords_repo_path):
if pull:
subprocess.check_call(['git', 'pull'], cwd=self.passwords_repo_path,
stdout=devnull, stderr=devnull)
else:
url = self.passwords_repo_url.replace('://', '://{}:{}@'.format(
self.passwords_repo_username, self.passwords_repo_password))
subprocess.check_call(['git', 'clone', url, self.passwords_repo_path],
stdout=devnull, stderr=devnull)
def get_passwords(self, pull=False):
""" Read key/value pairs from the passwords repository
Args:
pull (:obj:`bool`, optional): if :obj:`True`, pull the passwords
Returns:
:obj:`dict`: key/value pairs
"""
self.download_passwords(pull=pull)
with open(os.path.join(self.passwords_repo_path, 'passwords.yml'), 'r') as file:
return yaml.load(file)
def set_env_vars_from_passwords(self, pull=False):
""" Create OS environment variables based on the key/value pairs in the passwords repository
Args:
pull (:obj:`bool`, optional): if :obj:`True`, pull the passwords
"""
passwords = self.get_passwords(pull=pull)
for name, val in passwords.items():
os.environ[name] = val
class TestResults(object):
    """ Unit test results

    Attributes:
        cases (:obj:`list` of :obj:`TestCase`): test case results
    """

    def __init__(self):
        self.cases = []

    @property
    def num_tests(self):
        return self.get_num_tests()

    @property
    def num_passed(self):
        return self.get_num_passed()

    @property
    def num_skipped(self):
        return self.get_num_skipped()

    @property
    def num_errors(self):
        return self.get_num_errors()

    @property
    def num_failures(self):
        return self.get_num_failures()

    def get_num_tests(self):
        """ Get the number of tests

        Returns:
            :obj:`int`: number of tests
        """
        return len(self.cases)

    def get_num_passed(self):
        """ Get the number of tests that passed

        Returns:
            :obj:`int`: number of tests that passed
        """
        return sum(1 for case in self.cases if case.type == TestCaseResultType.passed)

    def get_num_skipped(self):
        """ Get the number of skipped tests

        Returns:
            :obj:`int`: number of skipped tests
        """
        return sum(1 for case in self.cases if case.type == TestCaseResultType.skipped)

    def get_num_errors(self):
        """ Get the number of tests with errors

        Returns:
            :obj:`int`: number of tests with errors
        """
        return sum(1 for case in self.cases if case.type == TestCaseResultType.error)

    def get_num_failures(self):
        """ Get the number of tests with failures

        Returns:
            :obj:`int`: number of tests with failures
        """
        return sum(1 for case in self.cases if case.type == TestCaseResultType.failure)
class TestCaseResult(object):
    """ The result of a test case

    Attributes:
        classname (obj:`str`): name of the class of the test case
        name (obj:`str`): name of the test case
        filename (obj:`str`): file where the test was defined
        line (obj:`int`): line where the test was defined
        python_version (obj:`str`): python version which ran the test
        type (obj:`TestCaseResultType`): type of the result (pass, skip, error, failure)
        subtype (obj:`str`): detailed type of the result
        message (obj:`str`): message from the result
        details (obj:`str`): detailed message from the result
        time (obj:`float`): duration of the time in seconds
        stdout (obj:`str`): standard output
        stderr (obj:`str`): standard error
    """

    # every attribute starts as None; the report parser fills them in
    _ATTRS = (
        'classname', 'name', 'filename', 'line', 'python_version',
        'time', 'stdout', 'stderr', 'type', 'subtype', 'message', 'details',
    )

    def __init__(self):
        for attr in self._ATTRS:
            setattr(self, attr, None)
class TestCaseResultType(enum.Enum):
    """ Type of test case result """
    passed = 0    # test ran and passed
    skipped = 1   # test was skipped
    error = 2     # test raised an unexpected error
    failure = 3   # test ran but an assertion failed
class BuildHelperError(Exception):
    """ Represents :obj:`BuildHelper` errors

    Base exception raised by build-helper operations (e.g. report upload
    failures, invalid package configuration).
    """
    pass
debugging
""" Karr Lab build utilities
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2016-08-02
:Copyright: 2016, Karr Lab
:License: MIT
"""
from datetime import datetime
from jinja2 import Template
from pylint import epylint
from sphinx.cmd.build import main as sphinx_build
from sphinx.apidoc import main as sphinx_apidoc
from mock import patch
from six.moves import configparser
from xml.dom import minidom
import abduct
import attrdict
import capturer
import click
import coverage
import coveralls
import dateutil.parser
import email
import email.header
import email.message
import email.utils
import enum
import fnmatch
import ftputil
import github
import glob
import graphviz
# import instrumental.api
import json
import karr_lab_build_utils
import logging
import mock
import natsort
import networkx
import nose
import os
import pip
import pip_check_reqs
import pip_check_reqs.find_extra_reqs
import pip_check_reqs.find_missing_reqs
# import pkg_utils
# pkg_utils is not imported globally so that we can use karr_lab_build_utils to properly calculate its coverage
# :todo: figure out how to fix this
import pkg_resources
import pytest
import re
import requests
import shutil
import six
import smtplib
import subprocess
import sys
import tempfile
import time
import twine.commands.upload
import yaml
import warnings
import whichcraft
class CoverageType(enum.Enum):
    """ Types of coverage """
    statement = 0
    branch = 1
    multiple_condition = 2
    # NOTE(review): `decision` reuses value 2, so enum.Enum makes it an *alias* of
    # `multiple_condition` rather than a distinct member — confirm this is intentional
    decision = 2
class Environment(enum.Enum):
    """ Environments to run tests """
    local = 0     # run tests directly on the local machine
    docker = 1    # run tests inside a Docker container
    circleci = 2  # run tests on CircleCI
class BuildHelper(object):
""" Utility class to help build projects:
* Run tests
* Archive reports to test history server, Coveralls, and Code Climate
Attributes:
test_runner (:obj:`str`): name of test runner {pytest, nose}
repo_name (:obj:`str`): repository name
repo_owner (:obj:`str`): name of the repository owner
repo_branch (:obj:`str`): repository branch name
repo_revision (:obj:`str`): sha of repository revision
build_num (:obj:`int`): CircleCI build number
proj_tests_dir (:obj:`str`): local directory with test code
proj_tests_xml_dir (:obj:`str`): local directory to store latest XML test report
proj_tests_xml_latest_filename (:obj:`str`): file name to store latest XML test report
proj_docs_dir (:obj:`str`): local directory with Sphinx configuration
proj_docs_static_dir (:obj:`str`): local directory of static documentation files
proj_docs_source_dir (:obj:`str`): local directory of source documentation files created by sphinx-apidoc
proj_docs_build_doctrees_dir (:obj:`str`): local directory where doc trees should be saved
proj_docs_build_html_dir (:obj:`str`): local directory where generated HTML documentation should be saved
proj_docs_build_spelling_dir (:obj:`str`): local directory where spell check results should be saved
build_image (:obj:`str`): Docker image to use to run tests
passwords_repo_url (:obj:`str`): URL to Git repository with passwords
passwords_repo_username (:obj:`str`): username for Git repository with passwords
passwords_repo_password (:obj:`str`): password for Git repository with passwords
passwords_repo_path (:obj:`str`): path to clone Git repository with passwords
coveralls_token (:obj:`str`): Coveralls token
code_climate_token (:obj:`str`): Code Climate token
github_username (obj:`str`): GitHub username
github_password (obj:`str`): GitHub password
circleci_api_token (:obj:`str`): CircleCI API token
test_server_token (:obj:`str`): test history report server token
email_password (:obj:`obj:`str`): password for karr.lab.daemon@gmail.com
INITIAL_PACKAGE_VERSION (:obj:`str`): initial package version
DEFAULT_BUILD_IMAGE_VERSION (:obj:`str`): default build image version
DEFAULT_TEST_RUNNER (:obj:`str`): default test runner {pytest, nose}
DEFAULT_PROJ_TESTS_DIR (:obj:`str`): default local directory with test code
DEFAULT_PROJ_TESTS_XML_DIR (:obj:`str`): default local directory where the test reports generated should be saved
DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME (:obj:`str`): default file name to store latest XML test report
DEFAULT_PROJ_DOCS_DIR (:obj:`str`): default local directory with Sphinx configuration
DEFAULT_PROJ_DOCS_STATIC_DIR (:obj:`str`): default local directory of static documentation files
DEFAULT_PROJ_DOCS_SOURCE_DIR (:obj:`str`): default local directory of source documentation files created by sphinx-apidoc
DEFAULT_PROJ_DOCS_SPELLING_DIR (:obj:`str`): default local directory where spell check results should be saved
DEFAULT_PROJ_DOCS_BUILD_HTML_DIR (:obj:`str`): default local directory where generated HTML documentation should be saved
DEFAULT_BUILD_IMAGE (:obj:`str`): default Docker image to use to run tests
GITHUB_API_ENDPOINT (:obj:`str`): GitHub API endpoint
CIRCLE_API_ENDPOINT (:obj:`str`): CircleCI API endpoint
COVERALLS_ENABLED (:obj:`bool`): if :obj:`True`, upload coverage reports to Coveralls
CODE_CLIMATE_ENABLED (:obj:`bool`): if :obj:`True`, upload coverage reports to Code Climate
"""
INITIAL_PACKAGE_VERSION = '0.0.1'
DEFAULT_BUILD_IMAGE_VERSION = '0.0.22'
DEFAULT_TEST_RUNNER = 'pytest'
DEFAULT_PROJ_TESTS_DIR = 'tests'
DEFAULT_PROJ_TESTS_XML_DIR = 'tests/reports'
DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME = 'latest'
DEFAULT_PROJ_DOCS_DIR = 'docs'
DEFAULT_PROJ_DOCS_STATIC_DIR = 'docs/_static'
DEFAULT_PROJ_DOCS_SOURCE_DIR = 'docs/source'
DEFAULT_PROJ_DOCS_BUILD_DOCTREES_DIR = 'docs/_build/doctrees'
DEFAULT_PROJ_DOCS_BUILD_HTML_DIR = 'docs/_build/html'
DEFAULT_PROJ_DOCS_BUILD_SPELLING_DIR = 'docs/_build/spelling'
DEFAULT_BUILD_IMAGE = 'karrlab/build:latest'
GITHUB_API_ENDPOINT = 'https://api.github.com'
CIRCLE_API_ENDPOINT = 'https://circleci.com/api/v1.1'
COVERALLS_ENABLED = True
CODE_CLIMATE_ENABLED = True
    def __init__(self):
        """ Construct build helper

        Reads configuration from CircleCI environment variables, applies class-level
        defaults, and loads credentials from the passwords repository.
        Note: this constructor performs network I/O (it clones/pulls the
        passwords repository via :obj:`download_passwords`).
        """
        # get settings from environment variables
        self.test_runner = os.getenv('TEST_RUNNER', self.DEFAULT_TEST_RUNNER)
        if self.test_runner not in ['pytest', 'nose']:
            raise BuildHelperError('Unsupported test runner {}'.format(self.test_runner))

        # repository identity, taken from CircleCI's standard environment variables
        self.repo_type = 'github'
        self.repo_name = os.getenv('CIRCLE_PROJECT_REPONAME')
        self.repo_owner = os.getenv('CIRCLE_PROJECT_USERNAME') or 'KarrLab'
        self.repo_branch = os.getenv('CIRCLE_BRANCH')
        self.repo_revision = os.getenv('CIRCLE_SHA1')

        # default to build 0 when CIRCLE_BUILD_NUM is unset or not numeric
        try:
            self.build_num = int(float(os.getenv('CIRCLE_BUILD_NUM')))
        except (TypeError, ValueError, ):
            self.build_num = 0

        # directory layout for tests and documentation
        self.proj_tests_dir = self.DEFAULT_PROJ_TESTS_DIR
        self.proj_tests_xml_dir = self.DEFAULT_PROJ_TESTS_XML_DIR
        self.proj_tests_xml_latest_filename = self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME
        self.proj_docs_dir = self.DEFAULT_PROJ_DOCS_DIR
        self.proj_docs_static_dir = self.DEFAULT_PROJ_DOCS_STATIC_DIR
        self.proj_docs_source_dir = self.DEFAULT_PROJ_DOCS_SOURCE_DIR
        self.proj_docs_build_doctrees_dir = self.DEFAULT_PROJ_DOCS_BUILD_DOCTREES_DIR
        self.proj_docs_build_html_dir = self.DEFAULT_PROJ_DOCS_BUILD_HTML_DIR
        self.proj_docs_build_spelling_dir = self.DEFAULT_PROJ_DOCS_BUILD_SPELLING_DIR
        self.build_image = self.DEFAULT_BUILD_IMAGE

        # passwords repository configuration; cloned/pulled immediately below
        self.passwords_repo_url = 'https://github.com/KarrLab/karr_lab_passwords.git'
        self.passwords_repo_username = 'karr-lab-daemon-public'
        self.passwords_repo_password = os.getenv('PASSWORDS_REPO_PASSWORD')
        self.passwords_repo_path = os.path.expanduser(os.path.join('~', '.wc', 'karr_lab_passwords'))
        # network I/O: clone or update the passwords repository
        self.download_passwords(pull=True)

        # service credentials; each get_passwords() call re-reads passwords.yml
        self.coveralls_token = os.getenv('COVERALLS_REPO_TOKEN')
        self.code_climate_token = os.getenv('CODECLIMATE_REPO_TOKEN')
        self.github_username = 'karr-lab-daemon'
        self.github_password = self.get_passwords().get('GITHUB_PASSWORD', None)
        self.circleci_api_token = self.get_passwords().get('CIRCLECI_API_TOKEN', None)
        self.test_server_token = self.get_passwords().get('TEST_SERVER_TOKEN', None)
        self.email_password = self.get_passwords().get('EMAIL_PASSWORD', None)

        # code.karrlab.org server configuration
        self.code_server_hostname = 'code.karrlab.org'
        self.code_server_username = 'karrlab_code'
        self.code_server_password = self.get_passwords().get('CODE_SERVER_PASSWORD', None)
        self.code_server_directory = '/code.karrlab.org/repo'
#####################
# Create a package
#####################
def create_package(self):
    """ Create a package

    * Create a local Git repository
    * Create a remote GitHub repository
    * Add the repository to Code Climate
    * Add the repository to Coveralls
    * Add the repository to CircleCI project (by following the GitHub repository)
    * Add environment variable for tokens for code.karrlab.org, Coveralls, Code Climate, and CircleCI
    * Add environment variable for password for karr.lab.daemon@gmail.com
    * Generate API token for status badge
    * If the repository is not private, add the repository to Read the Docs
    * Add the package to code.karrlab.org

        * Add JSON-formatted file to ``ssh://code.karrlab.org:/home/karrlab_code/code.karrlab.org/repo/{{ name }}.json``

    * Add badges for Code Climate, Coveralls, CircleCI, and Read the Docs to README.md
    * Add package name to ``downstream_dependencies`` key in ``.karr_lab_build_utils.yml``
    """
    # print introductory message
    print('This program will guide you through creating a new package.')
    click.confirm('Continue?', default=True, abort=True)

    # gather basic information about the new package from the user
    name = click.prompt('Enter the name of the new package', type=str)
    description = click.prompt('Enter a brief description of the new package', type=str)
    keywords = click.prompt('Enter a comma-separated list of keywords for the new package', type=str, default=' ')
    keywords = [kw.strip() for kw in keywords.strip().split(',') if kw.strip()]
    dependencies = click.prompt(
        'Enter a comma-separated list of Karr Lab packages that the new package depends on', type=str, default=' ')
    dependencies = [dep.strip() for dep in dependencies.strip().split(',') if dep.strip()]
    private = click.confirm('Should the repository be private?', default=True)
    dirname = click.prompt('Enter the directory for the new package', type=str, default=os.path.join('.', name))
    build_image_version = click.prompt('Enter the build image version to test the package',
                                       type=str, default=self.DEFAULT_BUILD_IMAGE_VERSION)
    github_username = click.prompt('Enter your GitHub username', type=str, default=self.github_username)
    github_password = click.prompt('Enter your GitHub password', type=str, hide_input=True,
                                   default='*' * len(self.github_password or ''))
    # accepting the masked default means the user kept the stored password
    if github_password == '*' * len(self.github_password or ''):
        github_password = self.github_password

    # create local and GitHub Git repositories
    print('Creating {} remote Git repository "{}/{}" on GitHub and cloning this repository to "{}"'.format(
        'private' if private else 'public', self.repo_owner, name, dirname))
    self.create_repository(name, description=description, private=private, dirname=dirname,
                           github_username=github_username, github_password=github_password)

    # Code Climate
    # :todo: programmatically add repo to Code Climate and generate tokens
    print('Visit "https://codeclimate.com/dashboard" and click on the "{}" organization.'.format(
        self.repo_owner if private else 'Open source'))
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Sync now" button')
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Add a repository" button')
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Add repo" button for the "{}" repository'.format(name))
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "settings" link')
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Test coverage" menu item')
    click.confirm('Continue?', default=True, abort=True)
    code_climate_repo_token = click.prompt('Enter the "test reporter id"')
    print('Click the "Badges" menu item')
    click.confirm('Continue?', default=True, abort=True)
    code_climate_repo_id = click.prompt('Enter the repository ID (ID in the URL https://codeclimate.com/repos/<id>/maintainability)')
    code_climate_repo_badge_token = click.prompt(
        'Enter the badge token (token in the URL https://api.codeclimate.com/v1/badges/<token>/maintainability)')

    # Coveralls
    # :todo: programmatically add repo to Coveralls and generate tokens
    print('Visit "https://coveralls.io/repos/new"')
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "SYNC REPOS" button')
    click.confirm('Continue?', default=True, abort=True)
    print('Search for the "{}/{}" repository and click its "OFF" button'.format(self.repo_owner, name))
    click.confirm('Continue?', default=True, abort=True)
    print('Click the details button for the "{}/{}" repository'.format(self.repo_owner, name))
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Settings" menu item')
    click.confirm('Continue?', default=True, abort=True)
    coveralls_repo_token = click.prompt('Enter the "REPO TOKEN"')
    print('Click the "README BADGE" "EMBED" button')
    click.confirm('Continue?', default=True, abort=True)
    coveralls_repo_badge_token = click.prompt(
        'Enter the badge token (token in the URL https://coveralls.io/repos/github/KarrLab/test_a/badge.svg?t=<token>)')

    # CircleCI
    # :todo: programmatically create CircleCI build
    # :todo: programmatically create CircleCI token for status badges
    # a checkout SSH key is only needed when at least one dependency lives in a private repository
    has_private_dependencies = False
    g = github.Github(github_username, github_password)
    org = g.get_organization('KarrLab')
    for dependency in dependencies:
        try:
            repo = org.get_repo(dependency)
            has_private_dependencies = has_private_dependencies or repo.private
        except github.UnknownObjectException:
            # dependency has no repository in the KarrLab organization; ignore it here
            pass
    print('Visit "https://circleci.com/add-projects/gh/KarrLab"')
    click.confirm('Continue?', default=True, abort=True)
    print('Search for the "{}" repository and click its "Follow project" button'.format(name))
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Project settings" icon')
    click.confirm('Continue?', default=True, abort=True)
    if has_private_dependencies:
        print('Click the "Checkout SSH keys" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Authorize with GitHub" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Create and add ... user key" button')
        click.confirm('Continue?', default=True, abort=True)
    print('Click the "API permissions" menu item')
    click.confirm('Continue?', default=True, abort=True)
    print('Click the "Create Token" button')
    click.confirm('Continue?', default=True, abort=True)
    print('Select "All", enter a label, and click the "Add Token" button')
    click.confirm('Continue?', default=True, abort=True)
    circleci_repo_token = click.prompt('Enter the new token')

    # store the tokens needed by CI builds as CircleCI environment variables
    vars = {
        'COVERALLS_REPO_TOKEN': coveralls_repo_token,
        'CODECLIMATE_REPO_TOKEN': code_climate_repo_token,
        'PASSWORDS_REPO_PASSWORD': self.passwords_repo_password,
    }
    self.set_circleci_environment_variables(vars, repo_name=name)

    # Read the Docs (only public repositories can be added)
    if not private:
        # :todo: programmatically add repo to Read the Docs
        print('Visit "https://readthedocs.org/dashboard/import/?"')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "refresh" icon')
        click.confirm('Continue?', default=True, abort=True)
        print('Find the "{}" repository and click its "+" button'.format(name))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Next" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Admin" menu item')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Advanced settings" menu item')
        click.confirm('Continue?', default=True, abort=True)
        print('Set the "Requirements file" to "docs/requirements.txt"')
        click.confirm('Continue?', default=True, abort=True)
        print('Set the "Python configuration file" to "docs/conf.py"')
        click.confirm('Continue?', default=True, abort=True)
        print('Set the "Python interpreter" to "CPython 3.x"')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Maintainers" menu item')
        click.confirm('Continue?', default=True, abort=True)
        print('Add "jonrkarr" to the maintainers')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Notifications" menu item')
        click.confirm('Continue?', default=True, abort=True)
        print('Add your email address and click submit')
        click.confirm('Continue?', default=True, abort=True)
        print('Add "jonrkarr@gmail.com" and click submit')
        click.confirm('Continue?', default=True, abort=True)

    # add package to code.karrlab.org: render the package metadata template to a
    # temporary file and upload it to the code server over FTP
    with open(pkg_resources.resource_filename('karr_lab_build_utils',
                                              os.path.join('templates', 'code_server', '_package_.json')), 'r') as file:
        template = Template(file.read())
    fid, local_filename = tempfile.mkstemp()
    os.close(fid)
    context = {
        'name': name,
        'description': description,
        'private': private,
        'circleci_repo_token': circleci_repo_token,
        'coveralls_repo_token': coveralls_repo_token,
        'code_climate_repo_id': code_climate_repo_id,
    }
    template.stream(**context).dump(local_filename)
    with ftputil.FTPHost(self.code_server_hostname, self.code_server_username, self.code_server_password) as ftp:
        remote_filename = ftp.path.join(self.code_server_directory, '{}.json'.format(name))
        ftp.upload(local_filename, remote_filename)
    os.remove(local_filename)

    # set up the local repository file structure
    self.setup_repository(name, description=description, keywords=keywords, dependencies=dependencies,
                          private=private, build_image_version=build_image_version, dirname=dirname,
                          circleci_repo_token=circleci_repo_token, coveralls_repo_badge_token=coveralls_repo_badge_token,
                          code_climate_repo_id=code_climate_repo_id, code_climate_repo_badge_token=code_climate_repo_badge_token)

    # append the package to the ``downstream_dependencies`` list of each of its dependencies,
    # assuming the dependencies are checked out as siblings of the new package directory
    parent_dirname = os.path.dirname(dirname)
    for dependency in dependencies:
        config_filename = os.path.join(parent_dirname, dependency, '.karr_lab_build_utils.yml')
        if os.path.isfile(config_filename):
            with open(config_filename, 'r') as file:
                # safe_load: the config is plain YAML and must not execute arbitrary tags
                config = yaml.safe_load(file)
            if 'downstream_dependencies' not in config:
                config['downstream_dependencies'] = []
            config['downstream_dependencies'].append(name)
            with open(config_filename, 'w') as file:
                yaml.dump(config, file, default_flow_style=False)
        else:
            warnings.warn(('Unable to append package to downstream dependency {} because the '
                           'downstream dependency is not available').format(dependency),
                          UserWarning)
def create_repository(self, name, description='', private=True, dirname=None, github_username=None, github_password=None):
    """ Create a Git repository with the default directory structure

    Args:
        name (:obj`str`): package name
        description (:obj:`str`, optional): package description
        private (:obj:`bool`, optional): if :obj:`False`, make the GitHub repository public and set
            up documentation generation with Read the Docs
        dirname (:obj:`str`, optional): directory name for repository
        github_username (:obj:`str`, optional): GitHub username
        github_password (:obj:`str`, optional): GitHub password

    Raises:
        :obj:`BuildHelperError`: if the package name is not a lower-case identifier
    """
    # process arguments; validate the name before touching GitHub or the filesystem
    if not re.match('^[a-z][a-z0-9_]*$', name):
        raise BuildHelperError("'{}' not valid: Repository names should start with a letter and only include lower "
                               "case letters, numbers, and underscores".format(name))

    dirname = dirname or os.path.join('.', name)
    if github_username is None:
        github_username = self.github_username
    if github_password is None:
        github_password = self.github_password

    # create GitHub repository
    g = github.Github(github_username, github_password)
    org = g.get_organization('KarrLab')
    org.create_repo(name=name, description=description, private=private, auto_init=True)

    # clone the new repository; temporarily move ~/.gitconfig aside so it cannot
    # interfere with the clone, and restore it even if the clone fails
    gitconfig_filename = os.path.expanduser('~/.gitconfig')
    has_gitconfig = os.path.isfile(gitconfig_filename)
    if has_gitconfig:
        os.rename(gitconfig_filename, gitconfig_filename + '.ignore')
    try:
        import pygit2
        credentials = pygit2.UserPass(github_username, github_password)
        callbacks = pygit2.RemoteCallbacks(credentials=credentials)
        pygit2.clone_repository('https://github.com/KarrLab/{}.git'.format(name), dirname, callbacks=callbacks)
    finally:
        if has_gitconfig:
            os.rename(gitconfig_filename + '.ignore', gitconfig_filename)
def setup_repository(self, name, description='', keywords=None, dependencies=None, private=True, build_image_version=None,
                     dirname=None, circleci_repo_token=None, coveralls_repo_badge_token=None, code_climate_repo_id=None,
                     code_climate_repo_badge_token=None):
    """ Populate a Git repository with the default directory structure

    Renders the packaged file templates into ``dirname``, renames the
    ``_package_`` placeholder directory to the package name, and creates the
    documentation skeleton.

    Args:
        name (:obj`str`): package name
        description (:obj:`str`, optional): package description
        keywords (:obj:`list` of :obj:`str`, optional): list of keywords
        dependencies (:obj:`list` of :obj:`str`, optional): list of Karr Lab packages that the package depends on
        private (:obj:`bool`, optional): if :obj:`False`, make the GitHub repository public and set
            up documentation generation with Read the Docs
        build_image_version (:obj:`str`, optional): build image version
        dirname (:obj:`str`, optional): directory name
        circleci_repo_token (:obj:`str`, optional): CircleCI API token (e.g. for badges) for the repository
        coveralls_repo_badge_token (:obj:`str`, optional): Coveralls badge token for the repository
        code_climate_repo_id (:obj:`str`, optional): Code Climate ID for the repository
        code_climate_repo_badge_token (:obj:`str`, optional): Code Climate badge token for the repository

    Raises:
        :obj:`BuildHelperError`: if the package name is not a lower-case identifier
    """
    # validate the package name before creating anything on disk
    if not re.match('^[a-z][a-z0-9_]*$', name):
        raise BuildHelperError("'{}' not valid: Repository names should start with a letter and only include lower "
                               "case letters, numbers, and underscores".format(name))

    # fill in defaults
    keywords = keywords or []
    dependencies = dependencies or []
    build_image_version = build_image_version or self.DEFAULT_BUILD_IMAGE_VERSION
    dirname = dirname or os.path.join('.', name)

    # make sure the repository directory exists
    if not os.path.isdir(dirname):
        os.makedirs(dirname)

    # files to render from the packaged templates, relative to the repository root
    template_paths = (
        '.gitignore',
        'LICENSE',
        'MANIFEST.in',
        'README.md',
        'requirements.txt',
        'requirements.optional.txt',
        'setup.py',
        'setup.cfg',
        'tests/requirements.txt',
        'tests/test_core.py',
        'tests/test_main.py',
        '.circleci/config.yml',
        '.readthedocs.yml',
        '.karr_lab_build_utils.yml',
        '_package_/__init__.py',
        '_package_/VERSION',
        '_package_/core.py',
        '_package_/__main__.py',
    )

    timestamp = datetime.now()
    render_context = {
        'name': name,
        'description': description,
        'keywords': keywords,
        'version': self.INITIAL_PACKAGE_VERSION,
        'year': timestamp.year,
        'date': '{}-{}-{}'.format(timestamp.year, timestamp.month, timestamp.day),
        'dependencies': dependencies,
        'build_image_version': build_image_version,
        'private': private,
        'circleci_repo_token': circleci_repo_token,
        'coveralls_repo_badge_token': coveralls_repo_badge_token,
        'code_climate_repo_id': code_climate_repo_id,
        'code_climate_repo_badge_token': code_climate_repo_badge_token,
    }

    # render each template into the repository
    for rel_path in template_paths:
        sub_dir = os.path.dirname(rel_path)
        if sub_dir and not os.path.isdir(os.path.join(dirname, sub_dir)):
            os.makedirs(os.path.join(dirname, sub_dir))
        src_filename = pkg_resources.resource_filename(
            'karr_lab_build_utils', os.path.join('templates', rel_path))
        with open(src_filename, 'r') as template_file:
            Template(template_file.read()).stream(**render_context).dump(os.path.join(dirname, rel_path))

    # replace the placeholder package directory with the real package name
    os.rename(os.path.join(dirname, '_package_'), os.path.join(dirname, name))

    # create the documentation skeleton
    self.create_documentation_template(dirname)
###########################
# Register repo on CircleCI
###########################
def follow_circleci_build(self, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None,
                          has_private_dependencies=False):
    """ Follow the CircleCI build for a repository

    Args:
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token
        has_private_dependencies (:obj:`bool`, optional): if :obj:`True`, add a GitHub SSH key for the
            Karr Lab machine user to the build

    Raises:
        :obj:`ValueError`: if a CircleCI build wasn't followed and didn't already exist
    """
    # fall back to the instance's repository settings for any unspecified argument
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    # ask CircleCI to follow the repository
    result = self.run_circleci_api('/follow',
                                   method='post', repo_type=repo_type, repo_owner=repo_owner,
                                   repo_name=repo_name, circleci_api_token=circleci_api_token)
    if not result.get('following'):
        raise ValueError(
            'Unable to follow CircleCI build for repository {}/{}'.format(repo_owner, repo_name))

    # add checkout key
    if has_private_dependencies:
        # :todo: add a GitHub SSH key for the Karr Lab machine user to the build
        pass  # pragma: no cover
def get_circleci_environment_variables(self, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
    """ Get the CircleCI environment variables for a repository and their partial values

    Args:
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token

    Returns:
        :obj:`dict`: dictionary of environment variables and their partial values
    """
    # fall back to the instance's repository settings for any unspecified argument
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    env_vars = self.run_circleci_api('/envvar',
                                     repo_type=repo_type, repo_owner=repo_owner,
                                     repo_name=repo_name, circleci_api_token=circleci_api_token)

    # index the returned variable records by name
    result = {}
    for env_var in env_vars:
        result[env_var['name']] = env_var['value']
    return result
def set_circleci_environment_variables(self, vars, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
    """ Set CircleCI environment variables for a repository, overwriting any existing values

    Args:
        vars (:obj:`dict`): dictionary of environment variables to set
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token
    """
    # fall back to the instance's repository settings for any unspecified argument
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    # fetch the names of the variables that already exist
    existing_vars = self.get_circleci_environment_variables(
        repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
        circleci_api_token=circleci_api_token)

    for var_name, var_value in vars.items():
        # CircleCI has no update endpoint, so delete any existing variable before re-adding it
        if var_name in existing_vars:
            self.delete_circleci_environment_variable(var_name,
                                                      repo_type=repo_type, repo_owner=repo_owner,
                                                      repo_name=repo_name,
                                                      circleci_api_token=circleci_api_token)
        self.run_circleci_api('/envvar',
                              method='post', repo_type=repo_type, repo_owner=repo_owner,
                              repo_name=repo_name, circleci_api_token=circleci_api_token,
                              data={'name': var_name, 'value': var_value})
def delete_circleci_environment_variable(self, var, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
    """ Delete a CircleCI environment variable for a repository

    Args:
        var (:obj:`str`): name of variable to delete
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token
    """
    # fall back to the instance's repository settings for any unspecified argument
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    self.run_circleci_api('/envvar/{}'.format(var),
                          method='delete', repo_type=repo_type, repo_owner=repo_owner,
                          repo_name=repo_name, circleci_api_token=circleci_api_token)
def create_code_climate_github_webhook(self, repo_type=None, repo_owner=None, repo_name=None,
                                       github_username=None, github_password=None):
    """ Create a GitHub webhook that notifies Code Climate of pushes and pull requests

    Args:
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        github_username (:obj:`str`, optional): GitHub username
        github_password (:obj:`str`, optional): GitHub password

    Raises:
        :obj:`ValueError`: if webhook wasn't created and didn't already exist
    """
    # fall back to the instance's settings for any unspecified argument
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    github_username = self.github_username if github_username is None else github_username
    github_password = self.github_password if github_password is None else github_password

    endpoint = '{}/repos/{}/{}/hooks'.format(self.GITHUB_API_ENDPOINT, repo_owner, repo_name)
    payload = {
        'name': 'web',
        'config': {
            'url': 'https://codeclimate.com/webhooks',
            'content_type': 'form',
        },
        'events': [
            'push',
            'pull_request'
        ],
        'active': True,
    }
    response = requests.post(endpoint, auth=(github_username, github_password), json=payload)

    # GitHub answers 201 on successful creation; anything else is an error
    if response.status_code != 201:
        body = response.json()
        if 'errors' in body:
            msg = '\n '.join(err['message'] for err in body['errors'])
            raise ValueError('Unable to create webhook for {}/{}:\n {}'.format(repo_owner, repo_name, msg))
        raise ValueError('Unable to create webhook for {}/{}: {}'.format(repo_owner, repo_name, body['message']))
#########################
# Installing dependencies
#########################
def install_requirements(self):
    """ Install the package's required, optional, test, and documentation dependencies """
    # make sure setuptools and pip themselves are current before installing anything else
    for tool in ('setuptools', 'pip'):
        self.run_method_and_capture_stderr(pip.main, ['install', '-U', tool])

    # install the requirements for the package, its tests, and its docs
    self._install_requirements_helper('requirements.txt')
    self._install_requirements_helper('requirements.optional.txt', ignore_options=True)
    self._install_requirements_helper(os.path.join(self.proj_tests_dir, 'requirements.txt'))
    self._install_requirements_helper(os.path.join(self.proj_docs_dir, 'requirements.txt'))

    # upgrade the CircleCI command line tool when both docker and circleci are on the PATH
    if whichcraft.which('docker') and whichcraft.which('circleci'):
        subprocess.check_call(['circleci', 'update'])
def _install_requirements_helper(self, filename, ignore_options=False):
""" Install the packages in a requirements.txt file, including all optional dependencies
Args:
filename (:obj:`str`): path to requirements file
ignore_options (:obj:`bool`, optional): if :obj:`True`, ignore option headings
(e.g. for requirements.optional.txt)
"""
if not os.path.isfile(filename):
return
# create a temporary file that has the optional markings removed
if ignore_options:
sanitized_file, sanitized_filename = tempfile.mkstemp(suffix='.txt')
os.close(sanitized_file)
with open(filename, 'r') as file:
with open(sanitized_filename, 'w') as sanitized_file:
for line in file:
line = line.strip()
if line and line[0] == '[':
continue
sanitized_file.write(line + '\n')
filename = sanitized_filename
self.run_method_and_capture_stderr(pip.main, ['install', '-U', '--process-dependency-links', '-r', filename])
# cleanup temporary file
if ignore_options:
os.remove(sanitized_filename)
def upgrade_requirements(self):
    """ Upgrade requirements from the Karr Lab's GitHub organization

    Returns:
        :obj:`list` of :obj:`str`: upgraded requirements from the Karr Lab's GitHub organization
    """
    # list the installed, pinned PyPI packages ("name==version" lines, skipping editable installs)
    freeze_output = self.run_method_and_capture_stdout(pip.main, ['freeze'])
    pkgs = [line.partition('==')[0]
            for line in freeze_output.split('\n')
            if not line.startswith('-e') and '==' in line]

    # identify which of the installed packages are hosted in the Karr Lab GitHub organization
    show_output = self.run_method_and_capture_stdout(pip.main, ['show'] + pkgs)
    reqs = []
    for info in show_output.split('---\n'):
        if 'github.com/KarrLab/' not in info:
            continue
        pkg_name = info.partition('Name: ')[2].partition('\n')[0].replace('-', '_')
        pkg_url = info.partition('Home-page: ')[2].partition('\n')[0]
        reqs.append('git+{}.git#egg={}[all]'.format(pkg_url, pkg_name))

    # upgrade the GitHub-hosted packages
    self.run_method_and_capture_stderr(pip.main, ['install', '-U', '--process-dependency-links'] + reqs)

    # upgrade the CircleCI command line tool when both docker and circleci are on the PATH
    if whichcraft.which('docker') and whichcraft.which('circleci'):
        subprocess.check_call(['circleci', 'update'])

    return reqs
########################
# Running tests
########################
def run_tests(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
              coverage_type=CoverageType.branch, environment=Environment.local, exit_on_failure=True,
              ssh_key_filename='~/.ssh/id_rsa'):
    """ Run the unit tests located at `test_path` in the requested environment.

    Optionally generate a coverage report and/or save the results to an XML file.

    To configure coverage, place a .coveragerc configuration file in the root directory
    of the repository - the same directory that holds .coverage. Documentation of coverage
    configuration is in https://coverage.readthedocs.io/en/coverage-4.2/config.html

    Args:
        dirname (:obj:`str`, optional): path to package that should be tested
        test_path (:obj:`str`, optional): path to tests that should be run
        verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
        with_xunit (:obj:`bool`, optional): whether or not to save test results
        with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
        coverage_dirname (:obj:`str`, optional): directory to save coverage data
        coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
        environment (:obj:`str`, optional): environment to run tests (local, docker, or circleci-local-executor)
        exit_on_failure (:obj:`bool`, optional): whether or not to exit on test failure
        ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key; needed for Docker environment

    Raises:
        :obj:`BuildHelperError`: If the environment is not supported
    """
    # dispatch to the implementation for the requested environment
    if environment == Environment.local:
        self._run_tests_local(dirname=dirname, test_path=test_path, verbose=verbose, with_xunit=with_xunit,
                              with_coverage=with_coverage, coverage_dirname=coverage_dirname,
                              coverage_type=coverage_type, exit_on_failure=exit_on_failure)
        return

    if environment == Environment.docker:
        self._run_tests_docker(dirname=dirname, test_path=test_path, verbose=verbose, with_xunit=with_xunit,
                               with_coverage=with_coverage, coverage_dirname=coverage_dirname,
                               coverage_type=coverage_type, ssh_key_filename=ssh_key_filename)
        return

    if environment == Environment.circleci:
        self._run_tests_circleci(dirname=dirname, test_path=test_path, verbose=verbose, ssh_key_filename=ssh_key_filename)
        return

    raise BuildHelperError('Unsupported environment: {}'.format(environment))
def _run_tests_local(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
                     coverage_type=CoverageType.branch, exit_on_failure=True):
    """ Run unit tests located at `test_path` locally

    Optionally, generate a coverage report.
    Optionally, save the results to a file

    To configure coverage, place a .coveragerc configuration file in the root directory
    of the repository - the same directory that holds .coverage. Documentation of coverage
    configuration is in https://coverage.readthedocs.io/en/coverage-4.2/config.html

    Args:
        dirname (:obj:`str`, optional): path to package that should be tested
        test_path (:obj:`str`, optional): path to tests that should be run
        verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
        with_xunit (:obj:`bool`, optional): whether or not to save test results
        with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
        coverage_dirname (:obj:`str`, optional): directory to save coverage data
        coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
        exit_on_failure (:obj:`bool`, optional): whether or not to exit on test failure

    Raises:
        :obj:`BuildHelperError`: If the package directory not set
    """
    # expose the stored passwords to the tests via environment variables
    # (presumably so tests of services can authenticate — see set_env_vars_from_passwords)
    self.set_env_vars_from_passwords()

    py_v = self.get_python_version()
    # per-Python-version XML results file, e.g. <latest>.<py_v>.xml
    abs_xml_latest_filename = os.path.join(
        self.proj_tests_xml_dir, '{0}.{1}.xml'.format(self.proj_tests_xml_latest_filename, py_v))

    # start coverage measurement BEFORE the tests are imported/run; the data file is
    # suffixed with the Python version so runs under multiple versions don't collide
    if with_coverage:
        if coverage_type == CoverageType.statement:
            cov = coverage.Coverage(data_file=os.path.join(coverage_dirname, '.coverage'),
                                    data_suffix=py_v, config_file=True)
            cov.start()
        elif coverage_type == CoverageType.branch:
            cov = coverage.Coverage(data_file=os.path.join(coverage_dirname, '.coverage'),
                                    data_suffix=py_v, config_file=True, branch=True)
            cov.start()

        # elif coverage_type == CoverageType.multiple_condition:
        #    # :todo: support instrumental once its dependency astkit is updated for Python 3
        #    parser = configparser.ConfigParser()
        #    parser.read(os.path.join(dirname, 'setup.cfg'))
        #    targets = parser.get('coverage:run', 'source').strip().split('\n')
        #    targets = [target.strip() for target in targets]
        #
        #    opts = attrdict.AttrDict({
        #        'file': os.path.join(coverage_dirname, '.coverage.' + py_v),
        #        'report': False,
        #        'label': False,
        #        'summary': False,
        #        'statements': False,
        #        'xml': False,
        #        'html': False,
        #        'all': False,
        #        'targets': targets,
        #        'ignores': [],
        #        'report_conditions_with_literals': False,
        #        'instrument_assertions': True,
        #        'use_metadata_cache': False,
        #        'instrument_comparisons': True,
        #    })
        #    cov = instrumental.api.Coverage(opts, os.getcwd())
        #    cov.start(opts.targets, opts.ignores)
        else:
            raise BuildHelperError('Unsupported coverage type: {}'.format(coverage_type))

    # make sure the directory for the XML results exists
    if with_xunit and not os.path.isdir(self.proj_tests_xml_dir):
        os.makedirs(self.proj_tests_xml_dir)

    if self.test_runner == 'pytest':
        # translate nose-style test ids ("module:Class.method") to
        # pytest-style ids ("module::Class::method")
        test_path = test_path.replace(':', '::')
        test_path = re.sub('::(.+?)(\.)', r'::\1::', test_path)

        argv = [test_path]
        if verbose:
            argv.append('--capture=no')
        if with_xunit:
            argv.append('--junitxml=' + abs_xml_latest_filename)

        # pytest returns 0 on success, non-zero on failure/error
        result = pytest.main(argv)
    elif self.test_runner == 'nose':
        # translate pytest-style test ids back to nose-style
        # (only the first occurrences of "::" are rewritten)
        test_path = test_path.replace('::', ':', 1)
        test_path = test_path.replace('::', '.', 1)

        argv = ['nosetests', test_path]
        if verbose:
            argv.append('--nocapture')
        if with_xunit:
            argv += ['--with-xunit', '--xunit-file', abs_xml_latest_filename]

        # nose.run returns True on success; invert to get a process-style exit code
        result = int(not nose.run(argv=argv))
    else:
        raise BuildHelperError('Unsupported test runner {}'.format(self.test_runner))

    # stop coverage measurement and write the data file after the tests complete
    if with_coverage:
        cov.stop()  # pragma: no cover # this line can't be covered
        cov.save()

    if exit_on_failure and result != 0:
        sys.exit(1)
def _run_tests_docker(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
                      coverage_type=CoverageType.branch, ssh_key_filename='~/.ssh/id_rsa'):
    """ Run unit tests located at `test_path` using a Docker image:

    #. Create a container based on the build image (e.g, karrlab/build:latest)
    #. Copy your GitHub SSH key to the container
    #. Remove Python cache directories (``__pycache__``) from the package
    #. Copy the package to the container at ``/root/project``
    #. Install the Karr Lab build utilities into the container
    #. Install the requirements for the package in the container
    #. Run the tests inside the container using the same version of Python that called this method
    #. Delete the container

    Args:
        dirname (:obj:`str`, optional): path to package that should be tested
        test_path (:obj:`str`, optional): path to tests that should be run
        verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
        with_xunit (:obj:`bool`, optional): whether or not to save test results
        with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
        coverage_dirname (:obj:`str`, optional): directory to save coverage data
        coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
        ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key
    """
    ssh_key_filename = os.path.expanduser(ssh_key_filename)

    # pick a unique container name from the package name and the current timestamp
    basename = os.path.basename(os.path.abspath(dirname))
    now = datetime.now()
    container = 'build-{0}-{1.year}-{1.month}-{1.day}-{1.hour}-{1.minute}-{1.second}'.format(basename, now)

    # "major.minor" version of the calling interpreter; used below to select the matching
    # pip / karr_lab_build_utils executables inside the container
    py_v = '{}.{}'.format(sys.version_info[0], sys.version_info[1])

    # create container
    print('\n\n')
    print('=====================================')
    print('== Creating container')
    print('=====================================')
    self._run_docker_command(['run', '-it', '-d', '--name', container, self.build_image, 'bash'])

    # copy GitHub SSH key to container
    print('\n\n')
    print('=====================================')
    print('== Copying SSH key to container')
    print('=====================================')
    self._run_docker_command(['cp', ssh_key_filename, container + ':/root/.ssh/'])

    # delete __pycache__ directories so stale host bytecode is not copied into the container
    print('\n\n')
    print('=====================================')
    print('== Deleting __pycache__ directories')
    print('=====================================')
    for root, rel_dirnames, rel_filenames in os.walk(dirname):
        for rel_dirname in fnmatch.filter(rel_dirnames, '__pycache__'):
            shutil.rmtree(os.path.join(root, rel_dirname))

    # copy package to container
    print('\n\n')
    print('=====================================')
    print('== Copying package to container')
    print('=====================================')
    self._run_docker_command(['cp', os.path.abspath(dirname), container + ':/root/project'])

    # install pkg_utils
    print('\n\n')
    print('=====================================')
    print('== Install pkg_utils')
    print('=====================================')
    build_utils_uri = 'git+https://github.com/KarrLab/pkg_utils.git#egg=pkg_utils'
    self._run_docker_command(['exec', container, 'bash', '-c',
                              'pip{} install -U --process-dependency-links {}'.format(py_v, build_utils_uri)])

    # install Karr Lab build utils
    print('\n\n')
    print('=====================================')
    print('== Install karr_lab_build_utils')
    print('=====================================')
    build_utils_uri = 'git+https://github.com/KarrLab/karr_lab_build_utils.git#egg=karr_lab_build_utils'
    self._run_docker_command(['exec', container, 'bash', '-c',
                              'pip{} install -U --process-dependency-links {}'.format(py_v, build_utils_uri)])

    # install package
    print('\n\n')
    print('=====================================')
    print('== Install package')
    print('=====================================')
    self._run_docker_command(['exec', container, 'bash', '-c',
                              'cd /root/project && pip{} install --process-dependency-links -e .'.format(py_v)])

    # install dependencies
    print('\n\n')
    print('=====================================')
    print('== Install dependencies')
    print('=====================================')
    self._run_docker_command(['exec',
                              '--env', 'PASSWORDS_REPO_PASSWORD={}'.format(self.passwords_repo_password),
                              container,
                              'bash', '-c',
                              'cd /root/project && karr_lab_build_utils{} upgrade-requirements'.format(py_v)])

    # test package in container
    print('\n\n')
    print('=====================================')
    print('== Running tests')
    print('=====================================')
    options = []
    options += ['--test-path', test_path]
    if with_coverage:
        options += ['--with-coverage', '--coverage-type', coverage_type.name]
    if with_xunit:
        options.append('--with-xunit')
    if verbose:
        options.append('--verbose')
    # raise_error=False: don't abort on a test failure so that the artifacts below are
    # still collected and the container is still removed
    self._run_docker_command(['exec',
                              '--env', 'PASSWORDS_REPO_PASSWORD={}'.format(self.passwords_repo_password),
                              container,
                              'bash', '-c',
                              'cd /root/project && karr_lab_build_utils{} run-tests {}'.format(py_v, ' '.join(options))],
                             raise_error=False)

    # copy the coverage artifact out of the container; `ls` + a regex discover the exact
    # artifact name, which embeds the full x.y.z Python version
    if with_coverage:
        out = self._run_docker_command(['exec', container, 'bash', '-c', 'ls -la ' +
                                        os.path.join('/root', 'project', '.coverage.{}.*'.format(py_v))])
        match = re.search('/root/project/(\.coverage\.\d+\.\d+\.\d+)', out)
        self._run_docker_command(['cp', container + ':' + match.group(0), os.path.join(coverage_dirname, match.group(1))])

    # likewise for the XUnit XML report
    if with_xunit:
        out = self._run_docker_command(['exec', container, 'bash', '-c', 'ls -la ' +
                                        os.path.join('/root', 'project', self.DEFAULT_PROJ_TESTS_XML_DIR,
                                                     '{}.{}.*.xml'.format(self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME, py_v))])
        match = re.search('/root/project/{}/({}\.\d+\.\d+\.\d+.xml)'.format(self.DEFAULT_PROJ_TESTS_XML_DIR,
                                                                            self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME), out)
        self._run_docker_command(['cp', container + ':' + match.group(0), os.path.join(self.proj_tests_xml_dir, match.group(1))])

    # stop and remove container
    print('\n\n')
    print('=====================================')
    print('== Removing container')
    print('=====================================')
    self._run_docker_command(['rm', '-f', container])
def _run_docker_command(self, cmd, cwd=None, raise_error=True):
    """ Run a docker command

    Args:
        cmd (:obj:`list`): docker command to run
        cwd (:obj:`str`, optional): directory from which to run :obj:`cmd`
        raise_error (:obj:`bool`, optional): if true, raise errors

    Returns:
        :obj:`str`: standard output

    Raises:
        :obj:`BuildHelperError`: if the docker command fails
    """
    full_cmd = ['docker'] + cmd

    # capture the command's combined output while polling until it terminates
    with capturer.CaptureOutput() as capture:
        proc = subprocess.Popen(full_cmd, cwd=cwd)
        while proc.poll() is None:
            time.sleep(0.5)
        captured_text = capture.get_text()

    failed = proc.returncode != 0
    if failed and raise_error:
        raise BuildHelperError(captured_text)
    return captured_text
def _run_tests_circleci(self, dirname='.', test_path='tests', verbose=False, ssh_key_filename='~/.ssh/id_rsa'):
    """ Run unit tests located at `test_path` using the CircleCI local executor. This will run the same commands defined in
    ``.circleci/config.yml`` as the cloud version of CircleCI.

    Args:
        dirname (:obj:`str`, optional): path to package that should be tested
        test_path (:obj:`str`, optional): path to tests that should be run
        verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
        ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key

    Raises:
        :obj:`BuildHelperError`: if the tests fail
    """
    ssh_key_filename = os.path.expanduser(ssh_key_filename)
    # NOTE(review): assumes a karr_lab_build_utils checkout at ~/Documents/karr_lab_build_utils
    # to serve as the Docker build context -- confirm on new machines
    karr_lab_build_utils_dirname = os.path.expanduser('~/Documents/karr_lab_build_utils')

    # delete __pycache__ directories
    for root, rel_dirnames, rel_filenames in os.walk(dirname):
        for rel_dirname in fnmatch.filter(rel_dirnames, '__pycache__'):
            shutil.rmtree(os.path.join(root, rel_dirname))

    # point the CircleCI config at a variant of the build image that contains the SSH key;
    # the original config is backed up here and restored after the tests run
    circleci_config_filename = os.path.join(dirname, '.circleci', 'config.yml')
    backup_circleci_config_filename = os.path.join(dirname, '.circleci', 'config.yml.save')
    with open(circleci_config_filename, 'r') as file:
        config = yaml.load(file)
    image_name = config['jobs']['build']['docker'][0]['image']
    if image_name.endswith('.with_ssh_key'):
        # the config already names the key-bearing image; recover the base image name
        # (13 == len('.with_ssh_key'))
        image_with_ssh_key_name = image_name
        image_name = image_name[:-13]
    else:
        image_with_ssh_key_name = image_name + '.with_ssh_key'
    shutil.copyfile(circleci_config_filename, backup_circleci_config_filename)
    config['jobs']['build']['docker'][0]['image'] = image_with_ssh_key_name
    with open(circleci_config_filename, 'w') as file:
        yaml.dump(config, file, default_flow_style=False)

    # Build docker image with SSH key layered on top of the base build image
    circleci_context_dirname = os.path.join(karr_lab_build_utils_dirname, 'circleci_docker_context')
    if not os.path.isdir(circleci_context_dirname):
        os.makedirs(circleci_context_dirname)
    shutil.copy(ssh_key_filename, os.path.join(circleci_context_dirname, 'GITHUB_SSH_KEY'))
    dockerfile_filename = os.path.join(circleci_context_dirname, 'Dockerfile_Circleci')
    with open(dockerfile_filename, 'w') as file:
        file.write('FROM {}\n'.format(image_name))
        file.write('COPY circleci_docker_context/GITHUB_SSH_KEY /root/.ssh/id_rsa\n')
        file.write('ENV TEST_SERVER_TOKEN={}\n'.format(self.test_server_token or ''))
        file.write('RUN eval `ssh-agent` && ssh-add /root/.ssh/id_rsa\n')
        file.write('CMD bash\n')
    self._run_docker_command(['build',
                              '--tag', image_with_ssh_key_name,
                              '-f', os.path.join('circleci_docker_context', 'Dockerfile_Circleci'),
                              '.'],
                             cwd=karr_lab_build_utils_dirname)

    # test package: run the CircleCI local executor, capturing its combined output
    with capturer.CaptureOutput() as captured:
        process = subprocess.Popen(['circleci',
                                    '--env', 'test_path={}'.format(test_path),
                                    '--env', 'verbose={:d}'.format(verbose),
                                    '--env', 'dry_run=1',
                                    '--env', 'PASSWORDS_REPO_PASSWORD={}'.format(self.passwords_repo_password),
                                    'build'], cwd=dirname)
        while process.poll() is None:
            time.sleep(0.5)
        out = captured.get_text()

    # revert CircleCI config file
    os.remove(circleci_config_filename)
    shutil.move(backup_circleci_config_filename, circleci_config_filename)

    # delete docker image (best effort: failure to remove is not fatal)
    self._run_docker_command(['rmi', image_with_ssh_key_name], raise_error=False)

    # cleanup circleci context
    shutil.rmtree(circleci_context_dirname)

    # failure is detected from either the exit code or a 'Task failed' message in the output
    if process.returncode != 0 or 'Task failed' in out:
        raise BuildHelperError(out.encode('utf-8'))
def get_test_results(self):
    """ Load test case results from the latest set of XUnit XML report files
    (one file per Python version).

    Returns:
        :obj:`TestResults`: test results
    """
    results = TestResults()
    pattern = os.path.join(self.proj_tests_xml_dir,
                           '{0}.*.xml'.format(self.proj_tests_xml_latest_filename))
    for report_filename in glob.glob(pattern):
        # the Python version is embedded between the report prefix and the .xml suffix
        version_match = re.match('^{}\.(.*?)\.xml$'.format(self.proj_tests_xml_latest_filename),
                                 os.path.basename(report_filename))
        py_version = version_match.group(1)

        doc = minidom.parse(report_filename)
        suite = doc.getElementsByTagName('testsuite')[0]
        for case_node in suite.getElementsByTagName('testcase'):
            case = TestCaseResult()
            case.classname = case_node.getAttribute('classname')
            case.name = case_node.getAttribute('name')
            case.python_version = py_version
            case.time = float(case_node.getAttribute('time'))
            if case_node.hasAttribute('file'):
                case.file = case_node.getAttribute('file')
            if case_node.hasAttribute('line'):
                case.line = int(float(case_node.getAttribute('line')))

            out_nodes = case_node.getElementsByTagName('system-out')
            if out_nodes:
                case.stdout = ''.join([child.nodeValue for child in out_nodes[0].childNodes])
            err_nodes = case_node.getElementsByTagName('system-err')
            if err_nodes:
                case.stderr = ''.join([child.nodeValue for child in err_nodes[0].childNodes])

            # classify the case; skip takes precedence over error, error over failure
            skipped_nodes = case_node.getElementsByTagName('skipped')
            error_nodes = case_node.getElementsByTagName('error')
            failure_nodes = case_node.getElementsByTagName('failure')
            if skipped_nodes:
                case.type = TestCaseResultType.skipped
            elif error_nodes:
                case.type = TestCaseResultType.error
            elif failure_nodes:
                case.type = TestCaseResultType.failure
            else:
                case.type = TestCaseResultType.passed

            # for non-passing cases, record the detail of the first non-pass element
            detail_nodes = skipped_nodes or error_nodes or failure_nodes
            if detail_nodes:
                detail = detail_nodes[0]
                case.subtype = detail.getAttribute('type')
                case.message = detail.getAttribute('message')
                case.details = ''.join([child.nodeValue for child in detail.childNodes])

            results.cases.append(case)
    return results
def get_test_results_status(self, test_results, installation_error, tests_error, other_error, dry_run=False):
    """ Get the status of a set of results

    Classifies the build as one of:

    * Old error (the previous build also failed)
    * New error (the previous build passed)
    * Fixed error (this build passed after a failure)
    * Other error (the build broke before producing any test results)
    * New downstream error (a new error in a build triggered by an upstream package)

    Args:
        test_results (:obj:`TestResults`): test results
        installation_error (:obj:`bool`): :obj:`True` if there were errors during the installation
        tests_error (:obj:`bool`): :obj:`True` if the tests did not pass
        other_error (:obj:`bool`): :obj:`True` if there were other errors during the build such as in generating and/or
            archiving the reports
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers

    Returns:
        :obj:`dict`: status of a set of results
    """
    if dry_run:
        # dry runs are never reported as errors or fixes
        return {
            'is_fixed': False,
            'is_old_error': False,
            'is_new_error': False,
            'is_other_error': False,
            'is_new_downstream_error': False,
        }

    # an "other" error is a build that broke without producing any test results
    if (installation_error or tests_error or other_error) and test_results.get_num_tests() == 0:
        is_other_error = True
        is_new_error = False
        is_old_error = False
        is_fixed = False
    else:
        is_other_error = False
        passed = test_results.get_num_errors() == 0 and test_results.get_num_failures() == 0

        if self.build_num <= 1:
            # first build: there is no previous build to compare against
            if passed:
                is_old_error = False
                is_new_error = False
                is_fixed = True
            else:
                is_old_error = False
                is_new_error = True
                is_fixed = False
        else:
            # compare against the status of the previous CircleCI build
            prev_result = self.run_circleci_api('/' + str(self.build_num - 1))
            if passed:
                is_old_error = False
                is_new_error = False
                is_fixed = prev_result['status'] not in ['success', 'fixed']
            else:
                is_old_error = prev_result['status'] not in ['success', 'fixed']
                is_new_error = prev_result['status'] in ['success', 'fixed']
                is_fixed = False

    # a new error in a build that was triggered by an upstream package is a downstream error
    upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
    if upstream_repo_name and is_new_error and self.build_num > 1 and not is_other_error:
        is_new_downstream_error = True
    else:
        is_new_downstream_error = False

    return {
        'is_fixed': is_fixed,
        'is_old_error': is_old_error,
        'is_new_error': is_new_error,
        'is_other_error': is_other_error,
        'is_new_downstream_error': is_new_downstream_error,
    }
def do_post_test_tasks(self, installation_error, tests_error, dry_run=False):
    """ Do all post-test tasks for CircleCI

    * Make test and coverage reports
    * Compile documentation
    * Archive test and coverage reports to the Karr Lab test history server, Coveralls, and Code Climate
    * Trigger tests of downstream dependencies
    * Notify authors of new failures in downstream packages

    Args:
        installation_error (:obj:`bool`): :obj:`True` if there were errors during the installation
        tests_error (:obj:`bool`): :obj:`True` if the tests did not pass
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers

    Returns:
        :obj:`list` of :obj:`str`: names of triggered packages
        :obj:`dict`: status of a set of results
    """
    try:
        static_analyses = self.make_and_archive_reports(dry_run=dry_run)
        other_error = False
    except Exception:
        # report generation/archiving is best-effort: record the failure so it is
        # reflected in the build status, then continue with the remaining tasks
        static_analyses = {'missing_requirements': [], 'unused_requirements': []}
        other_error = True

    triggered_packages = self.trigger_tests_of_downstream_dependencies(dry_run=dry_run)
    status = self.send_email_notifications(installation_error, tests_error, other_error, static_analyses, dry_run=dry_run)
    return (triggered_packages, status)
def send_email_notifications(self, installation_error, tests_error, other_error, static_analyses, dry_run=False):
    """ Send email notifications of failures, fixes, and downstream failures

    Args:
        installation_error (:obj:`bool`): :obj:`True` if there were errors during the installation
        tests_error (:obj:`bool`): :obj:`True` if the tests did not pass
        other_error (:obj:`bool`): :obj:`True` if there were other errors during the build such as in generating and/or
            archiving the reports
        static_analyses (:obj:`dict`): analyses of missing and unused requirements
        dry_run (:obj:`bool`, optional): if true, don't send any notifications

    Returns:
        :obj:`dict`: status of a set of results
    """
    test_results = self.get_test_results()
    status = self.get_test_results_status(test_results, installation_error, tests_error, other_error, dry_run=dry_run)

    # stop if this is a dry run
    if dry_run:
        return status

    # build context for the email templates from the CircleCI build metadata
    result = self.run_circleci_api('/' + str(self.build_num))
    context = {
        'repo_name': self.repo_name,
        'commit': result['all_commit_details'][0]['commit'],
        'committer_name': result['all_commit_details'][0]['committer_name'],
        'committer_email': result['all_commit_details'][0]['committer_email'],
        'commit_subject': result['all_commit_details'][0]['subject'],
        'commit_url': result['all_commit_details'][0]['commit_url'],
        'build_num': self.build_num,
        'build_url': result['build_url'],
        'test_results': test_results,
        'static_analyses': static_analyses,
    }

    if status['is_new_downstream_error']:
        # also describe the upstream build that triggered this build
        upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
        upstream_build_num = int(os.getenv('UPSTREAM_BUILD_NUM', '0'))
        result = self.run_circleci_api('/' + str(upstream_build_num), repo_name=upstream_repo_name)
        context['upstream'] = {
            'repo_name': upstream_repo_name,
            'commit': result['all_commit_details'][0]['commit'],
            'committer_name': result['all_commit_details'][0]['committer_name'],
            'committer_email': result['all_commit_details'][0]['committer_email'],
            'commit_subject': result['all_commit_details'][0]['subject'],
            'commit_url': result['all_commit_details'][0]['commit_url'],
            'build_num': upstream_build_num,
            'build_url': result['build_url'],
        }

    # recipients come from the package's build configuration
    config = self.get_build_config()
    recipients = config.get('email_notifications', [])

    # send at most one of the fixed/old/new/other notifications, chosen by precedence
    if status['is_fixed']:
        subject = '[Builds] [{0}] {0} is fixed!'.format(context['repo_name'])
        self._send_notification_email(recipients, subject, 'fixed.html', context)
    elif status['is_old_error']:
        subject = '[Builds] [{0}] {0} is still broken!'.format(context['repo_name'])
        self._send_notification_email(recipients, subject, 'old_error.html', context)
    elif status['is_new_error']:
        subject = '[Builds] [{0}] {0} has been broken!'.format(context['repo_name'])
        self._send_notification_email(recipients, subject, 'new_error.html', context)
    elif status['is_other_error']:
        subject = '[Builds] [{0}] {0} is broken!'.format(context['repo_name'])
        self._send_notification_email(recipients, subject, 'other_error.html', context)

    # a downstream-error notification may be sent in addition to the above
    if status['is_new_downstream_error']:
        recipients.append('wholecell-developers@googlegroups.com')
        subject = '[Builds] [{1}] commit {0} to {1} may have broken {2}'.format(
            context['upstream']['commit'], context['upstream']['repo_name'], context['repo_name'])
        self._send_notification_email(recipients, subject, 'new_downstream_error.html', context)

    return status
def _send_notification_email(self, recipients, subject, template_filename, context, dry_run=False):
    """ Send an email notification of test results

    Args:
        recipients (:obj:`list` of :obj:`str`): recipient email addresses
        subject (:obj:`str`): subject
        template_filename (:obj:`str`): path to template, relative to ``templates/email_notifications``
        context (:obj:`dict`): context for template
        dry_run (:obj:`bool`, optional): if true, don't send the email
    """
    # render the HTML body from the template bundled with the package
    full_template_filename = pkg_resources.resource_filename(
        'karr_lab_build_utils', os.path.join('templates', 'email_notifications', template_filename))
    with open(full_template_filename, 'r') as file:
        template = Template(file.read())
    body = template.render(**context)

    msg = email.message.Message()
    msg['From'] = email.utils.formataddr((str(email.header.Header('Karr Lab Build System', 'utf-8')), 'noreply@karrlab.org'))
    tos = []
    for recipient in recipients:
        tos.append(email.utils.formataddr((None, recipient)))
    msg['To'] = ', '.join(tos)
    msg['Subject'] = subject
    msg.add_header('Content-Type', 'text/html')
    msg.set_payload(body)

    if not dry_run:
        # send via Gmail's SMTP server using STARTTLS
        smtp = smtplib.SMTP('smtp.gmail.com:587')
        smtp.ehlo()
        smtp.starttls()
        smtp.login('karr.lab.daemon', self.email_password)
        smtp.sendmail('noreply@karrlab.org', recipients, msg.as_string())
        smtp.quit()
def make_and_archive_reports(self, coverage_dirname='.', dry_run=False):
    """ Make and archive reports:

    * Upload test report to history server
    * Upload coverage report to Coveralls and Code Climate

    Args:
        coverage_dirname (:obj:`str`, optional): directory to merge coverage files
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers

    Returns:
        :obj:`dict`: analyses of missing and unused requirements

    Raises:
        :obj:`BuildHelperError`: if any requirements are missing
    """
    errors = []

    # test reports: upload to the Karr Lab test history server
    self.archive_test_report()

    # coverage: merge the per-version reports, then upload to Coveralls/Code Climate
    self.combine_coverage_reports(coverage_dirname=coverage_dirname)
    self.archive_coverage_report(coverage_dirname=coverage_dirname, dry_run=dry_run)

    # static analysis: compare declared requirements against the code's actual imports;
    # missing requirements are errors, unused requirements only warn
    config = self.get_build_config()
    ignore_files = config.get('static_analyses', {}).get('ignore_files', [])
    missing_reqs = self.find_missing_requirements(self.repo_name, ignore_files=ignore_files)
    if missing_reqs:
        errors.append('The following requirements are missing:\n {}'.format(
            '\n '.join(req[0] for req in missing_reqs)))
    unused_reqs = self.find_unused_requirements(self.repo_name, ignore_files=ignore_files)
    if unused_reqs:
        warnings.warn('The following requirements appear to be unused:\n {}'.format('\n '.join(unused_reqs)),
                      UserWarning)

    # documentation
    self.make_documentation()

    # report missing requirements after all of the other tasks have run
    if errors:
        raise BuildHelperError('\n\n'.join(errors))

    return {
        'missing_requirements': missing_reqs,
        'unused_requirements': unused_reqs,
    }
########################
# Test reports
########################
def archive_test_report(self):
    """ Upload the latest XUnit test reports (one per Python version) to the test history server

    Raises:
        :obj:`BuildHelperError`: if there is an error uploading the report to the test history server
    """
    # reports can't be attributed without the full repository/build metadata
    if not self.test_server_token or \
            self.repo_name is None or \
            self.repo_owner is None or \
            self.repo_branch is None or \
            self.repo_revision is None:
        return

    abs_xml_latest_filename_pattern = os.path.join(
        self.proj_tests_xml_dir, '{0}.*.xml'.format(self.proj_tests_xml_latest_filename))
    for abs_xml_latest_filename in glob.glob(abs_xml_latest_filename_pattern):
        # report files are named <prefix>.<x.y.z Python version>.xml
        match = re.match('^.*?\.(\d+\.\d+\.\d+)\.xml$', abs_xml_latest_filename)
        pyv = match.group(1)

        # use a context manager so the report file is closed even if the request fails
        # (the original code leaked the file handle)
        with open(abs_xml_latest_filename, 'rb') as report_file:
            r = requests.post('http://tests.karrlab.org/rest/submit_report',
                              data={
                                  'token': self.test_server_token,
                                  'repo_name': self.repo_name,
                                  'repo_owner': self.repo_owner,
                                  'repo_branch': self.repo_branch,
                                  'repo_revision': self.repo_revision,
                                  'build_num': self.build_num,
                                  'report_name': pyv,
                              },
                              files={
                                  'report': report_file,
                              })
        r.raise_for_status()
        r_json = r.json()
        if 'success' not in r_json or not r_json['success']:
            raise BuildHelperError('Error uploading report to test history server: {}'.format(r_json['message']))
########################
# Coverage reports
########################
def combine_coverage_reports(self, coverage_dirname='.'):
    """ Merge the per-Python-version coverage files (``.coverage.*``) in `coverage_dirname`
    into a single ``.coverage`` file.

    Args:
        coverage_dirname (:obj:`str`, optional): directory to merge coverage files
    """
    # merge temporary copies of the data files, leaving the originals in place
    # (presumably so combine() can consume/delete its inputs safely -- TODO confirm)
    data_paths = []
    for name in glob.glob(os.path.join(coverage_dirname, '.coverage.*')):
        # tempfile.mkstemp instead of the deprecated, race-prone tempfile.mktemp
        handle, data_path = tempfile.mkstemp()
        os.close(handle)
        shutil.copyfile(name, data_path)
        data_paths.append(data_path)

    # stop if there are no files to combine
    if not data_paths:
        warnings.warn('No coverage files exist to combine', UserWarning)
        return

    coverage_doc = coverage.Coverage(data_file=os.path.join(coverage_dirname, '.coverage'))
    coverage_doc.combine(data_paths=data_paths)
    coverage_doc.save()
def archive_coverage_report(self, coverage_dirname='.', dry_run=False):
    """ Publish the merged coverage report to each enabled external service
    (Coveralls and/or Code Climate).

    Args:
        coverage_dirname (:obj:`str`, optional): directory to save coverage data
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
    """
    if self.COVERALLS_ENABLED:
        self.upload_coverage_report_to_coveralls(coverage_dirname=coverage_dirname, dry_run=dry_run)
    if self.CODE_CLIMATE_ENABLED:
        self.upload_coverage_report_to_code_climate(coverage_dirname=coverage_dirname, dry_run=dry_run)
def upload_coverage_report_to_coveralls(self, coverage_dirname='.', dry_run=False):
    """ Upload coverage report to Coveralls

    Args:
        coverage_dirname (:obj:`str`, optional): directory to save coverage data
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls server
    """
    # don't upload if there is no coverage file
    if not os.path.isfile(os.path.join(coverage_dirname, '.coverage')):
        warnings.warn('No coverage file exists to upload to Coveralls', UserWarning)
        return

    if self.coveralls_token:
        runner = coveralls.Coveralls(True, repo_token=self.coveralls_token,
                                     service_name='circle-ci', service_job_id=self.build_num)

        def get_coverage():
            # build the Coveralls report directly from the local .coverage data file
            workman = coverage.Coverage(data_file=os.path.join(coverage_dirname, '.coverage'))
            workman.load()
            workman.get_data()
            return coveralls.reporter.CoverallReporter(workman, workman.config).report()

        # NOTE(review): `return_value=get_coverage()` evaluates get_coverage() immediately,
        # so the patch substitutes a precomputed report for Coveralls' own collection step
        with patch.object(coveralls.Coveralls, 'get_coverage', return_value=get_coverage()):
            runner.wear(dry_run=dry_run)
def upload_coverage_report_to_code_climate(self, coverage_dirname='.', dry_run=False):
    """ Upload coverage report to Code Climate

    Args:
        coverage_dirname (:obj:`str`, optional): directory to save coverage data
        dry_run (:obj:`bool`, optional): if true, don't upload to the Code Climate server

    Raises:
        :obj:`BuildHelperError`: If error uploading code coverage to Code Climate
    """
    # don't upload if there is no coverage file
    if not os.path.isfile(os.path.join(coverage_dirname, '.coverage')):
        warnings.warn('No coverage file exists to upload to Code Climate', UserWarning)
        return

    # export the coverage data to XML for the Code Climate reporter
    xml_cov_filename = 'coverage.xml'
    workman = coverage.Coverage(data_file=os.path.join(coverage_dirname, '.coverage'))
    workman.load()
    workman.get_data()
    workman.xml_report(outfile=xml_cov_filename)

    # download the Code Climate test reporter (Linux amd64 binary)
    response = requests.get('https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64')
    response.raise_for_status()
    cc_path = os.path.expanduser('~/cc-test-reporter')
    with open(cc_path, 'wb') as file:
        file.write(response.content)
    os.chmod(cc_path, 0o755)  # make the downloaded reporter executable

    # run the reporter; raises CalledProcessError if either step fails
    if not dry_run:
        subprocess.check_call([cc_path, 'before-build'])
        subprocess.check_call([cc_path, 'after-build',
                               '-t', 'coverage.py',
                               '-r', self.code_climate_token,
                               ])
########################
# Documentation
########################
def create_documentation_template(self, dirname='.'):
    """ Generate a Sphinx documentation skeleton for the single package listed in the
    ``sphinx-apidocs`` section of ``setup.cfg``.

    Args:
        dirname (:obj:`str`, optional): path to package

    Raises:
        :obj:`ValueError`: if no package or more than one package is specified
    """
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read(os.path.join(dirname, 'setup.cfg'))
    packages = cfg_parser.get('sphinx-apidocs', 'packages').strip().split('\n')
    if len(packages) != 1:
        raise ValueError('Sphinx configuration auto-generation only supports 1 package')

    docs_dir = os.path.join(dirname, self.proj_docs_dir)
    if not os.path.isdir(docs_dir):
        os.mkdir(docs_dir)

    # templates shipped with karr_lab_build_utils that are rendered into the docs directory
    template_names = [
        'conf.py',
        'requirements.txt',
        'conda.environment.yml',
        'spelling_wordlist.txt',
        'index.rst',
        'overview.rst',
        'installation.rst',
        'about.rst',
        'references.rst',
        'references.bib',
    ]
    for package in packages:
        render_context = {
            'package': package,
            'version': self.INITIAL_PACKAGE_VERSION,
            'year': datetime.now().year,
            'package_underline': '=' * len(package),
        }
        for template_name in template_names:
            source_path = pkg_resources.resource_filename(
                'karr_lab_build_utils', os.path.join('templates', 'docs', template_name))
            with open(source_path, 'r') as file:
                Template(file.read()).stream(**render_context).dump(
                    os.path.join(dirname, self.proj_docs_dir, template_name))
def make_documentation(self, spell_check=False):
    """ Build the package's HTML documentation with Sphinx, saving the output to
    `proj_docs_build_html_dir`; optionally also run the Sphinx spelling builder.

    Args:
        spell_check (:obj:`bool`): if :obj:`True`, run spell checking

    Raises:
        :obj:`BuildHelperError`: If project name not set
    """
    # the static directory must exist before Sphinx runs
    if not os.path.isdir(self.proj_docs_static_dir):
        os.mkdir(self.proj_docs_static_dir)

    # standard HTML build
    self.run_method_and_capture_stderr(sphinx_build, [self.proj_docs_dir, self.proj_docs_build_html_dir])

    if not spell_check:
        return

    # spelling builder writes its report to proj_docs_build_spelling_dir
    spelling_argv = [
        '-b', 'spelling',
        '-d', self.proj_docs_build_doctrees_dir,
        self.proj_docs_dir,
        self.proj_docs_build_spelling_dir,
    ]
    self.run_method_and_capture_stderr(sphinx_build, spelling_argv)
def compile_downstream_dependencies(self, dirname='.', packages_parent_dir='..', config_filename=None):
    """ Compile the downstream dependencies of a package and save them to :obj:`config_filename`

    Args:
        dirname (:obj:`str`, optional): path to package
        packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages
        config_filename (:obj:`str`, optional): path to save configuration with list of downstream dependencies
            in YAML format

    Returns:
        :obj:`list` of :obj:`str`: downstream dependencies

    Raises:
        :obj:`BuildHelperError`: if a package has more than one module
    """
    import pkg_utils
    # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
    # :todo: figure out how to fix this

    packages_parent_dir = os.path.abspath(packages_parent_dir)

    # get the name of the current package from the coverage configuration in setup.cfg
    parser = configparser.ConfigParser()
    parser.read(os.path.join(dirname, 'setup.cfg'))
    tmp = parser.get('coverage:run', 'source').strip().split('\n')
    if len(tmp) != 1:
        raise BuildHelperError('Package should have only one module')
    this_pkg_name = tmp[0]

    # collect the downstream dependencies by analyzing the requirements files of other packages
    # :todo: support branches
    downstream_dependencies = []
    # use a distinct loop variable so the `dirname` argument is not clobbered
    # (the original reused `dirname` as the loop variable)
    for other_dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
        if os.path.isdir(other_dirname) and os.path.isfile(os.path.join(other_dirname, '.circleci/config.yml')):
            other_pkg_name = other_dirname[len(packages_parent_dir) + 1:]
            install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
                other_dirname, include_extras=False, include_specs=False, include_markers=False)
            if this_pkg_name in install_requires or this_pkg_name in extras_require['all']:
                downstream_dependencies.append(other_pkg_name)

    # save the downstream dependencies to a file
    if config_filename:
        config = {}
        if os.path.isfile(config_filename):
            with open(config_filename, 'r') as file:
                config = yaml.load(file)
        config['downstream_dependencies'] = downstream_dependencies
        with open(config_filename, 'w') as file:
            yaml.dump(config, file, default_flow_style=False)

    # return the downstream dependencies
    return downstream_dependencies
def are_package_dependencies_acyclic(self, packages_parent_dir='..'):
    """ Check whether the downstream dependency graph of the packages under
    `packages_parent_dir` is acyclic, as required by CircleCI build triggering.

    Args:
        packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages

    Returns:
        :obj:`bool`: :obj:`True` if the package dependencies are acyclic
    """
    graph = networkx.DiGraph()
    for pkg_dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
        # a directory is treated as a package iff it has a CircleCI configuration
        is_pkg = os.path.isdir(pkg_dirname) and os.path.isfile(os.path.join(pkg_dirname, '.circleci/config.yml'))
        if not is_pkg:
            continue

        pkg = pkg_dirname[len(packages_parent_dir) + 1:]
        graph.add_node(pkg)

        # one edge per downstream dependency declared in the package's build config
        config_filename = os.path.join(pkg_dirname, '.karr_lab_build_utils.yml')
        if os.path.isfile(config_filename):
            with open(config_filename, 'r') as file:
                config = yaml.load(file)
            for downstream_pkg in config.get('downstream_dependencies', []):
                graph.add_edge(pkg, downstream_pkg)

    try:
        networkx.algorithms.cycles.find_cycle(graph)
        return False
    except networkx.NetworkXNoCycle:
        return True
def visualize_package_dependencies(self, packages_parent_dir='..', out_filename='../package_dependencies.pdf'):
    """ Visualize downstream package dependencies as a graph

    Args:
        packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages
        out_filename (:obj:`str`, optional): path to save visualization; the extension
            selects the graphviz output format (e.g. ``.pdf``)
    """
    # `extension` rather than `format` to avoid shadowing the builtin
    basename, extension = os.path.splitext(out_filename)
    dot = graphviz.Digraph(format=extension[1:])

    for dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
        # only directories that contain a CircleCI configuration count as packages
        if not (os.path.isdir(dirname) and os.path.isfile(os.path.join(dirname, '.circleci/config.yml'))):
            continue

        # node for the package itself
        pkg = dirname[len(packages_parent_dir) + 1:]
        dot.node(pkg, pkg)

        # edges for the package's recorded downstream dependencies
        config_filename = os.path.join(dirname, '.karr_lab_build_utils.yml')
        if os.path.isfile(config_filename):
            with open(config_filename, 'r') as file:
                # safe_load: the file holds plain data, and yaml.load without an
                # explicit Loader is deprecated and can execute arbitrary code
                config = yaml.safe_load(file)
            # an empty YAML file parses to None; treat it as an empty configuration
            deps = (config or {}).get('downstream_dependencies', [])
            for other_pkg in deps:
                dot.edge(pkg, other_pkg)

    dot.render(filename=basename, cleanup=True)
def trigger_tests_of_downstream_dependencies(self, config_filename='.karr_lab_build_utils.yml',
                                             dry_run=False):
    """ Trigger CircleCI to test downstream dependencies listed in :obj:`config_filename`

    Args:
        config_filename (:obj:`str`, optional): path to YAML configuration file which contains a list of
            downstream dependencies
        dry_run (:obj:`bool`, optional): if true, don't trigger any builds

    Returns:
        :obj:`list` of :obj:`str`: names of triggered packages

    :todo: support branches
    """
    # stop if this is a dry run
    if dry_run:
        return []

    # stop if the tests didn't pass
    test_results = self.get_test_results()
    if test_results.get_num_errors() > 0 or test_results.get_num_failures() > 0:
        return []

    # read downstream dependencies
    # NOTE(review): yaml.load without an explicit Loader is deprecated/unsafe; consider yaml.safe_load
    with open(config_filename, 'r') as file:
        config = yaml.load(file)
    packages = config.get('downstream_dependencies', [])

    # stop if there are no downstream dependencies
    if not packages:
        return []

    # identify the build at the root of this trigger cascade; if the environment
    # variables are unset, this build is the root
    upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
    upstream_build_num = os.getenv('UPSTREAM_BUILD_NUM', '0')
    if not upstream_repo_name:
        upstream_repo_name = self.repo_name
        upstream_build_num = str(self.build_num)

    # commit time of the upstream build, used below to skip already-fresh packages
    result = self.run_circleci_api('/' + str(upstream_build_num), repo_name=upstream_repo_name)
    upstream_build_time = dateutil.parser.parse(result['all_commit_details'][0]['committer_date'])

    triggered_packages = []
    for package in packages:
        branch = 'master'

        # get summary of recent builds
        builds = self.run_circleci_api('', repo_name=package)

        # don't trigger a build if a build has already been triggered from the same upstream build;
        # this prevents building the same project multiple times, including infinite looping
        already_queued = False
        for build in builds:
            # don't trigger a build if this is the same package which triggered the cascade
            if package == upstream_repo_name and \
                    str(build['build_num']) == upstream_build_num and \
                    build['build_num'] != self.build_num:
                already_queued = True
                break

            # don't trigger a build if the package has already been triggered from the same upstream commit
            build_parameters = build['build_parameters']
            if build_parameters and 'UPSTREAM_REPONAME' in build_parameters and \
                    build_parameters['UPSTREAM_REPONAME'] == upstream_repo_name and \
                    build_parameters['UPSTREAM_BUILD_NUM'] == upstream_build_num:
                already_queued = True
                break

            # don't trigger a build if the package has already been more recently tested than the commit time
            build_start_time = build['start_time']
            if build_start_time is None or dateutil.parser.parse(build['start_time']) > upstream_build_time:
                already_queued = True
                break

        if already_queued:
            continue

        # trigger build, recording the root of the cascade in the build parameters
        self.run_circleci_api('/tree/{}'.format(branch), method='post', repo_name=package, data={
            'build_parameters': {
                'UPSTREAM_REPONAME': upstream_repo_name,
                'UPSTREAM_BUILD_NUM': upstream_build_num,
            }
        })
        triggered_packages.append(package)

    return triggered_packages
def get_version(self):
    """ Get the version of this package, annotated with the running Python version

    Returns:
        :obj:`str`: the version, e.g. ``"1.2.3 (Python 3.6.4)"``
    """
    py = sys.version_info
    return '%s (Python %d.%d.%d)' % (karr_lab_build_utils.__version__, py[0], py[1], py[2])
@staticmethod
def get_python_version():
""" Get the Python version
Returns:
:obj:`str`: the Python version
"""
return '{0[0]:d}.{0[1]:d}.{0[2]:d}'.format(sys.version_info)
def run_method_and_capture_stdout(self, func, *args, **kwargs):
    """ Run a method that returns a numerical error value, and exit if the return value is non-zero

    Args:
        func (:obj:`function`): function to run
        *args (:obj:`list`): arguments to :obj:`func`
        **kwargs (:obj:`dict`): keyword arguments to :obj:`func`

    Returns:
        :obj:`str`: stdout
    """
    # capture both streams while `func` runs; the streams must be read before the
    # capture context is exited
    with abduct.captured(abduct.out(), abduct.err()) as (stdout, stderr):
        result = func(*args, **kwargs)
        out_msg = stdout.getvalue()
        err_msg = stderr.getvalue()

    # non-zero result means failure: re-emit the captured stderr (outside the capture,
    # so it actually reaches the console) and abort the process
    if result != 0:
        sys.stderr.write(err_msg)
        sys.stderr.flush()
        sys.exit(1)

    return out_msg
def run_method_and_capture_stderr(self, func, *args, **kwargs):
    """ Run a method that returns a numerical error value, and exit if the return value is non-zero

    On success the captured stderr is silently discarded.

    Args:
        func (:obj:`function`): function to run
        *args (:obj:`list`): arguments to :obj:`func`
        **kwargs (:obj:`dict`): keyword arguments to :obj:`func`
    """
    # capture stderr while `func` runs; read it before the capture context is exited
    with abduct.captured(abduct.err()) as stderr:
        result = func(*args, **kwargs)
        err_msg = stderr.getvalue()

    # non-zero result means failure: re-emit the captured stderr (outside the capture,
    # so it actually reaches the console) and abort the process
    if result != 0:
        sys.stderr.write(err_msg)
        sys.stderr.flush()
        sys.exit(1)
def analyze_package(self, package_name, messages=None):
    """ Perform static analyses of a package using Pylint.

    The default options will identify the following issues:

    * Unused imported modules, classes, functions, and variables
    * Reimported modules, classes, functions, and variables
    * Wild card imports outside of __init__.py
    * Duplicate arguments and keys
    * Missing requirements

    Args:
        package_name (:obj:`str`): name of the package to analyze
        messages (:obj:`list` of :obj:`str`, optional): list of Pylint checks to perform
    """
    if messages is None:
        messages = [
            # variables
            'W0611',  # unused-import
            'W0614',  # unused-wildcard-import
            'W0613',  # unused-argument
            'W0612',  # unused-variable

            # imports
            'W0404',  # reimported
            'W0401',  # wildcard-import

            # similarities
            'E0108',  # duplicate-argument-name
            'W0109',  # duplicate-key
        ]

    # disable everything, then enable only the requested checks
    msg_opts = [
        '--disable=all',
        '--enable=' + ','.join(messages),
    ]

    # suppress pylint's summary report and numeric score
    report_opts = [
        '--reports=n',
        '--score=n',
    ]
    # TODO: debug, does not work:
    epylint.lint(package_name, msg_opts + report_opts)
def find_missing_requirements(self, package_name, dirname='.', ignore_files=None):
    """ Find missing requirements

    Args:
        package_name (:obj:`str`): name of the package to analyze
        dirname (:obj:`str`, optional): path to package
        ignore_files (:obj:`list`, optional): files to ignore

    Returns:
        :obj:`list`: list of missing dependencies and their occurences in the code
    """
    import pkg_utils
    # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
    # :todo: figure out how to fix this

    # assemble the option namespace that pip_check_reqs expects
    options = attrdict.AttrDict()
    options.paths = [package_name]
    options.ignore_files = pip_check_reqs.common.ignorer(ignore_files or [])
    options.ignore_mods = pip_check_reqs.common.ignorer([])
    options.verbose = False
    options.debug = False
    options.version = False
    # silence pip_check_reqs' own logging so only the results are reported
    pip_check_reqs.find_missing_reqs.log.setLevel(logging.ERROR)
    missing = pip_check_reqs.find_missing_reqs.find_missing_reqs(options)

    # filter out optional dependencies (the 'all'/'tests'/'docs' extras are excluded
    # so requirements used only by those extras are still reported as missing)
    install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
        dirname, include_extras=False, include_specs=False, include_markers=False)
    all_deps = install_requires
    for option, opt_deps in extras_require.items():
        if option not in ['all', 'tests', 'docs']:
            all_deps += opt_deps
    # NOTE(review): module names are canonicalized to underscores here; assumes the
    # names in `all_deps` already use underscores -- confirm against pkg_utils
    missing = list(filter(lambda m: m[0].replace('-', '_') not in all_deps, missing))

    # sort missing naturally, case-insensitively, by module name
    missing.sort(key=natsort.natsort_keygen(key=lambda m: m[0], alg=natsort.IGNORECASE))

    return missing
def find_unused_requirements(self, package_name, dirname='.', ignore_files=None):
    """ Find unused requirements

    Args:
        package_name (:obj:`str`): name of the package to analyze
        dirname (:obj:`str`, optional): path to package
        ignore_files (:obj:`list`, optional): files to ignore

    Returns:
        :obj:`list`: name of the unused dependencies
    """
    import pkg_utils
    # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
    # :todo: figure out how to fix this

    # assemble the option namespace that pip_check_reqs expects
    options = attrdict.AttrDict()
    options.paths = [package_name]
    options.ignore_files = pip_check_reqs.common.ignorer(ignore_files or [])
    options.ignore_mods = pip_check_reqs.common.ignorer([])
    options.ignore_reqs = pip_check_reqs.common.ignorer([])
    options.verbose = False
    options.debug = False
    options.version = False
    # silence pip_check_reqs' own logging so only the results are reported
    pip_check_reqs.find_extra_reqs.log.setLevel(logging.ERROR)

    # get all requirements (the 'all'/'tests'/'docs' extras are excluded)
    install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
        dirname, include_extras=False, include_specs=False, include_markers=False)
    all_deps = set(install_requires)
    for option, opt_deps in extras_require.items():
        if option not in ['all', 'tests', 'docs']:
            all_deps = all_deps | set(opt_deps)
    # pip-style requirement names use hyphens
    all_deps = [dep.replace('_', '-') for dep in all_deps]

    # find unused requirements; the patch makes pip_check_reqs compare against the
    # requirements gathered above instead of scanning setup.py itself
    with mock.patch('pip_check_reqs.common.find_required_modules', return_value=all_deps):
        unuseds = pip_check_reqs.find_extra_reqs.find_extra_reqs(options)

    # correct for editably-installed packages: drop anything that is actually imported
    useds = pip_check_reqs.common.find_imported_modules(options).keys()
    useds = [used.partition('.')[0].replace('_', '-') for used in useds]
    unuseds = list(set(unuseds).difference(set(useds)))

    # return canonical (underscore) names
    unuseds = [unused.replace('-', '_') for unused in unuseds]

    # sort unuseds naturally, case-insensitively
    unuseds.sort(key=natsort.natsort_keygen(alg=natsort.IGNORECASE))

    return unuseds
def upload_package_to_pypi(self, dirname='.', repository='pypi', pypi_config_filename='~/.pypirc'):
    """ Upload a package to PyPI

    Args:
        dirname (:obj:`str`, optional): path to package to upload
        repository (:obj:`str`, optional): repository to upload code to (section in .pypirc or a full URL)
        pypi_config_filename (:obj:`str`, optional): path to .pypirc
    """
    # cleanup: remove stale build artifacts so only fresh distributions are uploaded
    if os.path.isdir(os.path.join(dirname, 'build')):
        shutil.rmtree(os.path.join(dirname, 'build'))
    if os.path.isdir(os.path.join(dirname, 'dist')):
        shutil.rmtree(os.path.join(dirname, 'dist'))

    # package code: build source and wheel distributions
    subprocess.check_call([sys.executable, os.path.join(os.path.abspath(dirname), 'setup.py'), 'sdist', 'bdist_wheel'],
                          cwd=dirname)

    # upload every file in dist/ with twine
    options = []
    if repository:
        options += ['--repository', repository]
    if pypi_config_filename:
        options += ['--config-file', os.path.abspath(os.path.expanduser(pypi_config_filename))]
    uploads = []
    for path in glob.glob(os.path.join(dirname, 'dist', '*')):
        uploads.append(path)
    twine.commands.upload.main(options + uploads)

    # cleanup
    # NOTE(review): if the upload raises, these directories are left behind -- consider try/finally
    shutil.rmtree(os.path.join(dirname, 'build'))
    shutil.rmtree(os.path.join(dirname, 'dist'))
def run_circleci_api(self, command, method='get', repo_type=None, repo_owner=None, repo_name=None,
                     data=None, circleci_api_token=None):
    """ Run the CircleCI API

    Args:
        command (:obj:`str`): API command
        method (:obj:`str`): type of HTTP request (get, post, delete)
        repo_type (:obj:`str`, optional): repository type (e.g., github)
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        data (:obj:`str`, optional): data
        circleci_api_token (:obj:`str`, optional): CircleCI API token

    Returns:
        :obj:`dict`: CircleCI result

    Raises:
        :obj:`requests.exceptions.HTTPError`: if the HTTP request to CircleCI does not succeed
    """
    # fall back to this instance's repository coordinates and credentials
    if not repo_type:
        repo_type = self.repo_type
    if not repo_owner:
        repo_owner = self.repo_owner
    if not repo_name:
        repo_name = self.repo_name
    if not circleci_api_token:
        circleci_api_token = self.circleci_api_token

    # NOTE(review): the API token is embedded in the URL query string, so it can leak
    # into anything that logs URLs -- confirm whether header-based auth is possible
    url = '{}/project/{}/{}/{}{}?circle-token={}'.format(
        self.CIRCLE_API_ENDPOINT, repo_type, repo_owner, repo_name, command, circleci_api_token)
    # dispatch to requests.get / requests.post / requests.delete per `method`
    request_method = getattr(requests, method)
    response = request_method(url, json=data)
    response.raise_for_status()  # surface HTTP errors to the caller
    return response.json()
def get_build_config(self):
    """ Get the build configuration

    Reads ``.karr_lab_build_utils.yml`` from the current working directory.

    Returns:
        :obj:`dict`: build configuration
    """
    with open('.karr_lab_build_utils.yml', 'r') as file:
        # safe_load: the file holds plain configuration data, and yaml.load without
        # an explicit Loader is deprecated and can execute arbitrary code
        return yaml.safe_load(file)
def download_passwords(self, pull=False):
    """ Download passwords repository

    Clones the repository if it is not present; otherwise optionally pulls it.

    Args:
        pull (:obj:`bool`, optional): if :obj:`True`, pull the passwords
    """
    # sink for git's console output
    if six.PY3:
        devnull = subprocess.DEVNULL
    else:
        # NOTE(review): this file handle is never closed on Python 2
        devnull = open(os.devnull, 'wb')

    if os.path.isdir(self.passwords_repo_path):
        # repository already cloned; optionally refresh it
        if pull:
            subprocess.check_call(['git', 'pull'], cwd=self.passwords_repo_path,
                                  stdout=devnull, stderr=devnull)
    else:
        # clone with the credentials embedded in the URL
        url = self.passwords_repo_url.replace('://', '://{}:{}@'.format(
            self.passwords_repo_username, self.passwords_repo_password))
        subprocess.check_call(['git', 'clone', url, self.passwords_repo_path],
                              stdout=devnull, stderr=devnull)
def get_passwords(self, pull=False):
    """ Read key/value pairs from the passwords repository

    Args:
        pull (:obj:`bool`, optional): if :obj:`True`, pull the passwords

    Returns:
        :obj:`dict`: key/value pairs
    """
    self.download_passwords(pull=pull)
    with open(os.path.join(self.passwords_repo_path, 'passwords.yml'), 'r') as file:
        # safe_load: the file holds plain key/value data, and yaml.load without
        # an explicit Loader is deprecated and can execute arbitrary code
        return yaml.safe_load(file)
def set_env_vars_from_passwords(self, pull=False):
    """ Create OS environment variables based on the key/value pairs in the passwords repository

    Args:
        pull (:obj:`bool`, optional): if :obj:`True`, pull the passwords
    """
    # os.environ.update sets each pair on the process environment, item by item
    os.environ.update(self.get_passwords(pull=pull))
class TestResults(object):
    """ Unit test results

    Attributes:
        cases (:obj:`list` of :obj:`TestCaseResult`): test case results
    """

    def __init__(self):
        self.cases = []

    @property
    def num_tests(self):
        return self.get_num_tests()

    @property
    def num_passed(self):
        return self.get_num_passed()

    @property
    def num_skipped(self):
        return self.get_num_skipped()

    @property
    def num_errors(self):
        return self.get_num_errors()

    @property
    def num_failures(self):
        return self.get_num_failures()

    def get_num_tests(self):
        """ Get the number of tests

        Returns:
            :obj:`int`: number of tests
        """
        return len(self.cases)

    def get_num_passed(self):
        """ Get the number of tests that passed

        Returns:
            :obj:`int`: number of tests that passed
        """
        return sum(1 for case in self.cases if case.type == TestCaseResultType.passed)

    def get_num_skipped(self):
        """ Get the number of skipped tests

        Returns:
            :obj:`int`: number of skipped tests
        """
        return sum(1 for case in self.cases if case.type == TestCaseResultType.skipped)

    def get_num_errors(self):
        """ Get the number of tests with errors

        Returns:
            :obj:`int`: number of tests with errors
        """
        return sum(1 for case in self.cases if case.type == TestCaseResultType.error)

    def get_num_failures(self):
        """ Get the number of tests with failures

        Returns:
            :obj:`int`: number of tests with failures
        """
        return sum(1 for case in self.cases if case.type == TestCaseResultType.failure)
class TestCaseResult(object):
    """ The result of a single test case

    Attributes:
        classname (:obj:`str`): name of the class of the test case
        name (:obj:`str`): name of the test case
        filename (:obj:`str`): file where the test was defined
        line (:obj:`int`): line where the test was defined
        python_version (:obj:`str`): python version which ran the test
        type (:obj:`TestCaseResultType`): type of the result (pass, skip, error, failure)
        subtype (:obj:`str`): detailed type of the result
        message (:obj:`str`): message from the result
        details (:obj:`str`): detailed message from the result
        time (:obj:`float`): duration of the time in seconds
        stdout (:obj:`str`): standard output
        stderr (:obj:`str`): standard error
    """

    def __init__(self):
        # every attribute starts out unknown (None); report parsers fill them in
        for attr in ('classname', 'name', 'filename', 'line', 'python_version',
                     'time', 'stdout', 'stderr', 'type', 'subtype', 'message', 'details'):
            setattr(self, attr, None)
class TestCaseResultType(enum.Enum):
    """ Type of test case result """
    passed = 0   # test ran and succeeded
    skipped = 1  # test was not run
    error = 2    # test raised an unexpected exception
    failure = 3  # test ran and an assertion failed
class BuildHelperError(Exception):
    """ Base class for errors raised by :obj:`BuildHelper` """
|
'''
Created on Oct 10, 2012
@author: dstrauss
'''
from model import fwd
import numpy as np
from scipy import sparse
import sparseTools as spt
from scipy import io as spio
def speye(n):
    """ Return an n-by-n sparse identity matrix (shorthand used throughout the solver). """
    return sparse.identity(n)
class solver(fwd):
    ''' class to implement the transverse electric mode, rather - the case where we have Ez only '''

    def setspace(self, nx, ny, nz, dx, dy, dz):
        ''' Record the grid dimensions and spacings of the 3D simulation volume. '''
        self.nx = nx  # number x points
        self.ny = ny  # number y points
        self.nz = nz  # number z points
        self.N = nx*ny*nz  # super size of space
        self.dx = dx  # delta x
        self.dy = dy  # delta y
        self.dz = dz  # delta z
        # PML thickness: 10 cells, or nx/10 for small grids
        self.npml = min(10, round(nx/10))
def makeGradOper(self):
    ''' routine to make a big matrix for TE problems ex,ey,ez all incorporated,
    based on the petsc_cpx routine.'''
    # a quick hint: pd2 == pdo
    pd1 = self.ph*self.d1  # maps full to half grid
    pd2 = self.po*self.d2  # maps half to full

    # Each block below assembles a difference operator on the staggered grid via
    # Kronecker products: Ex lives on (nx+1, ny, nz), Ey on (nx, ny+1, nz), and
    # Ez on (nx, ny, nz+1). The diagonal blocks (AA, BB, CC) are sums of
    # second differences; the off-diagonal blocks are the cross-derivative terms.
    AA = sparse.kron(speye(self.nx+1), sparse.kron(pd2, speye(self.nz)))*\
        sparse.kron(speye(self.nx+1), sparse.kron(pd1, speye(self.nz))) + \
        sparse.kron(speye(self.nx+1), sparse.kron(speye(self.ny), pd2)) * \
        sparse.kron(speye(self.nx+1), sparse.kron(speye(self.ny), pd1))

    AB = sparse.kron(speye(self.nx+1), sparse.kron(pd2, speye(self.nz)))*\
        sparse.kron(pd1, sparse.kron(speye(self.ny+1), speye(self.nz)))

    AC = sparse.kron(speye(self.nx+1), sparse.kron(speye(self.ny), pd2))*\
        sparse.kron(pd1, sparse.kron(speye(self.ny), speye(self.nz+1)))

    BA = sparse.kron(pd2, sparse.kron(speye(self.ny+1), speye(self.nz)))*\
        sparse.kron(speye(self.nx+1), sparse.kron(pd1, speye(self.nz)))

    BB = sparse.kron(pd2, sparse.kron(speye(self.ny+1), speye(self.nz)))*\
        sparse.kron(pd1, sparse.kron(speye(self.ny+1), speye(self.nz))) + \
        sparse.kron(speye(self.nx), sparse.kron(speye(self.ny+1), pd2))*\
        sparse.kron(speye(self.nx), sparse.kron(speye(self.ny+1), pd1))

    BC = sparse.kron(speye(self.nx), sparse.kron(speye(self.ny+1), pd2))*\
        sparse.kron(speye(self.nx), sparse.kron(pd1, speye(self.nz+1)))

    CA = sparse.kron(pd2, sparse.kron(speye(self.ny), speye(self.nz+1)))*\
        sparse.kron(speye(self.nx+1), sparse.kron(speye(self.ny), pd1))

    CB = sparse.kron(speye(self.nx), sparse.kron(pd2, speye(self.nz+1)))*\
        sparse.kron(speye(self.nx), sparse.kron(speye(self.ny+1), pd1))

    CC = sparse.kron(speye(self.nx), sparse.kron(pd2, speye(self.nz+1)))*\
        sparse.kron(speye(self.nx), sparse.kron(pd1, speye(self.nz+1))) + \
        sparse.kron(pd2, sparse.kron(speye(self.ny), speye(self.nz+1)))*\
        sparse.kron(pd1, sparse.kron(speye(self.ny), speye(self.nz+1)))

    # (a legacy MATLAB-ordered construction of the same blocks used to be kept
    # here as commented-out code; see version control history if needed)

    # stack the nine blocks into the full vector Laplacian / curl-curl operator
    self.nabla2 = spt.vCat([spt.hCat([AA, -AB, -AC]),
                            spt.hCat([-BA, BB, -BC]),
                            spt.hCat([-CA, -CB, CC])])
def setmats(self, eHSr, sHS, div):
    """This routine does setup for the diagonal entries of the nabla matrix.
    Also included are interpolation operators to map from half spaces to non-half spaces, maybe
    """
    self.eHS = eHSr*self.epso  # half space permittivity (relative value scaled by eps_0)
    self.sHS = sHS  # half space conductivity
    self.div = div  # half space dividing line (grid row of the interface)
    self.kfree = 2*np.pi/self.l;  # define k in free space
    self.kHS = np.sqrt(self.muo*self.eHS*(self.w**2) \
        + 1j*self.w*self.muo*self.sHS);  # k in subsurface

    # total number of unknown Ex, Ey, Ez values on the staggered grid
    self.N = (self.nx+1)*self.ny*self.nz + \
        self.nx*(self.ny+1)*self.nz + \
        self.nx*self.ny*(self.nz+1)

    # epsilon is easy it is always going to be uniform
    self.epsmap = [self.epso*np.ones(self.N),
                   self.epso*np.ones(self.N)]

    # sigma is not so straight forward: conductive below the dividing line only.
    # The y grids are staggered, hence the differing (div) vs (div+1) extents.
    sigX = np.zeros((self.nx+1, self.ny, self.nz))
    sigX[:, :(div), :] = self.sHS
    sigY = np.zeros((self.nx, self.ny+1, self.nz))
    sigY[:, :(div+1), :] = self.sHS
    sigZ = np.zeros((self.nx, self.ny, self.nz+1))
    sigZ[:, :(div), :] = self.sHS

    self.sigX = sigX
    self.sigY = sigY
    self.sigZ = sigZ
    self.sigmap = np.concatenate((sigX.flatten(), sigY.flatten(), sigZ.flatten()))
    # and duplicate
    self.sigmap = [self.sigmap, self.sigmap.copy()]

    # this is the total number of unknown field values in the entire simulation space
    self.sol = [np.zeros((self.N, 1)), np.zeros((self.N, 1))]
def getk(self, ind):
    """ This routine assembles a diagonal matrix with the materials indexed by ind
    """
    # complex k^2 entries: mu*eps*w^2 + j*w*mu*sigma, for material set `ind`
    kl = (self.muo*self.epsmap[ind]*(self.w**2) + 1j*self.w*self.muo*self.sigmap[ind])
    return sparse.spdiags(kl.flatten(), 0, self.N, self.N)
def setMs(self, nSensors=10):
    '''Creates an n-grid mesh across the surface for the 3D case '''
    # nSensors x nSensors receivers in total
    self.nSen = nSensors*nSensors
    '''First find the appropriate 10 indexes within the PML & illumination region '''
    indx = np.round(np.linspace(self.npml+5, self.nx-self.npml-5, nSensors)-1).astype(int);
    # print indx
    # print (indx + 1)
    ''' make the exact X operator using strides '''
    # each row of Mx selects one Ex sample at the surface (y = div) via
    # the row-major stride loc = i*ny*nz + j*nz + k
    xl, zl = np.meshgrid(indx+1, indx)
    Mx = sparse.dok_matrix((self.nSen, (self.nx+1)*self.ny*self.nz))
    for ix, loc in enumerate(zip(xl.flatten(), zl.flatten())):
        pts = loc[0]*self.ny*self.nz + self.div*self.nz + loc[1]
        Mx[ix, pts] = 1.0

    # Y operator: the Ey grid has ny+1 rows, hence the different stride and div+1
    xl, zl = np.meshgrid(indx, indx)
    My = sparse.dok_matrix((self.nSen, self.nx*(self.ny+1)*self.nz))
    for ix, loc in enumerate(zip(xl.flatten(), zl.flatten())):
        pts = loc[0]*(self.ny+1)*self.nz + (self.div+1)*self.nz + loc[1]
        My[ix, pts] = 1.0

    '''make the exact Z operator using strides '''
    xl, zl = np.meshgrid(indx, indx+1)
    Mz = sparse.dok_matrix((self.nSen, self.nx*self.ny*(self.nz+1)))
    for ix, loc in enumerate(zip(xl.flatten(), zl.flatten())):
        pts = loc[0]*self.ny*(self.nz+1) + self.div*(self.nz+1) + loc[1]
        Mz[ix, pts] = 1.0

    ''' smush together in block diagonal format '''
    self.Ms = sparse.block_diag((Mx, My, Mz), 'csr')
def setCTRX(self):
    ''' create some operators to map back and forth between the x space and the u '''
    # p2x repeats the material-region identity once per field component (x, y, z)
    self.p2x = sparse.eye(self.nRx*self.nRy*self.nRz, self.nRx*self.nRy*self.nRz)
    self.p2x = sparse.vstack((self.p2x, self.p2x, self.p2x))
    # # print self.p2x.shape
    # x2u scatters material-region values back onto the full grid (adjoint of Md)
    self.x2u = self.Md.T
    # # print self.x2u.shape
def getXSize(self):
    ''' return the proper size of X so that the optimization routine can work its magic '''
    # three field components, each sampled over the nRx*nRy*nRz material region
    return 3*self.nRx*self.nRy*self.nRz
def setMd(self, xrng, yrng, zrng):
    '''Tell me the xrange,yrange, and zrange and Ill
    1) specify nRx,nRy, and nRz
    2) produce a matrix that achieves a 1:1 sampling, self.Md '''
    '''set the right dimensions'''
    self.nRx = xrng[1]-xrng[0]
    self.nRy = yrng[1]-yrng[0]
    self.nRz = zrng[1]-zrng[0]
    nR = self.nRx*self.nRy*self.nRz
    ''' ok have to use spans:
    loc = i*J*K + j*K + k for row-major ordering '''
    ''' populate the locations in the X grid'''
    #sX = sparse.dok_matrix((self.nx+1,self.ny,self.nz),dtype='bool')
    #sX[xrng[0]+1:xrng[1]+1,yrng[0]:yrng[1],zrng[0]:zrng[1]] = True
    ''' make it an operator '''
    ''' nested for should give reshape-able vectors '''
    # NOTE(review): xrange is Python 2 only
    cnt = 0
    Mx = sparse.dok_matrix((nR, (self.nx+1)*self.ny*self.nz))
    for x in xrange(xrng[0]+1, xrng[1]+1):
        for y in xrange(yrng[0], yrng[1]):
            for z in xrange(zrng[0], zrng[1]):
                pts = x*self.ny*self.nz + y*self.nz + z
                Mx[cnt, pts] = 1.0
                cnt += 1
    '''populate the locations in the Y grid'''
    My = sparse.dok_matrix((nR, self.nx*(self.ny+1)*self.nz))
    cnt = 0
    for x in xrange(xrng[0], xrng[1]):
        for y in xrange(yrng[0]+1, yrng[1]+1):
            for z in xrange(zrng[0], zrng[1]):
                pts = x*(self.ny+1)*self.nz + y*self.nz + z
                My[cnt, pts] = 1.0
                cnt += 1
    '''populate the locations in the Z grid'''
    Mz = sparse.dok_matrix((nR, self.nx*self.ny*(self.nz+1)))
    cnt = 0
    for x in xrange(xrng[0], xrng[1]):
        for y in xrange(yrng[0], yrng[1]):
            for z in xrange(zrng[0]+1, zrng[1]+1):
                pts = x*(self.ny)*(self.nz+1) + y*(self.nz+1) + z
                Mz[cnt, pts] = 1.0
                cnt += 1
    ''' put them all together in a block matrix '''
    self.Md = sparse.block_diag((Mx, My, Mz), 'csc')
def parseFields(self, u):
    ''' Method to return the field in its square form'''
    # slice the stacked unknown vector u into its Ex, Ey, Ez segments and
    # reshape each onto its staggered grid
    hi = (self.nx+1)*(self.ny)*self.nz
    ex = u[:hi]
    ex = ex.reshape(self.nx+1, self.ny, self.nz)
    hj = hi + (self.nx)*(self.ny+1)*self.nz
    ey = u[hi:hj]
    ey = ey.reshape(self.nx, self.ny+1, self.nz)
    ez = u[hj:]
    ez = ez.reshape(self.nx, self.ny, self.nz+1)
    return [ex, ey, ez]
def pointSource(self, x, y, z):
    """ A routine to add a point source at the grid loc (x,y) """
    # unit impulse on the Ez grid at (x, y, z); the Ex and Ey source terms are zero
    rhsz = np.zeros((self.nx, self.ny, self.nz+1), dtype='complex128')
    rhsz[x, y, z] = 1.0
    # rhsz = rhsz.flatten()
    rhsx = np.zeros((self.nx+1, self.ny, self.nz))
    rhsy = np.zeros((self.nx, self.ny+1, self.nz))
    #self.rhs = np.zeros(self.N,dtype='complex128')
    #self.rhs[23614-1] = 1.0
    self.rhs = np.concatenate((rhsx.flatten(), rhsy.flatten(), rhsz.flatten()))
def planeWave(self):
    ''' populates self.rhs with a planewave with incident conditions:
    self.incAng, self.azAng '''
    ''' incident angles, local copy '''
    thi = self.incAng
    phi = self.azAng
    ''' how far in from the PML should we go? -- 2 grid points should be enough '''
    instep = 2+self.npml;
    # mdpt = nx/2; # should replace by div
    x = np.arange(1, 1+self.nx, dtype='float64')*self.dx
    y = np.arange(1, 1+self.ny, dtype='float64')*self.dy
    z = np.arange(1, 1+self.nz, dtype='float64')*self.dz
    # The assumption is that the Ez and materials are co
    # located. Since epsilon(50) => in the half space, epsilon(51) =>
    # is not, the actual zero boundary must be between them, or on
    # that y boundary.
    # Mapping is y,x because of how matlab treats these. annoying.
    # print self.div+1
    # coordinate grids for each full-grid field component (shifted so y=0 is
    # the half-space interface) ...
    Yz, Xz, Zz = np.meshgrid(y-(self.div+0.5)*self.dy,
                             x,
                             (np.append(0.0, z) + self.dz/2.0))
    Yx, Xx, Zx = np.meshgrid(y-(self.div+0.5)*self.dy,
                             (np.append(0.0, x) + self.dx/2.0),
                             z)
    Yy, Xy, Zy = np.meshgrid((np.append(0.0, y) + self.dy/2.0)-(self.div+0.5)*self.dy,
                             x,
                             z)
    # ... and for each half-grid (H-field) component
    Yxh, Xxh, Zxh = np.meshgrid(np.append(0.0, y)+self.dy/2.0 - (self.div+0.5)*self.dy,
                                x,
                                np.append(0.0, z)+self.dz/2.0)
    Yyh, Xyh, Zyh = np.meshgrid(y-(self.div+0.5)*self.dy,
                                np.append(0.0, x)+self.dx/2.0,
                                np.append(0.0, z)+self.dz/2.0)
    Yzh, Xzh, Zzh = np.meshgrid(np.append(0.0, y)+self.dy/2.0-(self.div+0.5)*self.dy,
                                np.append(0.0, x)+self.dx/2.0,
                                z)
    # matOut.savemat('grids', {'Y':Y, 'X':X, 'Yyh':Yyh, 'Xyh':Xyh, 'Yxh':Yxh, 'Xxh':Xxh})
    ni = 1;
    nt = np.sqrt((self.eHS-self.sHS/(1j*self.w))*self.muo)/np.sqrt(self.epso*self.muo);
    # transmitted angle
    # thi = 45*np.pi/180 # taken as input argument.
    tht = np.arcsin(ni*np.sin(thi)/nt);
    # create the coefficients to specify the space.
    # NOTE(review): the z component of ktx uses np.sin(thi) while the other
    # components use tht -- confirm whether np.sin(tht) was intended
    kinc = -1*np.array([np.sin(thi)*np.cos(phi), np.cos(thi), -np.sin(phi)*np.sin(thi)])
    ktx = -1*np.array([np.sin(tht)*np.cos(phi), np.cos(tht), -np.sin(phi)*np.sin(thi)])
    kFS = 1j*self.kfree;
    kHS = 1j*self.kHS;
    # wave impedances of free space and the half space
    etaF = np.sqrt(self.muo/self.epso);
    etaH = np.sqrt(self.muo/(self.eHS+(1j*self.sHS/self.w)));
    # Fresnel TE reflection/transmission coefficients at the interface
    rTE = (ni*np.cos(thi) - nt*np.cos(tht))/(ni*np.cos(thi) + nt*np.cos(tht));
    tTE = (2*ni*np.cos(thi))/(ni*np.cos(thi) + nt*np.cos(tht));
    ''' make selectors for the subspace and so on'''
    thsz = np.zeros((self.nx, self.ny, self.nz+1), dtype='bool')
    thsz[Yz < 0] = 1
    thsx = np.zeros((self.nx+1, self.ny, self.nz), dtype='bool')
    thsx[Yx < 0] = 1
    # thsy = thsy.astype(bool)
    thsy = np.zeros((self.nx, self.ny+1, self.nz), dtype='bool')
    thsy[Yy < 0] = 1
    ''' make selectors for the halfgrid subspace '''
    thsxh = np.zeros((self.nx, self.ny+1, self.nz+1), dtype='bool')
    thsxh[Yxh < 0] = 1
    thsyh = np.zeros((self.nx+1, self.ny, self.nz+1), dtype='bool')
    thsyh[Yyh < 0] = 1
    thszh = np.zeros((self.nx+1, self.ny+1, self.nz), dtype='bool')
    thszh[Yzh < 0] = 1
    ''' in the matlab version, I stick some eHS in here at this point - not sure if I need that now '''
    Ezinc = np.cos(phi)*te_ezf(Xz, Yz, Zz, thsz, kinc, ktx, rTE, tTE, kFS, kHS)
    Exinc = np.sin(phi)*te_ezf(Xx, Yx, Zx, thsx, kinc, ktx, rTE, tTE, kFS, kHS)
    Eyinc = np.zeros((self.nx, self.ny+1, self.nz), dtype='complex128')
    # NOTE(review): Hx uses te_hf while Hy and Hz use te_ezf -- confirm this
    # asymmetry is intended
    Hxinc = te_hf(Xxh, Yxh, Zxh, thsxh, kinc, ktx, rTE, tTE, kFS, kHS)
    Hxinc[~thsxh] = Hxinc[~thsxh]*(1.0/etaF)*np.cos(phi)*kinc[1]
    Hxinc[thsxh] = Hxinc[thsxh]*(1.0/etaH)*np.cos(phi)*ktx[1]
    Hyinc = te_ezf(Xyh, Yyh, Zyh, thsyh, kinc, ktx, rTE, tTE, kFS, kHS)
    Hyinc[~thsyh] = Hyinc[~thsyh]*(1.0/etaF)*(kinc[2]*np.sin(phi) - kinc[0]*np.cos(phi))
    Hyinc[thsyh] = Hyinc[thsyh]*(1.0/etaH)*(ktx[2]*np.sin(phi) - ktx[0]*np.cos(phi))
    Hzinc = te_ezf(Xzh, Yzh, Zzh, thszh, kinc, ktx, rTE, tTE, kFS, kHS)
    # NOTE(review): both branches scale by kinc[1]; the half-space branch may
    # have been meant to use ktx[1] -- confirm
    Hzinc[~thszh] = Hzinc[~thszh]*(1.0/etaF)*(-np.sin(phi)*kinc[1])
    Hzinc[thszh] = Hzinc[thszh]*(1.0/etaH)*(-np.sin(phi)*kinc[1])
    # spio.savemat('intrn', {'hxinc':Hxinc,'hyinc':Hyinc,'hzinc':Hzinc})
    # bounds of the total-field box, `instep` cells in from each face
    xl = instep-1; xr = self.nx-instep-1;
    yb = instep-1; yt = self.ny-instep-1;
    zb = instep-1; zt = self.nz-instep-1;
    Jsrcx = np.zeros([self.nx+1, self.ny, self.nz], dtype='complex128')
    Jsrcy = np.zeros([self.nx, self.ny+1, self.nz], dtype='complex128')
    Jsrcz = np.zeros([self.nx, self.ny, self.nz+1], dtype='complex128')
    Msrcx = np.zeros([self.nx, self.ny+1, self.nz+1], dtype='complex128')
    Msrcy = np.zeros([self.nx+1, self.ny, self.nz+1], dtype='complex128')
    Msrcz = np.zeros([self.nx+1, self.ny+1, self.nz], dtype='complex128')
    # total-field/scattered-field equivalent surface currents on the six faces of
    # the box; the 5.xx labels are the equation numbers of the source reference.
    # NOTE(review): every term divides by self.dx, including on y- and z-normal
    # faces -- confirm this assumes cubic cells (dx == dy == dz)
    # 5.48a
    Jsrcx[xl+1:(1+xr),yb,zb:(1+zt)] = Jsrcx[xl+1:(1+xr),yb,zb:(1+zt)] + Hzinc[xl+1:(1+xr),yb,zb:(1+zt)]/self.dx
    # 5.49a
    Jsrcx[xl+1:(1+xr),yt,zb:(1+zt)] = Jsrcx[xl+1:(1+xr),yt,zb:(1+zt)] - Hzinc[xl+1:(1+xr),yt+1,zb:(1+zt)]/self.dx
    # 5.48b
    Jsrcz[xl:(1+xr),yb,zb+1:(1+zt)] = Jsrcz[xl:(1+xr),yb,zb+1:(1+zt)] - Hxinc[xl:(1+xr),yb,zb+1:(1+zt)]/self.dx
    # 5.49b
    Jsrcz[xl:(1+xr),yt,zb+1:(1+zt)] = Jsrcz[xl:(1+xr),yt,zb+1:(1+zt)] + Hxinc[xl:(1+xr),yt+1,zb+1:(1+zt)]/self.dx
    # 5.50a
    Jsrcx[xl+1:(1+xr),yb:(1+yt),zb] = Jsrcx[xl+1:(1+xr),yb:(1+yt),zb] - Hyinc[xl+1:(1+xr),yb:(1+yt),zb]/self.dx
    # 5.50b
    Jsrcy[xl:(1+xr),yb+1:(1+yt),zb] = Jsrcy[xl:(1+xr),yb+1:(1+yt),zb] + Hxinc[xl:(1+xr),yb+1:(1+yt),zb]/self.dx
    # 5.51a
    Jsrcx[xl+1:(1+xr),yb:(1+yt),zt] = Jsrcx[xl+1:(1+xr),yb:(1+yt),zt] + Hyinc[xl+1:(1+xr),yb:(1+yt),zt+1]/self.dx
    # 5.51b
    Jsrcy[xl:(1+xr),yb+1:(1+yt),zt] = Jsrcy[xl:(1+xr),yb+1:(1+yt),zt] - Hxinc[xl:(1+xr),yb+1:(1+yt),zt+1]/self.dx
    # 5.52a
    Jsrcy[xl,yb+1:(1+yt),zb:(1+zt)] = Jsrcy[xl,yb+1:(1+yt),zb:(1+zt)] - Hzinc[xl,yb+1:(1+yt),zb:(1+zt)]/self.dx
    # 5.52b
    Jsrcz[xl,yb:(1+yt),zb+1:(1+zt)] = Jsrcz[xl,yb:(1+yt),zb+1:(1+zt)] + Hyinc[xl,yb:(1+yt),zb+1:(1+zt)]/self.dx
    # 5.53a
    Jsrcy[xr,yb+1:(1+yt),zb:(1+zt)] = Jsrcy[xr,yb+1:(1+yt),zb:(1+zt)] + Hzinc[xr+1,yb+1:(1+yt),zb:(1+zt)]/self.dx
    # 5.53b
    Jsrcz[xr,yb:(1+yt),zb+1:(1+zt)] = Jsrcz[xr,yb:(1+yt),zb+1:(1+zt)] - Hyinc[xr+1,yb:(1+yt),zb+1:(1+zt)]/self.dx
    # 5.54a
    Msrcz[xl+1:(1+xr),yb,zb:(1+zt)] = Msrcz[xl+1:(1+xr),yb,zb:(1+zt)] - Exinc[xl+1:(1+xr),yb,zb:(1+zt)]/self.dx
    # 5.54b
    Msrcx[xl:(1+xr),yb,zb+1:(1+zt)] = Msrcx[xl:(1+xr),yb,zb+1:(1+zt)] + Ezinc[xl:(1+xr),yb,zb+1:(1+zt)]/self.dx
    # 5.55a
    Msrcz[xl+1:(1+xr),yt+1,zb+1:(1+zt)] = Msrcz[xl+1:(1+xr),yt+1,zb+1:(1+zt)] + Exinc[xl+1:(1+xr),yt, zb+1:(1+zt)]/self.dx
    # 5.55b
    Msrcx[xl:(1+xr),yt+1,zb+1:(1+zt)] = Msrcx[xl:(1+xr),yt+1,zb+1:(1+zt)] - Ezinc[xl:(1+xr),yt,zb+1:(1+zt)]/self.dx
    # 5.56a
    Msrcy[xl+1:(1+xr),yb:(1+yt),zb] = Msrcy[xl+1:(1+xr),yb:(1+yt),zb] + Exinc[xl+1:(1+xr),yb:(1+yt),zb]/self.dx
    # 5.56b
    Msrcx[xl:(1+xr),yb+1:(1+yt),zb] = Msrcx[xl:(1+xr),yb+1:(1+yt),zb] - Eyinc[xl:(1+xr),yb+1:(1+yt),zb]/self.dx
    # 5.57a
    Msrcy[xl+1:(1+xr),yb:(1+yt),zt+1] = Msrcy[xl+1:(1+xr),yb:(1+yt),zt+1] - Exinc[xl+1:(1+xr),yb:(1+yt),zt]/self.dx
    # 5.57b
    Msrcx[xl:(1+xr),yb+1:(1+yt),zt+1] = Msrcx[xl:(1+xr),yb+1:(1+yt),zt+1] + Eyinc[xl:(1+xr),yb+1:(1+yt),zt]/self.dx
    # 5.58a
    Msrcz[xl, yb+1:(1+yt),zb:(1+zt)] = Msrcz[xl, yb+1:(1+yt), zb:(1+zt)] + Eyinc[xl, yb+1:(1+yt), zb:(1+zt)]/self.dx
    # 5.58b
    Msrcy[xl, yb:(1+yt), zb+1:(1+zt)] = Msrcy[xl,yb:(1+yt),zb+1:(1+zt)] - Ezinc[xl,yb:(1+yt),zb+1:(1+zt)]/self.dx
    # 5.59a
    Msrcz[xr+1,yb+1:(1+yt),zb:(1+zt)] = Msrcz[xr+1,yb+1:(1+yt),zb:(1+zt)] - Eyinc[xr, yb+1:(1+yt),zb:(1+zt)]/self.dx
    # 5.59b
    Msrcy[xr+1,yb:(1+yt),zb+1:(1+zt)] = Msrcy[xr+1,yb:(1+yt),zb+1:(1+zt)] + Ezinc[xr,yb:(1+yt),zb+1:(1+zt)]/self.dx
    # spio.savemat('intrn', {'hxinc':Hxinc,'hyinc':Hyinc,'hzinc':Hzinc})
    # dump everything for debugging / cross-checking with the MATLAB version
    spio.savemat('intrn', {'sigx': self.sigX, 'sigy': self.sigY, 'sigz': self.sigZ,
                           'exinc': Exinc, 'eyinc': Eyinc, 'ezinc': Ezinc,
                           'hxinc': Hxinc, 'hyinc': Hyinc, 'hzinc': Hzinc,
                           'jsrcx': Jsrcx, 'jsrcy': Jsrcy, 'jsrcz': Jsrcz,
                           'msrcx': Msrcx, 'msrcy': Msrcy, 'msrcz': Msrcz})
    J = np.concatenate((Jsrcx.flatten(), Jsrcy.flatten(), Jsrcz.flatten()))
    M = np.concatenate((Msrcx.flatten(), Msrcy.flatten(), Msrcz.flatten()))
    # ok. have to make some transformations in order to make this happen.
    pd1 = self.ph*self.d1  # maps full to half grid
    pd2 = self.po*self.d2
    '''make a curl operator from M -> J '''
    # sizes of the E-grid (ne*) and H-grid (nh*) component vectors
    nex = (self.nx+1)*self.ny*self.nz
    ney = self.nx*(self.ny+1)*self.nz
    nez = self.nx*self.ny*(self.nz+1)
    nhx = self.nx*(self.ny+1)*(self.nz+1)
    nhy = (self.nx+1)*self.ny*(self.nz+1)
    nhz = (self.nx+1)*(self.ny+1)*self.nz
    # block curl: zero diagonal, single-difference off-diagonal blocks
    AA = sparse.coo_matrix((nex, nhx))
    AB = -sparse.kron(speye(self.nx+1), sparse.kron(speye(self.ny), pd2))
    AC = sparse.kron(speye(self.nx+1), sparse.kron(pd2, speye(self.nz)))
    BA = sparse.kron(speye(self.nx+1), sparse.kron(speye(self.ny), pd2))
    BB = sparse.coo_matrix((ney, nhy))
    BC = -sparse.kron(pd2, sparse.kron(speye(self.ny+1), speye(self.nz)))
    CA = -sparse.kron(speye(self.nx), sparse.kron(pd2, speye(self.nz+1)))
    CB = sparse.kron(pd2, sparse.kron(speye(self.ny), speye(self.nz+1)))
    CC = sparse.coo_matrix((nez, nhz))
    srB = spt.vCat([spt.hCat([AA, AB, AC]), spt.hCat([BA, BB, BC]), spt.hCat([CA, CB, CC])])
    # combine the electric and (curled) magnetic surface currents into the rhs
    self.rhs = -(1j*self.muo*self.w*J + srB*M)
def getS(self):
    """Return the scalar coefficient applied to the Md*P term of the system."""
    return 1j*self.w*self.muo
def te_ezf(X, Y, Z, xi, kinc, ktx, rTE, tTE, kFS, kHS):
    '''Analytic TE field: incident + reflected wave above the interface,
    transmitted wave below.

    xi selects the subsurface points (True = half space); kinc/ktx are
    direction cosines, kFS/kHS the (imaginary) wavenumbers, and rTE/tTE the
    Fresnel reflection/transmission coefficients.
    '''
    free = ~xi
    field = np.zeros(X.shape, dtype='complex128')
    # incident phase plus reflected phase (y-component sign flipped)
    phaseInc = X[free]*kinc[0] + Y[free]*kinc[1] + Z[free]*kinc[2]
    phaseRef = X[free]*kinc[0] - Y[free]*kinc[1] + Z[free]*kinc[2]
    field[free] = np.exp(kFS*phaseInc) + rTE*np.exp(kFS*phaseRef)
    # transmitted wave in the half space
    phaseTx = X[xi]*ktx[0] + Y[xi]*ktx[1] + Z[xi]*ktx[2]
    field[xi] = tTE*np.exp(kHS*phaseTx)
    return field
def te_hf(X, Y, Z, xi, kinc, ktx, rTE, tTE, kFS, kHS):
    '''Analytic TE magnetic-field kernel.

    Identical to te_ezf except that the reflected wave enters with a minus
    sign, as required for the H field.
    '''
    free = ~xi
    field = np.zeros(X.shape, dtype='complex128')
    phaseInc = X[free]*kinc[0] + Y[free]*kinc[1] + Z[free]*kinc[2]
    phaseRef = X[free]*kinc[0] - Y[free]*kinc[1] + Z[free]*kinc[2]
    # only difference from te_ezf: reflected term subtracted
    field[free] = np.exp(kFS*phaseInc) - rTE*np.exp(kFS*phaseRef)
    phaseTx = X[xi]*ktx[0] + Y[xi]*ktx[1] + Z[xi]*ktx[2]
    field[xi] = tTE*np.exp(kHS*phaseTx)
    return field
Fix array shapes that were not aligning.
'''
Created on Oct 10, 2012
@author: dstrauss
'''
from model import fwd
import numpy as np
from scipy import sparse
import sparseTools as spt
from scipy import io as spio
def speye(n):
    '''Shorthand for an n-by-n sparse identity matrix.'''
    return sparse.eye(n, n)
class solver(fwd):
''' class to implement the transverse electric mode, rather - the case where we have Ez only '''
def setspace(self, nx,ny,nz,dx, dy, dz):
    """Record the grid dimensions and cell spacings of the simulation volume.

    nx, ny, nz -- number of cells along each axis
    dx, dy, dz -- cell size along each axis
    """
    self.nx = nx # number x points
    self.ny = ny # number y points
    self.nz = nz # number z points
    self.N = nx*ny*nz # super size of space
    self.dx = dx # delta x
    self.dy = dy # delta y
    self.dz = dz # delta z
    # PML thickness: a tenth of the grid, capped at 10 cells.
    # NOTE(review): Python 2 round() returns a float, so npml is a float;
    # it is later used in index arithmetic (planeWave's instep) -- confirm
    # this never hits a float-index error on newer numpy.
    self.npml = min(10,round(nx/10))
def makeGradOper(self):
    ''' Assemble the vector curl-curl (nabla^2) operator for the TE problem
    with ex, ey, ez all incorporated, based on the petsc_cpx routine.

    Each 3x3 block is built from Kronecker products of 1-D difference maps;
    the result is stored in self.nabla2.
    '''
    # a quick hint: pd2 == pdo
    pd1 = self.ph*self.d1 # maps full to half grid
    pd2 = self.po*self.d2 # maps half to full
    # diagonal x-block: second differences along y and z
    AA = sparse.kron(speye(self.nx+1), sparse.kron(pd2,speye(self.nz)))*\
         sparse.kron(speye(self.nx+1), sparse.kron(pd1,speye(self.nz))) + \
         sparse.kron(speye(self.nx+1), sparse.kron(speye(self.ny),pd2)) * \
         sparse.kron(speye(self.nx+1), sparse.kron(speye(self.ny),pd1))
    # off-diagonal mixed-derivative blocks (x-y and x-z couplings)
    AB = sparse.kron(speye(self.nx+1), sparse.kron(pd2,speye(self.nz)))*\
         sparse.kron(pd1, sparse.kron(speye(self.ny+1),speye(self.nz)))
    AC = sparse.kron(speye(self.nx+1),sparse.kron(speye(self.ny),pd2))*\
         sparse.kron(pd1,sparse.kron(speye(self.ny),speye(self.nz+1)))
    BA = sparse.kron(pd2,sparse.kron(speye(self.ny+1),speye(self.nz)))*\
         sparse.kron(speye(self.nx+1),sparse.kron(pd1,speye(self.nz)))
    # diagonal y-block: second differences along x and z
    BB = sparse.kron(pd2,sparse.kron(speye(self.ny+1),speye(self.nz)))*\
         sparse.kron(pd1,sparse.kron(speye(self.ny+1),speye(self.nz))) + \
         sparse.kron(speye(self.nx),sparse.kron(speye(self.ny+1),pd2))*\
         sparse.kron(speye(self.nx),sparse.kron(speye(self.ny+1),pd1))
    BC = sparse.kron(speye(self.nx),sparse.kron(speye(self.ny+1),pd2))*\
         sparse.kron(speye(self.nx),sparse.kron(pd1,speye(self.nz+1)))
    CA = sparse.kron(pd2,sparse.kron(speye(self.ny),speye(self.nz+1)))*\
         sparse.kron(speye(self.nx+1),sparse.kron(speye(self.ny),pd1))
    CB = sparse.kron(speye(self.nx),sparse.kron(pd2,speye(self.nz+1)))*\
         sparse.kron(speye(self.nx),sparse.kron(speye(self.ny+1),pd1))
    # diagonal z-block: second differences along x and y
    CC = sparse.kron(speye(self.nx),sparse.kron(pd2, speye(self.nz+1)))*\
         sparse.kron(speye(self.nx),sparse.kron(pd1, speye(self.nz+1))) + \
         sparse.kron(pd2,sparse.kron(speye(self.ny),speye(self.nz+1)))*\
         sparse.kron(pd1,sparse.kron(speye(self.ny),speye(self.nz+1)))
    # (a commented-out legacy MATLAB-ordered construction was removed here;
    # see version history if the old ordering is ever needed again)
    self.nabla2 = spt.vCat([spt.hCat([AA, -AB, -AC]),\
                            spt.hCat([-BA, BB, -BC]), \
                            spt.hCat([-CA, -CB, CC])])
def setmats(self, eHSr, sHS, div):
    """Set up the material maps: uniform free space above a conductive half
    space below the dividing index.

    eHSr -- relative permittivity of the half space
    sHS  -- conductivity of the half space
    div  -- grid index of the air/half-space dividing line
    """
    self.eHS = eHSr*self.epso # half space permitivity
    self.sHS = sHS # half space conductivity
    self.div = div # half space dividing line
    self.kfree = 2*np.pi/self.l; # define k in free space
    self.kHS = np.sqrt(self.muo*self.eHS*(self.w**2) \
                       + 1j*self.w*self.muo*self.sHS); # k in subsurface
    # total number of E-field unknowns over the three staggered grids
    self.N = (self.nx+1)*self.ny*self.nz + \
             self.nx*(self.ny+1)*self.nz + \
             self.nx*self.ny*(self.nz+1)
    # epsilon is easy it is always going to be uniform
    self.epsmap = [self.epso*np.ones(self.N),\
                   self.epso*np.ones(self.N)]
    # sigma is not so straight forward: fill the subsurface slab on each
    # staggered grid (the Y grid gets one extra layer since it straddles div)
    sigX = np.zeros((self.nx+1,self.ny,self.nz))
    sigX[:,:(div),:] = self.sHS
    sigY = np.zeros((self.nx,self.ny+1,self.nz))
    sigY[:,:(div+1),:] = self.sHS
    sigZ = np.zeros((self.nx,self.ny,self.nz+1))
    sigZ[:,:(div),:] = self.sHS
    self.sigX = sigX
    self.sigY = sigY
    self.sigZ = sigZ
    self.sigmap = np.concatenate((sigX.flatten(), sigY.flatten(), sigZ.flatten()))
    # and duplicate
    self.sigmap = [self.sigmap, self.sigmap.copy()]
    # this is the total number of unknown field values in the entire simulation space
    self.sol = [np.zeros((self.N,1)), np.zeros((self.N,1))]
def getk(self, ind):
    """Assemble the diagonal material (k^2) matrix for material set *ind*.

    Combines the permittivity and conductivity maps selected by ind into a
    sparse N x N diagonal operator.
    """
    diag = self.muo*self.epsmap[ind]*(self.w**2) + 1j*self.w*self.muo*self.sigmap[ind]
    return sparse.spdiags(diag.flatten(), 0, self.N, self.N)
def setMs(self, nSensors=10):
    '''Creates an nSensors x nSensors mesh of point samplers across the
    surface for the 3D case and stores the block-diagonal sampling
    operator (one block per field component) in self.Ms.'''
    self.nSen = nSensors*nSensors
    '''First find the appropriate 10 indexes within the PML & illumination region '''
    indx = np.round(np.linspace(self.npml+5,self.nx-self.npml-5, nSensors)-1).astype(int);
    ''' make the exact X operator using strides '''
    xl,zl = np.meshgrid(indx+1,indx)
    Mx = sparse.dok_matrix((self.nSen,(self.nx+1)*self.ny*self.nz))
    for ix,loc in enumerate(zip(xl.flatten(),zl.flatten())):
        # row-major flat index, fixed at depth self.div on the Ex grid
        pts = loc[0]*self.ny*self.nz + self.div*self.nz + loc[1]
        Mx[ix,pts] = 1.0
    xl,zl = np.meshgrid(indx,indx)
    My = sparse.dok_matrix((self.nSen,self.nx*(self.ny+1)*self.nz))
    for ix,loc in enumerate(zip(xl.flatten(),zl.flatten())):
        # Ey grid has ny+1 layers, sampled one above the divide
        pts = loc[0]*(self.ny+1)*self.nz + (self.div+1)*self.nz + loc[1]
        My[ix,pts] = 1.0
    '''make the exact Z operator using strides '''
    xl,zl = np.meshgrid(indx,indx+1)
    Mz = sparse.dok_matrix((self.nSen,self.nx*self.ny*(self.nz+1)))
    for ix,loc in enumerate(zip(xl.flatten(),zl.flatten())):
        pts = loc[0]*self.ny*(self.nz+1) + self.div*(self.nz+1) + loc[1]
        Mz[ix,pts] = 1.0
    ''' smush together in block diagonal format '''
    self.Ms = sparse.block_diag((Mx,My,Mz),'csr')
def setCTRX(self):
    ''' Build the operators mapping between parameter (x) space and field (u)
    space: p2x stacks one identity per field component, x2u is Md transposed. '''
    nCell = self.nRx*self.nRy*self.nRz
    eyeP = sparse.eye(nCell, nCell)
    self.p2x = sparse.vstack((eyeP, eyeP, eyeP))
    self.x2u = self.Md.T
def getXSize(self):
    ''' Size of the optimization vector X: three field components per
    cell of the region of interest. '''
    nCell = self.nRx*self.nRy*self.nRz
    return 3*nCell
def setMd(self, xrng, yrng, zrng):
    '''Given the x, y and z index ranges of the region of interest:
    1) specify nRx, nRy, and nRz
    2) produce a matrix that achieves a 1:1 sampling, self.Md '''
    '''set the right dimensions'''
    self.nRx = xrng[1]-xrng[0]
    self.nRy = yrng[1]-yrng[0]
    self.nRz = zrng[1]-zrng[0]
    nR = self.nRx*self.nRy*self.nRz
    ''' ok have to use spans:
    loc = i*J*K + j*K + k for row-major ordering '''
    ''' populate the locations in the X grid'''
    ''' make it an operator '''
    ''' nested for should give reshape-able vectors '''
    cnt = 0
    # Ex grid is staggered in x, hence the +1 offsets on the x range
    Mx = sparse.dok_matrix((nR,(self.nx+1)*self.ny*self.nz))
    for x in xrange(xrng[0]+1,xrng[1]+1):
        for y in xrange(yrng[0],yrng[1]):
            for z in xrange(zrng[0],zrng[1]):
                pts = x*self.ny*self.nz + y*self.nz + z
                Mx[cnt,pts] = 1.0
                cnt += 1
    '''populate the locations in the Y grid'''
    My = sparse.dok_matrix((nR,self.nx*(self.ny+1)*self.nz))
    cnt = 0
    for x in xrange(xrng[0],xrng[1]):
        for y in xrange(yrng[0]+1,yrng[1]+1):
            for z in xrange(zrng[0],zrng[1]):
                pts = x*(self.ny+1)*self.nz + y*self.nz + z
                My[cnt,pts] = 1.0
                cnt += 1
    '''populate the locations in the Z grid'''
    Mz = sparse.dok_matrix((nR,self.nx*self.ny*(self.nz+1)))
    cnt = 0
    for x in xrange(xrng[0],xrng[1]):
        for y in xrange(yrng[0],yrng[1]):
            for z in xrange(zrng[0]+1,zrng[1]+1):
                pts = x*(self.ny)*(self.nz+1) + y*(self.nz+1) + z
                Mz[cnt,pts] = 1.0
                cnt += 1
    ''' put them all together in a block matrix '''
    self.Md = sparse.block_diag((Mx,My,Mz), 'csc')
    print 'Md shape ' + repr(self.Md.shape)
def parseFields(self,u):
    ''' Split the stacked field vector u into its grid-shaped components
    and return them as [ex, ey, ez]. '''
    nEx = (self.nx+1)*self.ny*self.nz
    nEy = self.nx*(self.ny+1)*self.nz
    ex = u[:nEx].reshape(self.nx+1, self.ny, self.nz)
    ey = u[nEx:nEx+nEy].reshape(self.nx, self.ny+1, self.nz)
    ez = u[nEx+nEy:].reshape(self.nx, self.ny, self.nz+1)
    return [ex, ey, ez]
def pointSource(self, x,y,z):
    """ Set self.rhs to a unit point excitation on the Ez grid at (x, y, z). """
    exciteZ = np.zeros((self.nx,self.ny,self.nz+1),dtype='complex128')
    exciteZ[x,y,z] = 1.0
    # x and y components carry no excitation; concatenation promotes to complex
    exciteX = np.zeros((self.nx+1,self.ny,self.nz))
    exciteY = np.zeros((self.nx,self.ny+1,self.nz))
    self.rhs = np.concatenate((exciteX.flatten(), exciteY.flatten(), exciteZ.flatten()))
def planeWave(self):
    ''' Populates self.rhs with the equivalent surface currents of a plane
    wave with incidence angle self.incAng and azimuth self.azAng
    (total-field/scattered-field style source box). '''
    ''' incident angles, local copy '''
    thi = self.incAng
    phi = self.azAng
    ''' how far in from the PML should we go? -- 2 grid points should be enough '''
    instep = 2+self.npml;
    # mdpt = nx/2; # should replace by div
    x = np.arange(1,1+self.nx,dtype='float64')*self.dx
    y = np.arange(1,1+self.ny,dtype='float64')*self.dy
    z = np.arange(1,1+self.nz,dtype='float64')*self.dz
    # The assumption is that the Ez and materials are co
    # located. Since epsilon(50) => in the half space, epsilon(51) =>
    # is not, the actual zero boundary must be between them, or on
    # that y boundary.
    # Mapping is y,x because of how matlab treats these. annoying.
    # Coordinate grids for each field component; y is shifted so y=0 sits
    # at the air/half-space interface. Half-grid (h) versions are staggered
    # by half a cell for the H fields.
    Yz,Xz,Zz = np.meshgrid(y-(self.div+0.5)*self.dy,\
                           x,\
                           (np.append(0.0,z) + self.dz/2.0))
    Yx,Xx,Zx = np.meshgrid(y-(self.div+0.5)*self.dy,\
                           (np.append(0.0,x) + self.dx/2.0),\
                           z)
    Yy,Xy,Zy = np.meshgrid((np.append(0.0,y) + self.dy/2.0)-(self.div+0.5)*self.dy,\
                           x,\
                           z)
    Yxh,Xxh,Zxh = np.meshgrid(np.append(0.0,y)+self.dy/2.0 - (self.div+0.5)*self.dy, \
                              x,\
                              np.append(0.0,z)+self.dz/2.0)
    Yyh,Xyh,Zyh = np.meshgrid(y-(self.div+0.5)*self.dy,\
                              np.append(0.0,x)+self.dx/2.0,\
                              np.append(0.0,z)+self.dz/2.0)
    Yzh,Xzh,Zzh = np.meshgrid(np.append(0.0,y)+self.dy/2.0-(self.div+0.5)*self.dy,\
                              np.append(0.0,x)+self.dx/2.0,\
                              z)
    # refractive indices of free space and of the (lossy) half space
    ni = 1;
    nt = np.sqrt((self.eHS-self.sHS/(1j*self.w))*self.muo)/np.sqrt(self.epso*self.muo);
    # transmitted angle (Snell's law)
    tht = np.arcsin(ni*np.sin(thi)/nt);
    # create the coefficients to specify the space.
    kinc = -1*np.array([np.sin(thi)*np.cos(phi), np.cos(thi), -np.sin(phi)*np.sin(thi)])
    # NOTE(review): the z-component below uses np.sin(thi); by analogy with
    # kinc (and the transmitted direction from Snell's law) it looks like it
    # should be np.sin(tht) -- confirm before changing.
    ktx = -1*np.array([np.sin(tht)*np.cos(phi), np.cos(tht), -np.sin(phi)*np.sin(thi)])
    kFS = 1j*self.kfree;
    kHS = 1j*self.kHS;
    # wave impedances of free space and of the half space
    etaF = np.sqrt(self.muo/self.epso);
    etaH = np.sqrt(self.muo/(self.eHS+(1j*self.sHS/self.w)));
    # TE Fresnel reflection and transmission coefficients
    rTE = (ni*np.cos(thi) - nt*np.cos(tht))/(ni*np.cos(thi) + nt*np.cos(tht));
    tTE = (2*ni*np.cos(thi))/(ni*np.cos(thi) + nt*np.cos(tht));
    ''' make selectors for the subspace and so on'''
    thsz = np.zeros((self.nx,self.ny,self.nz+1),dtype='bool')
    thsz[Yz<0] = 1
    thsx = np.zeros((self.nx+1,self.ny,self.nz),dtype='bool')
    thsx[Yx<0] = 1
    thsy = np.zeros((self.nx,self.ny+1,self.nz),dtype='bool')
    thsy[Yy<0] = 1
    ''' make selectors for the halfgrid subspace '''
    thsxh = np.zeros((self.nx,self.ny+1,self.nz+1),dtype='bool')
    thsxh[Yxh<0] = 1
    thsyh = np.zeros((self.nx+1,self.ny,self.nz+1),dtype='bool')
    thsyh[Yyh<0] = 1
    thszh = np.zeros((self.nx+1,self.ny+1,self.nz),dtype='bool')
    thszh[Yzh<0] = 1
    ''' in the matlab version, I stick some eHS in here at this point - not sure if I need that now '''
    # analytic incident E fields on each staggered grid
    Ezinc = np.cos(phi)*te_ezf(Xz,Yz,Zz,thsz,kinc,ktx,rTE,tTE,kFS,kHS)
    Exinc = np.sin(phi)*te_ezf(Xx,Yx,Zx,thsx,kinc,ktx,rTE,tTE,kFS,kHS)
    Eyinc = np.zeros((self.nx,self.ny+1,self.nz),dtype='complex128')
    # analytic incident H fields, scaled by the local impedance
    Hxinc = te_hf(Xxh,Yxh,Zxh,thsxh,kinc,ktx,rTE,tTE,kFS,kHS)
    Hxinc[~thsxh] = Hxinc[~thsxh]*(1.0/etaF)*np.cos(phi)*kinc[1]
    Hxinc[thsxh] = Hxinc[thsxh]*(1.0/etaH)*np.cos(phi)*ktx[1]
    Hyinc = te_ezf(Xyh,Yyh,Zyh,thsyh,kinc,ktx,rTE,tTE,kFS,kHS)
    Hyinc[~thsyh] = Hyinc[~thsyh]*(1.0/etaF)*(kinc[2]*np.sin(phi) - kinc[0]*np.cos(phi))
    Hyinc[thsyh] = Hyinc[thsyh]*(1.0/etaH)*(ktx[2]*np.sin(phi) - ktx[0]*np.cos(phi))
    Hzinc = te_ezf(Xzh,Yzh,Zzh,thszh,kinc,ktx,rTE,tTE,kFS,kHS)
    Hzinc[~thszh] = Hzinc[~thszh]*(1.0/etaF)*(-np.sin(phi)*kinc[1])
    # NOTE(review): the subsurface branch below scales by kinc[1]; the
    # parallel Hx/Hy updates use the ktx component in the subsurface branch
    # -- confirm whether this should be ktx[1].
    Hzinc[thszh] = Hzinc[thszh]*(1.0/etaH)*(-np.sin(phi)*kinc[1])
    # bounds of the equivalent-source box, inset from the PML
    xl = instep-1; xr = self.nx-instep-1;
    yb = instep-1; yt = self.ny-instep-1;
    zb = instep-1; zt = self.nz-instep-1;
    Jsrcx = np.zeros([self.nx+1,self.ny,self.nz],dtype='complex128')
    Jsrcy = np.zeros([self.nx,self.ny+1,self.nz],dtype='complex128')
    Jsrcz = np.zeros([self.nx,self.ny,self.nz+1],dtype='complex128')
    Msrcx = np.zeros([self.nx,self.ny+1,self.nz+1], dtype='complex128')
    Msrcy = np.zeros([self.nx+1,self.ny,self.nz+1], dtype='complex128')
    Msrcz = np.zeros([self.nx+1,self.ny+1,self.nz], dtype='complex128')
    # Electric (J) and magnetic (M) equivalent currents on the six faces of
    # the box. The 5.xx labels presumably reference the TF/SF equations of
    # a standard FDTD text -- confirm the exact source.
    # 5.48a
    Jsrcx[xl+1:(1+xr),yb,zb:(1+zt)] = Jsrcx[xl+1:(1+xr),yb,zb:(1+zt)] + Hzinc[xl+1:(1+xr),yb,zb:(1+zt)]/self.dx
    # 5.49a
    Jsrcx[xl+1:(1+xr),yt,zb:(1+zt)] = Jsrcx[xl+1:(1+xr),yt,zb:(1+zt)] - Hzinc[xl+1:(1+xr),yt+1,zb:(1+zt)]/self.dx
    # 5.48b
    Jsrcz[xl:(1+xr),yb,zb+1:(1+zt)] = Jsrcz[xl:(1+xr),yb,zb+1:(1+zt)] - Hxinc[xl:(1+xr),yb,zb+1:(1+zt)]/self.dx
    # 5.49b
    Jsrcz[xl:(1+xr),yt,zb+1:(1+zt)] = Jsrcz[xl:(1+xr),yt,zb+1:(1+zt)] + Hxinc[xl:(1+xr),yt+1,zb+1:(1+zt)]/self.dx
    # 5.50a
    Jsrcx[xl+1:(1+xr),yb:(1+yt),zb] = Jsrcx[xl+1:(1+xr),yb:(1+yt),zb] - Hyinc[xl+1:(1+xr),yb:(1+yt),zb]/self.dx
    # 5.50b
    Jsrcy[xl:(1+xr),yb+1:(1+yt),zb] = Jsrcy[xl:(1+xr),yb+1:(1+yt),zb] + Hxinc[xl:(1+xr),yb+1:(1+yt),zb]/self.dx
    # 5.51a
    Jsrcx[xl+1:(1+xr),yb:(1+yt),zt] = Jsrcx[xl+1:(1+xr),yb:(1+yt),zt] + Hyinc[xl+1:(1+xr),yb:(1+yt),zt+1]/self.dx
    # 5.51b
    Jsrcy[xl:(1+xr),yb+1:(1+yt),zt] = Jsrcy[xl:(1+xr),yb+1:(1+yt),zt] - Hxinc[xl:(1+xr),yb+1:(1+yt),zt+1]/self.dx
    # 5.52a
    Jsrcy[xl,yb+1:(1+yt),zb:(1+zt)] = Jsrcy[xl,yb+1:(1+yt),zb:(1+zt)] - Hzinc[xl,yb+1:(1+yt),zb:(1+zt)]/self.dx
    # 5.52b
    Jsrcz[xl,yb:(1+yt),zb+1:(1+zt)] = Jsrcz[xl,yb:(1+yt),zb+1:(1+zt)] + Hyinc[xl,yb:(1+yt),zb+1:(1+zt)]/self.dx
    # 5.53a
    Jsrcy[xr,yb+1:(1+yt),zb:(1+zt)] = Jsrcy[xr,yb+1:(1+yt),zb:(1+zt)] + Hzinc[xr+1,yb+1:(1+yt),zb:(1+zt)]/self.dx
    # 5.53b
    Jsrcz[xr,yb:(1+yt),zb+1:(1+zt)] = Jsrcz[xr,yb:(1+yt),zb+1:(1+zt)] - Hyinc[xr+1,yb:(1+yt),zb+1:(1+zt)]/self.dx
    # 5.54a
    Msrcz[xl+1:(1+xr),yb,zb:(1+zt)] = Msrcz[xl+1:(1+xr),yb,zb:(1+zt)] - Exinc[xl+1:(1+xr),yb,zb:(1+zt)]/self.dx
    # 5.54b
    Msrcx[xl:(1+xr),yb,zb+1:(1+zt)] = Msrcx[xl:(1+xr),yb,zb+1:(1+zt)] + Ezinc[xl:(1+xr),yb,zb+1:(1+zt)]/self.dx
    # 5.55a
    Msrcz[xl+1:(1+xr),yt+1,zb+1:(1+zt)] = Msrcz[xl+1:(1+xr),yt+1,zb+1:(1+zt)] + Exinc[xl+1:(1+xr),yt, zb+1:(1+zt)]/self.dx
    # 5.55b
    Msrcx[xl:(1+xr),yt+1,zb+1:(1+zt)] = Msrcx[xl:(1+xr),yt+1,zb+1:(1+zt)] - Ezinc[xl:(1+xr),yt,zb+1:(1+zt)]/self.dx
    # 5.56a
    Msrcy[xl+1:(1+xr),yb:(1+yt),zb] = Msrcy[xl+1:(1+xr),yb:(1+yt),zb] + Exinc[xl+1:(1+xr),yb:(1+yt),zb]/self.dx
    # 5.56b
    Msrcx[xl:(1+xr),yb+1:(1+yt),zb] = Msrcx[xl:(1+xr),yb+1:(1+yt),zb] - Eyinc[xl:(1+xr),yb+1:(1+yt),zb]/self.dx
    # 5.57a
    Msrcy[xl+1:(1+xr),yb:(1+yt),zt+1] = Msrcy[xl+1:(1+xr),yb:(1+yt),zt+1] - Exinc[xl+1:(1+xr),yb:(1+yt),zt]/self.dx
    # 5.57b
    Msrcx[xl:(1+xr),yb+1:(1+yt),zt+1] = Msrcx[xl:(1+xr),yb+1:(1+yt),zt+1] + Eyinc[xl:(1+xr),yb+1:(1+yt),zt]/self.dx
    # 5.58a
    Msrcz[xl, yb+1:(1+yt),zb:(1+zt)] = Msrcz[xl, yb+1:(1+yt), zb:(1+zt)] + Eyinc[xl, yb+1:(1+yt), zb:(1+zt)]/self.dx
    # 5.58b
    Msrcy[xl, yb:(1+yt), zb+1:(1+zt)] = Msrcy[xl,yb:(1+yt),zb+1:(1+zt)] - Ezinc[xl,yb:(1+yt),zb+1:(1+zt)]/self.dx
    # 5.59a
    Msrcz[xr+1,yb+1:(1+yt),zb:(1+zt)] = Msrcz[xr+1,yb+1:(1+yt),zb:(1+zt)] - Eyinc[xr, yb+1:(1+yt),zb:(1+zt)]/self.dx
    # 5.59b
    Msrcy[xr+1,yb:(1+yt),zb+1:(1+zt)] = Msrcy[xr+1,yb:(1+yt),zb+1:(1+zt)] + Ezinc[xr,yb:(1+yt),zb+1:(1+zt)]/self.dx
    # debug dump of the incident fields and sources to a MAT file
    spio.savemat('intrn', {'sigx':self.sigX, 'sigy':self.sigY, 'sigz':self.sigZ,\
                           'exinc':Exinc,'eyinc':Eyinc,'ezinc':Ezinc,\
                           'hxinc':Hxinc,'hyinc':Hyinc,'hzinc':Hzinc,\
                           'jsrcx':Jsrcx,'jsrcy':Jsrcy,'jsrcz':Jsrcz, \
                           'msrcx':Msrcx,'msrcy':Msrcy,'msrcz':Msrcz})
    J = np.concatenate((Jsrcx.flatten(),Jsrcy.flatten(),Jsrcz.flatten()))
    M = np.concatenate((Msrcx.flatten(),Msrcy.flatten(),Msrcz.flatten()))
    # ok. have to make some transformations in order to make this happen.
    pd1 = self.ph*self.d1 # maps full to half grid
    pd2 = self.po*self.d2
    '''make a curl operator from M -> J '''
    # sizes of the E (full-grid) and H (half-grid) component vectors
    nex = (self.nx+1)*self.ny*self.nz
    ney = self.nx*(self.ny+1)*self.nz
    nez = self.nx*self.ny*(self.nz+1)
    nhx = self.nx*(self.ny+1)*(self.nz+1)
    nhy = (self.nx+1)*self.ny*(self.nz+1)
    nhz = (self.nx+1)*(self.ny+1)*self.nz
    AA = sparse.coo_matrix((nex,nhx))
    AB = -sparse.kron(speye(self.nx+1),sparse.kron(speye(self.ny),pd2))
    AC = sparse.kron(speye(self.nx+1),sparse.kron(pd2,speye(self.nz)))
    BA = sparse.kron(speye(self.nx+1),sparse.kron(speye(self.ny),pd2))
    BB = sparse.coo_matrix((ney,nhy))
    BC = -sparse.kron(pd2,sparse.kron(speye(self.ny+1),speye(self.nz)))
    CA = -sparse.kron(speye(self.nx),sparse.kron(pd2,speye(self.nz+1)))
    CB = sparse.kron(pd2,sparse.kron(speye(self.ny),speye(self.nz+1)))
    CC = sparse.coo_matrix((nez,nhz))
    srB = spt.vCat([spt.hCat([AA,AB,AC]), spt.hCat([BA,BB,BC]), spt.hCat([CA,CB,CC])])
    self.rhs = -(1j*self.muo*self.w*J + srB*M)
def getS(self):
    """Return the scalar coefficient applied to the Md*P term of the system."""
    return 1j*self.w*self.muo
def te_ezf(X, Y, Z, xi, kinc, ktx, rTE, tTE, kFS, kHS):
    '''Analytic TE field: incident + reflected wave above the interface,
    transmitted wave below.

    xi selects the subsurface points (True = half space); kinc/ktx are
    direction cosines, kFS/kHS the (imaginary) wavenumbers, and rTE/tTE the
    Fresnel reflection/transmission coefficients.
    '''
    free = ~xi
    field = np.zeros(X.shape, dtype='complex128')
    # incident phase plus reflected phase (y-component sign flipped)
    phaseInc = X[free]*kinc[0] + Y[free]*kinc[1] + Z[free]*kinc[2]
    phaseRef = X[free]*kinc[0] - Y[free]*kinc[1] + Z[free]*kinc[2]
    field[free] = np.exp(kFS*phaseInc) + rTE*np.exp(kFS*phaseRef)
    # transmitted wave in the half space
    phaseTx = X[xi]*ktx[0] + Y[xi]*ktx[1] + Z[xi]*ktx[2]
    field[xi] = tTE*np.exp(kHS*phaseTx)
    return field
def te_hf(X, Y, Z, xi, kinc, ktx, rTE, tTE, kFS, kHS):
    '''Analytic TE magnetic-field kernel.

    Identical to te_ezf except that the reflected wave enters with a minus
    sign, as required for the H field.
    '''
    free = ~xi
    field = np.zeros(X.shape, dtype='complex128')
    phaseInc = X[free]*kinc[0] + Y[free]*kinc[1] + Z[free]*kinc[2]
    phaseRef = X[free]*kinc[0] - Y[free]*kinc[1] + Z[free]*kinc[2]
    # only difference from te_ezf: reflected term subtracted
    field[free] = np.exp(kFS*phaseInc) - rTE*np.exp(kFS*phaseRef)
    phaseTx = X[xi]*ktx[0] + Y[xi]*ktx[1] + Z[xi]*ktx[2]
    field[xi] = tTE*np.exp(kHS*phaseTx)
    return field
|
# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Regression tests for BibKnowledge."""
import unittest
from invenio.config import CFG_SITE_URL, CFG_SITE_SECURE_URL
from invenio.bibknowledge import kb_exists, get_kba_values, \
get_kbr_keys, get_kbd_values_for_bibedit, get_kbs_info, add_kb,\
delete_kb, add_kb_mapping, remove_kb_mapping, get_kb_name, kb_mapping_exists, \
get_kbt_items_for_bibedit
from invenio.testutils import make_test_suite, run_test_suite, test_web_page_content
class BibknowledgeTests(unittest.TestCase):
    """Test functions for bibknowledge.

    Covers /kb page availability, the EJOURNALS demo knowledge base,
    KB/mapping CRUD, dynamic (bibedit) lookups and taxonomy KBs. These are
    regression tests: they require a running demo site and database.
    """
    def test_kb_pages_available(self):
        """bibknowledge - test /kb page availability"""
        kbpage = CFG_SITE_URL+"/kb"
        errs = test_web_page_content(kbpage)
        self.assertEqual([], errs)
    def test_kb_pages_curator_can_read(self):
        """bibknowledge - test that balthasar from the curator group can read page"""
        kbpage = CFG_SITE_URL+"/kb"
        errs = test_web_page_content(kbpage, username="balthasar",
                                     password="b123althasar",
                                     expected_text="BibKnowledge Admin")
        self.assertEqual([], errs)
    def test_EJOURNALS_exists(self):
        """bibknowledge - test that EJOURNALS kb is there"""
        isthere = kb_exists("EJOURNALS")
        self.assertEqual(True, isthere)
    def test_kbs_info(self):
        """bibknowledge - get_kbs_info returns EJOURNALS info"""
        myinfolist = get_kbs_info("", "EJOURNALS")
        myinfo = myinfolist[0]
        self.assertEqual(myinfo["name"],"EJOURNALS")
    def test_EJOURNALS_keys(self):
        """bibknowledge - test a left/right rule"""
        mykeys = get_kbr_keys("EJOURNALS", "Acta")
        self.assertEqual(2, len(mykeys))
    def test_get_kba_values(self):
        """bibknowledge - test recovering just values"""
        mylist = get_kba_values("EJOURNALS")
        self.assertEqual(327, len(mylist))
    def test_add_get_remove(self):
        """bibknowledge - test creating a kb, adding a mapping, removing it, removing kb"""
        new_kb_id = add_kb()
        new_name = get_kb_name(new_kb_id)
        add_kb_mapping(new_name, "foobar", "barfoo")
        fbexists = kb_mapping_exists(new_name, "foobar")
        self.assertEqual(True, fbexists)
        remove_kb_mapping(new_name, "foobar")
        fbexists = kb_mapping_exists(new_name, "foobar")
        self.assertEqual(False, fbexists)
        delete_kb(new_name)
        still_there = kb_exists(new_name)
        self.assertEqual(False, still_there)
    def test_kb_for_bibedit(self):
        """bibknowledge - test a dynamic db"""
        myvalues = get_kbd_values_for_bibedit("100__a", "", "Ellis")
        self.assertEqual(1, len(myvalues))
    def test_taxonomy(self):
        """bibknowledge - test a taxonomy"""
        username = "balthasar"
        password = "b123althasar"
        #create a new taxonomy kb
        new_kb_id = add_kb("testtaxonomy","taxonomy")
        #what was the name?
        new_kb_name = get_kb_name(new_kb_id)
        #get the taxonomy file
        import mechanize
        response = mechanize.urlopen("http://cdsware.cern.ch/download/invenio-demo-site-files/HEP.rdf")
        content = response.read()
        # NOTE(review): writes into the current working directory, which may
        # fail on permissions; the file and the test KB are never cleaned up.
        f = open("HEP.rdf","w")
        f.write(content)
        f.close()
        #upload it to the right destination, but log in first
        browser = mechanize.Browser()
        browser.open(CFG_SITE_SECURE_URL + "/youraccount/login")
        browser.select_form(nr=0)
        browser['p_un'] = username
        browser['p_pw'] = password
        browser.submit()
        #go to upload page
        uploadpage = browser.open(CFG_SITE_URL+"/kb?kb="+str(new_kb_id))
        #check that we are there
        content = uploadpage.read()
        namethere = content.count("testtaxonomy")
        assert namethere > 0
        #upload
        browser.open(CFG_SITE_URL+"/kb?kb="+str(new_kb_id))
        browser.select_form(name="upload")
        browser.form["kb"] = str(new_kb_id) #force the id
        browser.form.add_file(open("HEP.rdf"), content_type='text/plain', filename="HEP.rdf", name="file")
        browser.submit()
        #check that we can get an item from the kb
        items = get_kbt_items_for_bibedit(new_kb_name, "prefLabel", "Altarelli")
        #item should contain 1 string: 'Altarelli-Parisi equation'
        self.assertEqual(1, len(items))
# Collect all BibKnowledge regression tests and allow running this module
# directly as a script.
TEST_SUITE = make_test_suite(BibknowledgeTests)
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
BibKnowledge: fix taxonomy regression test
* Solve "permissions denied" problem by prepending CFG_TMPDIR to the file path.
* Delete the test file and the test KB afterwards.
# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Regression tests for BibKnowledge."""
import unittest
from os import remove
from invenio.config import CFG_SITE_URL, CFG_SITE_SECURE_URL, CFG_TMPDIR
from invenio.bibknowledge import kb_exists, get_kba_values, \
get_kbr_keys, get_kbd_values_for_bibedit, get_kbs_info, add_kb,\
delete_kb, add_kb_mapping, remove_kb_mapping, get_kb_name, kb_mapping_exists, \
get_kbt_items_for_bibedit
from invenio.testutils import make_test_suite, run_test_suite, test_web_page_content
class BibknowledgeTests(unittest.TestCase):
    """Test functions for bibknowledge.

    Covers /kb page availability, the EJOURNALS demo knowledge base,
    KB/mapping CRUD, dynamic (bibedit) lookups and taxonomy KBs. These are
    regression tests: they require a running demo site and database.
    """
    def test_kb_pages_available(self):
        """bibknowledge - test /kb page availability"""
        kbpage = CFG_SITE_URL+"/kb"
        errs = test_web_page_content(kbpage)
        self.assertEqual([], errs)
    def test_kb_pages_curator_can_read(self):
        """bibknowledge - test that balthasar from the curator group can read page"""
        kbpage = CFG_SITE_URL+"/kb"
        errs = test_web_page_content(kbpage, username="balthasar",
                                     password="b123althasar",
                                     expected_text="BibKnowledge Admin")
        self.assertEqual([], errs)
    def test_EJOURNALS_exists(self):
        """bibknowledge - test that EJOURNALS kb is there"""
        isthere = kb_exists("EJOURNALS")
        self.assertEqual(True, isthere)
    def test_kbs_info(self):
        """bibknowledge - get_kbs_info returns EJOURNALS info"""
        myinfolist = get_kbs_info("", "EJOURNALS")
        myinfo = myinfolist[0]
        self.assertEqual(myinfo["name"],"EJOURNALS")
    def test_EJOURNALS_keys(self):
        """bibknowledge - test a left/right rule"""
        mykeys = get_kbr_keys("EJOURNALS", "Acta")
        self.assertEqual(2, len(mykeys))
    def test_get_kba_values(self):
        """bibknowledge - test recovering just values"""
        mylist = get_kba_values("EJOURNALS")
        self.assertEqual(327, len(mylist))
    def test_add_get_remove(self):
        """bibknowledge - test creating a kb, adding a mapping, removing it, removing kb"""
        new_kb_id = add_kb()
        new_name = get_kb_name(new_kb_id)
        add_kb_mapping(new_name, "foobar", "barfoo")
        fbexists = kb_mapping_exists(new_name, "foobar")
        self.assertEqual(True, fbexists)
        remove_kb_mapping(new_name, "foobar")
        fbexists = kb_mapping_exists(new_name, "foobar")
        self.assertEqual(False, fbexists)
        delete_kb(new_name)
        still_there = kb_exists(new_name)
        self.assertEqual(False, still_there)
    def test_kb_for_bibedit(self):
        """bibknowledge - test a dynamic db"""
        myvalues = get_kbd_values_for_bibedit("100__a", "", "Ellis")
        self.assertEqual(1, len(myvalues))
    def test_taxonomy(self):
        """bibknowledge - test a taxonomy"""
        username = "balthasar"
        password = "b123althasar"
        #create a new taxonomy kb
        new_kb_id = add_kb("testtaxonomy","taxonomy")
        #what was the name?
        new_kb_name = get_kb_name(new_kb_id)
        #get the taxonomy file
        import mechanize
        response = mechanize.urlopen("http://cdsware.cern.ch/download/invenio-demo-site-files/HEP.rdf")
        content = response.read()
        # write the downloaded file into CFG_TMPDIR (writable by the test user)
        f = open(CFG_TMPDIR+"/HEP.rdf","w")
        f.write(content)
        f.close()
        #upload it to the right destination, but log in first
        browser = mechanize.Browser()
        browser.open(CFG_SITE_SECURE_URL + "/youraccount/login")
        browser.select_form(nr=0)
        browser['p_un'] = username
        browser['p_pw'] = password
        browser.submit()
        #go to upload page
        uploadpage = browser.open(CFG_SITE_URL+"/kb?kb="+str(new_kb_id))
        #check that we are there
        content = uploadpage.read()
        namethere = content.count("testtaxonomy")
        assert namethere > 0
        #upload
        browser.open(CFG_SITE_URL+"/kb?kb="+str(new_kb_id))
        browser.select_form(name="upload")
        browser.form["kb"] = str(new_kb_id) #force the id
        browser.form.add_file(open(CFG_TMPDIR+"/HEP.rdf"), content_type='text/plain', filename="HEP.rdf", name="file")
        browser.submit()
        #check that we can get an item from the kb
        items = get_kbt_items_for_bibedit(new_kb_name, "prefLabel", "Altarelli")
        #item should contain 1 string: 'Altarelli-Parisi equation'
        self.assertEqual(1, len(items))
        # delete the temp file
        remove(CFG_TMPDIR+"/HEP.rdf")
        # delete the test KB from the DB
        delete_kb(new_kb_name)
        still_there = kb_exists(new_kb_name)
        self.assertEqual(False, still_there)
# Collect all BibKnowledge regression tests and allow running this module
# directly as a script.
TEST_SUITE = make_test_suite(BibknowledgeTests)
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
|
# Based on code Created by Laurens van der Maaten on 20-12-08.
# see point #4
import numpy
import sys
# Optional CPython accelerator: enable psyco when available, otherwise run
# unaccelerated. The previous bare ``except:`` swallowed every exception
# (including KeyboardInterrupt); narrowed to ImportError so genuine errors
# from psyco.full() are no longer hidden.
try:
    import psyco
    psyco.full()
    print >> sys.stderr, "psyco is usable!"
except ImportError:
    print >> sys.stderr, "No psyco"
class TSNE(object):
def __init__(self, default_iterations = 1000, perplexity = 15, desired_dims=2):
    """Configure a t-SNE run.

    default_iterations -- gradient-descent iterations to run by default
    perplexity -- target perplexity used when solving for each point's sigma
    desired_dims -- dimensionality of the output embedding (usually 2)
    """
    self.default_iterations = default_iterations
    self.perplexity=perplexity
    self.out_dims=desired_dims
    # momentum schedule for the gradient descent
    self.initial_momentum = 0.5
    self.final_momentum = 0.8
    self.eta = 500  # learning rate
    self.sigma_iterations = 50 # number of iterations to try when finding sigma that matches perplexity *for each row of P, for every iteration*
    self.min_gain = 0.01
    self.tol = 1e-5  # tolerance on |H - log(perplexity)| in the sigma search
def Hbeta(self, D = numpy.array([]), beta = 1.0):
    """Compute the entropy H and the P-row for a Gaussian with precision
    *beta* over the squared distances *D*.

    Returns (H, P) where P is the normalized affinity row (sums to 1).
    (Uses numpy.sum instead of the Python builtin sum, which iterated the
    array element-by-element; also drops a redundant D.copy().)
    """
    # Gaussian affinities at this precision; -D*beta allocates a new array,
    # so D itself is never mutated
    P = numpy.exp(-D * beta)
    sumP = numpy.sum(P)
    # entropy of the normalized row: log(sum P~) + beta * <D> under P
    H = numpy.log(sumP) + beta * numpy.sum(D * P) / sumP
    P = P / sumP
    return H, P
def x2p(self):
    """Performs a binary search to get P-values in such a way that each
    conditional Gaussian has the target perplexity.

    Uses self.codes as the input points and returns the n x n matrix P of
    conditional affinities (zero diagonal).
    """
    X = self.codes
    # Initialize some variables
    print "Computing pairwise distances..."
    (n, d) = X.shape
    # squared Euclidean distances via ||a-b||^2 = ||a||^2 - 2 a.b + ||b||^2
    sum_X = numpy.sum(numpy.square(X), 1)
    D = numpy.add(numpy.add(-2 * numpy.dot(X, X.T), sum_X).T, sum_X)
    P = numpy.zeros((n, n))
    sigmas = []
    # Loop over all datapoints
    for i in range(n):
        # Print progress
        if i % 500 == 0:
            print "Computing P-values for point ", i, " of ", n, "..."
        # distances from point i to everything except itself
        distances_to_i = D[i, numpy.concatenate((numpy.r_[0:i], numpy.r_[i+1:n]))];
        thisP, sigma = self.get_row_of_P(distances_to_i,self.perplexity)
        # Set the row of P we just worked out
        P[i, numpy.concatenate((numpy.r_[0:i], numpy.r_[i+1:n]))] = thisP;
        sigmas.append(sigma)
    # Return final P-matrix
    print "Mean value of sigma: ", numpy.mean(sigmas)
    return P;
def get_row_of_P(self, distances, perplexity):
log_perplexity = numpy.log(perplexity)
# Binary search for a value of beta that achieves the required
# perplexity. Then returns the corresponding P-vector.
betamin = -numpy.inf;
betamax = numpy.inf;
beta = 1 #star guess
# Compute the Gaussian kernel and entropy for the current beta
(H, thisP) = self.Hbeta(distances, beta) #first guess, probably wrong
# Evaluate whether the perplexity is within tolerance
Hdiff = H - log_perplexity
tries = 0
while numpy.abs(Hdiff) > self.tol and tries < self.sigma_iterations:
# If not, increase or decrease precision
if Hdiff > 0:
betamin = beta
if betamax == numpy.inf or betamax == -numpy.inf:
beta = beta * 2
else:
beta = (beta + betamax) / 2
else:
betamax = beta
if betamin == numpy.inf or betamin == -numpy.inf:
beta = beta / 2
else:
beta = (beta + betamin) / 2
# not yet within tolerance - recompute the values
(H, thisP) = self.Hbeta(distances, beta)
Hdiff = H - log_perplexity
tries = tries + 1
sigma = numpy.sqrt(1 / beta)
return thisP, sigma
def initialize_with_codes(self, codes):
self.codes = numpy.array(codes)
(n, d) = self.codes.shape
self.code_dims = d
#random initialize the coords
print '!randomly initializing the coordinates!'
self.coords = numpy.random.randn(n, self.out_dims)
def load_from_file(self, coords_file, codes_file):
codes = numpy.genfromtxt(codes_file, dtype=numpy.float32, delimiter=',')
coords = numpy.genfromtxt(coords_file, dtype=numpy.float32, delimiter=',')
if codes.dtype != "float32":
print "Error: file of codes should have type float32.";
return -1;
if (codes.shape[0] != coords.shape[0]):
print "Error: file of codes and coords should have equal number of rows at this point.";
return -1;
#throway the index in first column (not sure why that is there)
self.codes = codes[:,1:]
self.coords = coords
print 'loaded coords and codes from %s, %s respectively' %(coords_file, codes_file)
def save_coords_to_file(self, coords_file):
print 'saving coords to %s' %(coords_file)
numpy.savetxt(coords_file, self.coords, delimiter=',')
def get_coord_for_code(self, code, iterations = None):
if (iterations==None):
iterations = self.default_iterations
# Initialize variables
X = numpy.vstack(self.codes, code)
#randomize the start_coord, somewhere towards middle
y = numpy.random.randn(1, self.out_dims)
Y.append(self.coords, y,0 )
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
(n, d) = X.shape;
assert(d==self.code_dims) #these should be the same, right?
dy = numpy.zeros((1, self.out_dims));
iy = numpy.zeros((1, self.out_dims));
gain = numpy.ones((1, self.out_dims));
# work out distances from this point to all other points (in high-d space)
# FIXME: function below here should be changed to the
# proper 1-D case
sum_X = numpy.sum(numpy.square(X), 1)
D = numpy.add(numpy.add(-2 * numpy.dot(X, X.T), sum_X).T, sum_X)
distances = D[-1]
# i tried the following and even though it has the right
# shape, it gives the wrong result! ;-( - MKT
#sum_x = numpy.sum(numpy.square(code), 0)
#distances = numpy.add(numpy.add(-2 * numpy.dot(code, X.T), sum_x).T, sum_x)
#get P for 'this code' to all other codes
thisP = self.get_row_of_P(distances, self.perplexity)
# don't need this any more since we are doing the above - MKT
#P = self.x2p();
#P = P + numpy.transpose(P);
#P = P / numpy.sum(P);
#P = P * 4; # early exaggeration
#P = numpy.maximum(P, 1e-12);
# ALL FUNCTIONS BELOW NEED TO BE CHANGED TO THE
# 1-D CASE (instead of matrix case)
# Run iterations
for iter in range(iterations):
# Compute pairwise affinities
sum_Y = numpy.sum(numpy.square(Y), 1);
num = 1 / (1 + numpy.add(numpy.add(-2 * numpy.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / numpy.sum(num);
Q = numpy.maximum(Q, 1e-12);
# Compute gradient
PQ = P - Q;
for i in range(n):
dY[i,:] = numpy.sum(numpy.tile(PQ[:,i] * num[:,i], (self.out_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - numpy.tile(numpy.mean(Y, 0), (n, 1));
# Compute current value of cost function
if (iter + 1) % 10 == 0:
print "P ,", P
print "Q " , Q
C = numpy.sum(P * numpy.log(P / Q));
print "Iteration ", (iter + 1), ": error is ", C
# update solution
self.coords = Y;
def fit(self, iterations = None):
"""Descends along tsne gradient path for specified number of iterations"""
if (iterations==None):
iterations = self.default_iterations
if self.codes.dtype != "float32":
print "Error: array of codes should have type float32.";
return -1;
# Initialize variables
X = self.codes
Y = self.coords
initial_momentum = 0.5;
final_momentum = 0.8;
eta = 500;
min_gain = 0.01;
(n, d) = X.shape;
assert(d==self.code_dims) #these should be the same, right?
dY = numpy.zeros((n, self.out_dims));
iY = numpy.zeros((n, self.out_dims));
gains = numpy.ones((n, self.out_dims));
# Compute P-values
P = self.x2p();
P = P + numpy.transpose(P);
P = P / numpy.sum(P);
P = P * 4; # early exaggeration
P = numpy.maximum(P, 1e-12);
# Run iterations
for iter in range(iterations):
# Compute pairwise affinities
sum_Y = numpy.sum(numpy.square(Y), 1);
num = 1 / (1 + numpy.add(numpy.add(-2 * numpy.dot(Y, Y.T), sum_Y).T, sum_Y));
num[range(n), range(n)] = 0;
Q = num / numpy.sum(num);
Q = numpy.maximum(Q, 1e-12);
# Compute gradient
PQ = P - Q;
for i in range(n):
dY[i,:] = numpy.sum(numpy.tile(PQ[:,i] * num[:,i], (self.out_dims, 1)).T * (Y[i,:] - Y), 0);
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
gains[gains < min_gain] = min_gain;
iY = momentum * iY - eta * (gains * dY);
Y = Y + iY;
Y = Y - numpy.tile(numpy.mean(Y, 0), (n, 1));
# Compute current value of cost function
if (iter + 1) % 10 == 0:
C = numpy.sum(P * numpy.log(P / Q));
print "Iteration ", (iter + 1), ": error is ", C
# Stop lying about P-values
if iter == 100:
P = P / 4;
# update solution
self.coords = Y;
#if __name__ == "__main__":
#print "Run Y = tsne.tsne(X, self.out_dims, perplexity) to perform t-SNE on your dataset."
#print "Running example on 2,500 MNIST digits..."
#X = numpy.loadtxt("mnist2500_X.txt");
#labels = numpy.loadtxt("mnist2500_labels.txt");
#Y = tsne(X, 2, 50, 20.0);
#pylab.scatter(Y[:,0], Y[:,1], 20, labels);
Fixed the int-should-be-float problem, but it is still giving garbage output when run on the MNIST codes.
# Based on code Created by Laurens van der Maaten on 20-12-08.
# see point #4
import numpy
import sys
# Optionally enable the psyco JIT specializer. This is purely a best-effort
# speed-up: a missing or broken psyco must not abort importing this module.
try:
    import psyco
    psyco.full()
    print >> sys.stderr, "psyco is usable!"
except Exception:
    # Narrowed from a bare `except:` so that SystemExit/KeyboardInterrupt
    # raised while enabling psyco are no longer silently swallowed.
    print >> sys.stderr, "No psyco"
class TSNE(object):
    """t-SNE (t-distributed Stochastic Neighbor Embedding), adapted from
    Laurens van der Maaten's reference implementation.

    Revised copy: beta now starts as a float (fixing Python 2 integer
    division in the binary search) and progress is printed more often.
    Typical use: initialize_with_codes() or load_from_file(), then fit();
    the embedding is stored in self.coords.
    """
    def __init__(self, default_iterations = 1000, perplexity = 15, desired_dims=2):
        # default_iterations: gradient-descent steps used when fit() is
        # called without an explicit iteration count.
        self.default_iterations = default_iterations
        self.perplexity=perplexity
        self.out_dims=desired_dims
        # NOTE(review): fit() and get_coord_for_code() shadow the next three
        # with identically-named locals and never read these attributes.
        self.initial_momentum = 0.5
        self.final_momentum = 0.8
        self.eta = 500
        self.sigma_iterations = 50 # number of iterations to try when finding sigma that matches perplexity *for each row of P, for every iteration*
        self.min_gain = 0.01
        self.tol = 1e-5
    def initialize_with_codes(self, codes):
        """Store the high-dimensional codes and give coords a random start."""
        self.codes = numpy.array(codes)
        (n, d) = self.codes.shape # n rows, d columns
        self.code_dims = d
        #randomly initialize the coords
        print '!randomly initializing the coordinates!'
        self.coords = numpy.random.randn(n, self.out_dims)
        # Note there are smarter ways to initialise, eg. use the
        # projections onto the first 2 principal components for
        # example, that would make tsne's job easier by starting
        # coords in the right ball-park. Something to keep in mind if
        # we think local minima are a problem.
    def fit(self, iterations = None):
        """Descends along tsne gradient path for the specified number of iterations"""
        if (iterations==None):
            iterations = self.default_iterations
        if self.codes.dtype != "float32":
            print "Error: array of codes should have type float32.";
            return -1;
        # Initialize variables
        X = self.codes
        Y = self.coords
        initial_momentum = 0.5;
        final_momentum = 0.8;
        eta = 500;
        min_gain = 0.01;
        (n, d) = X.shape;
        assert(d==self.code_dims) #these should be the same, right?
        dY = numpy.zeros((n, self.out_dims));
        iY = numpy.zeros((n, self.out_dims));
        gains = numpy.ones((n, self.out_dims));
        # Compute P-values
        print "Computing the P-values first"
        P = self.x2p();
        P = P + numpy.transpose(P);
        P = P / numpy.sum(P);
        P = P * 4; # early exaggeration
        P = numpy.maximum(P, 1e-12);
        # Run iterations
        print "Doing the tsne minimization"
        for iter in range(iterations):
            # Compute pairwise affinities (Student-t kernel in low-d space)
            sum_Y = numpy.sum(numpy.square(Y), 1);
            num = 1 / (1 + numpy.add(numpy.add(-2 * numpy.dot(Y, Y.T), sum_Y).T, sum_Y));
            num[range(n), range(n)] = 0;
            Q = num / numpy.sum(num);
            Q = numpy.maximum(Q, 1e-12);
            # Compute gradient
            PQ = P - Q;
            for i in range(n):
                dY[i,:] = numpy.sum(numpy.tile(PQ[:,i] * num[:,i], (self.out_dims, 1)).T * (Y[i,:] - Y), 0);
            # Perform the update
            if iter < 20:
                momentum = initial_momentum
            else:
                momentum = final_momentum
            gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
            gains[gains < min_gain] = min_gain;
            iY = momentum * iY - eta * (gains * dY);
            Y = Y + iY;
            # Re-center the embedding around the origin each step.
            Y = Y - numpy.tile(numpy.mean(Y, 0), (n, 1));
            # Compute current value of cost function
            if (iter + 1) % 10 == 0:
                C = numpy.sum(P * numpy.log(P / Q));
                print "Iteration ", (iter + 1), ": error is ", C
            # Stop lying about P-values
            if iter == 100:
                P = P / 4;
        # update solution
        self.coords = Y;
    def x2p(self):
        """Performs a binary search to get P-values in such a way that
        each conditional Gaussian has the target perplexity."""
        X = self.codes
        # Initialize some variables
        print "Computing pairwise distances..."
        (n, d) = X.shape
        # Squared Euclidean distance matrix via ||a-b||^2 = ||a||^2 - 2ab + ||b||^2
        sum_X = numpy.sum(numpy.square(X), 1)
        D = numpy.add(numpy.add(-2 * numpy.dot(X, X.T), sum_X).T, sum_X)
        P = numpy.zeros((n, n))
        sigmas = []
        # Loop over all datapoints
        for i in range(n):
            # Print progress
            if i % 50 == 0:
                print "Computing P-values for point ", i, " of ", n, "..."
            # Distances from point i to every other point (i itself excluded).
            distances_to_i = D[i, numpy.concatenate((numpy.r_[0:i], numpy.r_[i+1:n]))];
            #print distances_to_i
            thisP, sigma = self.get_row_of_P(distances_to_i,self.perplexity)
            # Set the row of P we just worked out
            P[i, numpy.concatenate((numpy.r_[0:i], numpy.r_[i+1:n]))] = thisP;
            sigmas.append(sigma)
        # Return final P-matrix
        print "Mean value of sigma: ", numpy.mean(sigmas)
        return P;
    def Hbeta(self, D = numpy.array([]), beta = 1.0):
        """Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution."""
        # NOTE(review): mutable default argument (numpy array); harmless here
        # because D is only copied, but a None default would be safer.
        # Compute P-row and corresponding perplexity
        P = numpy.exp(-D.copy() * beta);
        sumP = sum(P);
        H = numpy.log(sumP) + beta * numpy.sum(D * P) / sumP;
        P = P / sumP;
        return H, P;
    def get_row_of_P(self, distances, perplexity):
        """Return (P-row, sigma) for one point, binary-searching the Gaussian
        precision beta until the row's perplexity matches `perplexity`."""
        log_perplexity = numpy.log(perplexity)
        # Binary search for a value of beta that achieves the required
        # perplexity. Then returns the corresponding P-vector.
        betamin = -numpy.inf;
        betamax = numpy.inf;
        beta = 1.0 #start guess (float: avoids Python 2 integer division)
        # Compute the Gaussian kernel and entropy for the current beta
        (H, thisP) = self.Hbeta(distances, beta) #first guess, probably wrong
        # Evaluate whether the perplexity is within tolerance
        Hdiff = H - log_perplexity
        tries = 0
        while numpy.abs(Hdiff) > self.tol and tries < self.sigma_iterations:
            # If not, increase or decrease precision
            if Hdiff > 0:
                betamin = beta
                if betamax == numpy.inf or betamax == -numpy.inf:
                    beta = beta * 2
                else:
                    beta = (beta + betamax) / 2
            else:
                betamax = beta
                if betamin == numpy.inf or betamin == -numpy.inf:
                    beta = beta / 2
                else:
                    beta = (beta + betamin) / 2
            # not yet within tolerance - recompute the values
            (H, thisP) = self.Hbeta(distances, beta)
            Hdiff = H - log_perplexity
            tries = tries + 1
        # sigma is the Gaussian standard deviation implied by precision beta.
        sigma = numpy.sqrt(1.0 / beta)
        return thisP, sigma
    def load_from_file(self, coords_file, codes_file):
        """Load codes and coords from CSV files; returns -1 on validation
        failure instead of raising (caller must check)."""
        codes = numpy.genfromtxt(codes_file, dtype=numpy.float32, delimiter=',')
        coords = numpy.genfromtxt(coords_file, dtype=numpy.float32, delimiter=',')
        if codes.dtype != "float32":
            print "Error: file of codes should have type float32.";
            return -1;
        if (codes.shape[0] != coords.shape[0]):
            print "Error: file of codes and coords should have equal number of rows at this point.";
            return -1;
        #throway the index in first column (not sure why that is there)
        self.codes = codes[:,1:]
        self.coords = coords
        print 'loaded coords and codes from %s, %s respectively' %(coords_file, codes_file)
    def get_coord_for_code(self, code, iterations = None):
        # NOTE(review): this method is broken as written and will raise at
        # runtime: `Y` is used before assignment (Y.append below), the loop
        # reads `dY`/`iY`/`gains` but only `dy`/`iy`/`gain` are initialized,
        # `P` is never defined (the code computing it is commented out), and
        # numpy.vstack takes a single tuple argument, not two arrays.
        # Also, get_row_of_P returns a (P-row, sigma) tuple, so `thisP`
        # below is a tuple, not an array. The FIXME comments acknowledge
        # the 1-D conversion is unfinished.
        if (iterations==None):
            iterations = self.default_iterations
        # Initialize variables
        X = numpy.vstack(self.codes, code)
        #randomize the start_coord, somewhere towards middle
        y = numpy.random.randn(1, self.out_dims)
        Y.append(self.coords, y,0 )
        initial_momentum = 0.5;
        final_momentum = 0.8;
        eta = 500;
        min_gain = 0.01;
        (n, d) = X.shape;
        assert(d==self.code_dims) #these should be the same, right?
        dy = numpy.zeros((1, self.out_dims));
        iy = numpy.zeros((1, self.out_dims));
        gain = numpy.ones((1, self.out_dims));
        # work out distances from this point to all other points (in high-d space)
        # FIXME: function below here should be changed to the
        # proper 1-D case
        sum_X = numpy.sum(numpy.square(X), 1)
        D = numpy.add(numpy.add(-2 * numpy.dot(X, X.T), sum_X).T, sum_X)
        distances = D[-1]
        # i tried the following and even though it has the right
        # shape, it gives the wrong result! ;-( - MKT
        #sum_x = numpy.sum(numpy.square(code), 0)
        #distances = numpy.add(numpy.add(-2 * numpy.dot(code, X.T), sum_x).T, sum_x)
        #get P for 'this code' to all other codes
        thisP = self.get_row_of_P(distances, self.perplexity)
        # don't need this any more since we are doing the above - MKT
        #P = self.x2p();
        #P = P + numpy.transpose(P);
        #P = P / numpy.sum(P);
        #P = P * 4; # early exaggeration
        #P = numpy.maximum(P, 1e-12);
        # ALL FUNCTIONS BELOW NEED TO BE CHANGED TO THE
        # 1-D CASE (instead of matrix case)
        # Run iterations
        for iter in range(iterations):
            # Compute pairwise affinities
            sum_Y = numpy.sum(numpy.square(Y), 1);
            num = 1 / (1 + numpy.add(numpy.add(-2 * numpy.dot(Y, Y.T), sum_Y).T, sum_Y));
            num[range(n), range(n)] = 0;
            Q = num / numpy.sum(num);
            Q = numpy.maximum(Q, 1e-12);
            # Compute gradient
            PQ = P - Q;
            for i in range(n):
                dY[i,:] = numpy.sum(numpy.tile(PQ[:,i] * num[:,i], (self.out_dims, 1)).T * (Y[i,:] - Y), 0);
            # Perform the update
            if iter < 20:
                momentum = initial_momentum
            else:
                momentum = final_momentum
            gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0));
            gains[gains < min_gain] = min_gain;
            iY = momentum * iY - eta * (gains * dY);
            Y = Y + iY;
            Y = Y - numpy.tile(numpy.mean(Y, 0), (n, 1));
            # Compute current value of cost function
            if (iter + 1) % 10 == 0:
                print "P ,", P
                print "Q " , Q
                C = numpy.sum(P * numpy.log(P / Q));
                print "Iteration ", (iter + 1), ": error is ", C
        # update solution
        self.coords = Y;
    def save_coords_to_file(self, coords_file):
        """Write the current embedding to a CSV file."""
        print 'saving coords to %s' %(coords_file)
        numpy.savetxt(coords_file, self.coords, delimiter=',')
#if __name__ == "__main__":
#print "Run Y = tsne.tsne(X, self.out_dims, perplexity) to perform t-SNE on your dataset."
#print "Running example on 2,500 MNIST digits..."
#X = numpy.loadtxt("mnist2500_X.txt");
#labels = numpy.loadtxt("mnist2500_labels.txt");
#Y = tsne(X, 2, 50, 20.0);
#pylab.scatter(Y[:,0], Y[:,1], 20, labels);
|
"""Routines related to PyPI, indexes"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import functools
import logging
import re
from typing import TYPE_CHECKING
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import parse as parse_version
from pip._internal.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound,
InvalidWheelFilename,
UnsupportedWheel,
)
from pip._internal.index.collector import parse_links
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.models.target_python import TargetPython
from pip._internal.models.wheel import Wheel
from pip._internal.utils.filetypes import WHEEL_EXTENSION
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import build_netloc
from pip._internal.utils.packaging import check_requires_python
from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
from pip._internal.utils.urls import url_to_path
if TYPE_CHECKING:
from typing import FrozenSet, Iterable, List, Optional, Set, Tuple, Union
from pip._vendor.packaging.tags import Tag
from pip._vendor.packaging.version import _BaseVersion
from pip._internal.index.collector import LinkCollector
from pip._internal.models.search_scope import SearchScope
from pip._internal.req import InstallRequirement
from pip._internal.utils.hashes import Hashes
BuildTag = Union[Tuple[()], Tuple[int, str]]
CandidateSortingKey = (
Tuple[int, int, int, _BaseVersion, BuildTag, Optional[int]]
)
__all__ = ['FormatControl', 'BestCandidateResult', 'PackageFinder']
logger = logging.getLogger(__name__)
def _check_link_requires_python(
    link,  # type: Link
    version_info,  # type: Tuple[int, int, int]
    ignore_requires_python=False,  # type: bool
):
    # type: (...) -> bool
    """
    Return whether the given Python version is compatible with a link's
    "Requires-Python" value.

    :param version_info: A 3-tuple of ints representing the Python
        major-minor-micro version to check.
    :param ignore_requires_python: Whether to ignore the "Requires-Python"
        value if the given Python version isn't compatible.
    """
    try:
        compatible = check_requires_python(
            link.requires_python, version_info=version_info,
        )
    except specifiers.InvalidSpecifier:
        # An unparseable Requires-Python value is treated as no constraint.
        logger.debug(
            "Ignoring invalid Requires-Python (%r) for link: %s",
            link.requires_python, link,
        )
        return True

    if compatible:
        return True

    version = '.'.join(map(str, version_info))
    if ignore_requires_python:
        # Incompatible, but the caller asked us to let it through anyway.
        logger.debug(
            'Ignoring failed Requires-Python check (%s not in: %r) '
            'for link: %s',
            version, link.requires_python, link,
        )
        return True

    logger.debug(
        'Link requires a different Python (%s not in: %r): %s',
        version, link.requires_python, link,
    )
    return False
class LinkEvaluator:
    """
    Responsible for evaluating links for a particular project.
    """
    # Matches a trailing "-pyX[.Y]" Python-version suffix in a version string.
    _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    def __init__(
        self,
        project_name, # type: str
        canonical_name, # type: str
        formats, # type: FrozenSet[str]
        target_python, # type: TargetPython
        allow_yanked, # type: bool
        ignore_requires_python=None, # type: Optional[bool]
    ):
        # type: (...) -> None
        """
        :param project_name: The user supplied package name.
        :param canonical_name: The canonical package name.
        :param formats: The formats allowed for this package. Should be a set
            with 'binary' or 'source' or both in it.
        :param target_python: The target Python interpreter to use when
            evaluating link compatibility. This is used, for example, to
            check wheel compatibility, as well as when checking the Python
            version, e.g. the Python version embedded in a link filename
            (or egg fragment) and against an HTML link's optional PEP 503
            "data-requires-python" attribute.
        :param allow_yanked: Whether files marked as yanked (in the sense
            of PEP 592) are permitted to be candidates for install.
        :param ignore_requires_python: Whether to ignore incompatible
            PEP 503 "data-requires-python" values in HTML links. Defaults
            to False.
        """
        if ignore_requires_python is None:
            ignore_requires_python = False
        self._allow_yanked = allow_yanked
        self._canonical_name = canonical_name
        self._ignore_requires_python = ignore_requires_python
        self._formats = formats
        self._target_python = target_python
        self.project_name = project_name
    def evaluate_link(self, link):
        # type: (Link) -> Tuple[bool, Optional[str]]
        """
        Determine whether a link is a candidate for installation.
        :return: A tuple (is_candidate, result), where `result` is (1) a
            version string if `is_candidate` is True, and (2) if
            `is_candidate` is False, an optional string to log the reason
            the link fails to qualify.
        """
        version = None
        # Yanked releases are rejected first, unless explicitly allowed.
        if link.is_yanked and not self._allow_yanked:
            reason = link.yanked_reason or '<none given>'
            return (False, f'yanked for reason: {reason}')
        # An egg fragment, when present, supplies the name/version info
        # instead of the link's filename.
        if link.egg_fragment:
            egg_info = link.egg_fragment
            ext = link.ext
        else:
            egg_info, ext = link.splitext()
            if not ext:
                return (False, 'not a file')
            if ext not in SUPPORTED_EXTENSIONS:
                return (False, f'unsupported archive format: {ext}')
            if "binary" not in self._formats and ext == WHEEL_EXTENSION:
                reason = 'No binaries permitted for {}'.format(
                    self.project_name)
                return (False, reason)
            if "macosx10" in link.path and ext == '.zip':
                return (False, 'macosx10 one')
            if ext == WHEEL_EXTENSION:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    return (False, 'invalid wheel filename')
                if canonicalize_name(wheel.name) != self._canonical_name:
                    reason = 'wrong project name (not {})'.format(
                        self.project_name)
                    return (False, reason)
                supported_tags = self._target_python.get_tags()
                if not wheel.supported(supported_tags):
                    # Include the wheel's tags in the reason string to
                    # simplify troubleshooting compatibility issues.
                    file_tags = wheel.get_formatted_file_tags()
                    reason = (
                        "none of the wheel's tags ({}) are compatible (run pip debug --verbose to show compatible tags)".format(
                            ', '.join(file_tags)
                        )
                    )
                    return (False, reason)
                version = wheel.version
        # This should be up by the self.ok_binary check, but see issue 2700.
        if "source" not in self._formats and ext != WHEEL_EXTENSION:
            reason = f'No sources permitted for {self.project_name}'
            return (False, reason)
        # Fall back to extracting the version from the egg fragment / filename
        # when the wheel filename did not provide one.
        if not version:
            version = _extract_version_from_fragment(
                egg_info, self._canonical_name,
            )
        if not version:
            reason = f'Missing project version for {self.project_name}'
            return (False, reason)
        # Strip any "-pyX.Y" suffix and check it against the target Python.
        match = self._py_version_re.search(version)
        if match:
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != self._target_python.py_version:
                return (False, 'Python version is incorrect')
        supports_python = _check_link_requires_python(
            link, version_info=self._target_python.py_version_info,
            ignore_requires_python=self._ignore_requires_python,
        )
        if not supports_python:
            # Return None for the reason text to suppress calling
            # _log_skipped_link().
            return (False, None)
        logger.debug('Found link %s, version: %s', link, version)
        return (True, version)
def filter_unallowed_hashes(
    candidates, # type: List[InstallationCandidate]
    hashes, # type: Hashes
    project_name, # type: str
):
    # type: (...) -> List[InstallationCandidate]
    """
    Filter out candidates whose hashes aren't allowed, and return a new
    list of candidates.

    If at least one candidate has an allowed hash, then all candidates with
    either an allowed hash or no hash specified are returned. Otherwise,
    the given candidates are returned.

    Including the candidates with no hash specified when there is a match
    allows a warning to be logged if there is a more preferred candidate
    with no hash specified. Returning all candidates in the case of no
    matches lets pip report the hash of the candidate that would otherwise
    have been installed (e.g. permitting the user to more easily update
    their requirements file with the desired hash).
    """
    if not hashes:
        logger.debug(
            'Given no hashes to check %s links for project %r: '
            'discarding no candidates',
            len(candidates),
            project_name,
        )
        # Make sure we're not returning back the given value.
        return list(candidates)

    # Partition the candidates into the kept group (allowed hash or no
    # digest at all) and the rejected group, counting actual hash matches.
    matches_or_no_digest = []
    non_matches = []
    match_count = 0
    for candidate in candidates:
        candidate_link = candidate.link
        if not candidate_link.has_hash:
            matches_or_no_digest.append(candidate)
        elif candidate_link.is_hash_allowed(hashes=hashes):
            match_count += 1
            matches_or_no_digest.append(candidate)
        else:
            non_matches.append(candidate)

    if match_count:
        filtered = matches_or_no_digest
    else:
        # No candidate matched: keep everything (copied, not the caller's
        # list) so the eventual hash mismatch can be reported usefully.
        filtered = list(candidates)

    if len(filtered) == len(candidates):
        discard_message = 'discarding no candidates'
    else:
        discard_message = 'discarding {} non-matches:\n  {}'.format(
            len(non_matches),
            '\n  '.join(str(candidate.link) for candidate in non_matches)
        )

    logger.debug(
        'Checked %s links for project %r against %s hashes '
        '(%s matches, %s no digest): %s',
        len(candidates),
        project_name,
        hashes.digest_count,
        match_count,
        len(matches_or_no_digest) - match_count,
        discard_message
    )
    return filtered
class CandidatePreferences:
    """Bundle of user preferences that influence how InstallationCandidate
    objects are filtered and sorted.
    """

    def __init__(
        self,
        prefer_binary=False, # type: bool
        allow_all_prereleases=False, # type: bool
    ):
        # type: (...) -> None
        """
        :param prefer_binary: Whether to sort wheels ahead of source archives.
        :param allow_all_prereleases: Whether to allow all pre-releases.
        """
        self.prefer_binary = prefer_binary
        self.allow_all_prereleases = allow_all_prereleases
class BestCandidateResult:
    """A collection of candidates, returned by `PackageFinder.find_best_candidate`.

    This class is only intended to be instantiated by CandidateEvaluator's
    `compute_best_candidate()` method.
    """

    def __init__(
        self,
        candidates, # type: List[InstallationCandidate]
        applicable_candidates, # type: List[InstallationCandidate]
        best_candidate, # type: Optional[InstallationCandidate]
    ):
        # type: (...) -> None
        """
        :param candidates: A sequence of all available candidates found.
        :param applicable_candidates: The applicable candidates.
        :param best_candidate: The most preferred candidate found, or None
            if no applicable candidates were found.
        """
        # Sanity-check the documented invariants before storing anything.
        assert set(applicable_candidates) <= set(candidates)
        if best_candidate is None:
            assert not applicable_candidates
        else:
            assert best_candidate in applicable_candidates
        self.best_candidate = best_candidate
        self._candidates = candidates
        self._applicable_candidates = applicable_candidates

    def iter_all(self):
        # type: () -> Iterable[InstallationCandidate]
        """Iterate through all candidates."""
        return iter(self._candidates)

    def iter_applicable(self):
        # type: () -> Iterable[InstallationCandidate]
        """Iterate through the applicable candidates."""
        return iter(self._applicable_candidates)
class CandidateEvaluator:
    """
    Responsible for filtering and sorting candidates for installation based
    on what tags are valid.
    """
    @classmethod
    def create(
        cls,
        project_name, # type: str
        target_python=None, # type: Optional[TargetPython]
        prefer_binary=False, # type: bool
        allow_all_prereleases=False, # type: bool
        specifier=None, # type: Optional[specifiers.BaseSpecifier]
        hashes=None, # type: Optional[Hashes]
    ):
        # type: (...) -> CandidateEvaluator
        """Create a CandidateEvaluator object.
        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.
        :param hashes: An optional collection of allowed hashes.
        """
        if target_python is None:
            target_python = TargetPython()
        if specifier is None:
            # Empty specifier set: no version constraints.
            specifier = specifiers.SpecifierSet()
        supported_tags = target_python.get_tags()
        return cls(
            project_name=project_name,
            supported_tags=supported_tags,
            specifier=specifier,
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
            hashes=hashes,
        )
    def __init__(
        self,
        project_name, # type: str
        supported_tags, # type: List[Tag]
        specifier, # type: specifiers.BaseSpecifier
        prefer_binary=False, # type: bool
        allow_all_prereleases=False, # type: bool
        hashes=None, # type: Optional[Hashes]
    ):
        # type: (...) -> None
        """
        :param supported_tags: The PEP 425 tags supported by the target
            Python in order of preference (most preferred first).
        """
        self._allow_all_prereleases = allow_all_prereleases
        self._hashes = hashes
        self._prefer_binary = prefer_binary
        self._project_name = project_name
        self._specifier = specifier
        self._supported_tags = supported_tags
    def get_applicable_candidates(
        self,
        candidates, # type: List[InstallationCandidate]
    ):
        # type: (...) -> List[InstallationCandidate]
        """
        Return the applicable candidates from a list of candidates.
        """
        # Using None infers from the specifier instead.
        allow_prereleases = self._allow_all_prereleases or None
        specifier = self._specifier
        versions = {
            str(v) for v in specifier.filter(
                # We turn the version object into a str here because otherwise
                # when we're debundled but setuptools isn't, Python will see
                # packaging.version.Version and
                # pkg_resources._vendor.packaging.version.Version as different
                # types. This way we'll use a str as a common data interchange
                # format. If we stop using the pkg_resources provided specifier
                # and start using our own, we can drop the cast to str().
                (str(c.version) for c in candidates),
                prereleases=allow_prereleases,
            )
        }
        # Again, converting version to str to deal with debundling.
        applicable_candidates = [
            c for c in candidates if str(c.version) in versions
        ]
        filtered_applicable_candidates = filter_unallowed_hashes(
            candidates=applicable_candidates,
            hashes=self._hashes,
            project_name=self._project_name,
        )
        # Sorted ascending by preference: the best candidate sorts last.
        return sorted(filtered_applicable_candidates, key=self._sort_key)
    def _sort_key(self, candidate):
        # type: (InstallationCandidate) -> CandidateSortingKey
        """
        Function to pass as the `key` argument to a call to sorted() to sort
        InstallationCandidates by preference.
        Returns a tuple such that tuples sorting as greater using Python's
        default comparison operator are more preferred.
        The preference is as follows:
        First and foremost, candidates with allowed (matching) hashes are
        always preferred over candidates without matching hashes. This is
        because e.g. if the only candidate with an allowed hash is yanked,
        we still want to use that candidate.
        Second, excepting hash considerations, candidates that have been
        yanked (in the sense of PEP 592) are always less preferred than
        candidates that haven't been yanked. Then:
        If not finding wheels, they are sorted by version only.
        If finding wheels, then the sort order is by version, then:
          1. existing installs
          2. wheels ordered via Wheel.support_index_min(self._supported_tags)
          3. source archives
        If prefer_binary was set, then all wheels are sorted above sources.
        Note: it was considered to embed this logic into the Link
              comparison operators, but then different sdist links
              with the same version, would have to be considered equal
        """
        valid_tags = self._supported_tags
        support_num = len(valid_tags)
        build_tag = () # type: BuildTag
        binary_preference = 0
        link = candidate.link
        if link.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(link.filename)
            if not wheel.supported(valid_tags):
                raise UnsupportedWheel(
                    "{} is not a supported wheel for this platform. It "
                    "can't be sorted.".format(wheel.filename)
                )
            if self._prefer_binary:
                binary_preference = 1
            # Smaller support index = better tag match; negate so that a
            # better match yields a larger (more preferred) sort key.
            pri = -(wheel.support_index_min(valid_tags))
            if wheel.build_tag is not None:
                # NOTE(review): assumes the build tag starts with digits;
                # `match` would be None otherwise and .groups() would raise.
                match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
                build_tag_groups = match.groups()
                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
        else: # sdist
            pri = -(support_num)
        has_allowed_hash = int(link.is_hash_allowed(self._hashes))
        yank_value = -1 * int(link.is_yanked) # -1 for yanked.
        return (
            has_allowed_hash, yank_value, binary_preference, candidate.version,
            build_tag, pri,
        )
    def sort_best_candidate(
        self,
        candidates, # type: List[InstallationCandidate]
    ):
        # type: (...) -> Optional[InstallationCandidate]
        """
        Return the best candidate per the instance's sort order, or None if
        no candidate is acceptable.
        """
        if not candidates:
            return None
        best_candidate = max(candidates, key=self._sort_key)
        return best_candidate
    def compute_best_candidate(
        self,
        candidates, # type: List[InstallationCandidate]
    ):
        # type: (...) -> BestCandidateResult
        """
        Compute and return a `BestCandidateResult` instance.
        """
        applicable_candidates = self.get_applicable_candidates(candidates)
        best_candidate = self.sort_best_candidate(applicable_candidates)
        return BestCandidateResult(
            candidates,
            applicable_candidates=applicable_candidates,
            best_candidate=best_candidate,
        )
class PackageFinder:
    """This finds packages.
    This is meant to match easy_install's technique for looking for
    packages, by reading pages and looking for appropriate links.
    """
    def __init__(
        self,
        link_collector,  # type: LinkCollector
        target_python,  # type: TargetPython
        allow_yanked,  # type: bool
        format_control=None,  # type: Optional[FormatControl]
        candidate_prefs=None,  # type: CandidatePreferences
        ignore_requires_python=None,  # type: Optional[bool]
    ):
        # type: (...) -> None
        """
        This constructor is primarily meant to be used by the create() class
        method and from tests.
        :param format_control: A FormatControl object, used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        :param candidate_prefs: Options to use when creating a
            CandidateEvaluator object.
        """
        if candidate_prefs is None:
            candidate_prefs = CandidatePreferences()
        # Default FormatControl permits both binary and source formats.
        format_control = format_control or FormatControl(set(), set())
        self._allow_yanked = allow_yanked
        self._candidate_prefs = candidate_prefs
        self._ignore_requires_python = ignore_requires_python
        self._link_collector = link_collector
        self._target_python = target_python
        self.format_control = format_control
        # These are boring links that have already been logged somehow.
        self._logged_links = set()  # type: Set[Link]
    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    @classmethod
    def create(
        cls,
        link_collector,  # type: LinkCollector
        selection_prefs,  # type: SelectionPreferences
        target_python=None,  # type: Optional[TargetPython]
    ):
        # type: (...) -> PackageFinder
        """Create a PackageFinder.
        :param selection_prefs: The candidate selection preferences, as a
            SelectionPreferences object.
        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        """
        if target_python is None:
            target_python = TargetPython()
        candidate_prefs = CandidatePreferences(
            prefer_binary=selection_prefs.prefer_binary,
            allow_all_prereleases=selection_prefs.allow_all_prereleases,
        )
        return cls(
            candidate_prefs=candidate_prefs,
            link_collector=link_collector,
            target_python=target_python,
            allow_yanked=selection_prefs.allow_yanked,
            format_control=selection_prefs.format_control,
            ignore_requires_python=selection_prefs.ignore_requires_python,
        )
    @property
    def target_python(self):
        # type: () -> TargetPython
        # Read-only view of the interpreter-compatibility description.
        return self._target_python
    @property
    def search_scope(self):
        # type: () -> SearchScope
        # The search scope is owned by the link collector.
        return self._link_collector.search_scope
    @search_scope.setter
    def search_scope(self, search_scope):
        # type: (SearchScope) -> None
        # Forward the assignment to the owning link collector.
        self._link_collector.search_scope = search_scope
    @property
    def find_links(self):
        # type: () -> List[str]
        # Delegated to the link collector.
        return self._link_collector.find_links
    @property
    def index_urls(self):
        # type: () -> List[str]
        # Index URLs live on the collector's search scope.
        return self.search_scope.index_urls
    @property
    def trusted_hosts(self):
        # type: () -> Iterable[str]
        # Yield one netloc string (via build_netloc) per trusted origin
        # registered on the collector's HTTP session.
        for host_port in self._link_collector.session.pip_trusted_origins:
            yield build_netloc(*host_port)
    @property
    def allow_all_prereleases(self):
        # type: () -> bool
        return self._candidate_prefs.allow_all_prereleases
    def set_allow_all_prereleases(self):
        # type: () -> None
        # One-way switch: this API can only enable the preference.
        self._candidate_prefs.allow_all_prereleases = True
    @property
    def prefer_binary(self):
        # type: () -> bool
        return self._candidate_prefs.prefer_binary
    def set_prefer_binary(self):
        # type: () -> None
        # One-way switch: this API can only enable the preference.
        self._candidate_prefs.prefer_binary = True
    def make_link_evaluator(self, project_name):
        # type: (str) -> LinkEvaluator
        # Build a per-project LinkEvaluator; allowed formats are resolved
        # from the format control using the canonical name.
        canonical_name = canonicalize_name(project_name)
        formats = self.format_control.get_allowed_formats(canonical_name)
        return LinkEvaluator(
            project_name=project_name,
            canonical_name=canonical_name,
            formats=formats,
            target_python=self._target_python,
            allow_yanked=self._allow_yanked,
            ignore_requires_python=self._ignore_requires_python,
        )
    def _sort_links(self, links):
        # type: (Iterable[Link]) -> List[Link]
        """
        Returns elements of links in order, non-egg links first, egg links
        second, while eliminating duplicates
        """
        eggs, no_eggs = [], []
        seen = set()  # type: Set[Link]
        for link in links:
            if link not in seen:
                seen.add(link)
                if link.egg_fragment:
                    eggs.append(link)
                else:
                    no_eggs.append(link)
        return no_eggs + eggs
    def _log_skipped_link(self, link, reason):
        # type: (Link, str) -> None
        # Each skipped link is logged at most once per finder instance.
        if link not in self._logged_links:
            # Put the link at the end so the reason is more visible and because
            # the link string is usually very long.
            logger.debug('Skipping link: %s: %s', reason, link)
            self._logged_links.add(link)
    def get_install_candidate(self, link_evaluator, link):
        # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate]
        """
        If the link is a candidate for install, convert it to an
        InstallationCandidate and return it. Otherwise, return None.
        """
        is_candidate, result = link_evaluator.evaluate_link(link)
        if not is_candidate:
            # A falsy reason means the skip is deliberately silent.
            if result:
                self._log_skipped_link(link, reason=result)
            return None
        # On success, `result` is the version string.
        return InstallationCandidate(
            name=link_evaluator.project_name,
            link=link,
            version=result,
        )
    def evaluate_links(self, link_evaluator, links):
        # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate]
        """
        Convert links that are candidates to InstallationCandidate objects.
        """
        candidates = []
        for link in self._sort_links(links):
            candidate = self.get_install_candidate(link_evaluator, link)
            if candidate is not None:
                candidates.append(candidate)
        return candidates
    def process_project_url(self, project_url, link_evaluator):
        # type: (Link, LinkEvaluator) -> List[InstallationCandidate]
        # Fetch a single project page and convert its links into candidates.
        logger.debug(
            'Fetching project page and analyzing links: %s', project_url,
        )
        html_page = self._link_collector.fetch_page(project_url)
        if html_page is None:
            # Fetch failed; contribute no candidates from this page.
            return []
        page_links = list(parse_links(html_page))
        with indent_log():
            package_links = self.evaluate_links(
                link_evaluator,
                links=page_links,
            )
        return package_links
    # NOTE(review): lru_cache on a bound method keys on `self`, so it keeps
    # this finder (and everything it references) alive for the lifetime of
    # the cache; acceptable only if the finder lives as long as the
    # process — confirm.
    @functools.lru_cache(maxsize=None)
    def find_all_candidates(self, project_name):
        # type: (str) -> List[InstallationCandidate]
        """Find all available InstallationCandidate for project_name
        This checks index_urls and find_links.
        All versions found are returned as an InstallationCandidate list.
        See LinkEvaluator.evaluate_link() for details on which files
        are accepted.
        """
        collected_links = self._link_collector.collect_links(project_name)
        link_evaluator = self.make_link_evaluator(project_name)
        find_links_versions = self.evaluate_links(
            link_evaluator,
            links=collected_links.find_links,
        )
        page_versions = []
        for project_url in collected_links.project_urls:
            package_links = self.process_project_url(
                project_url, link_evaluator=link_evaluator,
            )
            page_versions.extend(package_links)
        file_versions = self.evaluate_links(
            link_evaluator,
            links=collected_links.files,
        )
        if file_versions:
            file_versions.sort(reverse=True)
            logger.debug(
                'Local files found: %s',
                ', '.join([
                    url_to_path(candidate.link.url)
                    for candidate in file_versions
                ])
            )
        # This is an intentional priority ordering
        return file_versions + find_links_versions + page_versions
    def make_candidate_evaluator(
        self,
        project_name,  # type: str
        specifier=None,  # type: Optional[specifiers.BaseSpecifier]
        hashes=None,  # type: Optional[Hashes]
    ):
        # type: (...) -> CandidateEvaluator
        """Create a CandidateEvaluator object to use.
        """
        candidate_prefs = self._candidate_prefs
        return CandidateEvaluator.create(
            project_name=project_name,
            target_python=self._target_python,
            prefer_binary=candidate_prefs.prefer_binary,
            allow_all_prereleases=candidate_prefs.allow_all_prereleases,
            specifier=specifier,
            hashes=hashes,
        )
    # NOTE(review): same lru_cache-on-a-method caveat as find_all_candidates.
    @functools.lru_cache(maxsize=None)
    def find_best_candidate(
        self,
        project_name,  # type: str
        specifier=None,  # type: Optional[specifiers.BaseSpecifier]
        hashes=None,  # type: Optional[Hashes]
    ):
        # type: (...) -> BestCandidateResult
        """Find matches for the given project and specifier.
        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.
        :return: A `BestCandidateResult` instance.
        """
        candidates = self.find_all_candidates(project_name)
        candidate_evaluator = self.make_candidate_evaluator(
            project_name=project_name,
            specifier=specifier,
            hashes=hashes,
        )
        return candidate_evaluator.compute_best_candidate(candidates)
    def find_requirement(self, req, upgrade):
        # type: (InstallRequirement, bool) -> Optional[InstallationCandidate]
        """Try to find a Link matching req
        Expects req, an InstallRequirement and upgrade, a boolean
        Returns a InstallationCandidate if found,
        Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
        """
        hashes = req.hashes(trust_internet=False)
        best_candidate_result = self.find_best_candidate(
            req.name, specifier=req.specifier, hashes=hashes,
        )
        best_candidate = best_candidate_result.best_candidate
        installed_version = None  # type: Optional[_BaseVersion]
        if req.satisfied_by is not None:
            installed_version = parse_version(req.satisfied_by.version)
        def _format_versions(cand_iter):
            # type: (Iterable[InstallationCandidate]) -> str
            # This repeated parse_version and str() conversion is needed to
            # handle different vendoring sources from pip and pkg_resources.
            # If we stop using the pkg_resources provided specifier and start
            # using our own, we can drop the cast to str().
            return ", ".join(sorted(
                {str(c.version) for c in cand_iter},
                key=parse_version,
            )) or "none"
        if installed_version is None and best_candidate is None:
            logger.critical(
                'Could not find a version that satisfies the requirement %s '
                '(from versions: %s)',
                req,
                _format_versions(best_candidate_result.iter_all()),
            )
            raise DistributionNotFound(
                'No matching distribution found for {}'.format(
                    req)
            )
        # The installed version is "best" when nothing applicable was found,
        # or when it is at least as new as the best applicable candidate.
        best_installed = False
        if installed_version and (
                best_candidate is None or
                best_candidate.version <= installed_version):
            best_installed = True
        if not upgrade and installed_version is not None:
            if best_installed:
                logger.debug(
                    'Existing installed version (%s) is most up-to-date and '
                    'satisfies requirement',
                    installed_version,
                )
            else:
                logger.debug(
                    'Existing installed version (%s) satisfies requirement '
                    '(most up-to-date version is %s)',
                    installed_version,
                    best_candidate.version,
                )
            return None
        if best_installed:
            # We have an existing version, and its the best version
            logger.debug(
                'Installed version (%s) is most up-to-date (past versions: '
                '%s)',
                installed_version,
                _format_versions(best_candidate_result.iter_applicable()),
            )
            raise BestVersionAlreadyInstalled
        logger.debug(
            'Using version %s (newest of versions: %s)',
            best_candidate.version,
            _format_versions(best_candidate_result.iter_applicable()),
        )
        return best_candidate
def _find_name_version_sep(fragment, canonical_name):
    # type: (str, str) -> int
    """Locate the index of the dash separating name and version parts.

    :param fragment: A <package>+<version> filename "fragment" (stem) or
        egg fragment, e.g. ``foo__bar-1.0``.
    :param canonical_name: The package's canonical name, e.g. ``foo-bar``.

    The canonicalized name does not necessarily have the same length as
    the name portion of the fragment, so the split point is found by
    probing each dash: the dash whose prefix canonicalizes to
    ``canonical_name`` is the separator.

    Raises ValueError when no dash produces a matching prefix.
    """
    for index, char in enumerate(fragment):
        if char != "-":
            continue
        if canonicalize_name(fragment[:index]) == canonical_name:
            return index
    raise ValueError(f"{fragment} does not match {canonical_name}")
def _extract_version_from_fragment(fragment, canonical_name):
    # type: (str, str) -> Optional[str]
    """Return the version substring of a <package>-<version> fragment.

    :param fragment: The string to parse. E.g. foo-2.1
    :param canonical_name: The canonicalized name of the package this
        belongs to.

    Returns None when the fragment does not contain the package name, or
    when nothing follows the name/version separator.
    """
    try:
        sep_index = _find_name_version_sep(fragment, canonical_name)
    except ValueError:
        return None
    # An empty remainder collapses to None, matching "no version found".
    return fragment[sep_index + 1:] or None
Split a long line to satisfy the linter.
"""Routines related to PyPI, indexes"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import functools
import logging
import re
from typing import TYPE_CHECKING
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import parse as parse_version
from pip._internal.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound,
InvalidWheelFilename,
UnsupportedWheel,
)
from pip._internal.index.collector import parse_links
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.models.target_python import TargetPython
from pip._internal.models.wheel import Wheel
from pip._internal.utils.filetypes import WHEEL_EXTENSION
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import build_netloc
from pip._internal.utils.packaging import check_requires_python
from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
from pip._internal.utils.urls import url_to_path
if TYPE_CHECKING:
from typing import FrozenSet, Iterable, List, Optional, Set, Tuple, Union
from pip._vendor.packaging.tags import Tag
from pip._vendor.packaging.version import _BaseVersion
from pip._internal.index.collector import LinkCollector
from pip._internal.models.search_scope import SearchScope
from pip._internal.req import InstallRequirement
from pip._internal.utils.hashes import Hashes
BuildTag = Union[Tuple[()], Tuple[int, str]]
CandidateSortingKey = (
Tuple[int, int, int, _BaseVersion, BuildTag, Optional[int]]
)
# Names that make up this module's public API.
__all__ = ['FormatControl', 'BestCandidateResult', 'PackageFinder']
# Module-level logger shared by all helpers and classes below.
logger = logging.getLogger(__name__)
def _check_link_requires_python(
    link,  # type: Link
    version_info,  # type: Tuple[int, int, int]
    ignore_requires_python=False,  # type: bool
):
    # type: (...) -> bool
    """
    Return whether the given Python version is compatible with a link's
    "Requires-Python" value.

    :param version_info: A 3-tuple of ints representing the Python
        major-minor-micro version to check.
    :param ignore_requires_python: Whether to ignore the "Requires-Python"
        value if the given Python version isn't compatible.
    """
    try:
        is_compatible = check_requires_python(
            link.requires_python, version_info=version_info,
        )
    except specifiers.InvalidSpecifier:
        # An unparseable Requires-Python is treated as compatible.
        logger.debug(
            "Ignoring invalid Requires-Python (%r) for link: %s",
            link.requires_python, link,
        )
        return True
    if is_compatible:
        return True
    version = '.'.join(map(str, version_info))
    if not ignore_requires_python:
        logger.debug(
            'Link requires a different Python (%s not in: %r): %s',
            version, link.requires_python, link,
        )
        return False
    # Incompatible, but the caller asked us to ignore Requires-Python.
    logger.debug(
        'Ignoring failed Requires-Python check (%s not in: %r) '
        'for link: %s',
        version, link.requires_python, link,
    )
    return True
class LinkEvaluator:
    """
    Responsible for evaluating links for a particular project.
    """
    # Matches a trailing "-pyX[.Y]" marker in a version string, e.g. the
    # "-py2.7" of "foo-1.0-py2.7".
    _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    def __init__(
        self,
        project_name,  # type: str
        canonical_name,  # type: str
        formats,  # type: FrozenSet[str]
        target_python,  # type: TargetPython
        allow_yanked,  # type: bool
        ignore_requires_python=None,  # type: Optional[bool]
    ):
        # type: (...) -> None
        """
        :param project_name: The user supplied package name.
        :param canonical_name: The canonical package name.
        :param formats: The formats allowed for this package. Should be a set
            with 'binary' or 'source' or both in it.
        :param target_python: The target Python interpreter to use when
            evaluating link compatibility. This is used, for example, to
            check wheel compatibility, as well as when checking the Python
            version, e.g. the Python version embedded in a link filename
            (or egg fragment) and against an HTML link's optional PEP 503
            "data-requires-python" attribute.
        :param allow_yanked: Whether files marked as yanked (in the sense
            of PEP 592) are permitted to be candidates for install.
        :param ignore_requires_python: Whether to ignore incompatible
            PEP 503 "data-requires-python" values in HTML links. Defaults
            to False.
        """
        if ignore_requires_python is None:
            ignore_requires_python = False
        self._allow_yanked = allow_yanked
        self._canonical_name = canonical_name
        self._ignore_requires_python = ignore_requires_python
        self._formats = formats
        self._target_python = target_python
        self.project_name = project_name
    def evaluate_link(self, link):
        # type: (Link) -> Tuple[bool, Optional[str]]
        """
        Determine whether a link is a candidate for installation.
        :return: A tuple (is_candidate, result), where `result` is (1) a
            version string if `is_candidate` is True, and (2) if
            `is_candidate` is False, an optional string to log the reason
            the link fails to qualify.
        """
        version = None
        # Yanked links (PEP 592) are rejected outright unless allowed.
        if link.is_yanked and not self._allow_yanked:
            reason = link.yanked_reason or '<none given>'
            return (False, f'yanked for reason: {reason}')
        if link.egg_fragment:
            # An explicit egg fragment names the distribution directly.
            egg_info = link.egg_fragment
            ext = link.ext
        else:
            egg_info, ext = link.splitext()
            if not ext:
                return (False, 'not a file')
            if ext not in SUPPORTED_EXTENSIONS:
                return (False, f'unsupported archive format: {ext}')
            if "binary" not in self._formats and ext == WHEEL_EXTENSION:
                reason = 'No binaries permitted for {}'.format(
                    self.project_name)
                return (False, reason)
            # Long-standing special case: reject .zip links whose path
            # mentions "macosx10".
            if "macosx10" in link.path and ext == '.zip':
                return (False, 'macosx10 one')
            if ext == WHEEL_EXTENSION:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    return (False, 'invalid wheel filename')
                if canonicalize_name(wheel.name) != self._canonical_name:
                    reason = 'wrong project name (not {})'.format(
                        self.project_name)
                    return (False, reason)
                supported_tags = self._target_python.get_tags()
                if not wheel.supported(supported_tags):
                    # Include the wheel's tags in the reason string to
                    # simplify troubleshooting compatibility issues.
                    file_tags = wheel.get_formatted_file_tags()
                    reason = (
                        "none of the wheel's tags ({}) are compatible "
                        "(run pip debug --verbose to show compatible tags)".format(
                            ', '.join(file_tags)
                        )
                    )
                    return (False, reason)
                version = wheel.version
        # This should be up by the self.ok_binary check, but see issue 2700.
        if "source" not in self._formats and ext != WHEEL_EXTENSION:
            reason = f'No sources permitted for {self.project_name}'
            return (False, reason)
        if not version:
            # Fall back to parsing the version out of the filename stem or
            # egg fragment.
            version = _extract_version_from_fragment(
                egg_info, self._canonical_name,
            )
        if not version:
            reason = f'Missing project version for {self.project_name}'
            return (False, reason)
        match = self._py_version_re.search(version)
        if match:
            # Strip a trailing "-pyX.Y" marker and compare it against the
            # target interpreter's version.
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != self._target_python.py_version:
                return (False, 'Python version is incorrect')
        supports_python = _check_link_requires_python(
            link, version_info=self._target_python.py_version_info,
            ignore_requires_python=self._ignore_requires_python,
        )
        if not supports_python:
            # Return None for the reason text to suppress calling
            # _log_skipped_link().
            return (False, None)
        logger.debug('Found link %s, version: %s', link, version)
        return (True, version)
def filter_unallowed_hashes(
    candidates,  # type: List[InstallationCandidate]
    hashes,  # type: Hashes
    project_name,  # type: str
):
    # type: (...) -> List[InstallationCandidate]
    """
    Return a new candidate list with hash-mismatching candidates dropped.

    When at least one candidate matches an allowed hash, the result keeps
    every candidate that either matches or declares no hash at all;
    otherwise all of the given candidates are kept.

    Keeping no-hash candidates when there is a match lets a warning be
    logged if a more preferred candidate has no hash. Keeping everything
    when nothing matches lets pip report the hash of the candidate that
    would otherwise have been installed (e.g. so the user can update their
    requirements file with the desired hash).
    """
    if not hashes:
        logger.debug(
            'Given no hashes to check %s links for project %r: '
            'discarding no candidates',
            len(candidates),
            project_name,
        )
        # Make sure we're not returning back the given value.
        return list(candidates)
    matches_or_no_digest = []
    non_matches = []  # kept only for the debug message below
    match_count = 0
    for candidate in candidates:
        link = candidate.link
        if link.has_hash and not link.is_hash_allowed(hashes=hashes):
            non_matches.append(candidate)
            continue
        if link.has_hash:
            match_count += 1
        matches_or_no_digest.append(candidate)
    if match_count:
        filtered = matches_or_no_digest
    else:
        # Make sure we're not returning back the given value.
        filtered = list(candidates)
    if len(filtered) == len(candidates):
        discard_message = 'discarding no candidates'
    else:
        discard_message = 'discarding {} non-matches:\n {}'.format(
            len(non_matches),
            '\n '.join(str(candidate.link) for candidate in non_matches)
        )
    logger.debug(
        'Checked %s links for project %r against %s hashes '
        '(%s matches, %s no digest): %s',
        len(candidates),
        project_name,
        hashes.digest_count,
        match_count,
        len(matches_or_no_digest) - match_count,
        discard_message
    )
    return filtered
class CandidatePreferences:
    """
    A simple value object bundling the knobs used when filtering and
    sorting InstallationCandidate objects.
    """
    def __init__(
        self,
        prefer_binary=False,  # type: bool
        allow_all_prereleases=False,  # type: bool
    ):
        # type: (...) -> None
        """
        :param prefer_binary: Whether wheels should sort above sources.
        :param allow_all_prereleases: Whether to allow all pre-releases.
        """
        self.prefer_binary = prefer_binary
        self.allow_all_prereleases = allow_all_prereleases
class BestCandidateResult:
    """A collection of candidates, returned by `PackageFinder.find_best_candidate`.
    This class is only intended to be instantiated by CandidateEvaluator's
    `compute_best_candidate()` method.
    """
    def __init__(
        self,
        candidates,  # type: List[InstallationCandidate]
        applicable_candidates,  # type: List[InstallationCandidate]
        best_candidate,  # type: Optional[InstallationCandidate]
    ):
        # type: (...) -> None
        """
        :param candidates: A sequence of all available candidates found.
        :param applicable_candidates: The applicable candidates.
        :param best_candidate: The most preferred candidate found, or None
            if no applicable candidates were found.
        """
        # Sanity-check the invariants the caller is expected to uphold.
        assert set(candidates) >= set(applicable_candidates)
        if best_candidate is None:
            assert not applicable_candidates
        else:
            assert best_candidate in applicable_candidates
        self._candidates = candidates
        self._applicable_candidates = applicable_candidates
        self.best_candidate = best_candidate
    def iter_all(self):
        # type: () -> Iterable[InstallationCandidate]
        """Iterate through every candidate that was found."""
        return iter(self._candidates)
    def iter_applicable(self):
        # type: () -> Iterable[InstallationCandidate]
        """Iterate through only the applicable candidates."""
        return iter(self._applicable_candidates)
class CandidateEvaluator:
    """
    Responsible for filtering and sorting candidates for installation based
    on what tags are valid.
    """
    @classmethod
    def create(
        cls,
        project_name,  # type: str
        target_python=None,  # type: Optional[TargetPython]
        prefer_binary=False,  # type: bool
        allow_all_prereleases=False,  # type: bool
        specifier=None,  # type: Optional[specifiers.BaseSpecifier]
        hashes=None,  # type: Optional[Hashes]
    ):
        # type: (...) -> CandidateEvaluator
        """Create a CandidateEvaluator object.
        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.
        :param hashes: An optional collection of allowed hashes.
        """
        if target_python is None:
            target_python = TargetPython()
        if specifier is None:
            # An empty SpecifierSet accepts every version.
            specifier = specifiers.SpecifierSet()
        supported_tags = target_python.get_tags()
        return cls(
            project_name=project_name,
            supported_tags=supported_tags,
            specifier=specifier,
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
            hashes=hashes,
        )
    def __init__(
        self,
        project_name,  # type: str
        supported_tags,  # type: List[Tag]
        specifier,  # type: specifiers.BaseSpecifier
        prefer_binary=False,  # type: bool
        allow_all_prereleases=False,  # type: bool
        hashes=None,  # type: Optional[Hashes]
    ):
        # type: (...) -> None
        """
        :param supported_tags: The PEP 425 tags supported by the target
            Python in order of preference (most preferred first).
        """
        self._allow_all_prereleases = allow_all_prereleases
        self._hashes = hashes
        self._prefer_binary = prefer_binary
        self._project_name = project_name
        self._specifier = specifier
        self._supported_tags = supported_tags
    def get_applicable_candidates(
        self,
        candidates,  # type: List[InstallationCandidate]
    ):
        # type: (...) -> List[InstallationCandidate]
        """
        Return the applicable candidates from a list of candidates.
        """
        # Using None infers from the specifier instead.
        allow_prereleases = self._allow_all_prereleases or None
        specifier = self._specifier
        versions = {
            str(v) for v in specifier.filter(
                # We turn the version object into a str here because otherwise
                # when we're debundled but setuptools isn't, Python will see
                # packaging.version.Version and
                # pkg_resources._vendor.packaging.version.Version as different
                # types. This way we'll use a str as a common data interchange
                # format. If we stop using the pkg_resources provided specifier
                # and start using our own, we can drop the cast to str().
                (str(c.version) for c in candidates),
                prereleases=allow_prereleases,
            )
        }
        # Again, converting version to str to deal with debundling.
        applicable_candidates = [
            c for c in candidates if str(c.version) in versions
        ]
        filtered_applicable_candidates = filter_unallowed_hashes(
            candidates=applicable_candidates,
            hashes=self._hashes,
            project_name=self._project_name,
        )
        # Returned pre-sorted: most preferred candidate sorts last.
        return sorted(filtered_applicable_candidates, key=self._sort_key)
    def _sort_key(self, candidate):
        # type: (InstallationCandidate) -> CandidateSortingKey
        """
        Function to pass as the `key` argument to a call to sorted() to sort
        InstallationCandidates by preference.
        Returns a tuple such that tuples sorting as greater using Python's
        default comparison operator are more preferred.
        The preference is as follows:
        First and foremost, candidates with allowed (matching) hashes are
        always preferred over candidates without matching hashes. This is
        because e.g. if the only candidate with an allowed hash is yanked,
        we still want to use that candidate.
        Second, excepting hash considerations, candidates that have been
        yanked (in the sense of PEP 592) are always less preferred than
        candidates that haven't been yanked. Then:
        If not finding wheels, they are sorted by version only.
        If finding wheels, then the sort order is by version, then:
        1. existing installs
        2. wheels ordered via Wheel.support_index_min(self._supported_tags)
        3. source archives
        If prefer_binary was set, then all wheels are sorted above sources.
        Note: it was considered to embed this logic into the Link
        comparison operators, but then different sdist links
        with the same version, would have to be considered equal
        """
        valid_tags = self._supported_tags
        support_num = len(valid_tags)
        build_tag = ()  # type: BuildTag
        binary_preference = 0
        link = candidate.link
        if link.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(link.filename)
            if not wheel.supported(valid_tags):
                raise UnsupportedWheel(
                    "{} is not a supported wheel for this platform. It "
                    "can't be sorted.".format(wheel.filename)
                )
            if self._prefer_binary:
                binary_preference = 1
            # A more-preferred tag yields a larger (less negative) priority.
            pri = -(wheel.support_index_min(valid_tags))
            if wheel.build_tag is not None:
                # NOTE(review): assumes the build tag begins with digits, so
                # `match` is never None — confirm against the wheel filename
                # spec / Wheel parsing.
                match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
                build_tag_groups = match.groups()
                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
        else:  # sdist
            # Sdists sort below every supported wheel at the same version.
            pri = -(support_num)
        has_allowed_hash = int(link.is_hash_allowed(self._hashes))
        yank_value = -1 * int(link.is_yanked)  # -1 for yanked.
        return (
            has_allowed_hash, yank_value, binary_preference, candidate.version,
            build_tag, pri,
        )
    def sort_best_candidate(
        self,
        candidates,  # type: List[InstallationCandidate]
    ):
        # type: (...) -> Optional[InstallationCandidate]
        """
        Return the best candidate per the instance's sort order, or None if
        no candidate is acceptable.
        """
        if not candidates:
            return None
        best_candidate = max(candidates, key=self._sort_key)
        return best_candidate
    def compute_best_candidate(
        self,
        candidates,  # type: List[InstallationCandidate]
    ):
        # type: (...) -> BestCandidateResult
        """
        Compute and return a `BestCandidateResult` instance.
        """
        applicable_candidates = self.get_applicable_candidates(candidates)
        best_candidate = self.sort_best_candidate(applicable_candidates)
        return BestCandidateResult(
            candidates,
            applicable_candidates=applicable_candidates,
            best_candidate=best_candidate,
        )
class PackageFinder:
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(
self,
link_collector, # type: LinkCollector
target_python, # type: TargetPython
allow_yanked, # type: bool
format_control=None, # type: Optional[FormatControl]
candidate_prefs=None, # type: CandidatePreferences
ignore_requires_python=None, # type: Optional[bool]
):
# type: (...) -> None
"""
This constructor is primarily meant to be used by the create() class
method and from tests.
:param format_control: A FormatControl object, used to control
the selection of source packages / binary packages when consulting
the index and links.
:param candidate_prefs: Options to use when creating a
CandidateEvaluator object.
"""
if candidate_prefs is None:
candidate_prefs = CandidatePreferences()
format_control = format_control or FormatControl(set(), set())
self._allow_yanked = allow_yanked
self._candidate_prefs = candidate_prefs
self._ignore_requires_python = ignore_requires_python
self._link_collector = link_collector
self._target_python = target_python
self.format_control = format_control
# These are boring links that have already been logged somehow.
self._logged_links = set() # type: Set[Link]
# Don't include an allow_yanked default value to make sure each call
# site considers whether yanked releases are allowed. This also causes
# that decision to be made explicit in the calling code, which helps
# people when reading the code.
@classmethod
def create(
cls,
link_collector, # type: LinkCollector
selection_prefs, # type: SelectionPreferences
target_python=None, # type: Optional[TargetPython]
):
# type: (...) -> PackageFinder
"""Create a PackageFinder.
:param selection_prefs: The candidate selection preferences, as a
SelectionPreferences object.
:param target_python: The target Python interpreter to use when
checking compatibility. If None (the default), a TargetPython
object will be constructed from the running Python.
"""
if target_python is None:
target_python = TargetPython()
candidate_prefs = CandidatePreferences(
prefer_binary=selection_prefs.prefer_binary,
allow_all_prereleases=selection_prefs.allow_all_prereleases,
)
return cls(
candidate_prefs=candidate_prefs,
link_collector=link_collector,
target_python=target_python,
allow_yanked=selection_prefs.allow_yanked,
format_control=selection_prefs.format_control,
ignore_requires_python=selection_prefs.ignore_requires_python,
)
    @property
    def target_python(self):
        # type: () -> TargetPython
        # Read-only view of the interpreter-compatibility description.
        return self._target_python
    @property
    def search_scope(self):
        # type: () -> SearchScope
        # The search scope is owned by the link collector.
        return self._link_collector.search_scope
    @search_scope.setter
    def search_scope(self, search_scope):
        # type: (SearchScope) -> None
        # Forward the assignment to the owning link collector.
        self._link_collector.search_scope = search_scope
    @property
    def find_links(self):
        # type: () -> List[str]
        # Delegated to the link collector.
        return self._link_collector.find_links
    @property
    def index_urls(self):
        # type: () -> List[str]
        # Index URLs live on the collector's search scope.
        return self.search_scope.index_urls
@property
def trusted_hosts(self):
# type: () -> Iterable[str]
for host_port in self._link_collector.session.pip_trusted_origins:
yield build_netloc(*host_port)
@property
def allow_all_prereleases(self):
# type: () -> bool
return self._candidate_prefs.allow_all_prereleases
def set_allow_all_prereleases(self):
# type: () -> None
self._candidate_prefs.allow_all_prereleases = True
@property
def prefer_binary(self):
# type: () -> bool
return self._candidate_prefs.prefer_binary
def set_prefer_binary(self):
# type: () -> None
self._candidate_prefs.prefer_binary = True
def make_link_evaluator(self, project_name):
    # type: (str) -> LinkEvaluator
    """Build a LinkEvaluator configured for a single project.

    The evaluator carries the per-project file-format policy (from
    ``format_control``) plus this finder's interpreter and yanked-file
    settings.
    """
    canonical_name = canonicalize_name(project_name)
    # Format control (e.g. --no-binary) is keyed by canonical name.
    formats = self.format_control.get_allowed_formats(canonical_name)
    return LinkEvaluator(
        project_name=project_name,
        canonical_name=canonical_name,
        formats=formats,
        target_python=self._target_python,
        allow_yanked=self._allow_yanked,
        ignore_requires_python=self._ignore_requires_python,
    )
def _sort_links(self, links):
# type: (Iterable[Link]) -> List[Link]
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set() # type: Set[Link]
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _log_skipped_link(self, link, reason):
    # type: (Link, str) -> None
    """Debug-log that ``link`` was skipped, at most once per link."""
    if link not in self._logged_links:
        # Put the link at the end so the reason is more visible and because
        # the link string is usually very long.
        logger.debug('Skipping link: %s: %s', reason, link)
        self._logged_links.add(link)
def get_install_candidate(self, link_evaluator, link):
    # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate]
    """
    If the link is a candidate for install, convert it to an
    InstallationCandidate and return it. Otherwise, return None.
    """
    is_candidate, result = link_evaluator.evaluate_link(link)
    if not is_candidate:
        if result:
            # On rejection, ``result`` is the human-readable reason.
            self._log_skipped_link(link, reason=result)
        return None

    # On acceptance, ``result`` is the version string for the link.
    return InstallationCandidate(
        name=link_evaluator.project_name,
        link=link,
        version=result,
    )
def evaluate_links(self, link_evaluator, links):
    # type: (LinkEvaluator, Iterable[Link]) -> List[InstallationCandidate]
    """
    Convert links that are candidates to InstallationCandidate objects.
    """
    # Evaluate in priority order and keep only the accepted links.
    maybe_candidates = (
        self.get_install_candidate(link_evaluator, link)
        for link in self._sort_links(links)
    )
    return [candidate for candidate in maybe_candidates
            if candidate is not None]
def process_project_url(self, project_url, link_evaluator):
    # type: (Link, LinkEvaluator) -> List[InstallationCandidate]
    """Fetch a single project page and evaluate every link found on it."""
    logger.debug(
        'Fetching project page and analyzing links: %s', project_url,
    )
    html_page = self._link_collector.fetch_page(project_url)
    if html_page is None:
        # Fetch failed or was skipped; treat as "no candidates here".
        return []

    page_links = list(parse_links(html_page))

    with indent_log():
        package_links = self.evaluate_links(
            link_evaluator,
            links=page_links,
        )

    return package_links
@functools.lru_cache(maxsize=None)
def find_all_candidates(self, project_name):
    # type: (str) -> List[InstallationCandidate]
    """Find all available InstallationCandidate for project_name

    This checks index_urls and find_links.
    All versions found are returned as an InstallationCandidate list.

    See LinkEvaluator.evaluate_link() for details on which files
    are accepted.
    """
    # NOTE(review): lru_cache on an instance method keys on ``self`` and
    # keeps the finder alive for the cache's lifetime; acceptable only if
    # finders are long-lived singletons -- confirm.
    collected_links = self._link_collector.collect_links(project_name)

    link_evaluator = self.make_link_evaluator(project_name)

    # Candidates coming from explicit --find-links sources.
    find_links_versions = self.evaluate_links(
        link_evaluator,
        links=collected_links.find_links,
    )

    # Candidates scraped from each index project page.
    page_versions = []
    for project_url in collected_links.project_urls:
        package_links = self.process_project_url(
            project_url, link_evaluator=link_evaluator,
        )
        page_versions.extend(package_links)

    # Candidates that are local files or directories.
    file_versions = self.evaluate_links(
        link_evaluator,
        links=collected_links.files,
    )
    if file_versions:
        file_versions.sort(reverse=True)
        logger.debug(
            'Local files found: %s',
            ', '.join([
                url_to_path(candidate.link.url)
                for candidate in file_versions
            ])
        )

    # This is an intentional priority ordering
    return file_versions + find_links_versions + page_versions
def make_candidate_evaluator(
    self,
    project_name,    # type: str
    specifier=None,  # type: Optional[specifiers.BaseSpecifier]
    hashes=None,     # type: Optional[Hashes]
):
    # type: (...) -> CandidateEvaluator
    """Create a CandidateEvaluator object to use.

    :param specifier: optional version specifier used to filter candidates.
    :param hashes: optional collection of allowed file hashes.
    """
    candidate_prefs = self._candidate_prefs
    return CandidateEvaluator.create(
        project_name=project_name,
        target_python=self._target_python,
        prefer_binary=candidate_prefs.prefer_binary,
        allow_all_prereleases=candidate_prefs.allow_all_prereleases,
        specifier=specifier,
        hashes=hashes,
    )
@functools.lru_cache(maxsize=None)
def find_best_candidate(
    self,
    project_name,    # type: str
    specifier=None,  # type: Optional[specifiers.BaseSpecifier]
    hashes=None,     # type: Optional[Hashes]
):
    # type: (...) -> BestCandidateResult
    """Find matches for the given project and specifier.

    :param specifier: An optional object implementing `filter`
        (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
        versions.

    :return: A `BestCandidateResult` instance.
    """
    # NOTE(review): the cache key includes ``specifier`` and ``hashes``,
    # so both must be hashable for this decorator to work -- confirm.
    candidates = self.find_all_candidates(project_name)
    candidate_evaluator = self.make_candidate_evaluator(
        project_name=project_name,
        specifier=specifier,
        hashes=hashes,
    )
    return candidate_evaluator.compute_best_candidate(candidates)
def find_requirement(self, req, upgrade):
    # type: (InstallRequirement, bool) -> Optional[InstallationCandidate]
    """Try to find a Link matching req

    Expects req, an InstallRequirement and upgrade, a boolean
    Returns a InstallationCandidate if found,
    Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
    """
    # Only trust hashes pinned by the user, never ones fetched remotely.
    hashes = req.hashes(trust_internet=False)
    best_candidate_result = self.find_best_candidate(
        req.name, specifier=req.specifier, hashes=hashes,
    )
    best_candidate = best_candidate_result.best_candidate

    installed_version = None  # type: Optional[_BaseVersion]
    if req.satisfied_by is not None:
        installed_version = parse_version(req.satisfied_by.version)

    def _format_versions(cand_iter):
        # type: (Iterable[InstallationCandidate]) -> str
        # This repeated parse_version and str() conversion is needed to
        # handle different vendoring sources from pip and pkg_resources.
        # If we stop using the pkg_resources provided specifier and start
        # using our own, we can drop the cast to str().
        return ", ".join(sorted(
            {str(c.version) for c in cand_iter},
            key=parse_version,
        )) or "none"

    # Nothing installed and nothing available: report versions seen & fail.
    if installed_version is None and best_candidate is None:
        logger.critical(
            'Could not find a version that satisfies the requirement %s '
            '(from versions: %s)',
            req,
            _format_versions(best_candidate_result.iter_all()),
        )

        raise DistributionNotFound(
            'No matching distribution found for {}'.format(
                req)
        )

    # True when the installed copy is at least as new as the best remote.
    best_installed = False
    if installed_version and (
            best_candidate is None or
            best_candidate.version <= installed_version):
        best_installed = True

    if not upgrade and installed_version is not None:
        # Without --upgrade, any satisfying installed version is kept.
        if best_installed:
            logger.debug(
                'Existing installed version (%s) is most up-to-date and '
                'satisfies requirement',
                installed_version,
            )
        else:
            logger.debug(
                'Existing installed version (%s) satisfies requirement '
                '(most up-to-date version is %s)',
                installed_version,
                best_candidate.version,
            )
        return None

    if best_installed:
        # We have an existing version, and its the best version
        logger.debug(
            'Installed version (%s) is most up-to-date (past versions: '
            '%s)',
            installed_version,
            _format_versions(best_candidate_result.iter_applicable()),
        )
        raise BestVersionAlreadyInstalled

    logger.debug(
        'Using version %s (newest of versions: %s)',
        best_candidate.version,
        _format_versions(best_candidate_result.iter_applicable()),
    )
    return best_candidate
def _find_name_version_sep(fragment, canonical_name):
    # type: (str, str) -> int
    """Find the separator's index based on the package's canonical name.

    :param fragment: A <package>+<version> filename "fragment" (stem) or
        egg fragment.
    :param canonical_name: The package's canonical name.

    This function is needed since the canonicalized name does not
    necessarily have the same length as the egg info's name part.
    An example::

    >>> fragment = 'foo__bar-1.0'
    >>> canonical_name = 'foo-bar'
    >>> _find_name_version_sep(fragment, canonical_name)
    8
    """
    # The separator is the single dash whose prefix canonicalizes to the
    # project's canonical name; scan the dashes left to right.
    dash_indexes = (i for i, ch in enumerate(fragment) if ch == "-")
    for idx in dash_indexes:
        if canonicalize_name(fragment[:idx]) == canonical_name:
            return idx
    raise ValueError(f"{fragment} does not match {canonical_name}")
def _extract_version_from_fragment(fragment, canonical_name):
    # type: (str, str) -> Optional[str]
    """Parse the version string from a <package>+<version> filename
    "fragment" (stem) or egg fragment.

    :param fragment: The string to parse. E.g. foo-2.1
    :param canonical_name: The canonicalized name of the package this
        belongs to.
    """
    try:
        sep_index = _find_name_version_sep(fragment, canonical_name)
    except ValueError:
        # The fragment does not start with this project's name at all.
        return None
    # Everything after the separator dash is the version; an empty
    # remainder means there is no version to extract.
    version = fragment[sep_index + 1:]
    return version or None
|
# -*- coding: utf8 -*-
import json
import urllib
from django.test import client
from mock import Mock, patch
from nose.tools import eq_
from pyquery import PyQuery as pq
import test_utils
import amo
from amo.urlresolvers import reverse
from search.tests import SphinxTestCase
from search import views
from search.client import SearchError
from addons.models import Addon, Category
from tags.models import Tag
def test_parse_bad_type():
    """
    Given a type that doesn't exist, we should not throw a KeyError.

    Note: This does not require sphinx to be running.
    """
    c = client.Client()
    try:
        # "dict" is not a registered addon type in the API.
        c.get("/en-US/firefox/api/1.2/search/firebug%20type:dict")
    except KeyError:  # pragma: no cover
        assert False, ("We should not throw a KeyError just because we had a "
                       "nonexistent addon type.")
class PersonaSearchTest(SphinxTestCase):
    """Search-page rendering for the personas category."""

    fixtures = ['base/apps', 'addons/persona']

    def get_response(self, **kwargs):
        # Build /search/?<kwargs> and GET it.
        return self.client.get(reverse('search.search') +
                               '?' + urllib.urlencode(kwargs))

    def test_default_personas_query(self):
        r = self.get_response(cat='personas')
        doc = pq(r.content)
        eq_(doc('title').text(),
            'Personas Search Results :: Add-ons for Firefox')
        eq_(len(doc('.secondary .categories h3')), 1)
        eq_(doc('.primary h3').text(), '1 Persona')
        eq_(len(doc('.persona-preview')), 1)
        eq_(doc('.thumbnails h4').text(), 'My Persona')
        eq_(doc('.thumbnails em').text(), '55 active daily users')
class FrontendSearchTest(SphinxTestCase):
    """End-to-end tests of the HTML search page (requires sphinx)."""

    def setUp(self):
        # Warms up the prefixer.
        self.client.get('/')
        super(FrontendSearchTest, self).setUp()

    def get_response(self, **kwargs):
        # Build /search/?<kwargs> and GET it.
        return self.client.get(reverse('search.search') +
                               '?' + urllib.urlencode(kwargs))

    def test_xss(self):
        """Inputs should be escaped so people don't XSS."""
        r = self.get_response(q='><strong>My Balls</strong>')
        doc = pq(r.content)
        eq_(len([1 for a in doc('strong') if a.text == 'My Balls']), 0)

    def test_tag_xss(self):
        """Test that the tag params are escaped as well."""
        r = self.get_response(tag="<script>alert('balls')</script>")
        self.assertNotContains(r, "<script>alert('balls')</script>")

    def test_default_query(self):
        """Verify some expected things on a query for nothing."""
        resp = self.get_response()
        doc = pq(resp.content)
        num_actual_results = len(Addon.objects.filter(
            versions__apps__application=amo.FIREFOX.id,
            versions__files__gt=0))
        # Verify that we have the expected number of results.
        eq_(doc('.item').length, num_actual_results)
        # We should count the number of expected results and match.
        eq_(doc('h3.results-count').text(), "Showing 1 - %d of %d results"
            % (num_actual_results, num_actual_results, ))
        # Verify that we have the Refine Results.
        eq_(doc('.secondary .highlight h3').length, 1)

    def test_default_collections_query(self):
        r = self.get_response(cat='collections')
        doc = pq(r.content)
        eq_(doc('title').text(),
            'Collection Search Results :: Add-ons for Firefox')

    def test_basic_query(self):
        "Test a simple query"
        resp = self.get_response(q='delicious')
        doc = pq(resp.content)
        el = doc('title')[0].text_content().strip()
        eq_(el, 'Search for delicious :: Add-ons for Firefox')

    def test_redirection(self):
        # appid 18 is Thunderbird; the app prefix must switch.
        resp = self.get_response(appid=18)
        self.assertRedirects(resp, '/en-US/thunderbird/search/?appid=18')

    def test_last_updated(self):
        """
        Verify that we have no new things in the last day.
        """
        resp = self.get_response(lup='1 day ago')
        doc = pq(resp.content)
        eq_(doc('.item').length, 0)

    def test_category(self):
        """
        Verify that we have nothing in category 72.
        """
        resp = self.get_response(cat='1,72')
        doc = pq(resp.content)
        eq_(doc('.item').length, 0)

    def test_addontype(self):
        resp = self.get_response(atype=amo.ADDON_LPAPP)
        doc = pq(resp.content)
        eq_(doc('.item').length, 0)

    def test_version_selected(self):
        "The selected version should match the lver param."
        resp = self.get_response(lver='3.6')
        doc = pq(resp.content)
        el = doc('#refine-compatibility li.selected')[0].text_content().strip()
        eq_(el, '3.6')

    def test_empty_version_selected(self):
        """If a user filters by a version that has no results, that version
        should show up on the filter list anyway."""
        resp = self.get_response(lver='3.7', q='Bookmarks')
        doc = pq(resp.content)
        el = doc('#refine-compatibility li.selected').text().strip()
        eq_(el, '3.7')

    def test_sort_newest(self):
        "Test that we selected the right sort."
        resp = self.get_response(sort='newest')
        doc = pq(resp.content)
        el = doc('.listing-header li.selected')[0].text_content().strip()
        eq_(el, 'Newest')

    def test_sort_default(self):
        "Test that by default we're sorting by Keyword Search"
        resp = self.get_response()
        doc = pq(resp.content)
        els = doc('.listing-header li.selected')
        eq_(len(els), 1, "No selected sort :(")
        eq_(els[0].text_content().strip(), 'Keyword Match')

    def test_sort_bad(self):
        "Test that a bad sort value won't bring the system down."
        self.get_response(sort='yermom')

    def test_non_existent_tag(self):
        """
        If you are searching for a tag that doesn't exist we shouldn't return
        any results.
        """
        resp = self.get_response(tag='stockholmsyndrome')
        doc = pq(resp.content)
        eq_(doc('.item').length, 0)

    def test_themes_in_results(self):
        """Many themes have platform ids that aren't 1, we should make sure we
        are returning them."""
        resp = self.get_response(q='grapple')
        doc = pq(resp.content)
        eq_('GrApple Yummy', doc('.item h3 a').text())

    def test_tag_refinement(self):
        """Don't show the tag list if there's no tags to be shown."""
        r = self.get_response(q='vuvuzela')
        doc = pq(r.content)
        eq_(len(doc('.addon-tags')), 0)

    def test_pagination_refinement(self):
        """Refinement panel shouldn't have the page parameter in urls."""
        r = self.get_response(page=2)
        doc = pq(r.content)
        for link in doc('#refine-results a'):
            assert 'page=2' not in link.attrib['href'], (
                "page parameter found in %s link" % link.text)

    def test_version_refinement(self):
        """For a particular addon, ensure that if we search for it, we get the
        full range of versions for refinement purposes."""
        # Firebug is 3.0-3.6 compatible
        # NOTE(review): ``doc(r.content)`` re-invokes the PyQuery object with
        # the raw body rather than a selector; plain ``doc('#refine-...')``
        # looks intended -- confirm before changing.
        r = self.get_response(q='firebug fingertips')
        doc = pq(r.content)
        eq_([a.text for a in doc(r.content)('#refine-compatibility a')],
            ['All Versions', '3.6', '3.5', '3.0'])

    def test_bad_cat(self):
        # Malformed category strings must not 500.
        r = self.get_response(cat='1)f,ff')
        eq_(r.status_code, 200)
class ViewTest(test_utils.TestCase):
    """Tests some of the functions used in building the view."""

    fixtures = ['base/fixtures']

    def setUp(self):
        # A request stub is enough: helpers only need get_full_path().
        self.fake_request = Mock()
        self.fake_request.get_full_path = lambda: 'http://fatgir.ls/'

    def test_get_categories(self):
        cats = Category.objects.all()
        cat = cats[0].id

        # Select a category.
        items = views._get_categories(self.fake_request, cats, category=cat)
        eq_(len(cats), len(items[1].children))
        assert any((i.selected for i in items[1].children))

        # Select an addon type.
        atype = cats[0].type
        items = views._get_categories(self.fake_request, cats,
                                      addon_type=atype)
        assert any((i.selected for i in items))

    def test_get_tags(self):
        t = Tag(tag_text='yermom')
        assert views._get_tags(self.fake_request, tags=[t], selected='yermom')
class AjaxTest(SphinxTestCase):
    """Tests for the JSON autocomplete endpoint."""

    fixtures = ['base/fixtures']

    def test_json(self):
        r = self.client.get(reverse('search.ajax') + '?q=del')
        data = json.loads(r.content)

        addon = Addon.objects.get(pk=3615)
        check_me = (
            ('id', addon.id),
            ('icon', addon.icon_url),
            ('label', unicode(addon.name)),
            ('value', unicode(addon.name).lower())
        )
        # Compare each serialized field of the first suggestion directly
        # (the old ``check = lambda x, y: ...`` ignored its own arguments
        # and silently closed over the loop variables instead).
        for key, expected in check_me:
            eq_(data[0][key], expected)

    # Patch the client class the ajax view actually instantiates
    # (AddonsPersonasClient); patching search.client.Client.query never
    # intercepted the call, so SearchError was never raised.
    @patch('search.client.AddonsPersonasClient.query')
    def test_errors(self, searchclient):
        # A backend SearchError must degrade to an empty JSON list.
        searchclient.side_effect = SearchError()
        r = self.client.get(reverse('search.ajax') + '?q=del')
        eq_('[]', r.content)
Fix AjaxTest.test_errors: patch search.client.AddonsPersonasClient.query (the client the ajax view uses) instead of search.client.Client.query.
# -*- coding: utf8 -*-
import json
import urllib
from django.test import client
from mock import Mock, patch
from nose.tools import eq_
from pyquery import PyQuery as pq
import test_utils
import amo
from amo.urlresolvers import reverse
from search.tests import SphinxTestCase
from search import views
from search.client import SearchError
from addons.models import Addon, Category
from tags.models import Tag
def test_parse_bad_type():
    """
    Given a type that doesn't exist, we should not throw a KeyError.

    Note: This does not require sphinx to be running.
    """
    c = client.Client()
    try:
        # "dict" is not a registered addon type in the API.
        c.get("/en-US/firefox/api/1.2/search/firebug%20type:dict")
    except KeyError:  # pragma: no cover
        assert False, ("We should not throw a KeyError just because we had a "
                       "nonexistent addon type.")
class PersonaSearchTest(SphinxTestCase):
    """Search-page rendering for the personas category."""

    fixtures = ['base/apps', 'addons/persona']

    def get_response(self, **kwargs):
        # Build /search/?<kwargs> and GET it.
        return self.client.get(reverse('search.search') +
                               '?' + urllib.urlencode(kwargs))

    def test_default_personas_query(self):
        r = self.get_response(cat='personas')
        doc = pq(r.content)
        eq_(doc('title').text(),
            'Personas Search Results :: Add-ons for Firefox')
        eq_(len(doc('.secondary .categories h3')), 1)
        eq_(doc('.primary h3').text(), '1 Persona')
        eq_(len(doc('.persona-preview')), 1)
        eq_(doc('.thumbnails h4').text(), 'My Persona')
        eq_(doc('.thumbnails em').text(), '55 active daily users')
class FrontendSearchTest(SphinxTestCase):
    """End-to-end tests of the HTML search page (requires sphinx)."""

    def setUp(self):
        # Warms up the prefixer.
        self.client.get('/')
        super(FrontendSearchTest, self).setUp()

    def get_response(self, **kwargs):
        # Build /search/?<kwargs> and GET it.
        return self.client.get(reverse('search.search') +
                               '?' + urllib.urlencode(kwargs))

    def test_xss(self):
        """Inputs should be escaped so people don't XSS."""
        r = self.get_response(q='><strong>My Balls</strong>')
        doc = pq(r.content)
        eq_(len([1 for a in doc('strong') if a.text == 'My Balls']), 0)

    def test_tag_xss(self):
        """Test that the tag params are escaped as well."""
        r = self.get_response(tag="<script>alert('balls')</script>")
        self.assertNotContains(r, "<script>alert('balls')</script>")

    def test_default_query(self):
        """Verify some expected things on a query for nothing."""
        resp = self.get_response()
        doc = pq(resp.content)
        num_actual_results = len(Addon.objects.filter(
            versions__apps__application=amo.FIREFOX.id,
            versions__files__gt=0))
        # Verify that we have the expected number of results.
        eq_(doc('.item').length, num_actual_results)
        # We should count the number of expected results and match.
        eq_(doc('h3.results-count').text(), "Showing 1 - %d of %d results"
            % (num_actual_results, num_actual_results, ))
        # Verify that we have the Refine Results.
        eq_(doc('.secondary .highlight h3').length, 1)

    def test_default_collections_query(self):
        r = self.get_response(cat='collections')
        doc = pq(r.content)
        eq_(doc('title').text(),
            'Collection Search Results :: Add-ons for Firefox')

    def test_basic_query(self):
        "Test a simple query"
        resp = self.get_response(q='delicious')
        doc = pq(resp.content)
        el = doc('title')[0].text_content().strip()
        eq_(el, 'Search for delicious :: Add-ons for Firefox')

    def test_redirection(self):
        # appid 18 is Thunderbird; the app prefix must switch.
        resp = self.get_response(appid=18)
        self.assertRedirects(resp, '/en-US/thunderbird/search/?appid=18')

    def test_last_updated(self):
        """
        Verify that we have no new things in the last day.
        """
        resp = self.get_response(lup='1 day ago')
        doc = pq(resp.content)
        eq_(doc('.item').length, 0)

    def test_category(self):
        """
        Verify that we have nothing in category 72.
        """
        resp = self.get_response(cat='1,72')
        doc = pq(resp.content)
        eq_(doc('.item').length, 0)

    def test_addontype(self):
        resp = self.get_response(atype=amo.ADDON_LPAPP)
        doc = pq(resp.content)
        eq_(doc('.item').length, 0)

    def test_version_selected(self):
        "The selected version should match the lver param."
        resp = self.get_response(lver='3.6')
        doc = pq(resp.content)
        el = doc('#refine-compatibility li.selected')[0].text_content().strip()
        eq_(el, '3.6')

    def test_empty_version_selected(self):
        """If a user filters by a version that has no results, that version
        should show up on the filter list anyway."""
        resp = self.get_response(lver='3.7', q='Bookmarks')
        doc = pq(resp.content)
        el = doc('#refine-compatibility li.selected').text().strip()
        eq_(el, '3.7')

    def test_sort_newest(self):
        "Test that we selected the right sort."
        resp = self.get_response(sort='newest')
        doc = pq(resp.content)
        el = doc('.listing-header li.selected')[0].text_content().strip()
        eq_(el, 'Newest')

    def test_sort_default(self):
        "Test that by default we're sorting by Keyword Search"
        resp = self.get_response()
        doc = pq(resp.content)
        els = doc('.listing-header li.selected')
        eq_(len(els), 1, "No selected sort :(")
        eq_(els[0].text_content().strip(), 'Keyword Match')

    def test_sort_bad(self):
        "Test that a bad sort value won't bring the system down."
        self.get_response(sort='yermom')

    def test_non_existent_tag(self):
        """
        If you are searching for a tag that doesn't exist we shouldn't return
        any results.
        """
        resp = self.get_response(tag='stockholmsyndrome')
        doc = pq(resp.content)
        eq_(doc('.item').length, 0)

    def test_themes_in_results(self):
        """Many themes have platform ids that aren't 1, we should make sure we
        are returning them."""
        resp = self.get_response(q='grapple')
        doc = pq(resp.content)
        eq_('GrApple Yummy', doc('.item h3 a').text())

    def test_tag_refinement(self):
        """Don't show the tag list if there's no tags to be shown."""
        r = self.get_response(q='vuvuzela')
        doc = pq(r.content)
        eq_(len(doc('.addon-tags')), 0)

    def test_pagination_refinement(self):
        """Refinement panel shouldn't have the page parameter in urls."""
        r = self.get_response(page=2)
        doc = pq(r.content)
        for link in doc('#refine-results a'):
            assert 'page=2' not in link.attrib['href'], (
                "page parameter found in %s link" % link.text)

    def test_version_refinement(self):
        """For a particular addon, ensure that if we search for it, we get the
        full range of versions for refinement purposes."""
        # Firebug is 3.0-3.6 compatible
        # NOTE(review): ``doc(r.content)`` re-invokes the PyQuery object with
        # the raw body rather than a selector; plain ``doc('#refine-...')``
        # looks intended -- confirm before changing.
        r = self.get_response(q='firebug fingertips')
        doc = pq(r.content)
        eq_([a.text for a in doc(r.content)('#refine-compatibility a')],
            ['All Versions', '3.6', '3.5', '3.0'])

    def test_bad_cat(self):
        # Malformed category strings must not 500.
        r = self.get_response(cat='1)f,ff')
        eq_(r.status_code, 200)
class ViewTest(test_utils.TestCase):
    """Tests some of the functions used in building the view."""

    fixtures = ['base/fixtures']

    def setUp(self):
        # A request stub is enough: helpers only need get_full_path().
        self.fake_request = Mock()
        self.fake_request.get_full_path = lambda: 'http://fatgir.ls/'

    def test_get_categories(self):
        cats = Category.objects.all()
        cat = cats[0].id

        # Select a category.
        items = views._get_categories(self.fake_request, cats, category=cat)
        eq_(len(cats), len(items[1].children))
        assert any((i.selected for i in items[1].children))

        # Select an addon type.
        atype = cats[0].type
        items = views._get_categories(self.fake_request, cats,
                                      addon_type=atype)
        assert any((i.selected for i in items))

    def test_get_tags(self):
        t = Tag(tag_text='yermom')
        assert views._get_tags(self.fake_request, tags=[t], selected='yermom')
class AjaxTest(SphinxTestCase):
    """Tests for the JSON autocomplete endpoint."""

    fixtures = ['base/fixtures']

    def test_json(self):
        r = self.client.get(reverse('search.ajax') + '?q=del')
        data = json.loads(r.content)

        addon = Addon.objects.get(pk=3615)
        check_me = (
            ('id', addon.id),
            ('icon', addon.icon_url),
            ('label', unicode(addon.name)),
            ('value', unicode(addon.name).lower())
        )
        # Compare each serialized field of the first suggestion directly
        # (the old ``check = lambda x, y: ...`` ignored its own arguments
        # and silently closed over the loop variables instead).
        for key, expected in check_me:
            eq_(data[0][key], expected)

    @patch('search.client.AddonsPersonasClient.query')
    def test_errors(self, searchclient):
        # A backend SearchError must degrade to an empty JSON list.
        searchclient.side_effect = SearchError()
        r = self.client.get(reverse('search.ajax') + '?q=del')
        eq_('[]', r.content)
|
"""
Django settings for NTHU_Course project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g17y@qg*@kb+=o^f$mwn^^s4mnnfl5ofbo5lc1bf_d87n#ja4@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'haystack',
'bootstrapform',
'index',
'data_center',
'crawler',
'search',
'table',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'NTHU_Course.urls'
WSGI_APPLICATION = 'NTHU_Course.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
CONFIG_PATH = os.path.join(BASE_DIR, 'NTHU_Course/mysql.ini')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': CONFIG_PATH,
},
}
}
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
Turn off DEBUG/TEMPLATE_DEBUG and restrict ALLOWED_HOSTS for production deployment.
"""
Django settings for NTHU_Course project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g17y@qg*@kb+=o^f$mwn^^s4mnnfl5ofbo5lc1bf_d87n#ja4@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = [
'.nthu-course.cf',
'.localhost',
]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'haystack',
'bootstrapform',
'index',
'data_center',
'crawler',
'search',
'table',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'NTHU_Course.urls'
WSGI_APPLICATION = 'NTHU_Course.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
CONFIG_PATH = os.path.join(BASE_DIR, 'NTHU_Course/mysql.ini')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': CONFIG_PATH,
},
}
}
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
|
from unittest import TestCase
from cqlengine.statements import InsertStatement, StatementException, AssignmentClause
class InsertStatementTests(TestCase):
    """Unit tests for the SQL rendering of InsertStatement."""

    def test_where_clause_failure(self):
        """ tests that where clauses cannot be added to Insert statements """
        ist = InsertStatement('table', None)
        with self.assertRaises(StatementException):
            ist.add_where_clause('s')

    def test_statement(self):
        # Columns render quoted, values as positional placeholders.
        ist = InsertStatement('table', None)
        ist.add_assignment_clause(AssignmentClause('a', 'b'))
        ist.add_assignment_clause(AssignmentClause('c', 'd'))
        self.assertEqual(
            unicode(ist),
            'INSERT INTO table ("a", "c") VALUES (:0, :1)'
        )

    def test_additional_rendering(self):
        # Replaces the former self.fail("Implement ttl and consistency")
        # placeholder: an INSERT built with a ttl must render USING TTL.
        ist = InsertStatement('table', ttl=60)
        ist.add_assignment_clause(AssignmentClause('a', 'b'))
        ist.add_assignment_clause(AssignmentClause('c', 'd'))
        self.assertIn('USING TTL 60', unicode(ist))
Add a test asserting that an InsertStatement built with a ttl renders a "USING TTL" clause.
from unittest import TestCase
from cqlengine.statements import InsertStatement, StatementException, AssignmentClause
class InsertStatementTests(TestCase):
    """Unit tests for the SQL rendering of InsertStatement."""

    def test_where_clause_failure(self):
        """ tests that where clauses cannot be added to Insert statements """
        ist = InsertStatement('table', None)
        with self.assertRaises(StatementException):
            ist.add_where_clause('s')

    def test_statement(self):
        # Columns render quoted, values as positional placeholders.
        ist = InsertStatement('table', None)
        ist.add_assignment_clause(AssignmentClause('a', 'b'))
        ist.add_assignment_clause(AssignmentClause('c', 'd'))
        self.assertEqual(
            unicode(ist),
            'INSERT INTO table ("a", "c") VALUES (:0, :1)'
        )

    def test_additional_rendering(self):
        # An INSERT built with a ttl must render a USING TTL clause.
        ist = InsertStatement('table', ttl=60)
        ist.add_assignment_clause(AssignmentClause('a', 'b'))
        ist.add_assignment_clause(AssignmentClause('c', 'd'))
        self.assertIn('USING TTL 60', unicode(ist))
|
"""
Main module to load the data
"""
# pylint: disable=C0330
# Map from Spark datasource format name to the Hive storage clause used in
# the generated CREATE TABLE statement.
# Fix: the first CSV literal now ends with a space -- the previous adjacent
# string concatenation produced "ROW FORMAT SERDE'org..." with the keyword
# fused to the quoted serde class name.
storage = {'parquet': 'STORED AS PARQUET',
           'com.databricks.spark.csv': ("ROW FORMAT SERDE "
                                        "'org.apache.hadoop.hive.serde2.OpenCSVSerde'\n"
                                        "STORED AS TEXTFILE")}
def add_partition_column(df, partition_col='dt', partition_with=None):
    """
    Return a new dataframe with a column added, to be used as partition column

    If `partition_col` already exists and `partition_with` is `None`, the
    dataframe is returned unmodified.

    :param df: A dataframe to add a partition columnn
    :type df: A Spark dataframe
    :param partition_col str: On which column should it be partitioned
    :param partition_with: A Spark Column expression for the `partition_col`.
        If not present, `partition_col` should already be in the input data
    :returns: A Spark dataframe with the partition column added
    :raises ValueError: if no expression is given and the column is missing
    """
    # An explicit expression always wins: derive the partition column.
    if partition_with is not None:
        return df.withColumn(partition_col, partition_with)
    # No expression: the column must already exist in the input data.
    if partition_col in df.columns:
        return df
    raise ValueError(("The partition_function can't be None "
                      "if partition_col is not part of the dataframe"))
def sanitize(key):
    """
    Sanitize column names (they cannot begin with '_') by surrounding them
    with backticks (`).
    """
    # Only names with a leading underscore need quoting.
    needs_quoting = key[0] == "_"
    return "`%s`" % key if needs_quoting else key
def create_schema(df, database, table, partition_col='dt', format_output='parquet'):
    """
    Create the schema (as a SQL string) for the dataframe in question

    The `format_output` is needed as this has to be specified in the
    create statement.

    :param df: The dataframe that has been written partitioned on "disk"
    :type df: A Spark dataframe
    :param database str: To which database does the table belong
    :param table str: On which tables has this been written to
    :param partition_col str: On which column should it be partitioned
    :param format_output str: What format should the table use.
    :returns: The CREATE TABLE statement as a string
    """
    init_string = "CREATE TABLE IF NOT EXISTS %s.%s (\n" % (database, table)
    # BUG FIX: filter on the column *name* (key), not its dtype (value);
    # the partition column must be excluded from the column list because
    # it is already declared in the PARTITIONED BY clause, otherwise Hive
    # rejects the DDL with a duplicated-column error.
    mid_string = ",\n".join([sanitize(key) + " " + value
                             for key, value in df.dtypes
                             if key != partition_col])
    end_string = "\n) PARTITIONED BY (%s STRING) %s" % (partition_col,
                                                        storage[format_output])
    return init_string + mid_string + end_string
def create_partitions(spark, df, database, table, partition_col='dt'):
    """
    Create the partitions on the metastore (not on "disk").

    This is necessary as when writing the data on "disk", the metastore
    is not yet aware that partitions exists.

    :param spark: A SparkSession
    :type spark: :class:`pyspark.sql.SparkSession`
    :param df: The dataframe that has been written partitioned on "disk"
    :type df: A Spark dataframe
    :param database str: To which database does the table belong
    :param table str: On which tables has this been written to
    :param partition_col str: On which column should it be partitioned
    """
    # One ALTER TABLE statement per distinct partition value in the data.
    unique_partitions = df.select(partition_col).distinct()
    for row in unique_partitions.collect():
        partition_value = row[partition_col]
        conf = {"partition": partition_col,
                "dbtable": ".".join([database, table]),
                "partition_value": partition_value}
        # NOTE(review): the partition value is interpolated unquoted; for
        # string-typed values (e.g. dates like 2017-01-01) Hive expects
        # quotes around the value -- confirm against the metastore.
        spark.sql("""ALTER TABLE %(dbtable)s ADD IF NOT EXISTS
                     PARTITION(%(partition)s=%(partition_value)s)""" % conf)
def load_data(spark, path, **kwargs):
    r"""
    Load the data in `path` as a Spark DataFrame

    :param spark: A SparkSession object
    :param path str: The path where the data is
    :return: A Spark DataFrame

    :Keyword Arguments:
    * *format* (``str``) --
      The format the data will be in. All options supported by Spark. Parquet is the default.
    * *header* (``bool``) --
      If reading a csv file, this will tell if the header is present (and use the schema)
    * *schema* (``pyspark.sql.types.StructType``)
      The input schema
    * *key* (``str``)
      In principle any `key`/`value` accepted by `spark.read.options` or by
      `findspark.init()`
    """
    readr = spark.read.options(**kwargs)
    # Additionally apply every kwarg that names a DataFrameReader method
    # (e.g. format='parquet' -> readr.format('parquet')).  Keys that are
    # not reader attributes are deliberately skipped: they are meant for
    # other consumers of **kwargs (findspark, SparkSession.builder, ...).
    for key, value in kwargs.items():
        try:
            readr = getattr(readr, key)(value)
        except AttributeError:
            pass
    df = (readr.load(path))
    return df
def create_spark_session(database='not provided', table='not provided', **kwargs):
    r"""
    Returns a Spark session.

    The database and table names are only used to build a descriptive
    application name for the session.

    :param database str: The database name. Only used to name the SparkSession
    :param table str: The table name. Only used to name the SparkSession

    :Keyword Arguments:
    * *key* (``str``) --
      All arguments valid for SparkSession.builder, such as `master=local`
    """
    # Import lazily: when pyspark is not directly importable, use
    # findspark to locate the Spark installation and retry.
    try:
        from pyspark.sql import SparkSession
    except ImportError:
        import findspark
        findspark.init(spark_home=kwargs.get('spark_home'),
                       python_path=kwargs.get('python_path'))
        from pyspark.sql import SparkSession
    builder = SparkSession.builder
    # NOTE(review): every kwarg (including spark_home/python_path used
    # above) is forwarded to builder.config -- presumably unknown keys are
    # ignored by Spark; confirm this is intended.
    for key, value in kwargs.items():
        builder = builder.config(key, value)
    spark = (builder.enableHiveSupport()
             .appName(" ".join([__file__, "Database:", database, "Table:", table]))
             .getOrCreate())
    return spark
def get_output_path(spark, database, table):
    """
    Return the path where data should be written using database and table as argument

    Runs Hive's "describe formatted" on the table and extracts the
    'Location:' row, i.e. the table's storage location.
    """
    result = (spark.sql("describe formatted %s.%s" % (database, table))
              .where("col_name='Location:'")
              .collect())
    # Exactly one 'Location:' row is expected; its value is in 'data_type'.
    location = result[0]['data_type']
    return location
def write_data(df, format_output, mode_output, partition_col, output_path, **kwargs):
    """
    Write `df` to `output_path`, partitioned by `partition_col`.

    :param df: The dataframe to write
    :type df: A Spark dataframe
    :param format_output str: Output format (e.g. 'parquet')
    :param mode_output str: Spark write mode (e.g. 'append', 'overwrite')
    :param partition_col str: Column to partition the output by
    :param output_path str: Destination path

    :Keyword Arguments:
    * *repartition* (``bool``) --
      If truthy, repartition the dataframe by the partition column first,
      which reduces the number of small files written.
    """
    if kwargs.get('repartition', False):
        result = df.repartition(partition_col)
    else:
        result = df
    (result.write
     .format(format_output)
     .mode(mode_output)
     .partitionBy(partition_col)
     .save(output_path))
def main(input_path, format_output, database, table, mode_output='append',
         partition_col='dt', partition_with=None, spark=None, **kwargs):
    r"""
    Load data, create the Hive table and write the partitioned output.

    :param input_path str: The location for the data to load. Passed to `.load` in Spark
    :param format_output str: One of `parquet` and `com.databricks.spark.csv` at the moment
    :param database str: The Hive database where to write the output
    :param table str: The Hive table where to write the output
    :param mode_output str: Anything accepted by Spark's `.write.mode()`.
    :param partition_col str: The partition column
    :param partition_with: A Spark Column expression for the `partition_col`. If not present,
        `partition_col` should already be in the input data
    :param spark: An optional pre-built SparkSession; one is created if omitted

    :Keyword Arguments:
    * *spark_config* (``dict``) --
      This dictionary contains options to be passed when building a `SparkSession` (for example
      `{'master': 'yarn'}`)
    * *format* (``str``) --
      The format the data will be in. All options supported by Spark. Parquet is the default.
    * *header* (``bool``) --
      If reading a csv file, this will tell if the header is present (and use the schema)
    * *schema* (``pyspark.sql.types.StructType``)
      The input schema
    * *master* (``str``) --
      Specify which `master` should be used
    * *repartition* (``bool``) --
      Whether to partition the data by partition column before writing. This reduces the number
      of small files written by Spark
    * *key* (``str``)
      In principle all `key` if accepted by `spark.read.options`, by `findspark.init()`, or by
      `SparkSession.builder.config`

    :Example:

    >>> import pyspark.sql.functions as sf
    >>> column = 'a_column_with_unixtime'
    >>> partition_function = lambda column: sf.from_unixtime(sf.col(column), fmt='yyyy-MM-dd')
    >>> from spark_partitionr import main
    >>> main('hdfs:///data/some_data', 'parquet', 'my_db', 'my_tbl', mode_output='overwrite',
             partition_col='dt', partition_with=partition_function('a_col'),
             master='yarn', format='com.databricks.spark.csv',
             header=True)
    """
    if not spark:
        spark = create_spark_session(database, table, **kwargs)
    df = load_data(spark, input_path, **kwargs)
    # Create the Hive table (if missing) before writing any data, so the
    # metastore already knows about it when partitions are registered.
    schema = create_schema(df, database, table, partition_col, format_output)
    spark.sql(schema)
    partitioned_df = add_partition_column(df, partition_col, partition_with)
    # Register the partitions in the metastore; writing the files alone
    # does not make them visible to Hive.
    create_partitions(spark, partitioned_df, database, table, partition_col)
    output_path = get_output_path(spark, database, table)
    write_data(partitioned_df, format_output, mode_output, partition_col, output_path, **kwargs)
Add option to un-nest fields
This is especially useful when Hive is not able to handle very nested
fields (see https://issues.apache.org/jira/browse/HIVE-15249 for
example).
"""
Main module to load the data
"""
# pylint: disable=C0330
storage = {'parquet': 'STORED AS PARQUET',
'com.databricks.spark.csv': ("ROW FORMAT SERDE"
"'org.apache.hadoop.hive.serde2.OpenCSVSerde'\n"
"STORED AS TEXTFILE")}
def add_partition_column(df, partition_col='dt', partition_with=None):
    """
    Return a dataframe carrying the partition column.

    If `partition_with` is None the input dataframe must already contain
    `partition_col` and is returned unchanged; otherwise a new dataframe
    is returned with `partition_col` computed from the given expression.

    :param df: A dataframe to add a partition column to
    :type df: A Spark dataframe
    :param partition_col str: Name of the partition column
    :param partition_with: A Spark Column expression for `partition_col`
    :returns: A Spark dataframe containing the partition column
    :raises ValueError: if `partition_with` is None and `partition_col`
        is not already part of `df`
    """
    if partition_with is not None:
        return df.withColumn(partition_col, partition_with)
    if partition_col in df.columns:
        return df
    raise ValueError(("The partition_function can't be None "
                      "if partition_col is not part of the dataframe"))
def sanitize(key):
    """
    Quote column names that would not be valid bare identifiers.

    Hive column names cannot begin with '_', so such names are wrapped
    in backticks (`); any other name is returned untouched.
    """
    return "`%s`" % key if key[0] == "_" else key
def create_schema(df, database, table, partition_col='dt', format_output='parquet'):
    """
    Create the schema (as a SQL string) for the dataframe in question

    The `format_output` is needed as this has to be specified in the
    create statement.

    :param df: The dataframe that has been written partitioned on "disk"
    :type df: A Spark dataframe
    :param database str: To which database does the table belong
    :param table str: On which tables has this been written to
    :param partition_col str: On which column should it be partitioned
    :param format_output str: What format should the table use.
    :returns: The CREATE TABLE statement as a string
    """
    init_string = "CREATE TABLE IF NOT EXISTS %s.%s (\n" % (database, table)
    # BUG FIX: filter on the column *name* (key), not its dtype (value);
    # the partition column must be excluded from the column list because
    # it is already declared in the PARTITIONED BY clause, otherwise Hive
    # rejects the DDL with a duplicated-column error.
    mid_string = ",\n".join([sanitize(key) + " " + value
                             for key, value in df.dtypes
                             if key != partition_col])
    end_string = "\n) PARTITIONED BY (%s STRING) %s" % (partition_col,
                                                        storage[format_output])
    return init_string + mid_string + end_string
def create_partitions(spark, df, database, table, partition_col='dt'):
    """
    Create the partitions on the metastore (not on "disk").

    This is necessary as when writing the data on "disk", the metastore
    is not yet aware that partitions exists.

    :param spark: A SparkSession
    :type spark: :class:`pyspark.sql.SparkSession`
    :param df: The dataframe that has been written partitioned on "disk"
    :type df: A Spark dataframe
    :param database str: To which database does the table belong
    :param table str: On which tables has this been written to
    :param partition_col str: On which column should it be partitioned
    """
    # One ALTER TABLE statement per distinct partition value in the data.
    unique_partitions = df.select(partition_col).distinct()
    for row in unique_partitions.collect():
        partition_value = row[partition_col]
        conf = {"partition": partition_col,
                "dbtable": ".".join([database, table]),
                "partition_value": partition_value}
        # NOTE(review): the partition value is interpolated unquoted; for
        # string-typed values (e.g. dates like 2017-01-01) Hive expects
        # quotes around the value -- confirm against the metastore.
        spark.sql("""ALTER TABLE %(dbtable)s ADD IF NOT EXISTS
                     PARTITION(%(partition)s=%(partition_value)s)""" % conf)
def load_data(spark, path, **kwargs):
    r"""
    Load the data in `path` as a Spark DataFrame

    :param spark: A SparkSession object
    :param path str: The path where the data is
    :return: A Spark DataFrame

    :Keyword Arguments:
    * *format* (``str``) --
      The format the data will be in. All options supported by Spark. Parquet is the default.
    * *header* (``bool``) --
      If reading a csv file, this will tell if the header is present (and use the schema)
    * *schema* (``pyspark.sql.types.StructType``)
      The input schema
    * *key* (``str``)
      In principle any `key`/`value` accepted by `spark.read.options` or by
      `findspark.init()`
    """
    readr = spark.read.options(**kwargs)
    # Additionally apply every kwarg that names a DataFrameReader method
    # (e.g. format='parquet' -> readr.format('parquet')).  Keys that are
    # not reader attributes are deliberately skipped: they are meant for
    # other consumers of **kwargs (findspark, SparkSession.builder, ...).
    for key, value in kwargs.items():
        try:
            readr = getattr(readr, key)(value)
        except AttributeError:
            pass
    df = (readr.load(path))
    return df
def create_spark_session(database='not provided', table='not provided', **kwargs):
    r"""
    Returns a Spark session.

    The database and table names are only used to build a descriptive
    application name for the session.

    :param database str: The database name. Only used to name the SparkSession
    :param table str: The table name. Only used to name the SparkSession

    :Keyword Arguments:
    * *key* (``str``) --
      All arguments valid for SparkSession.builder, such as `master=local`
    """
    # Import lazily: when pyspark is not directly importable, use
    # findspark to locate the Spark installation and retry.
    try:
        from pyspark.sql import SparkSession
    except ImportError:
        import findspark
        findspark.init(spark_home=kwargs.get('spark_home'),
                       python_path=kwargs.get('python_path'))
        from pyspark.sql import SparkSession
    builder = SparkSession.builder
    # NOTE(review): every kwarg (including spark_home/python_path used
    # above) is forwarded to builder.config -- presumably unknown keys are
    # ignored by Spark; confirm this is intended.
    for key, value in kwargs.items():
        builder = builder.config(key, value)
    spark = (builder.enableHiveSupport()
             .appName(" ".join([__file__, "Database:", database, "Table:", table]))
             .getOrCreate())
    return spark
def get_output_path(spark, database, table):
    """
    Return the path where data should be written using database and table as argument

    Runs Hive's "describe formatted" on the table and extracts the
    'Location:' row, i.e. the table's storage location.
    """
    result = (spark.sql("describe formatted %s.%s" % (database, table))
              .where("col_name='Location:'")
              .collect())
    # Exactly one 'Location:' row is expected; its value is in 'data_type'.
    location = result[0]['data_type']
    return location
def write_data(df, format_output, mode_output, partition_col, output_path, **kwargs):
    """
    Write `df` to `output_path`, partitioned by `partition_col`.

    :param df: The dataframe to write
    :type df: A Spark dataframe
    :param format_output str: Output format (e.g. 'parquet')
    :param mode_output str: Spark write mode (e.g. 'append', 'overwrite')
    :param partition_col str: Column to partition the output by
    :param output_path str: Destination path

    :Keyword Arguments:
    * *repartition* (``bool``) --
      If truthy, repartition the dataframe by the partition column first,
      which reduces the number of small files written.
    """
    if kwargs.get('repartition', False):
        result = df.repartition(partition_col)
    else:
        result = df
    (result.write
     .format(format_output)
     .mode(mode_output)
     .partitionBy(partition_col)
     .save(output_path))
def main(input_path, format_output, database, table, mode_output='append',
         partition_col='dt', partition_with=None, spark=None, **kwargs):
    r"""
    Load data, create the Hive table and write the partitioned output.

    :param input_path str: The location for the data to load. Passed to `.load` in Spark
    :param format_output str: One of `parquet` and `com.databricks.spark.csv` at the moment
    :param database str: The Hive database where to write the output
    :param table str: The Hive table where to write the output
    :param mode_output str: Anything accepted by Spark's `.write.mode()`.
    :param partition_col str: The partition column
    :param partition_with: A Spark Column expression for the `partition_col`. If not present,
        `partition_col` should already be in the input data
    :param spark: An optional pre-built SparkSession; one is created if omitted

    :Keyword Arguments:
    * *spark_config* (``dict``) --
      This dictionary contains options to be passed when building a `SparkSession` (for example
      `{'master': 'yarn'}`)
    * *format* (``str``) --
      The format the data will be in. All options supported by Spark. Parquet is the default.
    * *header* (``bool``) --
      If reading a csv file, this will tell if the header is present (and use the schema)
    * *schema* (``pyspark.sql.types.StructType``)
      The input schema
    * *master* (``str``) --
      Specify which `master` should be used
    * *repartition* (``bool``) --
      Whether to partition the data by partition column before writing. This reduces the number
      of small files written by Spark
    * *to_unnest* (``list``) --
      Which Struct's, if any, should be unnested as columns. This is helpful for the cases when
      a field is so deeply nested that it exceeds the maximum length supported by Hive
    * *key* (``str``)
      In principle all `key` if accepted by `spark.read.options`, by `findspark.init()`, or by
      `SparkSession.builder.config`

    :Example:

    >>> import pyspark.sql.functions as sf
    >>> column = 'a_column_with_unixtime'
    >>> partition_function = lambda column: sf.from_unixtime(sf.col(column), fmt='yyyy-MM-dd')
    >>> from spark_partitionr import main
    >>> main('hdfs:///data/some_data', 'parquet', 'my_db', 'my_tbl', mode_output='overwrite',
             partition_col='dt', partition_with=partition_function('a_col'),
             master='yarn', format='com.databricks.spark.csv',
             header=True, to_unnest=['deeply_nested_column'])
    """
    if not spark:
        spark = create_spark_session(database, table, **kwargs)
    df = load_data(spark, input_path, **kwargs)
    # Expand the requested struct columns into top-level columns, then
    # drop the original struct (works around Hive's nesting limits).
    # NOTE(review): '%s.*' selects the struct's fields alongside all
    # existing columns; if a nested field shares a name with a top-level
    # column the select will contain duplicates -- confirm upstream data
    # avoids this.
    to_unnest = kwargs.get('to_unnest')
    if to_unnest:
        for el in to_unnest:
            df = df.select('%s.*' % el, *df.columns).drop(el)
    # Create the Hive table (if missing) before writing any data, so the
    # metastore already knows about it when partitions are registered.
    schema = create_schema(df, database, table, partition_col, format_output)
    spark.sql(schema)
    partitioned_df = add_partition_column(df, partition_col, partition_with)
    # Register the partitions in the metastore; writing the files alone
    # does not make them visible to Hive.
    create_partitions(spark, partitioned_df, database, table, partition_col)
    output_path = get_output_path(spark, database, table)
    write_data(partitioned_df, format_output, mode_output, partition_col, output_path, **kwargs)
|
# -*- coding: utf-8 -*-
from scout.commands import cli
from scout.server.extensions import store
def test_update_institute(mock_app):
    """Tests the CLI that updates an institute"""
    runner = mock_app.test_cli_runner()
    assert runner

    # Test CLI base, no arguments provided
    result = runner.invoke(cli, ["update", "institute"])
    # BUG FIX: the exact rendering of the missing-argument error (quoting
    # and case of "institute-id") differs between click versions; assert
    # only the stable prefix of the message.
    assert "Error: Missing argument" in result.output

    # Test CLI passing institute id that is not in database
    result = runner.invoke(cli, ["update", "institute", "cust666"])
    # it should return a warning message
    assert "WARNING Institute cust666 does not exist in database" in result.output

    # original institute in database
    institute_obj = store.institute_collection.find_one()

    updates = {
        "coverage_cutoff": 15,
        "frequency_cutoff": 0.05,
        "display_name": "updated_name",
        "sanger_recipients": None,
    }

    # Test CLI to update coverage cutoff
    result = runner.invoke(
        cli, ["update", "institute", "cust000", "-c", updates["coverage_cutoff"]]
    )
    # it should exit without error
    assert result.exit_code == 0
    assert "INFO Institute updated" in result.output

    # Test CLI to update display_name
    result = runner.invoke(cli, ["update", "institute", "cust000", "-d", updates["display_name"]])
    # it should exit without error
    assert result.exit_code == 0
    assert "INFO Institute updated" in result.output

    # Test CLI to update frequency_cutoff
    result = runner.invoke(
        cli, ["update", "institute", "cust000", "-f", updates["frequency_cutoff"]]
    )
    # it should exit without error
    assert result.exit_code == 0
    assert "INFO Institute updated" in result.output

    # Test CLI to remove a sanger recipient
    result = runner.invoke(
        cli,
        ["update", "institute", "cust000", "-r", institute_obj["sanger_recipients"][0]],
    )
    # it should exit without error
    assert result.exit_code == 0
    assert "INFO Institute updated" in result.output

    # check that updates were really performed on database:
    updated_institute = store.institute_collection.find_one()
    for key in updates.keys():
        assert institute_obj[key] != updated_institute[key]

    # Test CLI to update sanger recipients
    result = runner.invoke(
        cli,
        ["update", "institute", "cust000", "-s", institute_obj["sanger_recipients"][0]],
    )
    # it should exit without error
    assert result.exit_code == 0
    assert "INFO Institute updated" in result.output

    # make sure that recipient has been introduced
    updated_institute = store.institute_collection.find_one()
    # updated sanger recipients should be equal but in reversed order
    # to recipients in original institute object
    assert updated_institute["sanger_recipients"] == institute_obj["sanger_recipients"][::-1]
Fix test_update_institute
# -*- coding: utf-8 -*-
from scout.commands import cli
from scout.server.extensions import store
def test_update_institute(mock_app):
    """Tests the CLI that updates an institute"""
    runner = mock_app.test_cli_runner()
    assert runner

    # Test CLI base, no arguments provided
    result = runner.invoke(cli, ["update", "institute"])
    # only the stable prefix is asserted: the exact argument rendering
    # differs between click versions
    assert "Error: Missing argument" in result.output

    # Test CLI passing institute id that is not in database
    result = runner.invoke(cli, ["update", "institute", "cust666"])
    # it should return a warning message
    assert "WARNING Institute cust666 does not exist in database" in result.output

    # original institute in database
    institute_obj = store.institute_collection.find_one()

    updates = {
        "coverage_cutoff": 15,
        "frequency_cutoff": 0.05,
        "display_name": "updated_name",
        "sanger_recipients": None,
    }

    # Test CLI to update coverage cutoff
    result = runner.invoke(
        cli, ["update", "institute", "cust000", "-c", updates["coverage_cutoff"]]
    )
    # it should exit without error
    assert result.exit_code == 0
    assert "INFO Institute updated" in result.output

    # Test CLI to update display_name
    result = runner.invoke(cli, ["update", "institute", "cust000", "-d", updates["display_name"]])
    # it should exit without error
    assert result.exit_code == 0
    assert "INFO Institute updated" in result.output

    # Test CLI to update frequency_cutoff
    result = runner.invoke(
        cli, ["update", "institute", "cust000", "-f", updates["frequency_cutoff"]]
    )
    # it should exit without error
    assert result.exit_code == 0
    assert "INFO Institute updated" in result.output

    # Test CLI to remove a sanger recipient
    result = runner.invoke(
        cli, ["update", "institute", "cust000", "-r", institute_obj["sanger_recipients"][0]],
    )
    # it should exit without error
    assert result.exit_code == 0
    assert "INFO Institute updated" in result.output

    # check that updates were really performed on database:
    updated_institute = store.institute_collection.find_one()
    for key in updates.keys():
        assert institute_obj[key] != updated_institute[key]

    # Test CLI to update sanger recipients
    result = runner.invoke(
        cli, ["update", "institute", "cust000", "-s", institute_obj["sanger_recipients"][0]],
    )
    # it should exit without error
    assert result.exit_code == 0
    assert "INFO Institute updated" in result.output

    # make sure that recipient has been introduced
    updated_institute = store.institute_collection.find_one()
    # updated sanger recipients should be equal but in reversed order
    # to recipients in original institute object
    assert updated_institute["sanger_recipients"] == institute_obj["sanger_recipients"][::-1]
|
import sqlite3
import xbmc
import xbmcvfs
from lib.libs.addonsettings import settings
# Current schema version of the processeditems database; migrations are
# applied by `upgrade_processeditems`.
VERSION = 1
# DEPRECATED short 2017-08-26: `medialabel IS NULL` x2 is only for transitioning from VERSION = 0
# maybe the first check in `_get_version` can go later on
class ProcessedItems(object):
    """Persists per-item processing state (next check date, media label and
    an opaque data field) in a small sqlite database keyed by
    (mediaid, mediatype)."""

    def __init__(self):
        # data is uniqueid for sets, last known season for TV shows
        self.db = Database('processeditems', upgrade_processeditems)

    def is_stale(self, mediaid, mediatype, medialabel):
        # Stale = there is no matching row whose nextdate is still in the
        # future.  `medialabel IS NULL` keeps rows written by schema
        # VERSION 0 valid during the transition.
        result = self.db.fetchone("""SELECT * FROM processeditems WHERE mediaid=? AND mediatype=?
            AND (medialabel=? or medialabel IS NULL) AND nextdate > datetime('now')""", (mediaid, mediatype, medialabel))
        return True if not result else False

    def set_nextdate(self, mediaid, mediatype, medialabel, nextdate):
        # Upsert the next processing date; a falsy nextdate clears it.
        exists = self._key_exists(mediaid, mediatype)
        scriptbit = "datetime('{0}')".format(nextdate) if nextdate else 'null'
        script = "UPDATE processeditems SET nextdate={0}, medialabel=? WHERE mediaid=? AND mediatype=?" if exists \
            else "INSERT INTO processeditems (nextdate, medialabel, mediaid, mediatype) VALUES ({0}, ?, ?, ?)"
        self.db.execute(script.format(scriptbit), (medialabel, mediaid, mediatype))

    def get_data(self, mediaid, mediatype):
        # Returns the stored `data` field, or None when no row exists.
        result = self.db.fetchone("SELECT * FROM processeditems WHERE mediaid=? AND mediatype=?", (mediaid, mediatype))
        if result:
            return result['data']

    def set_data(self, mediaid, mediatype, medialabel, data):
        # Upsert the opaque data field together with the media label.
        exists = self._key_exists(mediaid, mediatype)
        script = "UPDATE processeditems SET data=?, medialabel=? WHERE mediaid=? AND mediatype=?" if exists \
            else "INSERT INTO processeditems (data, medialabel, mediaid, mediatype) VALUES (?, ?, ?, ?)"
        self.db.execute(script, (data, medialabel, mediaid, mediatype))

    def exists(self, mediaid, mediatype, medialabel):
        # `medialabel IS NULL` matches rows written by schema VERSION 0.
        return bool(self.db.fetchone("""SELECT * FROM processeditems WHERE mediaid=? AND mediatype=?
            AND (medialabel=? or medialabel IS NULL)""", (mediaid, mediatype, medialabel)))

    def does_not_exist(self, mediaid, mediatype, medialabel):
        return not self.exists(mediaid, mediatype, medialabel)

    def _key_exists(self, mediaid, mediatype):
        # Existence check on the primary key only (ignores medialabel).
        return bool(self.db.fetchone("SELECT * FROM processeditems WHERE mediaid=? AND mediatype=?",
                                     (mediaid, mediatype)))
def upgrade_processeditems(db, fromversion):
    """Create or migrate the processeditems table; return the new version.

    :param db: the Database wrapper to run DDL against
    :param fromversion: schema version found on disk (-1 = no database)
    """
    if fromversion == VERSION:
        # Already current; still run the one-off beta data cleanup.
        _quickfix_beta(db)
        return VERSION
    if fromversion == -1:
        # new install, build the database fresh
        db.execute("""CREATE TABLE processeditems (mediaid INTEGER NOT NULL, mediatype TEXT NOT NULL,
            medialabel TEXT, nextdate DATETIME, data TEXT, PRIMARY KEY (mediaid, mediatype))""")
        return VERSION
    # Apply incremental migrations from the detected version upwards.
    workingversion = fromversion
    if workingversion == 0:
        db.execute("""ALTER TABLE processeditems ADD COLUMN medialabel TEXT""")
        workingversion = 1
    return workingversion
def _quickfix_beta(db):
    # DEPRECATED: This is only for beta 3, goes away before final
    # Deletes rows whose mediaid is not an integer (bad data written by an
    # earlier beta); all deletions run in one transaction via executemany.
    fixids = []
    for row in db.fetchall("SELECT * FROM processeditems"):
        if not row:
            continue
        try:
            int(row['mediaid'])
        except ValueError:
            fixids.append(("DELETE FROM processeditems WHERE mediaid=?", (row['mediaid'],)))
    if fixids:
        db.executemany(*fixids)
# Name of the per-database key/value settings table.
SETTINGS_TABLE_VALUE = 'database-settings'
# must be quoted to use as identifier
SETTINGS_TABLE = '"{0}"'.format(SETTINGS_TABLE_VALUE)
class Database(object):
    """Thin wrapper around a sqlite3 file stored in the addon's data
    directory, with a tiny versioned-schema/settings mechanism."""

    def __init__(self, databasename, upgrade_fn):
        # Ensure the addon data directory exists before opening the file.
        dbpath = settings.datapath
        if not xbmcvfs.exists(dbpath):
            xbmcvfs.mkdir(dbpath)
        dbpath = xbmc.translatePath(dbpath + databasename + '.db')
        self._conn = sqlite3.connect(dbpath)
        # Row objects allow column access by name; text_factory=str keeps
        # values as byte strings (Kodi / Python 2 environment).
        self._conn.row_factory = sqlite3.Row
        self._conn.text_factory = str
        self._cursor = self._conn.cursor()
        self._setup(upgrade_fn)

    def execute(self, query, args=()):
        # `with self._conn` commits on success and rolls back on error.
        with self._conn:
            self._execute_raw(query, args)

    def executemany(self, *queriesandargs):
        # Run several (query, args) tuples inside a single transaction.
        with self._conn:
            for queryargs in queriesandargs:
                self._execute_raw(*queryargs)

    def fetchall(self, query, args=()):
        self._execute_raw(query, args)
        return self._cursor.fetchall()

    def fetchone(self, query, args=()):
        self._execute_raw(query, args)
        return self._cursor.fetchone()

    def _execute_raw(self, query, args=()):
        self._cursor.execute(query, args)

    def _setup(self, upgrade_fn):
        # Detect the on-disk schema version, let `upgrade_fn` migrate, and
        # persist the new version if it changed.
        version = self._get_version()
        newversion = upgrade_fn(self, version)
        if version != newversion:
            self._update_version(newversion)

    def _build_settings(self, version=-1):
        # Create the settings table and seed the stored schema version.
        self.executemany(
            ("CREATE TABLE {0} (name TEXT PRIMARY KEY NOT NULL, value TEXT)".format(SETTINGS_TABLE),),
            ("INSERT INTO {0} (name, value) VALUES ('database version', ?)".format(SETTINGS_TABLE), (str(version),))
        )

    def _get_version(self):
        # DEPRECATED transition logic (see module note): a data table with
        # no settings table means the pre-settings schema VERSION 0.
        if not self.fetchone("SELECT name FROM sqlite_master WHERE type='table' AND name='processeditems'"):
            self._build_settings()
            return -1  # for future databases, this check goes away ...
        if not self.fetchone("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (SETTINGS_TABLE_VALUE,)):
            self._build_settings(0)  # ... and this is empty
            return 0  # and returns -1
        return int(self._get_setting_value('database version', 0))

    def _get_setting_value(self, settingname, default=None):
        result = self.fetchone("SELECT value FROM {0} WHERE name=?".format(SETTINGS_TABLE), (settingname,))
        if not result:
            return default
        return result['value']

    def _update_version(self, newversion):
        # Upsert the 'database version' setting.
        exists = bool(self.fetchone("SELECT * FROM {0} WHERE name='database version'".format(SETTINGS_TABLE)))
        script = "UPDATE {0} SET value=? WHERE name=?" if exists else "INSERT INTO {0} (value, name) VALUES (?, ?)"
        self.execute(script.format(SETTINGS_TABLE), (str(newversion), 'database version'))
remove beta3 hack fix
import sqlite3
import xbmc
import xbmcvfs
from lib.libs.addonsettings import settings
# Current schema version of the processeditems database; migrations are
# applied by `upgrade_processeditems`.
VERSION = 1
# DEPRECATED short 2017-08-26: `medialabel IS NULL` x2 is only for transitioning from VERSION = 0
# maybe the first check in `_get_version` can go later on
class ProcessedItems(object):
    """Persists per-item processing state (next check date, media label and
    an opaque data field) in a small sqlite database keyed by
    (mediaid, mediatype)."""

    def __init__(self):
        # data is uniqueid for sets, last known season for TV shows
        self.db = Database('processeditems', upgrade_processeditems)

    def is_stale(self, mediaid, mediatype, medialabel):
        # Stale = there is no matching row whose nextdate is still in the
        # future.  `medialabel IS NULL` keeps rows written by schema
        # VERSION 0 valid during the transition.
        result = self.db.fetchone("""SELECT * FROM processeditems WHERE mediaid=? AND mediatype=?
            AND (medialabel=? or medialabel IS NULL) AND nextdate > datetime('now')""", (mediaid, mediatype, medialabel))
        return True if not result else False

    def set_nextdate(self, mediaid, mediatype, medialabel, nextdate):
        # Upsert the next processing date; a falsy nextdate clears it.
        exists = self._key_exists(mediaid, mediatype)
        scriptbit = "datetime('{0}')".format(nextdate) if nextdate else 'null'
        script = "UPDATE processeditems SET nextdate={0}, medialabel=? WHERE mediaid=? AND mediatype=?" if exists \
            else "INSERT INTO processeditems (nextdate, medialabel, mediaid, mediatype) VALUES ({0}, ?, ?, ?)"
        self.db.execute(script.format(scriptbit), (medialabel, mediaid, mediatype))

    def get_data(self, mediaid, mediatype):
        # Returns the stored `data` field, or None when no row exists.
        result = self.db.fetchone("SELECT * FROM processeditems WHERE mediaid=? AND mediatype=?", (mediaid, mediatype))
        if result:
            return result['data']

    def set_data(self, mediaid, mediatype, medialabel, data):
        # Upsert the opaque data field together with the media label.
        exists = self._key_exists(mediaid, mediatype)
        script = "UPDATE processeditems SET data=?, medialabel=? WHERE mediaid=? AND mediatype=?" if exists \
            else "INSERT INTO processeditems (data, medialabel, mediaid, mediatype) VALUES (?, ?, ?, ?)"
        self.db.execute(script, (data, medialabel, mediaid, mediatype))

    def exists(self, mediaid, mediatype, medialabel):
        # `medialabel IS NULL` matches rows written by schema VERSION 0.
        return bool(self.db.fetchone("""SELECT * FROM processeditems WHERE mediaid=? AND mediatype=?
            AND (medialabel=? or medialabel IS NULL)""", (mediaid, mediatype, medialabel)))

    def does_not_exist(self, mediaid, mediatype, medialabel):
        return not self.exists(mediaid, mediatype, medialabel)

    def _key_exists(self, mediaid, mediatype):
        # Existence check on the primary key only (ignores medialabel).
        return bool(self.db.fetchone("SELECT * FROM processeditems WHERE mediaid=? AND mediatype=?",
                                     (mediaid, mediatype)))
def upgrade_processeditems(db, fromversion):
    """Create or migrate the processeditems table; return the resulting
    schema version.

    :param db: the Database wrapper to run DDL against
    :param fromversion: schema version found on disk (-1 = no database)
    """
    if fromversion == -1:
        # Fresh install: build the table at the current layout.
        db.execute("""CREATE TABLE processeditems (mediaid INTEGER NOT NULL, mediatype TEXT NOT NULL,
            medialabel TEXT, nextdate DATETIME, data TEXT, PRIMARY KEY (mediaid, mediatype))""")
        return VERSION
    # Apply incremental migrations; each step bumps the version by one.
    version = fromversion
    if version == 0:
        db.execute("""ALTER TABLE processeditems ADD COLUMN medialabel TEXT""")
        version = 1
    return version
# Name of the per-database key/value settings table.
SETTINGS_TABLE_VALUE = 'database-settings'
# must be quoted to use as identifier
SETTINGS_TABLE = '"{0}"'.format(SETTINGS_TABLE_VALUE)
class Database(object):
    """Thin wrapper around a sqlite3 file stored in the addon's data
    directory, with a tiny versioned-schema/settings mechanism."""

    def __init__(self, databasename, upgrade_fn):
        # Ensure the addon data directory exists before opening the file.
        dbpath = settings.datapath
        if not xbmcvfs.exists(dbpath):
            xbmcvfs.mkdir(dbpath)
        dbpath = xbmc.translatePath(dbpath + databasename + '.db')
        self._conn = sqlite3.connect(dbpath)
        # Row objects allow column access by name; text_factory=str keeps
        # values as byte strings (Kodi / Python 2 environment).
        self._conn.row_factory = sqlite3.Row
        self._conn.text_factory = str
        self._cursor = self._conn.cursor()
        self._setup(upgrade_fn)

    def execute(self, query, args=()):
        # `with self._conn` commits on success and rolls back on error.
        with self._conn:
            self._execute_raw(query, args)

    def executemany(self, *queriesandargs):
        # Run several (query, args) tuples inside a single transaction.
        with self._conn:
            for queryargs in queriesandargs:
                self._execute_raw(*queryargs)

    def fetchall(self, query, args=()):
        self._execute_raw(query, args)
        return self._cursor.fetchall()

    def fetchone(self, query, args=()):
        self._execute_raw(query, args)
        return self._cursor.fetchone()

    def _execute_raw(self, query, args=()):
        self._cursor.execute(query, args)

    def _setup(self, upgrade_fn):
        # Detect the on-disk schema version, let `upgrade_fn` migrate, and
        # persist the new version if it changed.
        version = self._get_version()
        newversion = upgrade_fn(self, version)
        if version != newversion:
            self._update_version(newversion)

    def _build_settings(self, version=-1):
        # Create the settings table and seed the stored schema version.
        self.executemany(
            ("CREATE TABLE {0} (name TEXT PRIMARY KEY NOT NULL, value TEXT)".format(SETTINGS_TABLE),),
            ("INSERT INTO {0} (name, value) VALUES ('database version', ?)".format(SETTINGS_TABLE), (str(version),))
        )

    def _get_version(self):
        # DEPRECATED transition logic (see module note): a data table with
        # no settings table means the pre-settings schema VERSION 0.
        if not self.fetchone("SELECT name FROM sqlite_master WHERE type='table' AND name='processeditems'"):
            self._build_settings()
            return -1  # for future databases, this check goes away ...
        if not self.fetchone("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (SETTINGS_TABLE_VALUE,)):
            self._build_settings(0)  # ... and this is empty
            return 0  # and returns -1
        return int(self._get_setting_value('database version', 0))

    def _get_setting_value(self, settingname, default=None):
        result = self.fetchone("SELECT value FROM {0} WHERE name=?".format(SETTINGS_TABLE), (settingname,))
        if not result:
            return default
        return result['value']

    def _update_version(self, newversion):
        # Upsert the 'database version' setting.
        exists = bool(self.fetchone("SELECT * FROM {0} WHERE name='database version'".format(SETTINGS_TABLE)))
        script = "UPDATE {0} SET value=? WHERE name=?" if exists else "INSERT INTO {0} (value, name) VALUES (?, ?)"
        self.execute(script.format(SETTINGS_TABLE), (str(newversion), 'database version'))
|
"""
Implements the EN increasing depth check.
"""
import EN_spike_and_step_check
import numpy as np
from collections import Counter
import util.main as main
import sys
def test(p, parameters):
    """
    Runs the quality control check on profile p and returns a numpy array
    of quality control decisions with False where the data value has
    passed the check and True where it failed.
    """
    # Check if the QC of this profile was already done and if not
    # run the QC.
    # NOTE(review): the query is built by string concatenation; the table
    # name presumably comes from trusted configuration and uid() is
    # presumably an integer -- confirm, otherwise this is injectable.
    query = 'SELECT en_increasing_depth_check FROM ' + parameters["table"] + ' WHERE uid = ' + str(p.uid()) + ';'
    qc_log = main.dbinteract(query)
    qc_log = main.unpack_row(qc_log[0])
    if qc_log[0] is not None:
        # A cached result exists in the database; reuse it.
        return qc_log[0]
    return run_qc(p, parameters)
def mask_index(mat, index):
    """
    Zero out row `index` and column `index` of the square comparison
    matrix `mat`, in place.

    This corresponds to recomputing the matrix after qc[index] has been
    set True: a rejected level no longer counts as inconsistent with any
    other level.
    """
    mat[index, :] = 0
    mat[:, index] = 0
def run_qc(p, parameters):
    """Compute the EN increasing-depth QC flags for profile *p*.

    Returns a boolean numpy array with True at levels whose depth is
    inconsistent: out of range (negative or > 11000), or not strictly
    increasing relative to the other levels.
    """
    # Get z values from the profile (assumed to be a numpy masked array).
    d = p.z()
    mask = d.mask
    n = p.n_levels()

    # Initialize qc array.
    qc = np.zeros(n, dtype=bool)

    # If all the depths are the same, flag all levels and finish immediately.
    most_common_depth = Counter(d.data).most_common(1)
    if most_common_depth[0][1] == len(d.data):
        # (An unused "uid = p.uid()" debug leftover was removed here.)
        return np.ones(n, dtype=bool)

    # Basic range check on each level.
    qc[d < 0] = True
    qc[d > 11000] = True

    # comp[i, j] is 1 when levels i and j are not strictly increasing in
    # depth relative to each other; flagged/masked levels are excluded.
    comp = np.zeros((n, n), dtype=int)
    for i in range(n):
        if qc[i] or mask[i]:
            continue
        for j in range(n):
            if qc[j] or mask[j] or (i == j):
                continue
            if i < j:
                if d[i] >= d[j]:
                    comp[i, j] = 1
            else:
                if d[i] <= d[j]:
                    comp[i, j] = 1

    # Spike/step results are only needed when exactly one inconsistency is
    # found; compute them lazily and cache across loop iterations.  (The
    # original used a bare try/except NameError trick for this.)
    spikeqc = None
    currentMax = 1
    while currentMax > 0:
        # Find the level that is most inconsistent with the others.
        currentMax = 0
        currentLev = -1
        otherLev = -1
        for i in range(n):
            lineSum = np.sum(comp[:, i])
            if lineSum >= currentMax:
                currentMax = lineSum
                currentLev = i

        # Reject immediately if more than one inconsistency, or
        # investigate further if there is exactly one.
        if currentMax > 1:
            qc[currentLev] = True
        elif currentMax == 1:
            # Find out which level it is inconsistent with.
            for i in range(n):
                if comp[i, currentLev] == 1:
                    otherLev = i
            # If the spike and step check rejected either level, flag that
            # one; otherwise reject both.
            if spikeqc is None:
                spikeqc = EN_spike_and_step_check.test(p, parameters)
            if spikeqc[currentLev]:
                qc[currentLev] = True
            if spikeqc[otherLev]:
                qc[otherLev] = True
            if not spikeqc[currentLev] and not spikeqc[otherLev]:
                qc[currentLev] = True
                qc[otherLev] = True

        # Update the comparison matrix for any newly flagged levels.
        if currentLev > -1:
            mask_index(comp, currentLev)
        if otherLev > -1:
            mask_index(comp, otherLev)

    return qc
Drop the unnecessary `sys` package import.
"""
Implements the EN increasing depth check.
"""
import EN_spike_and_step_check
import numpy as np
from collections import Counter
import util.main as main
def test(p, parameters):
    """
    Runs the quality control check on profile p and returns a numpy array
    of quality control decisions with False where the data value has
    passed the check and True where it failed.
    """
    # Return the cached QC result from the database when present;
    # otherwise compute (and return) it now.
    query = 'SELECT en_increasing_depth_check FROM ' + parameters["table"] + ' WHERE uid = ' + str(p.uid()) + ';'
    cached = main.unpack_row(main.dbinteract(query)[0])
    if cached[0] is not None:
        return cached[0]
    return run_qc(p, parameters)
def mask_index(mat, index):
    """Clear row *index* and column *index* of the comparison matrix *mat*.

    This mimics rebuilding the matrix after qc[index] has been set True:
    a flagged level no longer participates in any pairwise comparison.
    """
    for pos in range(len(mat)):
        mat[index, pos] = 0
        mat[pos, index] = 0
def run_qc(p, parameters):
    """Compute the EN increasing-depth QC flags for profile *p*.

    Returns a boolean numpy array (True = level failed the check).
    """
    # Get z values from the profile.
    d = p.z()
    mask = d.mask  # per-level mask; assumes d is a numpy masked array -- TODO confirm
    n = p.n_levels()
    # Initialize qc array.
    qc = np.zeros(n, dtype=bool)
    # if all the depths are the same, flag all levels and finish immediately
    most_common_depth = Counter(d.data).most_common(1)
    if most_common_depth[0][1] == len(d.data):
        qc = np.ones(n, dtype=bool)
        uid = p.uid()  # NOTE(review): unused local; looks like leftover debug code
        return qc
    # Basic check on each level.
    qc[d < 0] = True
    qc[d > 11000] = True
    # Now check for inconsistencies in the depth levels.
    comp = np.ndarray((n, n), dtype=int)
    currentMax = 1
    # initialize matrix
    # Comp gets set to 1 if there is not an increase in depth.
    comp[:, :] = 0
    for i in range(n):
        if qc[i] or mask[i]: continue
        for j in range(n):
            if qc[j] or mask[j] or (i == j): continue
            if i < j:
                if d[i] >= d[j]: comp[i, j] = 1
            else:
                if d[i] <= d[j]: comp[i, j] = 1
    while currentMax > 0:
        # Check if comp was set to 1 anywhere and which level was
        # most inconsistent with the others.
        currentMax = 0
        currentLev = -1
        otherLev = -1
        for i in range(n):
            lineSum = np.sum(comp[:, i])
            if lineSum >= currentMax:
                currentMax = lineSum
                currentLev = i
        # Reject immediately if more than one inconsistency or
        # investigate further if one inconsistency.
        if currentMax > 1:
            qc[currentLev] = True
        elif currentMax == 1:
            # Find out which level it is inconsistent with.
            for i in range(n):
                if comp[i, currentLev] == 1: otherLev = i
            # Check if one was rejected by the spike and step
            # check, otherwise reject both.
            # The NameError try/except computes spikeqc lazily, exactly once
            # across all loop iterations (a bare except; catching NameError
            # explicitly would be clearer).
            try:
                spikeqc
            except:
                spikeqc = EN_spike_and_step_check.test(p, parameters)
            if spikeqc[currentLev]: qc[currentLev] = True
            if spikeqc[otherLev]: qc[otherLev] = True
            if spikeqc[currentLev] == False and spikeqc[otherLev] == False:
                qc[currentLev] = True
                qc[otherLev] = True
        # update comp matrix:
        if currentLev > -1:
            mask_index(comp, currentLev)
        if otherLev > -1:
            mask_index(comp, otherLev)
    return qc
|
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Utilities for requesting information for a gerrit server via https.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
"""
import base64
import httplib
import json
import logging
import netrc
import os
import time
import urllib
from cStringIO import StringIO
try:
    NETRC = netrc.netrc()
except (IOError, netrc.NetrcParseError):
    # No usable ~/.netrc; fall back to an empty credential store so that
    # unauthenticated requests still work.
    NETRC = netrc.netrc(os.devnull)
LOGGER = logging.getLogger()
# Maximum number of attempts for a single request (first try + retries).
TRY_LIMIT = 5
class GOBError(Exception):
    """Exception class for errors communicating with the gerrit-on-borg service."""
    def __init__(self, http_status, *args, **kwargs):
        super(GOBError, self).__init__(*args, **kwargs)
        self.http_status = http_status
        # Python 2's Exception exposes .message (the first positional arg);
        # prefix it with the numeric HTTP status for display.  Assumes
        # http_status is an int ('%d' would raise otherwise) -- TODO confirm
        # all call sites pass one (at least one below appears not to).
        self.message = '(%d) %s' % (self.http_status, self.message)
def _QueryString(param_dict, first_param=None):
    """Encodes query parameters in the key:val[+key:val...] format specified here:
    https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
    """
    # Only the bare change identifier is URL-quoted; key:val pairs are
    # passed through unescaped.
    parts = []
    if first_param:
        parts.append(urllib.quote(first_param))
    for key, val in param_dict.iteritems():
        parts.append('%s:%s' % (key, val))
    return '+'.join(parts)
def CreateHttpConn(host, path, reqtype='GET', headers=None, body=None):
    """Opens an https connection to a gerrit service, and sends a request."""
    headers = headers or {}
    # Strip any ';'-suffixed annotation from the host before the netrc lookup.
    bare_host = host.partition(';')[0]
    auth = NETRC.authenticators(bare_host)
    if auth:
        # auth is (login, account, password); gerrit uses HTTP Basic auth.
        headers.setdefault('Authorization', 'Basic %s' % (
            base64.b64encode('%s:%s' % (auth[0], auth[2]))))
    if body:
        body = json.JSONEncoder().encode(body)
        headers.setdefault('Content-Type', 'application/json')
    if LOGGER.isEnabledFor(logging.DEBUG):
        LOGGER.debug('%s https://%s/a/%s' % (reqtype, host, path))
        for key, val in headers.iteritems():
            if key == 'Authorization':
                # Never log credentials.
                val = 'HIDDEN'
            LOGGER.debug('%s: %s' % (key, val))
        if body:
            LOGGER.debug(body)
    conn = httplib.HTTPSConnection(host)
    # Stash the host and request parameters on the connection object so
    # ReadHttpResponse can re-issue the request on transient failures.
    conn.host = host
    conn.req_params = {
        'url': '/a/%s' % path,
        'method': reqtype,
        'headers': headers,
        'body': body,
    }
    conn.request(**conn.req_params)
    return conn
def ReadHttpResponse(conn, ignore_404=True):
    """Reads an http response from a connection into a string buffer.

    Args:
      conn: An HTTPSConnection created by CreateHttpConn, above.
      ignore_404: For many requests, gerrit-on-borg will return 404 if the request
                  doesn't match the database contents.  In most such cases, we
                  want the API to return None rather than raise an Exception.
    Returns: A string buffer containing the connection's reply.
    """
    sleep_time = 0.5
    for idx in range(TRY_LIMIT):
        response = conn.getresponse()
        # If response.status < 500 then the result is final; break retry loop.
        if response.status < 500:
            break
        # A status >=500 is assumed to be a possible transient error; retry.
        http_version = 'HTTP/%s' % ('1.1' if response.version == 11 else '1.0')
        msg = (
            'A transient error occured while querying %s:\n'
            '%s %s %s\n'
            '%s %d %s' % (
                conn.host, conn.req_params['method'], conn.req_params['url'],
                http_version, http_version, response.status, response.reason))
        if TRY_LIMIT - idx > 1:
            msg += '\n... will retry %d more times.' % (TRY_LIMIT - idx - 1)
            time.sleep(sleep_time)
            sleep_time = sleep_time * 2
            # BUG FIX: copy the custom .host attribute (along with
            # req_params) onto the fresh connection.  The replacement
            # connection previously lacked the attributes CreateHttpConn
            # attached, so a second consecutive retry could fail when
            # formatting msg above instead of retrying the request.
            host = conn.host
            req_params = conn.req_params
            conn = httplib.HTTPSConnection(host)
            conn.host = host
            conn.req_params = req_params
            conn.request(**req_params)
        LOGGER.warn(msg)
    if ignore_404 and response.status == 404:
        return StringIO()
    if response.status != 200:
        raise GOBError(response.status, response.reason)
    return StringIO(response.read())
def ReadHttpJsonResponse(conn, ignore_404=True):
    """Parses an https response as json."""
    fh = ReadHttpResponse(conn, ignore_404=ignore_404)
    # Gerrit prefixes every JSON reply with the line )]}' to defeat XSSI;
    # it must be stripped before parsing.
    magic = fh.readline()
    if magic and magic.rstrip() != ")]}'":
        raise GOBError(200, 'Unexpected json output: %s' % magic)
    payload = fh.read()
    if not payload:
        return None
    return json.loads(payload)
def QueryChanges(host, param_dict, first_param=None, limit=None, o_params=None):
    """
    Queries a gerrit-on-borg server for changes matching query terms.
    Args:
      param_dict: A dictionary of search parameters, as documented here:
        http://gerrit-documentation.googlecode.com/svn/Documentation/2.6/user-search.html
      first_param: A change identifier
      limit: Maximum number of results to return.
      o_params: A list of additional output specifiers, as documented here:
        https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
    Returns:
      A list of json-decoded query results.
    """
    # Note that no attempt is made to escape special characters; YMMV.
    if not param_dict and not first_param:
        raise RuntimeError('QueryChanges requires search parameters')
    path = 'changes/?q=%s' % _QueryString(param_dict, first_param)
    if limit:
        # n= caps the number of results returned by the server.
        path = '%s&n=%d' % (path, limit)
    if o_params:
        path = '%s&%s' % (path, '&'.join(['o=%s' % p for p in o_params]))
    # Don't ignore 404; a query should always return a list, even if it's empty.
    return ReadHttpJsonResponse(CreateHttpConn(host, path), ignore_404=False)
def MultiQueryChanges(host, param_dict, change_list, limit=None, o_params=None):
    """Initiate a query composed of multiple sets of query parameters."""
    if not change_list:
        raise RuntimeError(
            "MultiQueryChanges requires a list of change numbers/id's")
    # Changes are OR'd together; shared search parameters, limit and output
    # options are appended as extra query-string fields.
    q = ['q=%s' % '+OR+'.join([urllib.quote(str(x)) for x in change_list])]
    if param_dict:
        q.append(_QueryString(param_dict))
    if limit:
        q.append('n=%d' % limit)
    if o_params:
        q.extend(['o=%s' % p for p in o_params])
    path = 'changes/?%s' % '&'.join(q)
    try:
        result = ReadHttpJsonResponse(CreateHttpConn(host, path), ignore_404=False)
    except GOBError as e:
        # Re-raise with the request path appended for easier debugging.
        msg = '%s:\n%s' % (e.message, path)
        raise GOBError(e.http_status, msg)
    return result
def GetGerritFetchUrl(host):
    """Given a gerrit host name returns URL of a gerrit instance to fetch from."""
    template = 'https://%s/a/'
    return template % host
def GetChangePageUrl(host, change_number):
    """Given a gerrit host name and change number, return change page url."""
    template = 'https://%s/#/c/%d/'
    return template % (host, change_number)
def GetChangeUrl(host, change):
    """Given a gerrit host name and change id, return an url for the change."""
    template = 'https://%s/a/changes/%s'
    return template % (host, change)
def GetChange(host, change):
    """Query a gerrit server for information about a single change."""
    return ReadHttpJsonResponse(CreateHttpConn(host, 'changes/%s' % change))
def GetChangeDetail(host, change, o_params=None):
    """Query a gerrit server for extended information about a single change."""
    path = 'changes/%s/detail' % change
    if o_params:
        opts = '&'.join(['o=%s' % p for p in o_params])
        path = '%s?%s' % (path, opts)
    return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetChangeCurrentRevision(host, change):
    """Get information about the latest revision for a given change."""
    # CURRENT_REVISION limits the revision payload to the newest patchset.
    return QueryChanges(host, {}, change, o_params=('CURRENT_REVISION',))
def GetChangeRevisions(host, change):
    """Get information about all revisions associated with a change."""
    # ALL_REVISIONS asks gerrit to include every patchset in the reply.
    return QueryChanges(host, {}, change, o_params=('ALL_REVISIONS',))
def GetChangeReview(host, change, revision=None):
    """Get the current review information for a change.

    If *revision* is omitted, the change's current revision is looked up
    first; raises GOBError if the change id matches multiple changes.
    """
    if not revision:
        jmsg = GetChangeRevisions(host, change)
        if not jmsg:
            return None
        elif len(jmsg) > 1:
            raise GOBError(200, 'Multiple changes found for ChangeId %s.' % change)
        revision = jmsg[0]['current_revision']
    # BUG FIX: the format arguments were missing, so the literal template
    # (with untouched %s placeholders) was sent as the request path.
    path = 'changes/%s/revisions/%s/review' % (change, revision)
    return ReadHttpJsonResponse(CreateHttpConn(host, path))
def AbandonChange(host, change, msg=''):
    """Abandon a gerrit change."""
    conn = CreateHttpConn(
        host, 'changes/%s/abandon' % change, reqtype='POST',
        body={'message': msg})
    return ReadHttpJsonResponse(conn, ignore_404=False)
def SubmitChange(host, change, wait_for_merge=True):
    """Submits a gerrit change via Gerrit."""
    conn = CreateHttpConn(
        host, 'changes/%s/submit' % change, reqtype='POST',
        body={'wait_for_merge': wait_for_merge})
    return ReadHttpJsonResponse(conn, ignore_404=False)
def GetReviewers(host, change):
    """Get information about all reviewers attached to a change."""
    # Simple GET against the change's reviewers collection.
    path = 'changes/%s/reviewers' % change
    return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetReview(host, change, revision):
    """Get review information about a specific revision of a change."""
    # Unlike GetChangeReview, the revision must be supplied explicitly.
    path = 'changes/%s/revisions/%s/review' % (change, revision)
    return ReadHttpJsonResponse(CreateHttpConn(host, path))
def AddReviewers(host, change, add=None):
    """Add reviewers to a change.

    Reviewers are added one request at a time; the json reply for the
    last one is returned (None when *add* is empty).
    """
    if not add:
        return
    if isinstance(add, basestring):
        add = (add,)
    path = 'changes/%s/reviewers' % change
    jmsg = None
    for reviewer in add:
        conn = CreateHttpConn(host, path, reqtype='POST',
                              body={'reviewer': reviewer})
        jmsg = ReadHttpJsonResponse(conn, ignore_404=False)
    return jmsg
def RemoveReviewers(host, change, remove=None):
    """Remove reviewers from a change."""
    if not remove:
        return
    if isinstance(remove, basestring):
        remove = (remove,)
    for r in remove:
        # BUG FIX: the path previously began with 'change/' instead of the
        # 'changes/' endpoint used everywhere else in this module.
        path = 'changes/%s/reviewers/%s' % (change, r)
        conn = CreateHttpConn(host, path, reqtype='DELETE')
        try:
            ReadHttpResponse(conn, ignore_404=False)
        except GOBError as e:
            # On success, gerrit returns status 204; anything else is an error.
            if e.http_status != 204:
                raise
        else:
            # BUG FIX: GOBError takes (http_status, message); the status
            # argument was missing, which would itself crash inside
            # GOBError.__init__ ('%d' against a string).
            raise GOBError(
                200,
                'Unexpectedly received a 200 http status while deleting reviewer "%s"'
                ' from change %s' % (r, change))
def ResetReviewLabels(host, change, label, value='0', message=None):
    """Reset the value of a given label for all reviewers on a change."""
    # This is tricky, because we want to work on the "current revision", but
    # there's always the risk that "current revision" will change in between
    # API calls.  So, we check "current revision" at the beginning and end;
    # if it has changed, raise an exception.
    jmsg = GetChangeCurrentRevision(host, change)
    if not jmsg:
        raise GOBError(
            200, 'Could not get review information for change "%s"' % change)
    value = str(value)
    revision = jmsg[0]['current_revision']
    path = 'changes/%s/revisions/%s/review' % (change, revision)
    message = message or (
        '%s label set to %s programmatically by chromite.' % (label, value))
    jmsg = GetReview(host, change, revision)
    if not jmsg:
        raise GOBError(200, 'Could not get review information for revison %s '
                       'of change %s' % (revision, change))
    # BUG FIX: iterate the reviews for the label actually being reset; this
    # previously hard-coded 'Commit-Queue' and ignored the *label* argument.
    for review in jmsg.get('labels', {}).get(label, {}).get('all', []):
        if str(review.get('value', value)) != value:
            body = {
                'message': message,
                'labels': {label: value},
                'on_behalf_of': review['_account_id'],
            }
            conn = CreateHttpConn(
                host, path, reqtype='POST', body=body)
            response = ReadHttpJsonResponse(conn)
            if str(response['labels'][label]) != value:
                username = review.get('email', jmsg.get('name', ''))
                raise GOBError(200, 'Unable to set %s label for user "%s"'
                               ' on change %s.' % (label, username, change))
    # Re-check the current revision; a new patchset invalidates our work.
    jmsg = GetChangeCurrentRevision(host, change)
    if not jmsg:
        raise GOBError(
            200, 'Could not get review information for change "%s"' % change)
    elif jmsg[0]['current_revision'] != revision:
        raise GOBError(200, 'While resetting labels on change "%s", '
                       'a new patchset was uploaded.' % change)
Copy "host" field to retry HTTPSConnections.
BUG=chromium:284692
TEST=None
Change-Id: I2f7f8061b4b8fd87012f53f4670fd7b9f01007af
Reviewed-on: https://chromium-review.googlesource.com/168931
Reviewed-by: Stefan Zager <542cb5b73770f9741bae40d34cf7a192bc7ad2cc@chromium.org>
Tested-by: Stefan Zager <542cb5b73770f9741bae40d34cf7a192bc7ad2cc@chromium.org>
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Utilities for requesting information for a gerrit server via https.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
"""
import base64
import httplib
import json
import logging
import netrc
import os
import time
import urllib
from cStringIO import StringIO
try:
    NETRC = netrc.netrc()
except (IOError, netrc.NetrcParseError):
    # No usable ~/.netrc; fall back to an empty credential store.
    NETRC = netrc.netrc(os.devnull)
LOGGER = logging.getLogger()
# Maximum number of attempts for a single request (first try + retries).
TRY_LIMIT = 5
class GOBError(Exception):
    """Exception class for errors communicating with the gerrit-on-borg service."""
    def __init__(self, http_status, *args, **kwargs):
        super(GOBError, self).__init__(*args, **kwargs)
        self.http_status = http_status
        # Python 2 Exception exposes .message (first positional arg);
        # prefix it with the HTTP status.  Assumes http_status is an int.
        self.message = '(%d) %s' % (self.http_status, self.message)


def _QueryString(param_dict, first_param=None):
    """Encodes query parameters in the key:val[+key:val...] format specified here:
    https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
    """
    # Only first_param (a bare change identifier) is URL-quoted; key:val
    # pairs are passed through unescaped.
    q = [urllib.quote(first_param)] if first_param else []
    q.extend(['%s:%s' % (key, val) for key, val in param_dict.iteritems()])
    return '+'.join(q)
def CreateHttpConn(host, path, reqtype='GET', headers=None, body=None):
    """Opens an https connection to a gerrit service, and sends a request."""
    headers = headers or {}
    # Strip any ';'-suffixed annotation from the host before the netrc lookup.
    bare_host = host.partition(';')[0]
    auth = NETRC.authenticators(bare_host)
    if auth:
        # auth is (login, account, password); gerrit uses HTTP Basic auth.
        headers.setdefault('Authorization', 'Basic %s' % (
            base64.b64encode('%s:%s' % (auth[0], auth[2]))))
    if body:
        body = json.JSONEncoder().encode(body)
        headers.setdefault('Content-Type', 'application/json')
    if LOGGER.isEnabledFor(logging.DEBUG):
        LOGGER.debug('%s https://%s/a/%s' % (reqtype, host, path))
        for key, val in headers.iteritems():
            if key == 'Authorization':
                # Never log credentials.
                val = 'HIDDEN'
            LOGGER.debug('%s: %s' % (key, val))
        if body:
            LOGGER.debug(body)
    conn = httplib.HTTPSConnection(host)
    # Stash the host and request parameters on the connection object so
    # ReadHttpResponse can re-issue the request on transient failures.
    conn.host = host
    conn.req_params = {
        'url': '/a/%s' % path,
        'method': reqtype,
        'headers': headers,
        'body': body,
    }
    conn.request(**conn.req_params)
    return conn
def ReadHttpResponse(conn, ignore_404=True):
    """Reads an http response from a connection into a string buffer.

    Args:
      conn: An HTTPSConnection created by CreateHttpConn, above.
      ignore_404: For many requests, gerrit-on-borg will return 404 if the request
                  doesn't match the database contents.  In most such cases, we
                  want the API to return None rather than raise an Exception.
    Returns: A string buffer containing the connection's reply.
    """
    sleep_time = 0.5
    for idx in range(TRY_LIMIT):
        response = conn.getresponse()
        # If response.status < 500 then the result is final; break retry loop.
        if response.status < 500:
            break
        # A status >=500 is assumed to be a possible transient error; retry.
        http_version = 'HTTP/%s' % ('1.1' if response.version == 11 else '1.0')
        msg = (
            'A transient error occured while querying %s:\n'
            '%s %s %s\n'
            '%s %d %s' % (
                conn.host, conn.req_params['method'], conn.req_params['url'],
                http_version, http_version, response.status, response.reason))
        if TRY_LIMIT - idx > 1:
            msg += '\n... will retry %d more times.' % (TRY_LIMIT - idx - 1)
            time.sleep(sleep_time)
            sleep_time = sleep_time * 2
            # Open a fresh connection for the retry, and copy over the
            # host/req_params attributes that CreateHttpConn attached --
            # a bare HTTPSConnection would lack req_params on the next
            # pass through this loop.
            host = conn.host
            req_params = conn.req_params
            conn = httplib.HTTPSConnection(host)
            conn.host = host
            conn.req_params = req_params
            conn.request(**req_params)
        LOGGER.warn(msg)
    if ignore_404 and response.status == 404:
        return StringIO()
    if response.status != 200:
        raise GOBError(response.status, response.reason)
    return StringIO(response.read())
def ReadHttpJsonResponse(conn, ignore_404=True):
    """Parses an https response as json."""
    fh = ReadHttpResponse(conn, ignore_404=ignore_404)
    # The first line of the response should always be: )]}'
    # (gerrit's XSSI-protection prefix, stripped before parsing).
    s = fh.readline()
    if s and s.rstrip() != ")]}'":
        raise GOBError(200, 'Unexpected json output: %s' % s)
    s = fh.read()
    if not s:
        return None
    return json.loads(s)


def QueryChanges(host, param_dict, first_param=None, limit=None, o_params=None):
    """
    Queries a gerrit-on-borg server for changes matching query terms.
    Args:
      param_dict: A dictionary of search parameters, as documented here:
        http://gerrit-documentation.googlecode.com/svn/Documentation/2.6/user-search.html
      first_param: A change identifier
      limit: Maximum number of results to return.
      o_params: A list of additional output specifiers, as documented here:
        https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
    Returns:
      A list of json-decoded query results.
    """
    # Note that no attempt is made to escape special characters; YMMV.
    if not param_dict and not first_param:
        raise RuntimeError('QueryChanges requires search parameters')
    path = 'changes/?q=%s' % _QueryString(param_dict, first_param)
    if limit:
        # n= caps the number of results returned by the server.
        path = '%s&n=%d' % (path, limit)
    if o_params:
        path = '%s&%s' % (path, '&'.join(['o=%s' % p for p in o_params]))
    # Don't ignore 404; a query should always return a list, even if it's empty.
    return ReadHttpJsonResponse(CreateHttpConn(host, path), ignore_404=False)


def MultiQueryChanges(host, param_dict, change_list, limit=None, o_params=None):
    """Initiate a query composed of multiple sets of query parameters."""
    if not change_list:
        raise RuntimeError(
            "MultiQueryChanges requires a list of change numbers/id's")
    # Changes are OR'd together; shared search params/limit/output options
    # are appended as extra query-string fields.
    q = ['q=%s' % '+OR+'.join([urllib.quote(str(x)) for x in change_list])]
    if param_dict:
        q.append(_QueryString(param_dict))
    if limit:
        q.append('n=%d' % limit)
    if o_params:
        q.extend(['o=%s' % p for p in o_params])
    path = 'changes/?%s' % '&'.join(q)
    try:
        result = ReadHttpJsonResponse(CreateHttpConn(host, path), ignore_404=False)
    except GOBError as e:
        # Re-raise with the request path appended for easier debugging.
        msg = '%s:\n%s' % (e.message, path)
        raise GOBError(e.http_status, msg)
    return result
# --- URL helpers and simple change lookups ---------------------------------

def GetGerritFetchUrl(host):
    """Given a gerrit host name returns URL of a gerrit instance to fetch from."""
    return 'https://%s/a/' % host


def GetChangePageUrl(host, change_number):
    """Given a gerrit host name and change number, return change page url."""
    return 'https://%s/#/c/%d/' % (host, change_number)


def GetChangeUrl(host, change):
    """Given a gerrit host name and change id, return an url for the change."""
    return 'https://%s/a/changes/%s' % (host, change)


def GetChange(host, change):
    """Query a gerrit server for information about a single change."""
    path = 'changes/%s' % change
    return ReadHttpJsonResponse(CreateHttpConn(host, path))


def GetChangeDetail(host, change, o_params=None):
    """Query a gerrit server for extended information about a single change."""
    path = 'changes/%s/detail' % change
    if o_params:
        path += '?%s' % '&'.join(['o=%s' % p for p in o_params])
    return ReadHttpJsonResponse(CreateHttpConn(host, path))


def GetChangeCurrentRevision(host, change):
    """Get information about the latest revision for a given change."""
    # CURRENT_REVISION limits the revision payload to the newest patchset.
    return QueryChanges(host, {}, change, o_params=('CURRENT_REVISION',))


def GetChangeRevisions(host, change):
    """Get information about all revisions associated with a change."""
    # ALL_REVISIONS asks gerrit to include every patchset in the reply.
    return QueryChanges(host, {}, change, o_params=('ALL_REVISIONS',))
def GetChangeReview(host, change, revision=None):
    """Get the current review information for a change.

    If *revision* is omitted, the change's current revision is looked up
    first; raises GOBError if the change id matches multiple changes.
    """
    if not revision:
        jmsg = GetChangeRevisions(host, change)
        if not jmsg:
            return None
        elif len(jmsg) > 1:
            raise GOBError(200, 'Multiple changes found for ChangeId %s.' % change)
        revision = jmsg[0]['current_revision']
    # BUG FIX: the format arguments were missing, so the literal template
    # (with untouched %s placeholders) was sent as the request path.
    path = 'changes/%s/revisions/%s/review' % (change, revision)
    return ReadHttpJsonResponse(CreateHttpConn(host, path))
def AbandonChange(host, change, msg=''):
    """Abandon a gerrit change."""
    path = 'changes/%s/abandon' % change
    body = {'message': msg}
    conn = CreateHttpConn(host, path, reqtype='POST', body=body)
    return ReadHttpJsonResponse(conn, ignore_404=False)


def SubmitChange(host, change, wait_for_merge=True):
    """Submits a gerrit change via Gerrit."""
    path = 'changes/%s/submit' % change
    body = {'wait_for_merge': wait_for_merge}
    conn = CreateHttpConn(host, path, reqtype='POST', body=body)
    return ReadHttpJsonResponse(conn, ignore_404=False)


def GetReviewers(host, change):
    """Get information about all reviewers attached to a change."""
    path = 'changes/%s/reviewers' % change
    return ReadHttpJsonResponse(CreateHttpConn(host, path))


def GetReview(host, change, revision):
    """Get review information about a specific revision of a change."""
    path = 'changes/%s/revisions/%s/review' % (change, revision)
    return ReadHttpJsonResponse(CreateHttpConn(host, path))


def AddReviewers(host, change, add=None):
    """Add reviewers to a change.

    Reviewers are added one request at a time; the json reply for the
    last one is returned (None when *add* is empty).
    """
    if not add:
        return
    if isinstance(add, basestring):
        add = (add,)
    path = 'changes/%s/reviewers' % change
    for r in add:
        body = {'reviewer': r}
        conn = CreateHttpConn(host, path, reqtype='POST', body=body)
        jmsg = ReadHttpJsonResponse(conn, ignore_404=False)
    return jmsg
def RemoveReviewers(host, change, remove=None):
    """Remove reviewers from a change."""
    if not remove:
        return
    if isinstance(remove, basestring):
        remove = (remove,)
    for r in remove:
        # BUG FIX: the path previously began with 'change/' instead of the
        # 'changes/' endpoint used everywhere else in this module.
        path = 'changes/%s/reviewers/%s' % (change, r)
        conn = CreateHttpConn(host, path, reqtype='DELETE')
        try:
            ReadHttpResponse(conn, ignore_404=False)
        except GOBError as e:
            # On success, gerrit returns status 204; anything else is an error.
            if e.http_status != 204:
                raise
        else:
            # BUG FIX: GOBError takes (http_status, message); the status
            # argument was missing, which would itself crash inside
            # GOBError.__init__ ('%d' against a string).
            raise GOBError(
                200,
                'Unexpectedly received a 200 http status while deleting reviewer "%s"'
                ' from change %s' % (r, change))
def ResetReviewLabels(host, change, label, value='0', message=None):
    """Reset the value of a given label for all reviewers on a change."""
    # This is tricky, because we want to work on the "current revision", but
    # there's always the risk that "current revision" will change in between
    # API calls.  So, we check "current revision" at the beginning and end;
    # if it has changed, raise an exception.
    jmsg = GetChangeCurrentRevision(host, change)
    if not jmsg:
        raise GOBError(
            200, 'Could not get review information for change "%s"' % change)
    value = str(value)
    revision = jmsg[0]['current_revision']
    path = 'changes/%s/revisions/%s/review' % (change, revision)
    message = message or (
        '%s label set to %s programmatically by chromite.' % (label, value))
    jmsg = GetReview(host, change, revision)
    if not jmsg:
        raise GOBError(200, 'Could not get review information for revison %s '
                       'of change %s' % (revision, change))
    # BUG FIX: iterate the reviews for the label actually being reset; this
    # previously hard-coded 'Commit-Queue' and ignored the *label* argument.
    for review in jmsg.get('labels', {}).get(label, {}).get('all', []):
        if str(review.get('value', value)) != value:
            body = {
                'message': message,
                'labels': {label: value},
                'on_behalf_of': review['_account_id'],
            }
            conn = CreateHttpConn(
                host, path, reqtype='POST', body=body)
            response = ReadHttpJsonResponse(conn)
            if str(response['labels'][label]) != value:
                username = review.get('email', jmsg.get('name', ''))
                raise GOBError(200, 'Unable to set %s label for user "%s"'
                               ' on change %s.' % (label, username, change))
    # Re-check the current revision; a new patchset invalidates our work.
    jmsg = GetChangeCurrentRevision(host, change)
    if not jmsg:
        raise GOBError(
            200, 'Could not get review information for change "%s"' % change)
    elif jmsg[0]['current_revision'] != revision:
        raise GOBError(200, 'While resetting labels on change "%s", '
                       'a new patchset was uploaded.' % change)
|
from __future__ import print_function
from collections import OrderedDict, MutableMapping
from pprint import pprint
from sys import version_info
from inspect import ismethod
# for debugging
def here(item=None):
    """Debug helper: print 'here', optionally tagged with *item*."""
    out = 'here'
    # Identity check -- "item != None" invoked __eq__ and misfired for
    # objects that compare equal to None.
    if item is not None:
        out += '({})'.format(item)
    print(out)
class DotMap(MutableMapping, OrderedDict):
    """An OrderedDict whose keys are also readable/writable as attributes.

    By default the map is "dynamic": reading a missing key auto-creates an
    empty child DotMap, so chained assignment like ``m.a.b.c = 1`` works.
    Pass ``_dynamic=False`` to disable auto-creation.
    """

    def __init__(self, *args, **kwargs):
        self._map = OrderedDict()
        self._dynamic = True
        if kwargs:
            if '_dynamic' in kwargs:
                self._dynamic = kwargs['_dynamic']
        if args:
            d = args[0]
            if isinstance(d, dict):
                for k, v in self.__call_items(d):
                    # Recursively wrap nested dicts (including dicts found
                    # inside lists) so attribute access works at any depth.
                    if isinstance(v, dict):
                        v = DotMap(v, _dynamic=self._dynamic)
                    if type(v) is list:
                        l = []
                        for i in v:
                            n = i
                            if type(i) is dict:
                                n = DotMap(i, _dynamic=self._dynamic)
                            l.append(n)
                        v = l
                    self._map[k] = v
        if kwargs:
            for k, v in self.__call_items(kwargs):
                # BUG FIX: was "k is not '_dynamic'", which relied on string
                # interning for correctness; compare by value instead.
                if k != '_dynamic':
                    self._map[k] = v

    def __call_items(self, obj):
        # Use iteritems() where available (Python 2), else items().
        if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
            return obj.iteritems()
        else:
            return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        self._map[k] = v

    def __getitem__(self, k):
        # In dynamic mode a missing key silently becomes a new DotMap
        # (except for IPython's completion-probe attribute).
        if k not in self._map and self._dynamic and k != '_ipython_canary_method_should_not_exist_':
            # automatically extend to new DotMap
            self[k] = DotMap()
        return self._map[k]

    def __setattr__(self, k, v):
        if k in {'_map', '_dynamic', '_ipython_canary_method_should_not_exist_'}:
            super(DotMap, self).__setattr__(k, v)
        else:
            self[k] = v

    def __getattr__(self, k):
        # BUG FIX: the original tested "k == {...}" against a set literal,
        # which is always False, so internal names fell through to item
        # lookup (infinite recursion if _map itself was missing, e.g.
        # during unpickling/copy protocols).
        if k in {'_map', '_dynamic', '_ipython_canary_method_should_not_exist_'}:
            raise AttributeError(k)
        return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __str__(self):
        items = []
        for k, v in self.__call_items(self._map):
            # bizarre recursive assignment situation (why someone would do this is beyond me)
            if id(v) == id(self):
                items.append('{0}=DotMap(...)'.format(k))
            else:
                items.append('{0}={1}'.format(k, repr(v)))
        out = 'DotMap({0})'.format(', '.join(items))
        return out

    def __repr__(self):
        return str(self)

    def toDict(self):
        """Convert this DotMap (recursively) back into a plain dict."""
        d = {}
        for k, v in self.items():
            if type(v) is DotMap:
                # bizarre recursive assignment support
                if id(v) == id(self):
                    v = d
                else:
                    v = v.toDict()
            elif type(v) is list:
                l = []
                for i in v:
                    n = i
                    if type(i) is DotMap:
                        n = i.toDict()
                    l.append(n)
                v = l
            d[k] = v
        return d

    def pprint(self):
        pprint(self.toDict())

    def empty(self):
        # NOTE(review): any(self) iterates keys, so a map containing only
        # falsy keys (e.g. 0 or '') would wrongly report empty -- kept
        # as-is for compatibility.
        return (not any(self))

    # proper dict subclassing
    def values(self):
        return self._map.values()

    # ipython support
    def __dir__(self):
        return self.keys()

    @classmethod
    def parseOther(self, other):
        # Unwrap a DotMap argument so comparisons operate on the inner dict.
        if type(other) is DotMap:
            return other._map
        else:
            return other

    def __cmp__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = DotMap.parseOther(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def clear(self):
        self._map.clear()

    def copy(self):
        return DotMap(self)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo=None):
        return self.copy()

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def popitem(self):
        return self._map.popitem()

    def setdefault(self, key, default=None):
        # BUG FIX: dict.setdefault returns the stored value; the original
        # dropped it (always returned None).
        return self._map.setdefault(key, default)

    def update(self, *args, **kwargs):
        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)

    def viewitems(self):
        return self._map.viewitems()

    def viewkeys(self):
        return self._map.viewkeys()

    def viewvalues(self):
        return self._map.viewvalues()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = DotMap()
        d._map = OrderedDict.fromkeys(seq, value)
        return d

    def __getstate__(self): return self.__dict__

    def __setstate__(self, d): self.__dict__.update(d)

    # bannerStr helpers -----------------------------------------------------
    def _getListStr(self, items):
        out = '['
        mid = ''
        for i in items:
            mid += ' {}\n'.format(i)
        if mid != '':
            mid = '\n' + mid
        out += mid
        out += ']'
        return out

    def _getValueStr(self, k, v):
        outV = v
        multiLine = len(str(v).split('\n')) > 1
        if multiLine:
            # push to next line
            outV = '\n' + v
        if type(v) is list:
            outV = self._getListStr(v)
        out = '{} {}'.format(k, outV)
        return out

    def _getSubMapDotList(self, pre, name, subMap):
        outList = []
        if pre == '':
            pre = name
        else:
            pre = '{}.{}'.format(pre, name)

        def stamp(pre, k, v):
            valStr = self._getValueStr(k, v)
            return '{}.{}'.format(pre, valStr)

        for k, v in subMap.items():
            if isinstance(v, DotMap) and v != DotMap():
                subList = self._getSubMapDotList(pre, k, v)
                outList.extend(subList)
            else:
                outList.append(stamp(pre, k, v))
        return outList

    def _getSubMapStr(self, name, subMap):
        outList = ['== {} =='.format(name)]
        for k, v in subMap.items():
            if isinstance(v, DotMap) and v != DotMap():
                # break down to dots
                subList = self._getSubMapDotList('', k, v)
                # add the divit
                # subList = ['> {}'.format(i) for i in subList]
                outList.extend(subList)
            else:
                out = self._getValueStr(k, v)
                # out = '> {}'.format(out)
                out = '{}'.format(out)
                outList.append(out)
        finalOut = '\n'.join(outList)
        return finalOut

    def bannerStr(self):
        lines = []
        previous = None
        for k, v in self.items():
            if previous == 'DotMap':
                lines.append('-')
            out = ''
            if isinstance(v, DotMap):
                name = k
                subMap = v
                out = self._getSubMapStr(name, subMap)
                lines.append(out)
                previous = 'DotMap'
            else:
                out = self._getValueStr(k, v)
                lines.append(out)
                previous = 'other'
        lines.append('--')
        s = '\n'.join(lines)
        return s
if __name__ == '__main__':
# basics
print('\n== basics ==')
d = {
'a':1,
'b':2,
'subD': {'c':3, 'd':4}
}
dd = DotMap(d)
print(dd)
print(len(dd))
print(dd.copy())
print(dd)
print(OrderedDict.fromkeys([1,2,3]))
print(DotMap.fromkeys([1,2,3], 'a'))
print(dd.get('a'))
print(dd.get('f',33))
print(dd.get('f'))
print(dd.has_key('a'))
dd.update([('rat',5),('bum',4)], dog=7,cat=9)
dd.update({'lol':1,'ba':2})
print(dd)
print
for k in dd:
print(k)
print('a' in dd)
print('c' in dd)
dd.c.a = 1
print(dd.toDict())
dd.pprint()
print
print(dd.values())
dm = DotMap(name='Steve', job='programmer')
print(dm)
print(issubclass(dm.__class__, dict))
am = DotMap()
am.some.deep.path.cuz.we = 'can'
print(am)
del am.some.deep
print(am)
parentDict = {
'name': 'Father1',
'children': [
{'name': 'Child1'},
{'name': 'Child2'},
{'name': 'Child3'},
]
}
parent = DotMap(parentDict)
print([x.name for x in parent.children])
# pickle
print('\n== pickle ==')
import pickle
s = pickle.dumps(parent)
d = pickle.loads(s)
print(d)
# init from DotMap
print('\n== init from DotMap ==')
e = DotMap(d)
print(e)
# empty
print('\n== empty() ==')
d = DotMap()
print(d.empty())
d.a = 1
print(d.empty())
print()
x = DotMap({'a': 'b'})
print(x.b.empty()) # True (and creates empty DotMap)
print(x.b) # DotMap()
print(x.b.empty()) # also True
# _dynamic
print('\n== _dynamic ==')
d = DotMap()
d.still.works
print(d)
d = DotMap(_dynamic=False)
try:
d.no.creation
print(d)
except KeyError:
print('KeyError caught')
d = {'sub':{'a':1}}
dm = DotMap(d)
print(dm)
dm.still.works
dm.sub.still.works
print(dm)
dm2 = DotMap(d,_dynamic=False)
try:
dm.sub.yes.creation
print(dm)
dm2.sub.no.creation
print(dm)
except KeyError:
print('KeyError caught')
# _dynamic
print('\n== toDict() ==')
conf = DotMap()
conf.dep = DotMap(facts=DotMap(operating_systems=DotMap(os_CentOS_7=True), virtual_data_centers=[DotMap(name='vdc1', members=['sp1'], options=DotMap(secret_key='badsecret', description='My First VDC')), DotMap(name='vdc2', members=['sp2'], options=DotMap(secret_key='badsecret', description='My Second VDC'))], install_node='192.168.2.200', replication_group_defaults=DotMap(full_replication=False, enable_rebalancing=False, description='Default replication group description', allow_all_namespaces=False), node_defaults=DotMap(ntp_servers=['192.168.2.2'], ecs_root_user='root', dns_servers=['192.168.2.2'], dns_domain='local', ecs_root_pass='badpassword'), storage_pools=[DotMap(name='sp1', members=['192.168.2.220'], options=DotMap(ecs_block_devices=['/dev/vdb'], description='My First SP')), DotMap(name='sp2', members=['192.168.2.221'], options=DotMap(protected=False, ecs_block_devices=['/dev/vdb'], description='My Second SP'))], storage_pool_defaults=DotMap(cold_storage_enabled=False, protected=False, ecs_block_devices=['/dev/vdc'], description='Default storage pool description'), virtual_data_center_defaults=DotMap(secret_key='badsecret', description='Default virtual data center description'), management_clients=['192.168.2.0/24'], replication_groups=[DotMap(name='rg1', members=['vdc1', 'vdc2'], options=DotMap(description='My RG'))]), lawyers=DotMap(license_accepted=True))
print(conf.dep.toDict()['facts']['replication_groups'])
# recursive assignment
print('\n== recursive assignment ==')
# dict
d = dict()
d['a'] = 5
print(id(d))
d['recursive'] = d
print(d)
print(d['recursive']['recursive']['recursive'])
# DotMap
m = DotMap()
m.a = 5
print(id(m))
m.recursive = m
print(m.recursive.recursive.recursive)
print(m)
print(m.toDict())
# kwarg
print('\n== kwarg ==')
def test(**kwargs):
print(kwargs)
class D:
def keys(self):
return ['a', 'b']
def __getitem__(self, key):
return 0
a = {'1':'a', '2':'b'}
b = DotMap(a, _dynamic=False)
o = OrderedDict(a)
test(**a)
test(**b.toDict())
test(**o)
test(**D())
# ordering
print('\n== ordering ==')
m = DotMap()
m.alpha = 1
m.bravo = 2
m.charlie = 3
m.delta = 4
for k,v in m.items():
print(k,v)
# subclassing
print('\n== subclassing ==')
d = DotMap()
o = OrderedDict()
print(isinstance(d, dict))
print(isinstance(o, dict))
e = DotMap(m)
print(e)
# deepcopy
print('\n== deepcopy ==')
import copy
t = DotMap()
t.a = 1
t.b = 3
f = copy.deepcopy(t)
t.a = 2
print(t)
print(f)
# copy order preservation
print('\n== copy order preservation ==')
t = DotMap()
t.a = 1
t.b = 2
t.c = 3
copies = []
print(id(t))
for i in range(3):
copyMap = copy.deepcopy(t)
copies.append(copyMap)
print(id(copyMap))
print()
for copyMap in copies:
for k,v in copyMap.items():
print(k,v)
print()
# bannerStr
print('\n== bannerStr ==')
t.cities.LA = 1
t.cities.DC = 2
t.cities.London.pop = 'many'
t.cities.London.weather = 'rain'
haiku = '\n'.join([
"Haikus are easy",
"But sometimes they don't make sense",
"Refrigerator",
])
t.haiku = haiku
t.teams.blue = 1
t.teams.red = 2
t.teams.green = 3
t.colors.blue = 1
t.colors.red = 2
t.colors.green = 3
t.numbers.short = list(range(4))
t.numbers.early = list(range(10))
t.numbers.backwards = list(range(10,-1,-1))
t.deepLog.deeper.Q = list(range(4))
print(t.bannerStr())
# sub-DotMap deepcopy
print('\n== sub-DotMap deepcopy ==')
import copy
l = []
d = {'d1': {'d2': ''}}
m = DotMap(d)
for i in range(3):
x = copy.deepcopy(m)
x.d1.d2 = i
l.append(x)
for m in l:
print(m)
# final print
print()
metaperl class print improvement
from __future__ import print_function
from collections import OrderedDict, MutableMapping
from pprint import pprint
from sys import version_info
from inspect import ismethod
# for debugging
def here(item=None):
	"""Debug helper: print 'here', or 'here(<item>)' when *item* is given.

	Uses an identity check against None -- the original used ``!= None``,
	which invokes arbitrary ``__ne__`` implementations (e.g. DotMap's) and
	is non-idiomatic.
	"""
	out = 'here'
	if item is not None:
		out += '({})'.format(item)
	print(out)
class DotMap(MutableMapping, OrderedDict):
def __init__(self, *args, **kwargs):
self._map = OrderedDict()
self._dynamic = True
if kwargs:
if '_dynamic' in kwargs:
self._dynamic = kwargs['_dynamic']
if args:
d = args[0]
if isinstance(d, dict):
for k,v in self.__call_items(d):
if isinstance(v, dict):
v = DotMap(v, _dynamic=self._dynamic)
if type(v) is list:
l = []
for i in v:
n = i
if type(i) is dict:
n = DotMap(i, _dynamic=self._dynamic)
l.append(n)
v = l
self._map[k] = v
if kwargs:
for k,v in self.__call_items(kwargs):
if k is not '_dynamic':
self._map[k] = v
def __call_items(self, obj):
if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
return obj.iteritems()
else:
return obj.items()
def items(self):
return self.iteritems()
def iteritems(self):
return self.__call_items(self._map)
def __iter__(self):
return self._map.__iter__()
def next(self):
return self._map.next()
def __setitem__(self, k, v):
self._map[k] = v
def __getitem__(self, k):
if k not in self._map and self._dynamic and k != '_ipython_canary_method_should_not_exist_':
# automatically extend to new DotMap
self[k] = DotMap()
return self._map[k]
def __setattr__(self, k, v):
if k in {'_map','_dynamic', '_ipython_canary_method_should_not_exist_'}:
super(DotMap, self).__setattr__(k,v)
else:
self[k] = v
def __getattr__(self, k):
if k == {'_map','_dynamic','_ipython_canary_method_should_not_exist_'}:
super(DotMap, self).__getattr__(k)
else:
return self[k]
def __delattr__(self, key):
return self._map.__delitem__(key)
def __contains__(self, k):
return self._map.__contains__(k)
def __str__(self):
items = []
for k,v in self.__call_items(self._map):
# bizarre recursive assignment situation (why someone would do this is beyond me)
if id(v) == id(self):
items.append('{0}=DotMap(...)'.format(k))
else:
items.append('{0}={1}'.format(k, repr(v)))
joined = ', '.join(items)
out = '{0}({1})'.format(self.__class__.__name__, joined)
return out
def __repr__(self):
return str(self)
def toDict(self):
d = {}
for k,v in self.items():
if type(v) is DotMap:
# bizarre recursive assignment support
if id(v) == id(self):
v = d
else:
v = v.toDict()
elif type(v) is list:
l = []
for i in v:
n = i
if type(i) is DotMap:
n = i.toDict()
l.append(n)
v = l
d[k] = v
return d
def pprint(self):
pprint(self.toDict())
def empty(self):
return (not any(self))
# proper dict subclassing
def values(self):
return self._map.values()
# ipython support
def __dir__(self):
return self.keys()
@classmethod
def parseOther(self, other):
if type(other) is DotMap:
return other._map
else:
return other
def __cmp__(self, other):
other = DotMap.parseOther(other)
return self._map.__cmp__(other)
def __eq__(self, other):
other = DotMap.parseOther(other)
if not isinstance(other, dict):
return False
return self._map.__eq__(other)
def __ge__(self, other):
other = DotMap.parseOther(other)
return self._map.__ge__(other)
def __gt__(self, other):
other = DotMap.parseOther(other)
return self._map.__gt__(other)
def __le__(self, other):
other = DotMap.parseOther(other)
return self._map.__le__(other)
def __lt__(self, other):
other = DotMap.parseOther(other)
return self._map.__lt__(other)
def __ne__(self, other):
other = DotMap.parseOther(other)
return self._map.__ne__(other)
def __delitem__(self, key):
return self._map.__delitem__(key)
def __len__(self):
return self._map.__len__()
def clear(self):
self._map.clear()
def copy(self):
return DotMap(self)
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo=None):
return self.copy()
def get(self, key, default=None):
return self._map.get(key, default)
def has_key(self, key):
return key in self._map
def iterkeys(self):
return self._map.iterkeys()
def itervalues(self):
return self._map.itervalues()
def keys(self):
return self._map.keys()
def pop(self, key, default=None):
return self._map.pop(key, default)
def popitem(self):
return self._map.popitem()
def setdefault(self, key, default=None):
self._map.setdefault(key, default)
def update(self, *args, **kwargs):
if len(args) != 0:
self._map.update(*args)
self._map.update(kwargs)
def viewitems(self):
return self._map.viewitems()
def viewkeys(self):
return self._map.viewkeys()
def viewvalues(self):
return self._map.viewvalues()
@classmethod
def fromkeys(cls, seq, value=None):
d = DotMap()
d._map = OrderedDict.fromkeys(seq, value)
return d
def __getstate__(self): return self.__dict__
def __setstate__(self, d): self.__dict__.update(d)
# bannerStr
def _getListStr(self,items):
out = '['
mid = ''
for i in items:
mid += ' {}\n'.format(i)
if mid != '':
mid = '\n' + mid
out += mid
out += ']'
return out
def _getValueStr(self,k,v):
outV = v
multiLine = len(str(v).split('\n')) > 1
if multiLine:
# push to next line
outV = '\n' + v
if type(v) is list:
outV = self._getListStr(v)
out = '{} {}'.format(k,outV)
return out
def _getSubMapDotList(self, pre, name, subMap):
outList = []
if pre == '':
pre = name
else:
pre = '{}.{}'.format(pre,name)
def stamp(pre,k,v):
valStr = self._getValueStr(k,v)
return '{}.{}'.format(pre, valStr)
for k,v in subMap.items():
if isinstance(v,DotMap) and v != DotMap():
subList = self._getSubMapDotList(pre,k,v)
outList.extend(subList)
else:
outList.append(stamp(pre,k,v))
return outList
def _getSubMapStr(self, name, subMap):
outList = ['== {} =='.format(name)]
for k,v in subMap.items():
if isinstance(v,DotMap) and v != DotMap():
# break down to dots
subList = self._getSubMapDotList('',k,v)
# add the divit
# subList = ['> {}'.format(i) for i in subList]
outList.extend(subList)
else:
out = self._getValueStr(k,v)
# out = '> {}'.format(out)
out = '{}'.format(out)
outList.append(out)
finalOut = '\n'.join(outList)
return finalOut
def bannerStr(self):
lines = []
previous = None
for k,v in self.items():
if previous == 'DotMap':
lines.append('-')
out = ''
if isinstance(v,DotMap):
name = k
subMap = v
out = self._getSubMapStr(name,subMap)
lines.append(out)
previous = 'DotMap'
else:
out = self._getValueStr(k,v)
lines.append(out)
previous = 'other'
lines.append('--')
s = '\n'.join(lines)
return s
if __name__ == '__main__':
# basics
print('\n== basics ==')
d = {
'a':1,
'b':2,
'subD': {'c':3, 'd':4}
}
dd = DotMap(d)
print(dd)
print(len(dd))
print(dd.copy())
print(dd)
print(OrderedDict.fromkeys([1,2,3]))
print(DotMap.fromkeys([1,2,3], 'a'))
print(dd.get('a'))
print(dd.get('f',33))
print(dd.get('f'))
print(dd.has_key('a'))
dd.update([('rat',5),('bum',4)], dog=7,cat=9)
dd.update({'lol':1,'ba':2})
print(dd)
print
for k in dd:
print(k)
print('a' in dd)
print('c' in dd)
dd.c.a = 1
print(dd.toDict())
dd.pprint()
print
print(dd.values())
dm = DotMap(name='Steve', job='programmer')
print(dm)
print(issubclass(dm.__class__, dict))
am = DotMap()
am.some.deep.path.cuz.we = 'can'
print(am)
del am.some.deep
print(am)
parentDict = {
'name': 'Father1',
'children': [
{'name': 'Child1'},
{'name': 'Child2'},
{'name': 'Child3'},
]
}
parent = DotMap(parentDict)
print([x.name for x in parent.children])
# pickle
print('\n== pickle ==')
import pickle
s = pickle.dumps(parent)
d = pickle.loads(s)
print(d)
# init from DotMap
print('\n== init from DotMap ==')
e = DotMap(d)
print(e)
# empty
print('\n== empty() ==')
d = DotMap()
print(d.empty())
d.a = 1
print(d.empty())
print()
x = DotMap({'a': 'b'})
print(x.b.empty()) # True (and creates empty DotMap)
print(x.b) # DotMap()
print(x.b.empty()) # also True
# _dynamic
print('\n== _dynamic ==')
d = DotMap()
d.still.works
print(d)
d = DotMap(_dynamic=False)
try:
d.no.creation
print(d)
except KeyError:
print('KeyError caught')
d = {'sub':{'a':1}}
dm = DotMap(d)
print(dm)
dm.still.works
dm.sub.still.works
print(dm)
dm2 = DotMap(d,_dynamic=False)
try:
dm.sub.yes.creation
print(dm)
dm2.sub.no.creation
print(dm)
except KeyError:
print('KeyError caught')
# _dynamic
print('\n== toDict() ==')
conf = DotMap()
conf.dep = DotMap(facts=DotMap(operating_systems=DotMap(os_CentOS_7=True), virtual_data_centers=[DotMap(name='vdc1', members=['sp1'], options=DotMap(secret_key='badsecret', description='My First VDC')), DotMap(name='vdc2', members=['sp2'], options=DotMap(secret_key='badsecret', description='My Second VDC'))], install_node='192.168.2.200', replication_group_defaults=DotMap(full_replication=False, enable_rebalancing=False, description='Default replication group description', allow_all_namespaces=False), node_defaults=DotMap(ntp_servers=['192.168.2.2'], ecs_root_user='root', dns_servers=['192.168.2.2'], dns_domain='local', ecs_root_pass='badpassword'), storage_pools=[DotMap(name='sp1', members=['192.168.2.220'], options=DotMap(ecs_block_devices=['/dev/vdb'], description='My First SP')), DotMap(name='sp2', members=['192.168.2.221'], options=DotMap(protected=False, ecs_block_devices=['/dev/vdb'], description='My Second SP'))], storage_pool_defaults=DotMap(cold_storage_enabled=False, protected=False, ecs_block_devices=['/dev/vdc'], description='Default storage pool description'), virtual_data_center_defaults=DotMap(secret_key='badsecret', description='Default virtual data center description'), management_clients=['192.168.2.0/24'], replication_groups=[DotMap(name='rg1', members=['vdc1', 'vdc2'], options=DotMap(description='My RG'))]), lawyers=DotMap(license_accepted=True))
print(conf.dep.toDict()['facts']['replication_groups'])
# recursive assignment
print('\n== recursive assignment ==')
# dict
d = dict()
d['a'] = 5
print(id(d))
d['recursive'] = d
print(d)
print(d['recursive']['recursive']['recursive'])
# DotMap
m = DotMap()
m.a = 5
print(id(m))
m.recursive = m
print(m.recursive.recursive.recursive)
print(m)
print(m.toDict())
# kwarg
print('\n== kwarg ==')
def test(**kwargs):
print(kwargs)
class D:
def keys(self):
return ['a', 'b']
def __getitem__(self, key):
return 0
a = {'1':'a', '2':'b'}
b = DotMap(a, _dynamic=False)
o = OrderedDict(a)
test(**a)
test(**b.toDict())
test(**o)
test(**D())
# ordering
print('\n== ordering ==')
m = DotMap()
m.alpha = 1
m.bravo = 2
m.charlie = 3
m.delta = 4
for k,v in m.items():
print(k,v)
# subclassing
print('\n== subclassing ==')
d = DotMap()
o = OrderedDict()
print(isinstance(d, dict))
print(isinstance(o, dict))
e = DotMap(m)
print(e)
# deepcopy
print('\n== deepcopy ==')
import copy
t = DotMap()
t.a = 1
t.b = 3
f = copy.deepcopy(t)
t.a = 2
print(t)
print(f)
# copy order preservation
print('\n== copy order preservation ==')
t = DotMap()
t.a = 1
t.b = 2
t.c = 3
copies = []
print(id(t))
for i in range(3):
copyMap = copy.deepcopy(t)
copies.append(copyMap)
print(id(copyMap))
print()
for copyMap in copies:
for k,v in copyMap.items():
print(k,v)
print()
# bannerStr
print('\n== bannerStr ==')
t.cities.LA = 1
t.cities.DC = 2
t.cities.London.pop = 'many'
t.cities.London.weather = 'rain'
haiku = '\n'.join([
"Haikus are easy",
"But sometimes they don't make sense",
"Refrigerator",
])
t.haiku = haiku
t.teams.blue = 1
t.teams.red = 2
t.teams.green = 3
t.colors.blue = 1
t.colors.red = 2
t.colors.green = 3
t.numbers.short = list(range(4))
t.numbers.early = list(range(10))
t.numbers.backwards = list(range(10,-1,-1))
t.deepLog.deeper.Q = list(range(4))
print(t.bannerStr())
# sub-DotMap deepcopy
print('\n== sub-DotMap deepcopy ==')
import copy
l = []
d = {'d1': {'d2': ''}}
m = DotMap(d)
for i in range(3):
x = copy.deepcopy(m)
x.d1.d2 = i
l.append(x)
for m in l:
print(m)
# final print
print()
|
# Help visualize histograms for hyperspectral images
import os
import sys
import numpy as np
import pandas as pd
from plantcv.plantcv._debug import _debug
from plantcv.plantcv import fatal_error, params, color_palette
from plotnine import ggplot, aes, geom_line, scale_color_manual, theme_classic
from plantcv.plantcv.visualize import histogram
from plantcv.plantcv.hyperspectral import _find_closest
from scipy.spatial import distance
import colour
def _wavelength_to_rgb(wvlength):
    """Convert a wavelength (nm) to a display color via the colour package.

    Returns a tuple (color_vis, rgb, xyz):
      color_vis - hex web-color string from colour.notation.RGB_to_HEX
      rgb       - sRGB float triplet
                  NOTE(review): out-of-gamut spectral colors can produce
                  components outside [0, 1]; nothing here clips them -- confirm
                  downstream handling.
      xyz       - CIE XYZ tristimulus values
    """
    ## wavelength -> xyz
    # 10-degree observer color matching functions
    cmfs = colour.MSDS_CMFS["CIE 2012 10 Degree Standard Observer"]
    xyz = colour.wavelength_to_XYZ(wvlength, cmfs)
    ## xyz -> rgb
    # sRGB primaries with a D50 source / D65 target white point
    illuminant_xyz = np.array([0.34570, 0.35850]) # D50
    illuminant_rgb = np.array([0.31270, 0.32900]) # D65
    matrix = np.array([[3.24062548, -1.53720797, -0.49862860],
                       [-0.96893071, 1.87575606, 0.04151752],
                       [0.05571012, -0.20402105, 1.05699594]])
    # CIE RGB (alternative primaries kept for reference)
    # illuminant_xyz = np.array([0.34570, 0.35850]) # D50
    # illuminant_rgb = np.array([1.00000 / 3.00000, 1.00000 / 3.00000]) # E
    # matrix = np.array([[2.3706743, -0.9000405, -0.4706338],
    # [-0.5138850, 1.4253036, 0.0885814],
    # [0.0052982, -0.0146949, 1.0093968]])
    rgb = colour.XYZ_to_RGB(xyz, illuminant_XYZ=illuminant_xyz, illuminant_RGB=illuminant_rgb,
                            chromatic_adaptation_transform="Bradford",
                            matrix_XYZ_to_RGB=matrix)
    # rgb -> hex
    color_vis = colour.notation.RGB_to_HEX(rgb)
    return color_vis, rgb, xyz
def hyper_histogram(array, mask=None, bins=100, lower_bound=None, upper_bound=None,
                    title=None, wvlengths=None):
    """Plot histograms of selected wavelength bands of a hyperspectral image.

    The histogram color is derived from the wavelength when it falls in the
    visible spectrum; otherwise colors are taken from matplotlib color scales
    (twilight_shifted for UV, inferno for IR).

    Inputs:
    array       = Hyperspectral data instance
    mask        = binary mask, if provided, calculate histogram from masked area only (default=None)
    bins        = divide the data into n evenly spaced bins (default=100)
    lower_bound = the lower bound of the bins (x-axis min value) (default=None)
    upper_bound = the upper bound of the bins (x-axis max value) (default=None)
    title       = a custom title for the plot (default=None)
    wvlengths   = (optional) list of wavelengths to show histograms
                  (default=None, which uses [480, 550, 650], i.e. blue, green, and red bands)

    Returns:
    fig_hist = histogram figure

    :param array: plantcv.plantcv.classes.Spectral_data
    :param mask: numpy.ndarray
    :param bins: int
    :param lower_bound: None, int, float
    :param upper_bound: None, int, float
    :param title: None, str
    :param wvlengths: None, list
    :return fig_hist: plotnine.ggplot.ggplot
    """
    vis_min = 400
    vis_max = 680
    # Bug fix: the original used a mutable default ([480, 550, 650]) and
    # sorted it in place, mutating the shared default and caller lists.
    if wvlengths is None:
        wvlengths = [480, 550, 650]
    # Always work on a sorted copy of the desired wavelengths
    wvlengths = sorted(wvlengths)
    # Available wavelengths of the spectral data
    wl_keys = array.wavelength_dict.keys()
    wls = np.array([float(i) for i in wl_keys])
    # Spectral resolution of the spectral data (mean gap between bands)
    diffs = [wls[i] - wls[i - 1] for i in range(1, len(wls))]
    spc_res = sum(diffs) / len(diffs)
    # A requested band more than 2x the resolution outside the available
    # range is considered out of range
    checks = []
    for wv in wvlengths:
        checks.append(array.min_wavelength - wv > 2 * spc_res)
        checks.append(wv - array.max_wavelength > 2 * spc_res)
    if np.any(checks):
        fatal_error(f"At least one band is too far from the available wavelength range: "
                    f"({array.min_wavelength},{array.max_wavelength})!")
    # Find indices of bands whose wavelengths are closest to desired ones
    match_ids = [_find_closest(wls, wv) for wv in wvlengths]
    # Partition requested bands into visible / UV / IR groups
    ids_vis = [idx for (idx, wv) in enumerate(wvlengths) if vis_min <= wv <= vis_max]
    ids_uv = [idx for (idx, wv) in enumerate(wvlengths) if wv < vis_min]
    ids_ir = [idx for (idx, wv) in enumerate(wvlengths) if wv > vis_max]
    # Save the global color scale so it can be restored afterwards
    color_scale_tmp = params.color_scale
    # UV colors
    params.color_scale = "twilight_shifted"
    rgb_uv = color_palette(num=len(ids_uv), saved=False)
    colors_uv = []
    for rgb in rgb_uv:
        rgb = np.array(rgb).astype(float) / 255
        colors_uv.append(colour.notation.RGB_to_HEX(rgb))
    # IR colors
    params.color_scale = "inferno"
    rgb_ir = color_palette(num=len(ids_ir), saved=False)
    colors_ir = []
    for rgb in rgb_ir:
        rgb = np.array(rgb).astype(float) / 255
        colors_ir.append(colour.notation.RGB_to_HEX(rgb))
    params.color_scale = color_scale_tmp
    # VIS colors derived directly from the wavelength
    colors_vis = []
    for i in ids_vis:
        colors_vis.append(_wavelength_to_rgb(wvlength=wvlengths[i])[0])
    array_data = array.array_data
    histograms = dict()
    hist_dataset = pd.DataFrame(columns=['reflectance'])
    # Silence debugging inside the per-band histogram calls
    debug = params.debug
    params.debug = None
    colors = colors_uv + colors_vis + colors_ir
    # Gather histogram data per band via the visualize.histogram helper
    for i_wv, (wv, color) in enumerate(zip(wvlengths, colors)):
        idx = match_ids[i_wv]
        _, hist_data = histogram(array_data[:, :, idx], mask=mask, bins=bins, lower_bound=lower_bound,
                                 upper_bound=upper_bound, title=title, hist_data=True)
        histograms[wv] = {"label": wv, "graph_color": color, "reflectance": hist_data['pixel intensity'].tolist(),
                          "hist": hist_data['proportion of pixels (%)'].tolist()}
        if i_wv == 0:
            hist_dataset['reflectance'] = hist_data['pixel intensity'].tolist()
        hist_dataset[wv] = hist_data['proportion of pixels (%)'].tolist()
    # Long-format dataframe for plotnine
    df_hist = pd.melt(hist_dataset, id_vars=['reflectance'], value_vars=wvlengths,
                      var_name='Wavelength (' + array.wavelength_units + ')', value_name='proportion of pixels (%)')
    fig_hist = (ggplot(df_hist, aes(x='reflectance', y='proportion of pixels (%)',
                                    color='Wavelength (' + array.wavelength_units + ')'))
                + geom_line()
                + scale_color_manual(colors, expand=(0, 0))
                + theme_classic()
                )
    params.debug = debug
    _debug(fig_hist, filename=os.path.join(params.debug_outdir, str(params.device) + '_histogram.png'))
    return fig_hist
Update hyper_histogram.py
use matplotlib colormaps (via plantcv's `color_palette`) instead of the `colour` package to display colors based on wavelengths
# Help visualize histograms for hyperspectral images
import os
import numpy as np
import pandas as pd
from plantcv.plantcv._debug import _debug
from plantcv.plantcv import fatal_error, params, color_palette
from plotnine import ggplot, aes, geom_line, scale_color_manual, theme_classic
from plantcv.plantcv.visualize import histogram
from plantcv.plantcv.hyperspectral import _find_closest
import math
def _get_color_dict_uv():
    """Map UV wavelengths (290-443 nm) to RGB tuples of 0-1 floats.

    Colors come from the reversed "cool" colormap via plantcv's
    color_palette.  Bug fix: the global params.color_scale is restored
    afterwards instead of being clobbered for the rest of the session.
    """
    saved_scale = params.color_scale
    params.color_scale = "cool_r"
    uv_wavelengths = np.arange(290, 444)
    uv_colors_ = color_palette(num=256)
    params.color_scale = saved_scale
    # keep only as many colors as wavelengths, then reverse the ramp
    uv_colors_ = uv_colors_[0:len(uv_wavelengths)]
    uv_colors_ = [tuple([xi / 255 for xi in x]) for x in uv_colors_[::-1]]
    return {wv: color for wv, color in zip(uv_wavelengths, uv_colors_)}
def _get_color_dict_vis():
    """Map visible wavelengths (445-700 nm) to RGB tuples of 0-1 floats.

    Colors come from the "turbo" colormap via plantcv's color_palette.
    Bug fix: the global params.color_scale is restored afterwards instead
    of being clobbered for the rest of the session.
    """
    saved_scale = params.color_scale
    params.color_scale = "turbo"
    vis_wavelengths = np.arange(445, 701)
    vis_colors_ = color_palette(num=256)
    params.color_scale = saved_scale
    vis_colors_ = [tuple([xi / 255 for xi in x]) for x in vis_colors_]
    return {wv: color for wv, color in zip(vis_wavelengths, vis_colors_)}
def _get_color_dict_nir():
    """Map NIR wavelengths (701-1724 nm) to RGB tuples of 0-1 floats.

    Colors come from the "inferno" colormap via plantcv's color_palette.
    Bug fix: the global params.color_scale is restored afterwards instead
    of being clobbered for the rest of the session.
    """
    saved_scale = params.color_scale
    params.color_scale = "inferno"
    nir_wavelengths = np.arange(701, 1725)
    nir_colors_ = color_palette(num=256)
    params.color_scale = saved_scale
    # four consecutive wavelengths share one palette entry
    # (1024 wavelengths / 256 colors)
    nir_colors = {}
    for idx, wv in enumerate(nir_wavelengths):
        nir_colors[wv] = tuple(xi / 255 for xi in nir_colors_[idx // 4])
    return nir_colors
def _rgb_to_webcode(rgb_values):
"""
RGB_value: a tuple of RGB values (0~1, float)
"""
webcode = "#"
for value in rgb_values:
code_ = hex(int(value*255)).replace('0x', '')
code = code_.upper() if len(code_) > 1 else '0{}'.format(code_.upper())
webcode += code
return webcode
def hyper_histogram(array, mask=None, bins=100, lower_bound=None, upper_bound=None,
                    title=None, wvlengths=None):
    """Plot histograms of selected wavelength bands of a hyperspectral image.

    Histogram colors are derived from the wavelength via UV/VIS/NIR color
    tables; requested bands are matched to the closest available wavelength
    in the data.

    Inputs:
    array       = Hyperspectral data instance
    mask        = binary mask, if provided, calculate histogram from masked area only (default=None)
    bins        = divide the data into n evenly spaced bins (default=100)
    lower_bound = the lower bound of the bins (x-axis min value) (default=None)
    upper_bound = the upper bound of the bins (x-axis max value) (default=None)
    title       = a custom title for the plot (default=None)
    wvlengths   = (optional) list of wavelengths to show histograms
                  (default=None, which uses [480, 550, 650], i.e. blue, green, and red bands)

    Returns:
    fig_hist = histogram figure

    :param array: plantcv.plantcv.classes.Spectral_data
    :param mask: numpy.ndarray
    :param bins: int
    :param lower_bound: None, int, float
    :param upper_bound: None, int, float
    :param title: None, str
    :param wvlengths: None, list
    :return fig_hist: plotnine.ggplot.ggplot
    """
    # Bug fix: the original used a mutable default ([480, 550, 650]) and
    # sorted it in place, mutating the shared default and caller lists.
    # (The old docstring also claimed the default red band was 670.)
    if wvlengths is None:
        wvlengths = [480, 550, 650]
    # Always work on a sorted copy of the desired wavelengths
    wvlengths = sorted(wvlengths)
    # Available wavelengths of the spectral data
    wl_keys = array.wavelength_dict.keys()
    wls = np.array([float(i) for i in wl_keys])
    # Spectral resolution of the spectral data (mean gap between bands)
    diffs = [wls[i] - wls[i - 1] for i in range(1, len(wls))]
    spc_res = sum(diffs) / len(diffs)
    # A requested band more than 2x the resolution outside the available
    # range is considered out of range
    checks = []
    for wv in wvlengths:
        checks.append(array.min_wavelength - wv > 2 * spc_res)
        checks.append(wv - array.max_wavelength > 2 * spc_res)
    if np.any(checks):
        fatal_error(f"At least one band is too far from the available wavelength range: "
                    f"({array.min_wavelength},{array.max_wavelength})!")
    # Find indices of bands whose wavelengths are closest to desired ones
    match_ids = [_find_closest(wls, wv) for wv in wvlengths]
    match_wls = [round(wls[i]) for i in match_ids]
    # Build a wavelength -> RGB lookup covering only the ranges actually used
    color_dict = {}
    if any(290 <= x < 445 for x in match_wls):
        color_dict.update(_get_color_dict_uv())
    if any(445 <= x < 701 for x in match_wls):
        color_dict.update(_get_color_dict_vis())
    if any(701 <= x < 1701 for x in match_wls):
        color_dict.update(_get_color_dict_nir())
    colors_hex = [_rgb_to_webcode(color_dict[wv]) for wv in match_wls]
    array_data = array.array_data
    histograms = dict()
    hist_dataset = pd.DataFrame(columns=['reflectance'])
    # Silence debugging inside the per-band histogram calls
    debug = params.debug
    params.debug = None
    # Gather histogram data per band via the visualize.histogram helper
    for i_wv, (wv, color) in enumerate(zip(wvlengths, colors_hex)):
        idx = match_ids[i_wv]
        _, hist_data = histogram(array_data[:, :, idx], mask=mask, bins=bins, lower_bound=lower_bound,
                                 upper_bound=upper_bound, title=title, hist_data=True)
        histograms[wv] = {"label": wv, "graph_color": color, "reflectance": hist_data['pixel intensity'].tolist(),
                          "hist": hist_data['proportion of pixels (%)'].tolist()}
        if i_wv == 0:
            hist_dataset['reflectance'] = hist_data['pixel intensity'].tolist()
        hist_dataset[wv] = hist_data['proportion of pixels (%)'].tolist()
    # Long-format dataframe for plotnine
    df_hist = pd.melt(hist_dataset, id_vars=['reflectance'], value_vars=wvlengths,
                      var_name='Wavelength (' + array.wavelength_units + ')', value_name='proportion of pixels (%)')
    fig_hist = (ggplot(df_hist, aes(x='reflectance', y='proportion of pixels (%)',
                                    color='Wavelength (' + array.wavelength_units + ')'))
                + geom_line()
                + scale_color_manual(colors_hex, expand=(0, 0))
                + theme_classic()
                )
    params.debug = debug
    _debug(fig_hist, filename=os.path.join(params.debug_outdir, str(params.device) + '_histogram.png'))
    return fig_hist
|
__author__ = 'benjamin.c.yan'
__version__ = "0.0.5"
beautify code
__author__ = 'benjamin.c.yan'
__version__ = "0.0.5"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
'''
A Bookmark attaches a set of Tags and a single Object ('class Filename' in the
included example).
'''
from sqlalchemy import Table
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import UniqueConstraint
from sqlalchemy import Integer
from sqlalchemy.orm import relationship
from sqlalchemy.orm import backref
from sqlalchemy.ext.associationproxy import association_proxy
from kcl.sqlalchemy.get_one_or_create import get_one_or_create
from kcl.sqlalchemy.BaseMixin import BASE
# Timestamps on bookmarks results in 'duplicate' bookmarks
# so dont put timestamps on bookmarks
# Many-to-many association table linking Bookmark rows to Tag rows.
# Deliberately no timestamp columns: they would make otherwise-identical
# bookmarks look distinct.
tagbookmarks_table = Table(
    'tagbookmarks',
    BASE.metadata,
    Column('bookmark_id', Integer, ForeignKey("bookmark.id"), primary_key=True),
    Column('tag_id', Integer, ForeignKey("tag.id"), primary_key=True),
    UniqueConstraint('bookmark_id', 'tag_id'),
)
@classmethod
def construct(cls, session, tag, **kwargs):
    """Fetch (or create) the Bookmark matching **kwargs and attach *tag*.

    Injected onto the generated Bookmark class as a classmethod by
    BookmarkClassConstructor.
    """
    bookmark = get_one_or_create(session, cls, **kwargs)
    bookmark.tag_rel.add(tag)
    return bookmark
def bookmark_repr(self):
    """__repr__ for generated Bookmark classes: '<target>[#<placeholder>] <tags>'.

    Bug fix: the placeholder was stringified *before* the truthiness test,
    so a missing placeholder (None) became the truthy string 'None' and
    every repr gained a spurious '#None'.  Test the raw value first and
    stringify only when it is actually present.
    """
    target_name = str(getattr(self, self.target_name))
    target_name_placeholder = getattr(self, self.target_name_placeholder)
    if target_name_placeholder:
        return target_name + '#' + str(target_name_placeholder) + ' ' + str(self.tags)
    return target_name + ' ' + str(self.tags)
class BookmarkClassConstructor():
    """Factory that builds and returns a declarative 'Bookmark' class.

    __new__ returns a brand-new class (not an instance) mapped onto BASE,
    whose rows join a set of Tags to one target object (e.g. Filename)
    and, optionally, one placeholder object (e.g. a byte offset).
    """
    def __new__(cls, mapper_to_bookmark, mapper_to_bookmark_placeholder=False):
        future_class_attr = {}
        future_class_attr['id'] = Column(Integer, primary_key=True)
        # Many-to-many Tag link through the tagbookmarks association table;
        # the lambda defers table lookup until mapper configuration time.
        future_class_attr['tag_rel'] = relationship("Tag",
                                                    secondary=lambda: tagbookmarks_table,
                                                    collection_class=set,
                                                    backref=backref('bookmarks'))
        future_class_attr['tags'] = association_proxy('tag_rel', 'tag')
        target_class_name = mapper_to_bookmark.__name__
        target_name = target_class_name.lower().split('.')[-1] # 'filename' usually
        future_class_attr[target_name+'_id'] = Column(Integer, ForeignKey(target_name+'.id'), unique=False, nullable=False)
        future_class_attr[target_name] = relationship(target_class_name, backref='bookmarks')
        future_class_attr['target_class_name'] = target_class_name
        future_class_attr['target_name'] = target_name
        if mapper_to_bookmark_placeholder:
            target_class_name_placeholder = mapper_to_bookmark_placeholder.__name__
            target_name_placeholder = target_class_name_placeholder.lower().split('.')[-1] # byteoffset in the filename case
            future_class_attr[target_name_placeholder+'_id'] = Column(Integer, ForeignKey(target_name_placeholder+'.id'), unique=False, nullable=True)
            future_class_attr[target_name_placeholder] = relationship(target_class_name_placeholder, backref='bookmarks')
            future_class_attr['target_class_name_placeholder'] = target_class_name_placeholder
            future_class_attr['target_name_placeholder'] = target_name_placeholder
        # NOTE(review): when no placeholder mapper is given,
        # 'target_name_placeholder' is never set, so bookmark_repr's
        # getattr(self, self.target_name_placeholder) would raise
        # AttributeError — confirm all call sites pass a placeholder.
        future_class_attr['construct'] = construct
        future_class_attr['__repr__'] = bookmark_repr
        return type('Bookmark', (BASE,), future_class_attr)
auto-commit
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
'''
A Bookmark attaches a set of Tags and a single Object ('class Filename' in the
included example).
'''
from sqlalchemy import Table
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import UniqueConstraint
from sqlalchemy import Integer
from sqlalchemy.orm import relationship
from sqlalchemy.orm import backref
from sqlalchemy.ext.associationproxy import association_proxy
from kcl.sqlalchemy.get_one_or_create import get_one_or_create
from kcl.sqlalchemy.BaseMixin import BASE
# Timestamps on bookmarks results in 'duplicate' bookmarks
# so dont put timestamps on bookmarks
# Many-to-many association table linking Bookmark rows to Tag rows.
# Deliberately no timestamp columns: they would make otherwise-identical
# bookmarks look distinct.
tagbookmarks_table = Table(
    'tagbookmarks',
    BASE.metadata,
    Column('bookmark_id', Integer, ForeignKey("bookmark.id"), primary_key=True),
    Column('tag_id', Integer, ForeignKey("tag.id"), primary_key=True),
    UniqueConstraint('bookmark_id', 'tag_id'),
)
@classmethod
def construct(cls, session, tag, **kwargs):
    """Return an existing Bookmark matching **kwargs (creating it if
    needed) and add *tag* to its tag set.

    Injected onto the generated Bookmark class by BookmarkClassConstructor.
    """
    result = get_one_or_create(session, cls, **kwargs)
    result.tag_rel.add(tag)
    return result
def bookmark_repr(self):
    """__repr__ for generated Bookmark classes: '<target>[#<placeholder>] <tags>'."""
    target = str(getattr(self, self.target_name))
    placeholder = getattr(self, self.target_name_placeholder)
    tags = str(self.tags)
    if placeholder:
        return target + '#' + str(placeholder) + ' ' + tags
    return target + ' ' + tags
class BookmarkClassConstructor():
    """Factory that builds and returns a declarative 'Bookmark' class.

    __new__ returns a brand-new class (not an instance) mapped onto BASE,
    whose rows join a set of Tags to one target object (e.g. Filename)
    and, optionally, one placeholder object (e.g. a byte offset).
    """
    def __new__(cls, mapper_to_bookmark, mapper_to_bookmark_placeholder=False):
        future_class_attr = {}
        future_class_attr['id'] = Column(Integer, primary_key=True)
        # Many-to-many Tag link through the tagbookmarks association table;
        # the lambda defers table lookup until mapper configuration time.
        future_class_attr['tag_rel'] = relationship("Tag",
                                                    secondary=lambda: tagbookmarks_table,
                                                    collection_class=set,
                                                    backref=backref('bookmarks'))
        future_class_attr['tags'] = association_proxy('tag_rel', 'tag')
        target_class_name = mapper_to_bookmark.__name__
        target_name = target_class_name.lower().split('.')[-1] # 'filename' usually
        future_class_attr[target_name+'_id'] = Column(Integer, ForeignKey(target_name+'.id'), unique=False, nullable=False)
        future_class_attr[target_name] = relationship(target_class_name, backref='bookmarks')
        future_class_attr['target_class_name'] = target_class_name
        future_class_attr['target_name'] = target_name
        if mapper_to_bookmark_placeholder:
            target_class_name_placeholder = mapper_to_bookmark_placeholder.__name__
            target_name_placeholder = target_class_name_placeholder.lower().split('.')[-1] # byteoffset in the filename case
            future_class_attr[target_name_placeholder+'_id'] = Column(Integer, ForeignKey(target_name_placeholder+'.id'), unique=False, nullable=True)
            future_class_attr[target_name_placeholder] = relationship(target_class_name_placeholder, backref='bookmarks')
            future_class_attr['target_class_name_placeholder'] = target_class_name_placeholder
            future_class_attr['target_name_placeholder'] = target_name_placeholder
        # NOTE(review): when no placeholder mapper is given,
        # 'target_name_placeholder' is never set, so bookmark_repr's
        # getattr(self, self.target_name_placeholder) would raise
        # AttributeError — confirm all call sites pass a placeholder.
        future_class_attr['construct'] = construct
        future_class_attr['__repr__'] = bookmark_repr
        return type('Bookmark', (BASE,), future_class_attr)
|
import sys
import functools
from importlib import import_module
from .exception import DoesNotExist, NotConnected
# Relationship direction markers used when building cypher fragments.
OUTGOING = 1   # (us)-[r]->(them)
INCOMING = -1  # (us)<-[r]-(them)
EITHER = 0     # undirected match
# check origin node is saved and not deleted
def check_origin(fn):
    """Decorator: assert the manager's origin node is saved and not deleted
    (via origin._pre_action_check) before running *fn*."""
    # Python 2 functions expose func_name; Python 3 only __name__.
    wrapped_name = getattr(fn, 'func_name', fn.__name__)

    @functools.wraps(fn)
    def guard(self, *args, **kwargs):
        self.origin._pre_action_check(self.name + '.' + wrapped_name)
        return fn(self, *args, **kwargs)
    return guard
def rel_helper(**rel):
    """Build a cypher pattern fragment ' (lhs)-[ident:TYPE]->(rhs)',
    with the arrow orientation chosen by rel['direction']."""
    direction = rel['direction']
    if direction == OUTGOING:
        arrow = '-[{0}:{1}]->'
    elif direction == INCOMING:
        arrow = '<-[{0}:{1}]-'
    else:
        arrow = '-[{0}:{1}]-'
    rel_frag = arrow.format(rel.get('ident', ''), rel['relation_type'])
    return " ({0}){1}({2})".format(rel['lhs'], rel_frag, rel['rhs'])
class RelationshipManager(object):
    """Per-node accessor for one relationship definition.

    Instances are built by RelationshipDefinition.build_manager(); the
    'definition' dict carries direction, relation_type, label_map and an
    optional relationship model.  'origin' is the node traversals start
    from.  Methods guarded with @check_origin refuse to operate on an
    unsaved or deleted origin node.
    """

    def __init__(self, definition, origin):
        self.direction = definition['direction']
        self.relation_type = definition['relation_type']
        self.label_map = definition['label_map']
        self.definition = definition
        self.origin = origin

    def __str__(self):
        # self.description is supplied by cardinality subclasses (ZeroOrMore).
        direction = 'either'
        if self.direction == OUTGOING:
            direction = 'a outgoing'
        elif self.direction == INCOMING:
            direction = 'a incoming'
        return "{0} in {1} direction of type {2} on node ({3}) of class '{4}'".format(
            self.description, direction,
            self.relation_type, self.origin._id, self.origin.__class__.__name__)

    @check_origin
    def __bool__(self):
        return len(self) > 0

    @check_origin
    def __nonzero__(self):
        # Python 2 spelling of __bool__.
        return len(self) > 0

    @check_origin
    def __len__(self):
        return len(self.origin.traverse(self.name))

    @property
    def client(self):
        """Database client of the origin node."""
        return self.origin.client

    @check_origin
    def count(self):
        """Number of related nodes; same as len(self)."""
        return self.__len__()

    @check_origin
    def all(self):
        """Return every related node."""
        return self.origin.traverse(self.name).run()

    @check_origin
    def get(self, **kwargs):
        """Return the single related node whose properties equal **kwargs.

        Raises DoesNotExist when nothing matches and a bare Exception when
        the match is ambiguous.
        """
        result = self.search(**kwargs)
        if len(result) == 1:
            return result[0]
        if len(result) > 1:
            raise Exception("Multiple items returned, use search?")
        if not result:
            raise DoesNotExist("No items exist for the specified arguments")

    @check_origin
    def search(self, **kwargs):
        """Return all related nodes whose properties equal **kwargs."""
        t = self.origin.traverse(self.name)
        for field, value in kwargs.items():
            t.where(field, '=', value)
        return t.run()

    @check_origin
    def is_connected(self, obj):
        """True when a relationship of this type exists between origin and obj."""
        self._check_node(obj)
        rel = rel_helper(lhs='a', rhs='b', ident='r', **self.definition)
        # rel_helper emits a leading space but no trailing one; the original
        # concatenation glued ')' straight onto RETURN, producing malformed
        # cypher.  A space is added before RETURN.
        q = "START a=node({self}), b=node({them}) MATCH" + rel + " RETURN count(r)"
        return bool(self.origin.cypher(q, {'them': obj._id})[0][0][0])

    def _check_node(self, obj):
        """Check obj is an instance of an allowed class and has been saved."""
        for label, cls in self.label_map.items():
            if obj.__class__ is cls:
                if not hasattr(obj, '_id'):
                    raise ValueError("Can't perform operation on unsaved node " + repr(obj))
                return
        allowed_cls = ", ".join([(tcls if isinstance(tcls, str) else tcls.__name__)
                                 for tcls, _ in self.label_map.items()])
        raise ValueError("Expected nodes of class "
                         + allowed_cls + " got " + repr(obj)
                         + " see relationship definition in " + self.origin.__class__.__name__)

    @check_origin
    def connect(self, obj, properties=None):
        """Create a relationship from origin to obj.

        Returns True when the definition has no relationship model;
        otherwise returns an inflated model instance.
        """
        self._check_node(obj)
        if not self.definition['model'] and properties:
            raise NotImplementedError("Relationship properties without " +
                                      "using a relationship model is no longer supported")
        new_rel = rel_helper(lhs='us', rhs='them', ident='r', **self.definition)
        q = "START them=node({them}), us=node({self}) CREATE UNIQUE" + new_rel
        params = {'them': obj._id}
        if not properties and not self.definition['model']:
            self.origin.cypher(q, params)
            return True
        rel_model = self.definition['model']
        # Build a throwaway model instance so property defaults are generated.
        tmp = rel_model(**properties) if properties else rel_model()
        for p, v in rel_model.deflate(tmp.__properties__).items():
            params['place_holder_' + p] = v
            q += " SET r." + p + " = {place_holder_" + p + "}"
        # Single round trip: create the relationship and return it.  (The
        # original re-executed the CREATE UNIQUE query a second time after
        # inflating, which was redundant work.)
        rel_ = self.origin.cypher(q + " RETURN r", params)[0][0][0]
        rel_instance = rel_model.inflate(rel_)
        if self.definition['direction'] == INCOMING:
            rel_instance._start_node_class = obj.__class__
            rel_instance._end_node_class = self.origin.__class__
        else:
            rel_instance._start_node_class = self.origin.__class__
            rel_instance._end_node_class = obj.__class__
        return rel_instance

    @check_origin
    def relationship(self, obj):
        """Return the inflated relationship between origin and obj, or None."""
        self._check_node(obj)
        # The original check ("'model' in self.definition") was always true
        # because RelationshipDefinition always sets the key (possibly to
        # None); check the value instead.  NotImplemented is not an
        # exception class — raising it was a TypeError at runtime.
        if self.definition.get('model') is None:
            raise NotImplementedError("'relationship' method only available on relationships"
                                      + " that have a model defined")
        rel_model = self.definition['model']
        new_rel = rel_helper(lhs='us', rhs='them', ident='r', **self.definition)
        q = "START them=node({them}), us=node({self}) MATCH " + new_rel + " RETURN r"
        rel = self.origin.cypher(q, {'them': obj._id})[0][0][0]
        if not rel:
            return
        rel_instance = rel_model.inflate(rel)
        if self.definition['direction'] == INCOMING:
            rel_instance._start_node_class = obj.__class__
            rel_instance._end_node_class = self.origin.__class__
        else:
            rel_instance._start_node_class = self.origin.__class__
            rel_instance._end_node_class = obj.__class__
        return rel_instance

    @check_origin
    def reconnect(self, old_obj, new_obj):
        """Move this relationship from old_obj to new_obj, copying its
        properties.  Raises NotConnected when origin is not connected to
        old_obj.  No-op when old_obj and new_obj are the same node.
        """
        self._check_node(old_obj)
        self._check_node(new_obj)
        if old_obj._id == new_obj._id:
            return
        old_rel = rel_helper(lhs='us', rhs='old', ident='r', **self.definition)
        # Collect the property names present on the existing relationship.
        result, meta = self.origin.cypher("START us=node({self}), old=node({old}) MATCH " + old_rel + " RETURN r",
                                          {'old': old_obj._id})
        if result:
            existing_properties = result[0][0]._properties.keys()
        else:
            raise NotConnected('reconnect', self.origin, old_obj)
        # Create the replacement, copy properties across, then delete the old rel.
        new_rel = rel_helper(lhs='us', rhs='new', ident='r2', **self.definition)
        q = "START us=node({self}), old=node({old}), new=node({new}) MATCH " + old_rel
        q += " CREATE UNIQUE" + new_rel
        for p in existing_properties:
            q += " SET r2.{} = r.{}".format(p, p)
        q += " WITH r DELETE r"
        self.origin.cypher(q, {'old': old_obj._id, 'new': new_obj._id})

    @check_origin
    def disconnect(self, obj):
        """Delete the relationship between origin and obj."""
        rel = rel_helper(lhs='a', rhs='b', ident='r', **self.definition)
        q = "START a=node({self}), b=node({them}) MATCH " + rel + " DELETE r"
        self.origin.cypher(q, {'them': obj._id})

    @check_origin
    def single(self):
        """Return one related node, or None when there are none."""
        nodes = self.origin.traverse(self.name).limit(1).run()
        return nodes[0] if nodes else None
class RelationshipDefinition(object):
    """Deferred description of a relationship, created at class-definition time.

    Related node classes may be given by (possibly dotted) name and are
    only resolved lazily in build_manager(), so forward references work.
    """
    def __init__(self, relation_type, cls_name, direction, manager=RelationshipManager, model=None):
        # NOTE(review): frame depth 4 assumes the exact call chain
        # <defining module> -> <class body> -> Relationship*() -> _relate()
        # -> __init__; inserting any wrapper call breaks module resolution.
        self.module_name = sys._getframe(4).f_globals['__name__']
        self.module_file = sys._getframe(4).f_globals['__file__']
        self.node_class = cls_name
        self.manager = manager
        self.definition = {}
        self.definition['relation_type'] = relation_type
        self.definition['direction'] = direction
        self.definition['model'] = model
    def _lookup(self, name):
        """Resolve a (possibly dotted) class name to the class object,
        importing the containing module if needed."""
        if name.find('.') == -1:
            module = self.module_name
        else:
            module, _, name = name.rpartition('.')
        if not module in sys.modules:
            # yet another hack to get around python semantics
            # __name__ is the namespace of the parent module for __init__.py files,
            # and the namespace of the current module for other .py files,
            # therefore there's a need to define the namespace differently for
            # these two cases in order for . in relative imports to work correctly
            # (i.e. to mean the same thing for both cases).
            # For example in the comments below, namespace == myapp, always
            if '__init__.py' in self.module_file:
                # e.g. myapp/__init__.py -[__name__]-> myapp
                namespace = self.module_name
            else:
                # e.g. myapp/models.py -[__name__]-> myapp.models
                namespace = self.module_name.rpartition('.')[0]
            # load a module from a namespace (e.g. models from myapp)
            if module:
                module = import_module(module, namespace).__name__
            # load the namespace itself (e.g. myapp)
            # (otherwise it would look like import . from myapp)
            else:
                module = import_module(namespace).__name__
        return getattr(sys.modules[module], name)
    def build_manager(self, origin, name):
        """Instantiate the manager class bound to 'origin' under attribute
        'name', resolving node classes and building the label map."""
        # get classes for related nodes
        if isinstance(self.node_class, list):
            node_classes = [self._lookup(cls) if isinstance(cls, (str,)) else cls
                            for cls in self.node_class]
        else:
            node_classes = [self._lookup(self.node_class)
                            if isinstance(self.node_class, (str,)) else self.node_class]
        # build label map
        self.definition['label_map'] = dict(zip([c.__label__
                                                 for c in node_classes], node_classes))
        rel = self.manager(self.definition, origin)
        rel.name = name
        return rel
class ZeroOrMore(RelationshipManager):
    """Default cardinality: any number of related nodes is acceptable."""
    description = "zero or more relationships"
def _relate(cls_name, direction, rel_type, cardinality=None, model=None):
if not isinstance(cls_name, (str, list, object)):
raise ValueError('Expected class name or list of class names, got ' + repr(cls_name))
from .relationship import StructuredRel
if model and not issubclass(model, (StructuredRel,)):
raise ValueError('model must be a StructuredRel')
return RelationshipDefinition(rel_type, cls_name, direction, cardinality, model)
def RelationshipTo(cls_name, rel_type, cardinality=ZeroOrMore, model=None):
    """Declare an outgoing relationship of type *rel_type* to *cls_name* nodes."""
    return _relate(cls_name, OUTGOING, rel_type, cardinality, model)
def RelationshipFrom(cls_name, rel_type, cardinality=ZeroOrMore, model=None):
    """Declare an incoming relationship of type *rel_type* from *cls_name* nodes."""
    return _relate(cls_name, INCOMING, rel_type, cardinality, model)
def Relationship(cls_name, rel_type, cardinality=ZeroOrMore, model=None):
    """Declare an undirected relationship of type *rel_type* with *cls_name* nodes."""
    return _relate(cls_name, EITHER, rel_type, cardinality, model)
Add deprecation...
import sys
import functools
from importlib import import_module
from .exception import DoesNotExist, NotConnected
from .util import deprecated
# Relationship direction markers used when building cypher fragments.
OUTGOING = 1   # (us)-[r]->(them)
INCOMING = -1  # (us)<-[r]-(them)
EITHER = 0     # undirected match
# check origin node is saved and not deleted
def check_origin(fn):
    """Decorator: assert the manager's origin node is saved and not deleted
    (via origin._pre_action_check) before running *fn*."""
    # Python 2 functions expose func_name; Python 3 only __name__.
    wrapped_name = getattr(fn, 'func_name', fn.__name__)

    @functools.wraps(fn)
    def guard(self, *args, **kwargs):
        self.origin._pre_action_check(self.name + '.' + wrapped_name)
        return fn(self, *args, **kwargs)
    return guard
def rel_helper(**rel):
    """Build a cypher pattern fragment ' (lhs)-[ident:TYPE]->(rhs)',
    with the arrow orientation chosen by rel['direction']."""
    direction = rel['direction']
    if direction == OUTGOING:
        arrow = '-[{0}:{1}]->'
    elif direction == INCOMING:
        arrow = '<-[{0}:{1}]-'
    else:
        arrow = '-[{0}:{1}]-'
    rel_frag = arrow.format(rel.get('ident', ''), rel['relation_type'])
    return " ({0}){1}({2})".format(rel['lhs'], rel_frag, rel['rhs'])
class RelationshipManager(object):
    """Per-node accessor for one relationship definition.

    Instances are built by RelationshipDefinition.build_manager(); the
    'definition' dict carries direction, relation_type, label_map and an
    optional relationship model.  'origin' is the node traversals start
    from.  Methods guarded with @check_origin refuse to operate on an
    unsaved or deleted origin node.
    """

    def __init__(self, definition, origin):
        self.direction = definition['direction']
        self.relation_type = definition['relation_type']
        self.label_map = definition['label_map']
        self.definition = definition
        self.origin = origin

    def __str__(self):
        # self.description is supplied by cardinality subclasses (ZeroOrMore).
        direction = 'either'
        if self.direction == OUTGOING:
            direction = 'a outgoing'
        elif self.direction == INCOMING:
            direction = 'a incoming'
        return "{0} in {1} direction of type {2} on node ({3}) of class '{4}'".format(
            self.description, direction,
            self.relation_type, self.origin._id, self.origin.__class__.__name__)

    @check_origin
    def __bool__(self):
        return len(self) > 0

    @check_origin
    def __nonzero__(self):
        # Python 2 spelling of __bool__.
        return len(self) > 0

    @check_origin
    def __len__(self):
        return len(self.origin.traverse(self.name))

    @property
    def client(self):
        """Database client of the origin node."""
        return self.origin.client

    @check_origin
    def count(self):
        """Number of related nodes; same as len(self)."""
        return self.__len__()

    @check_origin
    def all(self):
        """Return every related node."""
        return self.origin.traverse(self.name).run()

    @check_origin
    def get(self, **kwargs):
        """Return the single related node whose properties equal **kwargs.

        Raises DoesNotExist when nothing matches and a bare Exception when
        the match is ambiguous.
        """
        result = self.search(**kwargs)
        if len(result) == 1:
            return result[0]
        if len(result) > 1:
            raise Exception("Multiple items returned, use search?")
        if not result:
            raise DoesNotExist("No items exist for the specified arguments")

    @check_origin
    @deprecated("search() is now deprecated please use filter() and exclude()")
    def search(self, **kwargs):
        """Return all related nodes whose properties equal **kwargs."""
        t = self.origin.traverse(self.name)
        for field, value in kwargs.items():
            t.where(field, '=', value)
        return t.run()

    @check_origin
    def is_connected(self, obj):
        """True when a relationship of this type exists between origin and obj."""
        self._check_node(obj)
        rel = rel_helper(lhs='a', rhs='b', ident='r', **self.definition)
        # rel_helper emits a leading space but no trailing one; the original
        # concatenation glued ')' straight onto RETURN, producing malformed
        # cypher.  A space is added before RETURN.
        q = "START a=node({self}), b=node({them}) MATCH" + rel + " RETURN count(r)"
        return bool(self.origin.cypher(q, {'them': obj._id})[0][0][0])

    def _check_node(self, obj):
        """Check obj is an instance of an allowed class and has been saved."""
        for label, cls in self.label_map.items():
            if obj.__class__ is cls:
                if not hasattr(obj, '_id'):
                    raise ValueError("Can't perform operation on unsaved node " + repr(obj))
                return
        allowed_cls = ", ".join([(tcls if isinstance(tcls, str) else tcls.__name__)
                                 for tcls, _ in self.label_map.items()])
        raise ValueError("Expected nodes of class "
                         + allowed_cls + " got " + repr(obj)
                         + " see relationship definition in " + self.origin.__class__.__name__)

    @check_origin
    def connect(self, obj, properties=None):
        """Create a relationship from origin to obj.

        Returns True when the definition has no relationship model;
        otherwise returns an inflated model instance.
        """
        self._check_node(obj)
        if not self.definition['model'] and properties:
            raise NotImplementedError("Relationship properties without " +
                                      "using a relationship model is no longer supported")
        new_rel = rel_helper(lhs='us', rhs='them', ident='r', **self.definition)
        q = "START them=node({them}), us=node({self}) CREATE UNIQUE" + new_rel
        params = {'them': obj._id}
        if not properties and not self.definition['model']:
            self.origin.cypher(q, params)
            return True
        rel_model = self.definition['model']
        # Build a throwaway model instance so property defaults are generated.
        tmp = rel_model(**properties) if properties else rel_model()
        for p, v in rel_model.deflate(tmp.__properties__).items():
            params['place_holder_' + p] = v
            q += " SET r." + p + " = {place_holder_" + p + "}"
        # Single round trip: create the relationship and return it.  (The
        # original re-executed the CREATE UNIQUE query a second time after
        # inflating, which was redundant work.)
        rel_ = self.origin.cypher(q + " RETURN r", params)[0][0][0]
        rel_instance = rel_model.inflate(rel_)
        if self.definition['direction'] == INCOMING:
            rel_instance._start_node_class = obj.__class__
            rel_instance._end_node_class = self.origin.__class__
        else:
            rel_instance._start_node_class = self.origin.__class__
            rel_instance._end_node_class = obj.__class__
        return rel_instance

    @check_origin
    def relationship(self, obj):
        """Return the inflated relationship between origin and obj, or None."""
        self._check_node(obj)
        # The original check ("'model' in self.definition") was always true
        # because RelationshipDefinition always sets the key (possibly to
        # None); check the value instead.  NotImplemented is not an
        # exception class — raising it was a TypeError at runtime.
        if self.definition.get('model') is None:
            raise NotImplementedError("'relationship' method only available on relationships"
                                      + " that have a model defined")
        rel_model = self.definition['model']
        new_rel = rel_helper(lhs='us', rhs='them', ident='r', **self.definition)
        q = "START them=node({them}), us=node({self}) MATCH " + new_rel + " RETURN r"
        rel = self.origin.cypher(q, {'them': obj._id})[0][0][0]
        if not rel:
            return
        rel_instance = rel_model.inflate(rel)
        if self.definition['direction'] == INCOMING:
            rel_instance._start_node_class = obj.__class__
            rel_instance._end_node_class = self.origin.__class__
        else:
            rel_instance._start_node_class = self.origin.__class__
            rel_instance._end_node_class = obj.__class__
        return rel_instance

    @check_origin
    def reconnect(self, old_obj, new_obj):
        """Move this relationship from old_obj to new_obj, copying its
        properties.  Raises NotConnected when origin is not connected to
        old_obj.  No-op when old_obj and new_obj are the same node.
        """
        self._check_node(old_obj)
        self._check_node(new_obj)
        if old_obj._id == new_obj._id:
            return
        old_rel = rel_helper(lhs='us', rhs='old', ident='r', **self.definition)
        # Collect the property names present on the existing relationship.
        result, meta = self.origin.cypher("START us=node({self}), old=node({old}) MATCH " + old_rel + " RETURN r",
                                          {'old': old_obj._id})
        if result:
            existing_properties = result[0][0]._properties.keys()
        else:
            raise NotConnected('reconnect', self.origin, old_obj)
        # Create the replacement, copy properties across, then delete the old rel.
        new_rel = rel_helper(lhs='us', rhs='new', ident='r2', **self.definition)
        q = "START us=node({self}), old=node({old}), new=node({new}) MATCH " + old_rel
        q += " CREATE UNIQUE" + new_rel
        for p in existing_properties:
            q += " SET r2.{} = r.{}".format(p, p)
        q += " WITH r DELETE r"
        self.origin.cypher(q, {'old': old_obj._id, 'new': new_obj._id})

    @check_origin
    def disconnect(self, obj):
        """Delete the relationship between origin and obj."""
        rel = rel_helper(lhs='a', rhs='b', ident='r', **self.definition)
        q = "START a=node({self}), b=node({them}) MATCH " + rel + " DELETE r"
        self.origin.cypher(q, {'them': obj._id})

    @check_origin
    def single(self):
        """Return one related node, or None when there are none."""
        nodes = self.origin.traverse(self.name).limit(1).run()
        return nodes[0] if nodes else None
class RelationshipDefinition(object):
    """Deferred description of a relationship, created at class-definition time.

    Related node classes may be given by (possibly dotted) name and are
    only resolved lazily in build_manager(), so forward references work.
    """
    def __init__(self, relation_type, cls_name, direction, manager=RelationshipManager, model=None):
        # NOTE(review): frame depth 4 assumes the exact call chain
        # <defining module> -> <class body> -> Relationship*() -> _relate()
        # -> __init__; inserting any wrapper call breaks module resolution.
        self.module_name = sys._getframe(4).f_globals['__name__']
        self.module_file = sys._getframe(4).f_globals['__file__']
        self.node_class = cls_name
        self.manager = manager
        self.definition = {}
        self.definition['relation_type'] = relation_type
        self.definition['direction'] = direction
        self.definition['model'] = model
    def _lookup(self, name):
        """Resolve a (possibly dotted) class name to the class object,
        importing the containing module if needed."""
        if name.find('.') == -1:
            module = self.module_name
        else:
            module, _, name = name.rpartition('.')
        if not module in sys.modules:
            # yet another hack to get around python semantics
            # __name__ is the namespace of the parent module for __init__.py files,
            # and the namespace of the current module for other .py files,
            # therefore there's a need to define the namespace differently for
            # these two cases in order for . in relative imports to work correctly
            # (i.e. to mean the same thing for both cases).
            # For example in the comments below, namespace == myapp, always
            if '__init__.py' in self.module_file:
                # e.g. myapp/__init__.py -[__name__]-> myapp
                namespace = self.module_name
            else:
                # e.g. myapp/models.py -[__name__]-> myapp.models
                namespace = self.module_name.rpartition('.')[0]
            # load a module from a namespace (e.g. models from myapp)
            if module:
                module = import_module(module, namespace).__name__
            # load the namespace itself (e.g. myapp)
            # (otherwise it would look like import . from myapp)
            else:
                module = import_module(namespace).__name__
        return getattr(sys.modules[module], name)
    def build_manager(self, origin, name):
        """Instantiate the manager class bound to 'origin' under attribute
        'name', resolving node classes and building the label map."""
        # get classes for related nodes
        if isinstance(self.node_class, list):
            node_classes = [self._lookup(cls) if isinstance(cls, (str,)) else cls
                            for cls in self.node_class]
        else:
            node_classes = [self._lookup(self.node_class)
                            if isinstance(self.node_class, (str,)) else self.node_class]
        # build label map
        self.definition['label_map'] = dict(zip([c.__label__
                                                 for c in node_classes], node_classes))
        rel = self.manager(self.definition, origin)
        rel.name = name
        return rel
class ZeroOrMore(RelationshipManager):
    """Default cardinality: any number of related nodes is acceptable."""
    description = "zero or more relationships"
def _relate(cls_name, direction, rel_type, cardinality=None, model=None):
if not isinstance(cls_name, (str, list, object)):
raise ValueError('Expected class name or list of class names, got ' + repr(cls_name))
from .relationship import StructuredRel
if model and not issubclass(model, (StructuredRel,)):
raise ValueError('model must be a StructuredRel')
return RelationshipDefinition(rel_type, cls_name, direction, cardinality, model)
def RelationshipTo(cls_name, rel_type, cardinality=ZeroOrMore, model=None):
    """Declare an outgoing relationship of type *rel_type* to *cls_name* nodes."""
    return _relate(cls_name, OUTGOING, rel_type, cardinality, model)
def RelationshipFrom(cls_name, rel_type, cardinality=ZeroOrMore, model=None):
    """Declare an incoming relationship of type *rel_type* from *cls_name* nodes."""
    return _relate(cls_name, INCOMING, rel_type, cardinality, model)
def Relationship(cls_name, rel_type, cardinality=ZeroOrMore, model=None):
    """Declare an undirected relationship of type *rel_type* with *cls_name* nodes."""
    return _relate(cls_name, EITHER, rel_type, cardinality, model)
|
# coding=utf-8
"""This module, entity_database.py, contains a database api layer for entity objects."""
from quasar_source_code.database_api import postgresql_api as db_api
from quasar_source_code.database_api import database_tables as db_t
from quasar_source_code.entities import base_entity as be
from quasar_source_code.universal_code import debugging as dbg
from quasar_source_code.entities.entity_manager import EntityManager
# Python PostgreSQL database library.
import psycopg2
# Python objects to binary data.
import dill
''' __ ___ __ __ ___ __
| \ /\ | /\ |__) /\ /__` |__ /\ |__) | .
|__/ /~~\ | /~~\ |__) /~~\ .__/ |___ /~~\ | | .
'''
# Utility indexes: column positions of a row in the 'owners' table, in the
# order the fields are registered in EntityDatabaseAPI.__init__.
INDEX_OWNER_NAME = 0
INDEX_OWNER_PASSWORD = 1
INDEX_OWNER_EMAIL = 2
INDEX_OWNER_ID = 3
INDEX_OWNER_MANAGER_ID = 4
class EntityDatabaseAPI(object):
    """An API for Entity database operations.

    Wraps a PostgreSQLAPI connection and two tables: 'owners' (account
    rows, column order mirrored by the INDEX_OWNER_* constants) and
    'entity_managers' (manager_id -> dill-pickled EntityManager blob).
    """
    def __init__(self, debug=False):
        self._debug = debug
        self._api = db_api.PostgreSQLAPI()
        self._connected = False
        # Owners table.  Field order must stay in sync with INDEX_OWNER_*.
        # NOTE(review): passwords are stored as plain 100-char strings —
        # confirm hashing happens before create_owner() is called.
        self._owners = db_t.DatabaseTable('owners', self._api)
        self._owners.add_table_field(db_t.TableFieldString('name', 100))
        self._owners.add_table_field(db_t.TableFieldString('password', 100))
        self._owners.add_table_field(db_t.TableFieldString('email', 100))
        self._owners.add_table_field(db_t.TableFieldInteger('owner_id', maximum_value=1000000, auto_increment=True))
        self._owners.add_table_field(db_t.TableFieldInteger('manager_id', maximum_value=1000000, auto_increment=True))
        # Table containing entity_managers which contain entities.
        self._entity_managers = db_t.DatabaseTable('entity_managers', self._api)
        self._entity_managers.add_table_field(db_t.TableFieldInteger('manager_id', maximum_value=1000000, auto_increment=False))
        self._entity_managers.add_table_field(db_t.TableFieldBinary('manager'))
        # TODO : Eventually move the location of the health checks call.
        self.health_checks()
    def health_checks(self):
        """Runs database health checks and applies automatic fixes."""
        # Connects to database if not yet connected.
        self._check_if_connected()
        # Check that all our needed tables exist.
        self._owners.create_if_does_not_exist()
        self._entity_managers.create_if_does_not_exist()
    def save_entity_manager(self, entity_manager):
        """Saves an entity manager object into the database (upsert by manager_id)."""
        # TODO : Make this method more elegant.
        # NOTE(review): the pickle is round-tripped through a scratch file
        # ('entity_manager_<id>.db') that is never removed; dill.dumps()
        # would avoid the file entirely.
        file_name = 'entity_manager_' + str(entity_manager.manager_id) + '.db'
        with open(file_name, 'wb') as f:
            dill.dump(entity_manager, f)
        file = open(file_name, 'rb')
        cursor = self._api.get_cursor()
        file_data = file.read()
        file.close()
        # Delete-then-insert acts as an upsert keyed on manager_id.
        self._entity_managers.delete_row_with_value('manager_id', entity_manager.manager_id)
        cursor.execute('INSERT INTO entity_managers(manager_id, manager) VALUES (%s, %s);', (entity_manager.manager_id, psycopg2.Binary(file_data)))
        self._api.commit()
    def create_owner(self, name: str, password: str, email: str):
        """Places an owner into the owners table and creates its EntityManager."""
        # TODO: Remove the place holder values for manager_id
        self._owners.insert_row({'name': name, 'password': password, 'email': email})
        # Get the owners info (re-read to obtain the auto-incremented ids).
        owner = self.get_owner(name)
        # Create the manager here.
        manager = EntityManager(manager_id=owner[INDEX_OWNER_MANAGER_ID], owner_id=owner[INDEX_OWNER_ID])
        self.save_entity_manager(manager)
    def get_all_owners(self):
        """Returns a list of all the owners."""
        owners = self._owners.get_row_values()
        return owners
    def get_owner(self, owner_name):
        """Returns the row for an owner found by name, or None if absent."""
        # TODO : Eventually just make this into a database query...
        owners = self._owners.get_row_values()
        for o in owners:
            if o[0] == owner_name:
                return o
        return None
    def get_entity_manager(self, manager_id=-1):
        """Returns the Entity Manager from the database by id, returns None if not found."""
        # TODO : Make this a single query...
        results = self._entity_managers.get_row_values()
        for em in results:
            if em[0] == manager_id:
                # em[1] is a binary buffer holding the dill pickle.
                return dill.loads(em[1].tobytes())
        #self._api.execute_query('SET CLIENT_ENCODING TO LATIN1', save=True)
        #result = self._entity_managers.get_single_value('manager', 'manager_id', manager_id)
        #if result is not None:
        #    return dill.loads(result.tobytes())
        return None
    def _check_if_connected(self):
        """Connects if the database is not connected."""
        if not self._connected:
            if self._debug:
                dbg.print_dashed_line()
                print('Connecting to the database...', end='')
            self._api.connect()
            self._connected = True
            if self._debug:
                print('connected!')
                dbg.print_dashed_line()
    def terminate(self):
        """Terminates the database connection if there is one."""
        if self._connected:
            self._api.terminate()
    # This function to be manually ran only.
    def _full_reset(self):
        """Fully resets the database data for entities (drops and recreates tables)."""
        self._owners.delete_if_exists()
        self._owners.create_if_does_not_exist()
        self._entity_managers.delete_if_exists()
        self._entity_managers.create_if_does_not_exist()
#e = EntityDatabaseAPI(debug=True)
#print(e._api.execute_query_and_get_all_results('SELECT * FROM entity_managers'))
#print(e._api.get_all_table_names())
#print(e.get_all_owners())
#e._full_reset()
#print(e._owners.get_row_values())
#print(e._entity_managers.get_row_values())
general updating 12
# coding=utf-8
"""This module, entity_database.py, contains a database api layer for entity objects."""
from quasar_source_code.database_api import postgresql_api as db_api
from quasar_source_code.database_api import database_tables as db_t
from quasar_source_code.entities import base_entity as be
from quasar_source_code.universal_code import debugging as dbg
from quasar_source_code.entities.entity_manager import EntityManager
# Python PostgreSQL database library.
import psycopg2
# Python objects to binary data.
import dill
''' __ ___ __ __ ___ __
| \ /\ | /\ |__) /\ /__` |__ /\ |__) | .
|__/ /~~\ | /~~\ |__) /~~\ .__/ |___ /~~\ | | .
'''
# Utility indexes: column positions of a row in the 'owners' table, in the
# order the fields are registered in EntityDatabaseAPI.__init__.
INDEX_OWNER_NAME = 0
INDEX_OWNER_PASSWORD = 1
INDEX_OWNER_EMAIL = 2
INDEX_OWNER_ID = 3
INDEX_OWNER_MANAGER_ID = 4
class EntityDatabaseAPI(object):
"""An API for Entity database operations."""
    def __init__(self, debug=False):
        """Declare the owners/entity_managers schema, connect, run health
        checks and warm the in-memory owner cache."""
        self._debug = debug
        self._api = db_api.PostgreSQLAPI()
        self._connected = False
        # Owners table.  Field order must stay in sync with INDEX_OWNER_*.
        # NOTE(review): passwords are stored as plain 100-char strings —
        # confirm hashing happens before create_owner() is called.
        self._owners = db_t.DatabaseTable('owners', self._api)
        self._owners.add_table_field(db_t.TableFieldString('name', 100))
        self._owners.add_table_field(db_t.TableFieldString('password', 100))
        self._owners.add_table_field(db_t.TableFieldString('email', 100))
        self._owners.add_table_field(db_t.TableFieldInteger('owner_id', maximum_value=1000000, auto_increment=True))
        self._owners.add_table_field(db_t.TableFieldInteger('manager_id', maximum_value=1000000, auto_increment=True))
        # Table containing entity_managers which contain entities.
        self._entity_managers = db_t.DatabaseTable('entity_managers', self._api)
        self._entity_managers.add_table_field(db_t.TableFieldInteger('manager_id', maximum_value=1000000, auto_increment=False))
        self._entity_managers.add_table_field(db_t.TableFieldBinary('manager'))
        # TODO : Eventually move the location of the health checks call.
        self.health_checks()
        # Owner cache, refreshed by _update_owner_cache() after writes.
        self.cache_owners = self._owners.get_row_values()
    def health_checks(self):
        """Runs database health checks and applies automatic fixes."""
        # Connects to database if not yet connected.
        self._check_if_connected()
        # Check that all our needed tables exist.
        self._owners.create_if_does_not_exist()
        self._entity_managers.create_if_does_not_exist()
def save_entity_manager(self, entity_manager):
"""Saves an entity manager object into the database."""
# TODO : Make this method more elegant.
file_name = 'entity_manager_' + str(entity_manager.manager_id) + '.db'
with open(file_name, 'wb') as f:
dill.dump(entity_manager, f)
file = open(file_name, 'rb')
cursor = self._api.get_cursor()
file_data = file.read()
file.close()
self._entity_managers.delete_row_with_value('manager_id', entity_manager.manager_id)
cursor.execute('INSERT INTO entity_managers(manager_id, manager) VALUES (%s, %s);', (entity_manager.manager_id, psycopg2.Binary(file_data)))
self._api.commit()
# TODO : create delete_owner
def create_owner(self, name: str, password: str, email: str):
"""Places an owner into the owners table."""
self._owners.insert_row({'name': name, 'password': password, 'email': email})
# Get the owners info.
owner = self.get_owner(name)
# Create the manager here.
manager = EntityManager(manager_id=owner[INDEX_OWNER_MANAGER_ID], owner_id=owner[INDEX_OWNER_ID])
self.save_entity_manager(manager)
self._update_owner_cache()
def _update_owner_cache(self):
"""Updates the internal list of owners."""
self.cache_owners = self._owners.get_row_values()
def get_all_owners(self):
"""Returns a list of all the owners."""
return self.cache_owners
def get_owner(self, owner_name):
"""Returns the data for an owner, found by owner name."""
# TODO : Eventually just make this into a database query...
owners = self._owners.get_row_values()
for o in owners:
if o[0] == owner_name:
return o
return None
def get_entity_manager(self, manager_id=-1):
"""Returns the Entity Manager from the database by id, returns None if not found."""
# TODO : Make this a single query...
results = self._entity_managers.get_row_values()
for em in results:
if em[0] == manager_id:
return dill.loads(em[1].tobytes())
#self._api.execute_query('SET CLIENT_ENCODING TO LATIN1', save=True)
#result = self._entity_managers.get_single_value('manager', 'manager_id', manager_id)
#if result is not None:
# return dill.loads(result.tobytes())
return None
def _check_if_connected(self):
"""Connects if the database is not connected."""
if not self._connected:
if self._debug:
dbg.print_dashed_line()
print('Connecting to the database...', end='')
self._api.connect()
self._connected = True
if self._debug:
print('connected!')
dbg.print_dashed_line()
def terminate(self):
"""Terminates the database connection if there is one."""
if self._connected:
self._api.terminate()
# This function to be manually ran only.
def _full_reset(self):
"""Fully resets the database data for entities."""
self._owners.delete_if_exists()
self._owners.create_if_does_not_exist()
self._entity_managers.delete_if_exists()
self._entity_managers.create_if_does_not_exist()
#e = EntityDatabaseAPI(debug=True)
#print(e._api.execute_query_and_get_all_results('SELECT * FROM entity_managers'))
#print(e._api.get_all_table_names())
#print(e.get_all_owners())
#e._full_reset()
#print(e._owners.get_row_values())
#print(e._entity_managers.get_row_values())
|
from whoosh.fields import *
from whoosh.index import create_in, open_dir
from whoosh.qparser import MultifieldParser
from whoosh.query import *
import abc
import copy
import csv
import json
import os.path
import sys
# Abstract Search Engine class
# TODO: abstract out more functionality here
class SearchEngine(object):
    """Abstract base class for Whoosh-backed search engines.

    Subclasses define how their index is built (create_index); this class
    handles loading/creating the index and running plain-English queries
    against the configured fields.
    """
    # make it an abstract class
    __metaclass__ = abc.ABCMeta

    # TODO consider making more hierarchy. This is the WhooshSearchEngine,
    # which has the cool indexing capabilities. But more generally, you
    # could have a search engine that only has to support search().

    def __init__(self, create, search_fields, index_path):
        """
        Creates a new search engine.

        :param create {bool}: If True, recreates an index from scratch.
            If False, loads the existing index
        :param search_fields {str[]}: An array of names of fields in the index
            that our search engine will search against.
        :param index_path {str}: A relative path to a folder where the whoosh
            index should be stored.
        """
        # TODO have an auto-detect feature that will determine if the
        # index exists, and depending on that creates or loads the index
        self.index_path = index_path
        # both branches yield a whoosh index object
        if create:
            self.index = self.create_index()
        else:
            self.index = self.load_index()
        # parser that turns plain-text queries into whoosh query objects
        self.parser = MultifieldParser(search_fields, self.index.schema)

    def load_index(self):
        """Load and return the previously created index at self.index_path."""
        return open_dir(self.index_path)

    def create_index(self):
        """Build and return the index. Subclasses must implement!"""
        raise NotImplementedError("Subclasses must implement!")

    def get_empty_index(self, path, schema):
        """
        Creates an empty index at *path*, making the directory (including any
        missing parents) if necessary. Returns the index.
        """
        if not os.path.exists(path):
            # fix: os.mkdir failed with OSError when a parent directory was
            # missing (e.g. 'models/whoosh_indices/...'); makedirs creates
            # the whole chain.
            os.makedirs(path)
        return create_in(path, schema)

    def search(self, query_string):
        """
        Runs a plain-English search and returns results.

        :param query_string {String}: a query like you'd type into Google.
        :return: a list of dicts, each of which encodes a search result.
        """
        with self.index.searcher() as searcher:
            query_obj = self.parser.parse(query_string)
            results = searcher.search(query_obj)
            # Hits are invalidated once the searcher closes, so convert to
            # plain dicts before leaving the with-block.
            return [hit.fields() for hit in results]
class UdacitySearchEngine(SearchEngine):
    """Search engine over the Udacity course-catalog dataset."""

    DATASET_PATH = 'datasets/udacity-api.json'
    INDEX_PATH = 'models/whoosh_indices/udacity'
    SEARCH_FIELDS = ["title", "subtitle", "expected_learning", "syllabus", "summary", "short_summary"]

    def __init__(self, create=False):
        """
        Creates a new Udacity search engine.

        :param create {bool}: If True, recreates an index from scratch.
            If False, loads the existing index
        """
        super(UdacitySearchEngine, self).__init__(
            create, self.SEARCH_FIELDS, self.INDEX_PATH)

    def create_index(self):
        """
        Creates a new index to search the Udacity dataset. You only need to
        call this once; once the index is created, you can just load it again
        instead of creating it afresh all the time.

        Returns the index object.
        """
        # load course data
        with open(self.DATASET_PATH, 'r') as dataset_file:
            udacity_data = json.load(dataset_file)
        # schema: only slug/title are stored (needed for displaying results)
        # TODO: use StemmingAnalyzer here so we get the built-in benefits
        # of stemming in our search engine
        # http://whoosh.readthedocs.io/en/latest/stemming.html
        schema = Schema(
            slug=ID(stored=True),
            title=TEXT(stored=True),
            subtitle=TEXT,
            expected_learning=TEXT,
            syllabus=TEXT,
            summary=TEXT,
            short_summary=TEXT
        )
        # make an index to store this stuff in
        index = self.get_empty_index(self.INDEX_PATH, schema)
        # add the courses to the index
        writer = index.writer()
        try:
            for course in udacity_data['courses']:
                writer.add_document(
                    slug=course['slug'],
                    title=course['title'],
                    subtitle=course['subtitle'],
                    expected_learning=course['expected_learning'],
                    syllabus=course['syllabus'],
                    summary=course['summary'],
                    short_summary=course['short_summary'])
            writer.commit()
        except Exception as e:
            print(e)
            # fix: release the writer lock on failure, matching the other
            # engines; previously a failed build left the index locked.
            writer.cancel()
        return index
class HarvardXSearchEngine(SearchEngine):
    """Search engine over the HarvardX/DART course-material corpus."""

    INDEX_PATH = 'models/whoosh_indices/harvardx'
    SEARCH_FIELDS = ["display_name", "contents"]

    def __init__(self, create=False):
        """
        Creates a new HarvardX search engine. Searches over the HarvardX/DART
        database of all courses and course materials used in HarvardX. This
        includes videos, quizzes, etc.

        TODO: consider renaming to DART, probz

        :param create {bool}: If True, recreates an index from scratch.
            If False, loads the existing index
        """
        super(HarvardXSearchEngine, self).__init__(
            create, self.SEARCH_FIELDS, self.INDEX_PATH)

    def create_index(self):
        """
        Creates a new index to search the dataset. You only need to
        call this once; once the index is created, you can just load it again
        instead of creating it afresh all the time.

        Returns the index object.
        """
        # real data
        csvfile_path = 'datasets/corpus_HarvardX_LatestCourses_based_on_2016-10-18.csv'
        # test data
        # csvfile_path = 'datasets/test.csv'
        # only consider resources with these categories (types of content).
        # unsure about courses (b/c they have no content) and html (b/c they
        # often include messy CSS/JS in there)
        # TODO: add "html" support. requires stripping comments
        # http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python
        supported_categories = ('problem', 'video', 'course')
        # set up whoosh schema
        schema = Schema(
            course_id=ID(stored=True),
            display_name=TEXT(stored=True),
            contents=TEXT
        )
        # TODO: use StemmingAnalyzer here so we get the built-in benefits
        # of stemming in our search engine
        # http://whoosh.readthedocs.io/en/latest/stemming.html
        index = self.get_empty_index(self.INDEX_PATH, schema)
        # some of the fields are HUGE so we need to let the csv reader
        # handle them
        csv.field_size_limit(sys.maxsize)
        with open(csvfile_path, 'r') as csvfile:
            reader = csv.DictReader(csvfile)
            writer = index.writer()
            try:
                for row in reader:
                    # ensure the content is actually a valid type
                    if row['category'] not in supported_categories:
                        # fix: was 'pass', which fell through and indexed
                        # unsupported resource types anyway
                        continue
                    writer.add_document(
                        course_id=row['course_id'].decode('utf8'),
                        display_name=row['display_name'].decode('utf8'),
                        contents=row['contents'].decode('utf8'))
                writer.commit()
            except Exception as e:
                print(e)
                writer.cancel()
        return index
class EdXSearchEngine(SearchEngine):
    """Search engine over the edX course listings."""

    INDEX_PATH = 'models/whoosh_indices/edx'
    SEARCH_FIELDS = ["name"]

    def __init__(self, create=False):
        """
        Creates a new search engine that searches over edX courses.

        :param create {bool}: If True, recreates an index from scratch.
            If False, loads the existing index
        """
        super(EdXSearchEngine, self).__init__(
            create, self.SEARCH_FIELDS, self.INDEX_PATH)

    def create_index(self):
        """
        Builds the edX index from the course-listing CSV and returns it.
        Call once; afterwards the saved index can simply be loaded.
        """
        listings_path = 'datasets/Master CourseListings - edX.csv'
        # TODO: use StemmingAnalyzer here so we get the built-in benefits
        # of stemming in our search engine
        # http://whoosh.readthedocs.io/en/latest/stemming.html
        index = self.get_empty_index(self.INDEX_PATH, Schema(
            course_id=ID(stored=True),
            name=TEXT(stored=True)))
        # index every listed course
        with open(listings_path, 'r') as listings_file:
            writer = index.writer()
            try:
                for record in csv.DictReader(listings_file):
                    writer.add_document(
                        course_id=record['course_id'].decode('utf8'),
                        name=record['name'].decode('utf8'))
                writer.commit()
            except Exception as e:
                print(e)
                writer.cancel()
        return index
Mainly adding the path specific items to search.py
from whoosh.fields import *
from whoosh.index import create_in, open_dir
from whoosh.qparser import MultifieldParser
from whoosh.query import *
import abc
import copy
import csv
import json
import os.path
import sys
import secure
# Abstract Search Engine class
# TODO: abstract out more functionality here
class SearchEngine(object):
    """Abstract base class for Whoosh-backed search engines.

    Subclasses define how their index is built (create_index); this class
    handles loading/creating the index and running plain-English queries
    against the configured fields.
    """
    # make it an abstract class
    __metaclass__ = abc.ABCMeta

    # TODO consider making more hierarchy. This is the WhooshSearchEngine,
    # which has the cool indexing capabilities. But more generally, you
    # could have a search engine that only has to support search().

    def __init__(self, create, search_fields, index_path):
        """
        Creates a new search engine.

        :param create {bool}: If True, recreates an index from scratch.
            If False, loads the existing index
        :param search_fields {str[]}: An array of names of fields in the index
            that our search engine will search against.
        :param index_path {str}: A relative path to a folder where the whoosh
            index should be stored.
        """
        # TODO have an auto-detect feature that will determine if the
        # index exists, and depending on that creates or loads the index
        self.index_path = index_path
        # both branches yield a whoosh index object
        if create:
            self.index = self.create_index()
        else:
            self.index = self.load_index()
        # parser that turns plain-text queries into whoosh query objects
        self.parser = MultifieldParser(search_fields, self.index.schema)

    def load_index(self):
        """Load and return the previously created index at self.index_path."""
        return open_dir(self.index_path)

    def create_index(self):
        """Build and return the index. Subclasses must implement!"""
        raise NotImplementedError("Subclasses must implement!")

    def get_empty_index(self, path, schema):
        """
        Creates an empty index at *path*, making the directory (including any
        missing parents) if necessary. Returns the index.
        """
        if not os.path.exists(path):
            # fix: os.mkdir failed with OSError when a parent directory was
            # missing; makedirs creates the whole chain.
            os.makedirs(path)
        return create_in(path, schema)

    def search(self, query_string):
        """
        Runs a plain-English search and returns results.

        :param query_string {String}: a query like you'd type into Google.
        :return: a list of dicts, each of which encodes a search result.
        """
        with self.index.searcher() as searcher:
            query_obj = self.parser.parse(query_string)
            results = searcher.search(query_obj)
            # Hits are invalidated once the searcher closes, so convert to
            # plain dicts before leaving the with-block.
            return [hit.fields() for hit in results]
class UdacitySearchEngine(SearchEngine):
    """Search engine over the Udacity course-catalog dataset."""

    DATASET_PATH = secure.DATASET_PATH_BASE+'udacity-api.json'
    INDEX_PATH = secure.INDEX_PATH_BASE+'udacity'
    SEARCH_FIELDS = ["title", "subtitle", "expected_learning", "syllabus", "summary", "short_summary"]

    def __init__(self, create=False):
        """
        Creates a new Udacity search engine.

        :param create {bool}: If True, recreates an index from scratch.
            If False, loads the existing index
        """
        super(UdacitySearchEngine, self).__init__(
            create, self.SEARCH_FIELDS, self.INDEX_PATH)

    def create_index(self):
        """
        Creates a new index to search the Udacity dataset. You only need to
        call this once; once the index is created, you can just load it again
        instead of creating it afresh all the time.

        Returns the index object.
        """
        # load course data
        with open(self.DATASET_PATH, 'r') as dataset_file:
            udacity_data = json.load(dataset_file)
        # schema: only slug/title are stored (needed for displaying results)
        # TODO: use StemmingAnalyzer here so we get the built-in benefits
        # of stemming in our search engine
        # http://whoosh.readthedocs.io/en/latest/stemming.html
        schema = Schema(
            slug=ID(stored=True),
            title=TEXT(stored=True),
            subtitle=TEXT,
            expected_learning=TEXT,
            syllabus=TEXT,
            summary=TEXT,
            short_summary=TEXT
        )
        # make an index to store this stuff in
        index = self.get_empty_index(self.INDEX_PATH, schema)
        # add the courses to the index
        writer = index.writer()
        try:
            for course in udacity_data['courses']:
                writer.add_document(
                    slug=course['slug'],
                    title=course['title'],
                    subtitle=course['subtitle'],
                    expected_learning=course['expected_learning'],
                    syllabus=course['syllabus'],
                    summary=course['summary'],
                    short_summary=course['short_summary'])
            writer.commit()
        except Exception as e:
            print(e)
            # fix: release the writer lock on failure, matching the other
            # engines; previously a failed build left the index locked.
            writer.cancel()
        return index
class HarvardXSearchEngine(SearchEngine):
    """Search engine over the HarvardX/DART course-material corpus."""

    INDEX_PATH = secure.INDEX_PATH_BASE+'harvardx'
    SEARCH_FIELDS = ["display_name", "contents"]

    def __init__(self, create=False):
        """
        Creates a new HarvardX search engine. Searches over the HarvardX/DART
        database of all courses and course materials used in HarvardX. This
        includes videos, quizzes, etc.

        TODO: consider renaming to DART, probz

        :param create {bool}: If True, recreates an index from scratch.
            If False, loads the existing index
        """
        super(HarvardXSearchEngine, self).__init__(
            create, self.SEARCH_FIELDS, self.INDEX_PATH)

    def create_index(self):
        """
        Creates a new index to search the dataset. You only need to
        call this once; once the index is created, you can just load it again
        instead of creating it afresh all the time.

        Returns the index object.
        """
        # real data
        csvfile_path = 'datasets/corpus_HarvardX_LatestCourses_based_on_2016-10-18.csv'
        # test data
        # csvfile_path = 'datasets/test.csv'
        # only consider resources with these categories (types of content).
        # unsure about courses (b/c they have no content) and html (b/c they
        # often include messy CSS/JS in there)
        # TODO: add "html" support. requires stripping comments
        # http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python
        supported_categories = ('problem', 'video', 'course')
        # set up whoosh schema
        schema = Schema(
            course_id=ID(stored=True),
            display_name=TEXT(stored=True),
            contents=TEXT
        )
        # TODO: use StemmingAnalyzer here so we get the built-in benefits
        # of stemming in our search engine
        # http://whoosh.readthedocs.io/en/latest/stemming.html
        index = self.get_empty_index(self.INDEX_PATH, schema)
        # some of the fields are HUGE so we need to let the csv reader
        # handle them
        csv.field_size_limit(sys.maxsize)
        with open(csvfile_path, 'r') as csvfile:
            reader = csv.DictReader(csvfile)
            writer = index.writer()
            try:
                for row in reader:
                    # ensure the content is actually a valid type
                    if row['category'] not in supported_categories:
                        # fix: was 'pass', which fell through and indexed
                        # unsupported resource types anyway
                        continue
                    writer.add_document(
                        course_id=row['course_id'].decode('utf8'),
                        display_name=row['display_name'].decode('utf8'),
                        contents=row['contents'].decode('utf8'))
                writer.commit()
            except Exception as e:
                print(e)
                writer.cancel()
        return index
class EdXSearchEngine(SearchEngine):
    """Search engine over the edX course listings."""

    INDEX_PATH = secure.INDEX_PATH_BASE+'edx'
    SEARCH_FIELDS = ["name"]

    def __init__(self, create=False):
        """
        Creates a new search engine that searches over edX courses.

        :param create {bool}: If True, recreates an index from scratch.
            If False, loads the existing index
        """
        super(EdXSearchEngine, self).__init__(
            create, self.SEARCH_FIELDS, self.INDEX_PATH)

    def create_index(self):
        """
        Builds the edX index from the course-listing CSV and returns it.
        Call once; afterwards the saved index can simply be loaded.
        """
        listings_path = 'datasets/Master CourseListings - edX.csv'
        # TODO: use StemmingAnalyzer here so we get the built-in benefits
        # of stemming in our search engine
        # http://whoosh.readthedocs.io/en/latest/stemming.html
        index = self.get_empty_index(self.INDEX_PATH, Schema(
            course_id=ID(stored=True),
            name=TEXT(stored=True)))
        # index every listed course
        with open(listings_path, 'r') as listings_file:
            writer = index.writer()
            try:
                for record in csv.DictReader(listings_file):
                    writer.add_document(
                        course_id=record['course_id'].decode('utf8'),
                        name=record['name'].decode('utf8'))
                writer.commit()
            except Exception as e:
                print(e)
                writer.cancel()
        return index
|
import pandas as pd
import networkx as nx
from na3x.transformation.transformer import transformer
from na3x.utils.converter import Converter
from na3x.utils.aggregator import Aggregator
from logic.constants import DbConstants, ParamConstants
from copy import deepcopy
from logic.gantt import Task, Link
@transformer
def dates2range(input, **params):
    """Expand a start/end date pair from *input* into a list of dates.

    Field names are taken from params ('field.startDate', 'field.endDate',
    'field.range'); returns {range_field: [Timestamp, ...]}.
    """
    PARAM_FIELD_STARTDATE = 'field.startDate'
    PARAM_FIELD_ENDDATE = 'field.endDate'
    PARAM_FIELD_RANGE = 'field.range'
    start = input[params.get(PARAM_FIELD_STARTDATE)]
    end = input[params.get(PARAM_FIELD_ENDDATE)]
    span = pd.date_range(start, end)
    return {params.get(PARAM_FIELD_RANGE): span.tolist()}
@transformer
def sec2hrs(input, **params):
    """Convert the named numeric fields of each row from seconds to hours, in place.

    :param input: list of dict rows.
    :param params: 'fields' -> iterable of field names to convert.
    :return: the same list, mutated.
    """
    PARAM_FIELDS = 'fields'
    fields = params.get(PARAM_FIELDS)
    for field in fields:
        for row in input:
            # fix: explicit None check -- a legitimate value of 0 seconds
            # previously became None because 0 is falsy.
            row[field] = row[field] / 3600 if row[field] is not None else None
    return input
@transformer
def filter_allocations_on_backlog(input, **params):
    """Keep only allocations whose item key appears in the sprint backlog.

    One output entry is produced per (allocation, matching backlog item)
    pair, mirroring the original nested-loop semantics.
    """
    return [allocation
            for allocation in input[DbConstants.SCRUM_ALLOCATIONS]
            for item in input[DbConstants.SCRUM_SPRINT_BACKLOG]
            if allocation[ParamConstants.PARAM_ITEM_KEY] == item[ParamConstants.PARAM_ITEM_KEY]]
@transformer
def filter_allocations_on_employees(input, **params):
    """Keep only allocations whose employee appears in the project employee list.

    One output entry is produced per (allocation, matching employee) pair,
    mirroring the original nested-loop semantics.
    """
    return [allocation
            for allocation in input[DbConstants.SCRUM_ALLOCATIONS]
            for employee in input[DbConstants.PROJECT_EMPLOYEES]
            if allocation[ParamConstants.PARAM_EMPLOYEE] == employee[ParamConstants.PARAM_EMPLOYEE_NAME]]
@transformer
def filter_team_on_employees(input, **params):  # ToDo: fix capacity calculation
    """Drop employees from each team group that are not in the project employee list.

    :return: deep-copied team groups whose 'employees' lists only contain
        employees present in PROJECT_EMPLOYEES.
    """
    EMPLOYEES = 'employees'
    # names of employees that should be kept
    keep_names = {e[ParamConstants.PARAM_EMPLOYEE_NAME]
                  for e in input[DbConstants.PROJECT_EMPLOYEES]}
    res = []
    for group in input[DbConstants.PROJECT_TEAM]:
        n_group = deepcopy(group)
        # fix: the original removed elements from the list it was iterating,
        # which skips the element following each removal and so could leave
        # employees that should have been filtered out.
        n_group[EMPLOYEES] = [e for e in n_group[EMPLOYEES]
                              if e[ParamConstants.PARAM_EMPLOYEE_NAME] in keep_names]
        res.append(n_group)
    return res
@transformer
def gantt_links(input, **params):
    """Normalize raw issue links into directed gantt links.

    'blocks' links are taken as-is; 'blocked' links are reversed, and a
    digraph is used to deduplicate edges. Returns gantt link dicts with
    type 2 ('must finish before').
    """
    graph = nx.DiGraph()
    for raw in input:
        kind = raw[Link.LINK_TYPE]
        src = raw[Link.LINK_SOURCE]
        dst = raw[Link.LINK_TARGET]
        if kind == Link.LINK_BLOCKS:
            graph.add_edge(src, dst)
        elif kind == Link.LINK_BLOCKED and (dst, src) not in graph.edges:
            graph.add_edge(dst, src)
    return [{Link.LINK_ID: '{}->{}'.format(u, v),
             Link.LINK_SOURCE: u, Link.LINK_TARGET: v, Link.LINK_TYPE: 2}
            for u, v in graph.edges]
@transformer
def gantt_tasks(input, **params):
    """Build gantt task records for backlog items plus fakes for external deps.

    Nodes referenced by gantt links but absent from the sprint backlog are
    grouped under a fake 'External dependencies' parent task.
    """
    graph = nx.DiGraph()
    for link in input[DbConstants.GANTT_LINKS]:
        graph.add_edge(link[Link.LINK_SOURCE], link[Link.LINK_TARGET])
    backlog_keys = [item[ParamConstants.PARAM_ITEM_KEY]
                    for item in input[DbConstants.SCRUM_SPRINT_BACKLOG]]
    # linked nodes that are not part of the sprint backlog
    ext = [node for node in graph.nodes if node not in backlog_keys]
    res = [Task.create_task(key) for key in backlog_keys]
    if ext:
        res.append(Task.create_fake_task(Task.TASK_EXT, 'External dependencies'))
        for node in ext:
            res.append(Task.create_fake_task(node, node, Task.TASK_EXT))
    return res
@transformer
def merge_plan_vs_actual(input, **params):
    """Outer-merge planned issues with actual issue status on the item key.

    The merge keeps rows present on either side (indicator column included)
    and suffixes overlapping columns with '_plan' / '_actual'.
    """
    INPUT_PLAN = 'plan.issues'
    INPUT_ACTUAL = 'actual.issues.status'
    plan_df = pd.DataFrame.from_records(input[INPUT_PLAN])
    actual_df = pd.DataFrame.from_records(input[INPUT_ACTUAL])
    merged = plan_df.merge(actual_df,
                           left_on=ParamConstants.PARAM_ITEM_KEY,
                           right_on=ParamConstants.PARAM_ITEM_KEY,
                           suffixes=('_plan', '_actual'),
                           indicator=True, how='outer')
    return Converter.df2list(merged)
@transformer
def plan_vs_actual_discrepencies(input, **params):
    """Enrich plan-vs-actual discrepancy rows with planned schedule/effort data.

    For every discrepancy row, adds the planned start/end dates, the planned
    hours up to the status date, and the planned assignees on the status
    date. Returns the enriched list of discrepancy dicts.
    """
    # Input/output key constants. Note: the 'discrepencies'/'INPUR' spellings
    # are kept because they are part of the persisted configuration keys.
    INPUT_PLAN_VS_ACTUAL = 'actual.issues.discrepencies'
    INPUT_ACTUAL_STATUS_DATE = 'actual.status.date'
    INPUR_SERVER_DATE = 'serverdate'
    INPUT_PLAN = 'sprint.allocations'
    OUT_START_DATE_PLAN = 'start_plan'
    OUT_END_DATE_PLAN = 'end_plan'
    OUT_WHRS_PLAN = 'whrs_plan'
    OUT_ASSIGNEES_PLAN = 'assignees_plan'
    status_date = input[INPUT_ACTUAL_STATUS_DATE][INPUR_SERVER_DATE]
    plan_df = pd.DataFrame.from_records(input[INPUT_PLAN])
    # Planned schedule per item: min/max allocation date, keyed by item key
    # (usage below shows a mapping {key: {'min': ..., 'max': ...}}).
    schedule_plan_agg = Aggregator.agg_multi_func(Converter.df2list(plan_df), ParamConstants.PARAM_DATE, ['min', 'max'],
                                                  ParamConstants.PARAM_ITEM_KEY)
    # DataFrame.where keeps the frame's shape and NaNs out non-matching rows,
    # so each call below narrows plan_df further, in place and in order.
    plan_df.where(plan_df.date <= status_date, inplace=True)
    # Planned hours accumulated up to (and including) the status date.
    spent_plan_agg = Aggregator.agg_single_func(Converter.df2list(plan_df), ParamConstants.PARAM_WHRS, 'sum',
                                                ParamConstants.PARAM_ITEM_KEY)
    # Narrow further to the status date only (used for the assignee lookup).
    plan_df.where(plan_df.date == status_date, inplace=True)
    discrepencies = Converter.df2list(pd.DataFrame.from_records(input[INPUT_PLAN_VS_ACTUAL]))
    for item in discrepencies:
        key = item[ParamConstants.PARAM_ITEM_KEY]
        if key in schedule_plan_agg:
            item[OUT_START_DATE_PLAN] = schedule_plan_agg[key]['min']
            item[OUT_END_DATE_PLAN] = schedule_plan_agg[key]['max']
        if key in spent_plan_agg:
            item[OUT_WHRS_PLAN] = spent_plan_agg[key]
        # NOTE(review): rows masked by where() carry NaN/None values; they are
        # presumably rejected by the checks below -- verify df2list's NaN
        # handling, since float NaN itself is truthy.
        item_assignees = Converter.df2list(plan_df.where(plan_df.key==key))
        assignees_plan = []
        for assignee in item_assignees:
            if assignee[ParamConstants.PARAM_GROUP] and assignee[ParamConstants.PARAM_EMPLOYEE] and item[
                    ParamConstants.PARAM_ITEM_KEY] == assignee[ParamConstants.PARAM_ITEM_KEY]:
                assignees_plan.append({ParamConstants.PARAM_GROUP: assignee[ParamConstants.PARAM_GROUP],
                                       ParamConstants.PARAM_EMPLOYEE: assignee[ParamConstants.PARAM_EMPLOYEE]})
        item[OUT_ASSIGNEES_PLAN] = assignees_plan
    return discrepencies
assignment -> allocation
fix sprint timeline
import pandas as pd
import networkx as nx
from na3x.transformation.transformer import transformer
from na3x.utils.converter import Converter
from na3x.utils.aggregator import Aggregator
from logic.constants import DbConstants, ParamConstants
from copy import deepcopy
from logic.gantt import Task, Link
@transformer
def dates2range(input, **params):
    """Expand a start/end date pair from *input* into a normalized date list.

    Field names are taken from params ('field.startDate', 'field.endDate',
    'field.range'); returns {range_field: [Timestamp, ...]} with each date
    normalized to midnight.
    """
    PARAM_FIELD_STARTDATE = 'field.startDate'
    PARAM_FIELD_ENDDATE = 'field.endDate'
    PARAM_FIELD_RANGE = 'field.range'
    start = input[params.get(PARAM_FIELD_STARTDATE)]
    end = input[params.get(PARAM_FIELD_ENDDATE)]
    span = pd.date_range(start, end, normalize=True)
    return {params.get(PARAM_FIELD_RANGE): span.tolist()}
@transformer
def sec2hrs(input, **params):
    """Convert the named numeric fields of each row from seconds to hours, in place.

    :param input: list of dict rows.
    :param params: 'fields' -> iterable of field names to convert.
    :return: the same list, mutated.
    """
    PARAM_FIELDS = 'fields'
    fields = params.get(PARAM_FIELDS)
    for field in fields:
        for row in input:
            # fix: explicit None check -- a legitimate value of 0 seconds
            # previously became None because 0 is falsy.
            row[field] = row[field] / 3600 if row[field] is not None else None
    return input
@transformer
def filter_allocations_on_backlog(input, **params):
    """Keep only allocations whose item key appears in the sprint backlog.

    One output entry is produced per (allocation, matching backlog item)
    pair, mirroring the original nested-loop semantics.
    """
    return [allocation
            for allocation in input[DbConstants.SCRUM_ALLOCATIONS]
            for item in input[DbConstants.SCRUM_SPRINT_BACKLOG]
            if allocation[ParamConstants.PARAM_ITEM_KEY] == item[ParamConstants.PARAM_ITEM_KEY]]
@transformer
def filter_allocations_on_employees(input, **params):
    """Keep only allocations whose employee appears in the project employee list.

    One output entry is produced per (allocation, matching employee) pair,
    mirroring the original nested-loop semantics.
    """
    return [allocation
            for allocation in input[DbConstants.SCRUM_ALLOCATIONS]
            for employee in input[DbConstants.PROJECT_EMPLOYEES]
            if allocation[ParamConstants.PARAM_EMPLOYEE] == employee[ParamConstants.PARAM_EMPLOYEE_NAME]]
@transformer
def filter_team_on_employees(input, **params):  # ToDo: fix capacity calculation
    """Drop employees from each team group that are not in the project employee list.

    :return: deep-copied team groups whose 'employees' lists only contain
        employees present in PROJECT_EMPLOYEES.
    """
    EMPLOYEES = 'employees'
    # names of employees that should be kept
    keep_names = {e[ParamConstants.PARAM_EMPLOYEE_NAME]
                  for e in input[DbConstants.PROJECT_EMPLOYEES]}
    res = []
    for group in input[DbConstants.PROJECT_TEAM]:
        n_group = deepcopy(group)
        # fix: the original removed elements from the list it was iterating,
        # which skips the element following each removal and so could leave
        # employees that should have been filtered out.
        n_group[EMPLOYEES] = [e for e in n_group[EMPLOYEES]
                              if e[ParamConstants.PARAM_EMPLOYEE_NAME] in keep_names]
        res.append(n_group)
    return res
@transformer
def gantt_links(input, **params):
    """Normalize raw issue links into directed gantt links.

    'blocks' links are taken as-is; 'blocked' links are reversed, and a
    digraph is used to deduplicate edges. Returns gantt link dicts with
    type 2 ('must finish before').
    """
    graph = nx.DiGraph()
    for raw in input:
        kind = raw[Link.LINK_TYPE]
        src = raw[Link.LINK_SOURCE]
        dst = raw[Link.LINK_TARGET]
        if kind == Link.LINK_BLOCKS:
            graph.add_edge(src, dst)
        elif kind == Link.LINK_BLOCKED and (dst, src) not in graph.edges:
            graph.add_edge(dst, src)
    return [{Link.LINK_ID: '{}->{}'.format(u, v),
             Link.LINK_SOURCE: u, Link.LINK_TARGET: v, Link.LINK_TYPE: 2}
            for u, v in graph.edges]
@transformer
def gantt_tasks(input, **params):
    """Build gantt task records for backlog items plus fakes for external deps.

    Nodes referenced by gantt links but absent from the sprint backlog are
    grouped under a fake 'External dependencies' parent task.
    """
    graph = nx.DiGraph()
    for link in input[DbConstants.GANTT_LINKS]:
        graph.add_edge(link[Link.LINK_SOURCE], link[Link.LINK_TARGET])
    backlog_keys = [item[ParamConstants.PARAM_ITEM_KEY]
                    for item in input[DbConstants.SCRUM_SPRINT_BACKLOG]]
    # linked nodes that are not part of the sprint backlog
    ext = [node for node in graph.nodes if node not in backlog_keys]
    res = [Task.create_task(key) for key in backlog_keys]
    if ext:
        res.append(Task.create_fake_task(Task.TASK_EXT, 'External dependencies'))
        for node in ext:
            res.append(Task.create_fake_task(node, node, Task.TASK_EXT))
    return res
@transformer
def merge_plan_vs_actual(input, **params):
    """Outer-merge planned issues with actual issue status on the item key.

    The merge keeps rows present on either side (indicator column included)
    and suffixes overlapping columns with '_plan' / '_actual'.
    """
    INPUT_PLAN = 'plan.issues'
    INPUT_ACTUAL = 'actual.issues.status'
    plan_df = pd.DataFrame.from_records(input[INPUT_PLAN])
    actual_df = pd.DataFrame.from_records(input[INPUT_ACTUAL])
    merged = plan_df.merge(actual_df,
                           left_on=ParamConstants.PARAM_ITEM_KEY,
                           right_on=ParamConstants.PARAM_ITEM_KEY,
                           suffixes=('_plan', '_actual'),
                           indicator=True, how='outer')
    return Converter.df2list(merged)
@transformer
def plan_vs_actual_discrepencies(input, **params):
    """Enrich plan-vs-actual discrepancy rows with planned schedule/effort data.

    For every discrepancy row, adds the planned start/end dates, the planned
    hours up to the status date, and the planned assignees on the status
    date. Returns the enriched list of discrepancy dicts.
    """
    # Input/output key constants. Note: the 'discrepencies'/'INPUR' spellings
    # are kept because they are part of the persisted configuration keys.
    INPUT_PLAN_VS_ACTUAL = 'actual.issues.discrepencies'
    INPUT_ACTUAL_STATUS_DATE = 'actual.status.date'
    INPUR_SERVER_DATE = 'serverdate'
    INPUT_PLAN = 'sprint.allocations'
    OUT_START_DATE_PLAN = 'start_plan'
    OUT_END_DATE_PLAN = 'end_plan'
    OUT_WHRS_PLAN = 'whrs_plan'
    OUT_ASSIGNEES_PLAN = 'assignees_plan'
    status_date = input[INPUT_ACTUAL_STATUS_DATE][INPUR_SERVER_DATE]
    plan_df = pd.DataFrame.from_records(input[INPUT_PLAN])
    # Planned schedule per item: min/max allocation date, keyed by item key
    # (usage below shows a mapping {key: {'min': ..., 'max': ...}}).
    schedule_plan_agg = Aggregator.agg_multi_func(Converter.df2list(plan_df), ParamConstants.PARAM_DATE, ['min', 'max'],
                                                  ParamConstants.PARAM_ITEM_KEY)
    # DataFrame.where keeps the frame's shape and NaNs out non-matching rows,
    # so each call below narrows plan_df further, in place and in order.
    plan_df.where(plan_df.date <= status_date, inplace=True)
    # Planned hours accumulated up to (and including) the status date.
    spent_plan_agg = Aggregator.agg_single_func(Converter.df2list(plan_df), ParamConstants.PARAM_WHRS, 'sum',
                                                ParamConstants.PARAM_ITEM_KEY)
    # Narrow further to the status date only (used for the assignee lookup).
    plan_df.where(plan_df.date == status_date, inplace=True)
    discrepencies = Converter.df2list(pd.DataFrame.from_records(input[INPUT_PLAN_VS_ACTUAL]))
    for item in discrepencies:
        key = item[ParamConstants.PARAM_ITEM_KEY]
        if key in schedule_plan_agg:
            item[OUT_START_DATE_PLAN] = schedule_plan_agg[key]['min']
            item[OUT_END_DATE_PLAN] = schedule_plan_agg[key]['max']
        if key in spent_plan_agg:
            item[OUT_WHRS_PLAN] = spent_plan_agg[key]
        # NOTE(review): rows masked by where() carry NaN/None values; they are
        # presumably rejected by the checks below -- verify df2list's NaN
        # handling, since float NaN itself is truthy.
        item_assignees = Converter.df2list(plan_df.where(plan_df.key==key))
        assignees_plan = []
        for assignee in item_assignees:
            if assignee[ParamConstants.PARAM_GROUP] and assignee[ParamConstants.PARAM_EMPLOYEE] and item[
                    ParamConstants.PARAM_ITEM_KEY] == assignee[ParamConstants.PARAM_ITEM_KEY]:
                assignees_plan.append({ParamConstants.PARAM_GROUP: assignee[ParamConstants.PARAM_GROUP],
                                       ParamConstants.PARAM_EMPLOYEE: assignee[ParamConstants.PARAM_EMPLOYEE]})
        item[OUT_ASSIGNEES_PLAN] = assignees_plan
    return discrepencies
import webApp
import xmlDatabase
import actions
import sseListener
import callUrl
import os
import argparse
import besthostname
import time
import json
import web
import subprocess
import imp
import threading
import cProfile
import pstats
import myLogger
from _version import VERSION
import sys
reload(sys)
sys.setdefaultencoding('utf8')
#
# Helper for profileing multiple threads
#
PROFILER_STATS = None
def enable_thread_profiling():
    '''Monkey-patch Thread.run to enable global profiling.

    Each thread creates a local profiler; statistics are pooled into the
    module-global PROFILER_STATS when the thread's run() completes.
    '''
    global PROFILER_STATS
    PROFILER_STATS = None
    thread_run = threading.Thread.run

    def profile_run(self):
        # fix: without this declaration, the assignments below made
        # PROFILER_STATS local to profile_run, so the 'is None' check raised
        # UnboundLocalError for the first profiled thread (the outer
        # function's global statement does not extend to nested scopes).
        global PROFILER_STATS
        print('xxxjack profile_run')
        self._prof = cProfile.Profile()
        self._prof.enable()
        thread_run(self)
        self._prof.disable()
        if PROFILER_STATS is None:
            PROFILER_STATS = pstats.Stats(self._prof)
        else:
            PROFILER_STATS.add(self._prof)

    threading.Thread.run = profile_run
    print('xxxjack inserted profiler')
class IgorServer:
def __init__(self, datadir, port=9333, advertise=False, profile=False, ssl=False):
    """Create the Igor server: database, web app wiring, URL caller, handlers.

    :param datadir: directory holding database.xml, scripts/, plugins/,
        static/ and (when ssl is True) igor.key / igor.crt.
    :param port: TCP port the web application serves on.
    :param advertise: when True, advertise the service over mDNS.
    :param profile: when True, enable per-thread profiling plus a profiler
        for the main thread.
    :param ssl: when True, serve https using the key/cert from datadir.
    """
    #
    # Create the database, and tell the web application about it
    #
    self.profile = None
    if profile:
        enable_thread_profiling()
        self.profile = cProfile.Profile()
        self.profile.enable()
    self.port = port
    self.app = webApp.WEBAPP
    self.datadir = datadir
    self.ssl = ssl
    if self.ssl:
        self.privateKeyFile = os.path.join(self.datadir, 'igor.key')
        self.certificateFile = os.path.join(self.datadir, 'igor.crt')
        # Imported lazily so non-ssl deployments don't need pyOpenSSL.
        import OpenSSL.crypto
        certificateData = open(self.certificateFile, 'rb').read()
        certificate = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificateData)
        # sha1 fingerprint kept so clients can pin the certificate.
        self.certificateFingerprint = certificate.digest("sha1")
    else:
        self.privateKeyFile = None
        self.certificateFile = None
        self.certificateFingerprint = None
    self.database = xmlDatabase.DBImpl(os.path.join(self.datadir, 'database.xml'))
    webApp.DATABASE = self.database # Have to set in a module-global variable, to be fixed some time...
    webApp.SCRIPTDIR = os.path.join(datadir, 'scripts')
    webApp.PLUGINDIR = os.path.join(datadir, 'plugins')
    webApp.STATICDIR = os.path.join(datadir, 'static')
    webApp.COMMANDS = self
    #
    # Create and start the asynchronous URL accessor
    #
    self.urlCaller = callUrl.URLCaller(self.app)
    self.urlCaller.start()
    #
    # Fill self data (registers this instance's details in the database)
    #
    self.fillSelfData()
    #
    # Startup other components
    #
    self.actionHandler = None
    self.updateActions()
    self.eventSources = None
    self.updateEventSources()
    self.triggerHandler = None
    self.updateTriggers()
    #
    # Disable debug
    #
    web.config.debug = False
    #
    # Send start action to start any plugins
    #
    self.urlCaller.callURL(dict(method='GET', url='/action/start'))
    if advertise:
        self.advertise(port)
    def advertise(self, port):
        """Advertise this Igor instance on the local network via mDNS/zeroconf.

        Uses dns-sd on macOS and avahi-publish on Linux; other platforms
        just print a warning and return.
        """
        if sys.platform == 'darwin':
            cmd = ['dns-sd', '-R', 'igor', '_http._tcp', 'local', str(port)]
        elif sys.platform == 'linux2':
            cmd = ['avahi-publish', '-s', 'igor', '_http._tcp', str(port)]
        else:
            print >> sys.stderr, "Cannot do mdns-advertise on platform", sys.platform
            return
        try:
            # Keep a reference so the advertising subprocess stays alive with us.
            self.advertiser = subprocess.Popen(cmd)
        except OSError:
            print >> sys.stderr, "advertisement command failed: %s" % (' '.join(cmd))
    def fillSelfData(self):
        """Put our details in the database"""
        hostName = besthostname.besthostname()
        protocol = 'http'
        if self.ssl:
            protocol = 'https'
        url = '%s://%s:%d/data' % (protocol, hostName, self.port)
        # Increment the persisted reboot counter; start at 0 on first run
        # or when the stored value is not an integer.
        oldRebootCount = self.database.getValue('/data/services/igor/rebootCount')
        rebootCount = 0
        if oldRebootCount:
            try:
                rebootCount = int(oldRebootCount)+1
            except ValueError:
                pass
        data = dict(host=hostName, url=url, port=self.port, protocol=protocol, startTime=int(time.time()), version=VERSION, ticker=0, rebootCount=rebootCount)
        if self.certificateFingerprint:
            data['fingerprint'] = self.certificateFingerprint
        # Stored asynchronously via the URL caller rather than written directly.
        tocall = dict(method='PUT', url='/data/services/igor', mimetype='application/json', data=json.dumps(data), representing='igor/core')
        self.urlCaller.callURL(tocall)
    def run(self):
        """Run the web application (blocking), with ssl configured if enabled."""
        if self.ssl:
            from web.wsgiserver import CherryPyWSGIServer
            # CherryPyWSGIServer picks up ssl config from class attributes.
            CherryPyWSGIServer.ssl_certificate = self.certificateFile
            CherryPyWSGIServer.ssl_private_key = self.privateKeyFile
        self.app.run(port=self.port)
def dump(self):
rv = ''
if self.urlCaller: rv += self.urlCaller.dump() + '\n'
if self.actionHandler: rv += self.actionHandler.dump() + '\n'
if self.eventSources: rv += self.eventSources.dump() + '\n'
return rv
def log(self):
logfn = os.path.join(self.datadir, 'igor.log')
if os.path.exists(logfn):
return open(logfn).read()
raise Web.HTTPError('404 Log file not available')
    def updateStatus(self, subcommand=None, representing=None, alive=None, resultData=None, lastActivity=None, lastSuccess=None):
        """Update status field of some service/sensor/actuator after an action.

        subcommand/representing - path of the entity (subcommand wins if given);
            a leading '/data/' prefix is stripped.
        alive - truthy if the action succeeded.
        resultData - error message or dict of extra status fields on failure.
        lastActivity/lastSuccess - timestamps; default to now.
        """
        if subcommand:
            representing = subcommand
        if representing.startswith('/data/'):
            representing = representing[len('/data/'):]
        if lastActivity == None:
            lastActivity = time.time()
        else:
            lastActivity = float(lastActivity)
        if lastSuccess == None and alive:
            lastSuccess = lastActivity
        # xxxjack this needs to be done differently. Too much spaghetti.
        # NOTE(review): the 'application/x-python-object' mimetype appears to let
        # put_key store native Python values — confirm against webApp.DATABASE_ACCESS.
        dbAccess = webApp.DATABASE_ACCESS
        key = 'status/' + representing
        # Check whether record exists, otherwise create it (empty)
        try:
            _ = dbAccess.get_key(key, 'application/x-python-object', 'content')
        except web.HTTPError:
            web.ctx.status = "200 OK" # Clear error, otherwise it is forwarded from this request
            _ = dbAccess.put_key(key, 'application/x-python-object', None, '', 'text/plain')
        # Fill only entries we want
        _ = dbAccess.put_key(key + '/alive', 'application/x-python-object', None, not not alive, 'application/x-python-object')
        _ = dbAccess.put_key(key + '/lastActivity', 'application/x-python-object', None, lastActivity, 'application/x-python-object')
        if lastSuccess:
            _ = dbAccess.put_key(key + '/lastSuccess', 'application/x-python-object', None, lastSuccess, 'application/x-python-object')
        if alive:
            # Success clears any pending error suppression and error message.
            _ = dbAccess.put_key(key + '/ignoreErrorsUntil', 'application/x-python-object', None, None, 'application/x-python-object')
            resultData = ''
        else:
            _ = dbAccess.put_key(key + '/lastFailure', 'application/x-python-object', None, lastActivity, 'application/x-python-object')
            if not resultData:
                resultData = '%s failed without error message' % representing
        if type(resultData) == type({}):
            # A dict fans out into one status sub-key per entry.
            for k, v in resultData.items():
                _ = dbAccess.put_key(key + '/' + k, 'application/x-python-object', None, v, 'application/x-python-object')
        else:
            _ = dbAccess.put_key(key + '/errorMessage', 'application/x-python-object', None, resultData, 'application/x-python-object')
        return ''
    def updateActions(self):
        """Create any (periodic) event handlers defined in the database"""
        startupActions = self.database.getElements('actions')
        if len(startupActions):
            if len(startupActions) > 1:
                raise web.HTTPError('401 only one <actions> element allowed')
            # Lazily create the handler on first use, then (re)load its actions.
            if not self.actionHandler:
                self.actionHandler = actions.ActionCollection(self.database, self.urlCaller.callURL)
            self.actionHandler.updateActions(startupActions[0])
        elif self.actionHandler:
            # No <actions> element any more: clear out the existing handler.
            self.actionHandler.updateActions([])
        return 'OK'
    def updateEventSources(self):
        """Create any SSE event sources that are defined in the database"""
        eventSources = self.database.getElements('eventSources')
        if len(eventSources):
            if len(eventSources) > 1:
                raise web.HTTPError('401 only one <eventSources> element allowed')
            # Lazily create the collection on first use, then (re)load its sources.
            if not self.eventSources:
                self.eventSources = sseListener.EventSourceCollection(self.database, self.urlCaller.callURL)
            self.eventSources.updateEventSources(eventSources[0])
        elif self.eventSources:
            # No <eventSources> element any more: clear out the existing sources.
            self.eventSources.updateEventSources([])
        return 'OK'
    def updateTriggers(self):
        """Placeholder: trigger support is not implemented yet."""
        pass
    def runAction(self, actionname):
        """Trigger every <action> with the given name; 404 if none exist."""
        if not self.actionHandler:
            raise web.notfound()
        nodes = self.database.getElements('actions/action[name="%s"]'%actionname)
        if not nodes:
            raise web.notfound()
        for node in nodes:
            self.actionHandler.triggerAction(node)
        return 'OK'
    def runTrigger(self, triggername):
        """Not implemented; always raises a 502 error."""
        # NOTE: the raise below deliberately makes the rest of this method
        # unreachable until trigger support is implemented.
        raise web.HTTPError("502 triggers not yet implemented")
        if not self.triggerHandler:
            raise web.notfound()
        triggerNodes = self.database.getElements('triggers/%s' % triggername)
        if not triggerNodes:
            raise web.notfound()
        if len(triggerNodes) > 1:
            raise web.HTTPError("502 multiple triggers %s in database" % triggername)
        triggerNode = triggerNodes[0]
        self.triggerHandler.triggerTrigger(triggerNode)
    def save(self):
        """Saves the database to the filesystem"""
        self.database.saveFile()
        return 'OK'
    def queue(self, subcommand):
        """Queues an internal command through callUrl (used for save/stop/restart)"""
        self.urlCaller.callURL(dict(method='GET', url='/internal/%s' % subcommand))
        return 'OK'
    def started(self):
        """Liveness check: returns a fixed string once the server is up."""
        return "IgorServer started"
    def stop(self):
        """Exits igorServer after saving"""
        global PROFILER_STATS
        # Stop components before saving so no more database writes happen.
        if self.actionHandler:
            self.actionHandler.stop()
            self.actionHandler = None
        if self.eventSources:
            self.eventSources.stop()
            self.eventSources = None
        if self.triggerHandler:
            self.triggerHandler.stop()
            self.triggerHandler = None
        if self.urlCaller:
            self.urlCaller.stop()
            self.urlCaller = None
        self.save()
        if self.profile:
            # Merge the main-thread profile into the pooled stats and write them out.
            self.profile.disable()
            if PROFILER_STATS is None:
                PROFILER_STATS = pstats.Stats(self.profile)
            else:
                PROFILER_STATS.add(self.profile)
            PROFILER_STATS.dump_stats("igor.profile")
        sys.exit(0)
    def restart(self):
        """Saves the database and restarts igorServer"""
        self.save()
        # Close everything except stdin/stdout/stderr, then re-exec ourselves.
        os.closerange(3, subprocess.MAXFD)
        os.execl(sys.executable, sys.executable, *sys.argv)
    def command(self):
        """Return a shell command line that reproduces this Igor invocation."""
        rv = ''
        if 'IGORSERVER_DIR' in os.environ:
            rv = rv + 'export IGORSERVER_DIR=' + repr(os.environ['IGORSERVER_DIR']) + '\n'
        if 'IGORSERVER_PORT' in os.environ:
            rv = rv + 'export IGORSERVER_PORT=%d\n' % int(os.environ['IGORSERVER_PORT'])
        rv = rv + 'exec %s' % repr(sys.executable)
        for a in sys.argv:
            rv += ' ' + repr(a)
        rv += '\n'
        return rv
    def help(self):
        """Return a human-readable list of the internal igor commands."""
        rv = 'Internal igor commands:\n'
        rv += 'help - this help\n'
        rv += 'version - return version number\n'
        rv += 'save - Make sure database is saved to disk\n'
        rv += 'restart - Save and restart this Igor (may appear to fail even when executed correctly)\n'
        rv += 'stop - Save and stop this Igor (may appear to fail even when executed correctly)\n'
        rv += 'command - Show command line that started this Igor instance\n'
        rv += 'dump - Show internal run queue of this Igor instance\n'
        rv += 'log - Show httpd-style log file of this Igor instance\n'
        return rv
    def version(self):
        """Return the Igor version string, newline-terminated."""
        return VERSION + '\n'
def main():
DEFAULTDIR=os.path.join(os.path.expanduser('~'), '.igor')
if 'IGORSERVER_DIR' in os.environ:
DEFAULTDIR = os.environ['IGORSERVER_DIR']
DEFAULTPORT=9333
if 'IGORSERVER_PORT' in os.environ:
DEFAULTDIR = int(os.environ['IGORSERVER_PORT'])
parser = argparse.ArgumentParser(description="Run the Igor home automation server")
parser.add_argument("-d", "--database", metavar="DIR", help="Database and scripts are stored in DIR (default: %s, environment IGORSERVER_DIR)" % DEFAULTDIR, default=DEFAULTDIR)
parser.add_argument("-p", "--port", metavar="PORT", type=int, help="Port to serve on (default: 9333, environment IGORSERVER_PORT)", default=DEFAULTPORT)
parser.add_argument("-s", "--ssl", action="store_true", help="Use https (ssl) on the service")
parser.add_argument("--debug", action="store_true", help="Enable debug output")
parser.add_argument("--advertise", action="store_true", help="Advertise service through bonjour/zeroconf")
parser.add_argument("--version", action="store_true", help="Print version and exit")
parser.add_argument("--profile", action="store_true", help="Enable Python profiler (debugging Igor only)")
parser.add_argument('--logLevel', metavar='SPEC', help="Set log levels (comma-separated list of [loggername:]LOGLEVEL)")
args = parser.parse_args()
myLogger.install(args.logLevel)
if args.version:
print VERSION
sys.exit(0)
if args.debug:
callUrl.DEBUG = True
sseListener.DEBUG = True
actions.DEBUG = True
xmlDatabase.DEBUG = True
webApp.DEBUG = True
datadir = args.database
print 'igorServer %s running from %s' % (VERSION, sys.argv[0])
try:
igorServer = IgorServer(datadir, args.port, args.advertise, profile=args.profile, ssl=args.ssl)
except IOError, arg:
print >>sys.stderr, '%s: Cannot open database: %s' % (sys.argv[0], arg)
print >>sys.stderr, '%s: Use --help option to see command line arguments' % sys.argv[0]
sys.exit(1)
igorServer.run()
#
# We need to hack the import lock. In case we get here via the easy_install igorServer script
# we are inside an __import__(), and we hold the lock. This means other threads cannot import
# and we hang once a web request comes in. We "work around" this by releasing the lock.
#
hasImportLock = imp.lock_held()
if hasImportLock:
    imp.release_lock()
main()
# Re-acquire so the interpreter's import machinery stays balanced on return.
if hasImportLock:
    imp.acquire_lock()
# Make https operation default if keyfile is available (reverted with --nossl)
import webApp
import xmlDatabase
import actions
import sseListener
import callUrl
import os
import argparse
import besthostname
import time
import json
import web
import subprocess
import imp
import threading
import cProfile
import pstats
import myLogger
from _version import VERSION
import sys
reload(sys)
sys.setdefaultencoding('utf8')
#
# Helper for profileing multiple threads
#
PROFILER_STATS = None
def enable_thread_profiling():
'''Monkey-patch Thread.run to enable global profiling.
Each thread creates a local profiler; statistics are pooled
to the global stats object on run completion.'''
global PROFILER_STATS
import pstats
PROFILER_STATS = None
thread_run = threading.Thread.run
def profile_run(self):
print 'xxxjack profile_run'
self._prof = cProfile.Profile()
self._prof.enable()
thread_run(self)
self._prof.disable()
if PROFILER_STATS is None:
PROFILER_STATS = pstats.Stats(self._prof)
else:
PROFILER_STATS.add(self._prof)
threading.Thread.run = profile_run
print 'xxxjack inserted profiler'
class IgorServer:
    def __init__(self, datadir, port=9333, advertise=False, profile=False, nossl=False):
        """Create the Igor server.

        datadir   - directory holding database.xml, scripts, plugins and static files
        port      - TCP port to serve on
        advertise - if true, advertise the service via mDNS/zeroconf
        profile   - if true, enable the Python profiler (debugging only)
        nossl     - if true, never use https; otherwise https is used when
                    igor.key exists in datadir, falling back to http with a warning
        """
        #
        # Create the database, and tell the web application about it
        #
        self.profile = None
        if profile:
            enable_thread_profiling()
            self.profile = cProfile.Profile()
            self.profile.enable()
        self.port = port
        self.app = webApp.WEBAPP
        self.datadir = datadir
        self.ssl = not nossl
        keyFile = os.path.join(self.datadir, 'igor.key')
        if self.ssl and not os.path.exists(keyFile):
            print 'Warning: Using http in stead of https: no private key file', keyFile
            self.ssl = False
        if self.ssl:
            self.privateKeyFile = keyFile
            self.certificateFile = os.path.join(self.datadir, 'igor.crt')
            # Fingerprint is published in the database so clients can pin the cert.
            import OpenSSL.crypto
            certificateData = open(self.certificateFile, 'rb').read()
            certificate = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificateData)
            self.certificateFingerprint = certificate.digest("sha1")
        else:
            self.privateKeyFile = None
            self.certificateFile = None
            self.certificateFingerprint = None
        self.database = xmlDatabase.DBImpl(os.path.join(self.datadir, 'database.xml'))
        webApp.DATABASE = self.database # Have to set in a module-global variable, to be fixed some time...
        webApp.SCRIPTDIR = os.path.join(datadir, 'scripts')
        webApp.PLUGINDIR = os.path.join(datadir, 'plugins')
        webApp.STATICDIR = os.path.join(datadir, 'static')
        webApp.COMMANDS = self
        #
        # Create and start the asynchronous URL accessor
        #
        self.urlCaller = callUrl.URLCaller(self.app)
        self.urlCaller.start()
        #
        # Fill self data
        #
        self.fillSelfData()
        #
        # Startup other components
        #
        self.actionHandler = None
        self.updateActions()
        self.eventSources = None
        self.updateEventSources()
        self.triggerHandler = None
        self.updateTriggers()
        #
        # Disable debug
        #
        web.config.debug = False
        #
        # Send start action to start any plugins
        #
        self.urlCaller.callURL(dict(method='GET', url='/action/start'))
        if advertise:
            self.advertise(port)
    def advertise(self, port):
        """Advertise this Igor instance on the local network via mDNS/zeroconf.

        Uses dns-sd on macOS and avahi-publish on Linux; other platforms
        just print a warning and return.
        """
        if sys.platform == 'darwin':
            cmd = ['dns-sd', '-R', 'igor', '_http._tcp', 'local', str(port)]
        elif sys.platform == 'linux2':
            cmd = ['avahi-publish', '-s', 'igor', '_http._tcp', str(port)]
        else:
            print >> sys.stderr, "Cannot do mdns-advertise on platform", sys.platform
            return
        try:
            # Keep a reference so the advertising subprocess stays alive with us.
            self.advertiser = subprocess.Popen(cmd)
        except OSError:
            print >> sys.stderr, "advertisement command failed: %s" % (' '.join(cmd))
    def fillSelfData(self):
        """Put our details in the database"""
        hostName = besthostname.besthostname()
        protocol = 'http'
        if self.ssl:
            protocol = 'https'
        url = '%s://%s:%d/data' % (protocol, hostName, self.port)
        # Increment the persisted reboot counter; start at 0 on first run
        # or when the stored value is not an integer.
        oldRebootCount = self.database.getValue('/data/services/igor/rebootCount')
        rebootCount = 0
        if oldRebootCount:
            try:
                rebootCount = int(oldRebootCount)+1
            except ValueError:
                pass
        data = dict(host=hostName, url=url, port=self.port, protocol=protocol, startTime=int(time.time()), version=VERSION, ticker=0, rebootCount=rebootCount)
        if self.certificateFingerprint:
            data['fingerprint'] = self.certificateFingerprint
        # Stored asynchronously via the URL caller rather than written directly.
        tocall = dict(method='PUT', url='/data/services/igor', mimetype='application/json', data=json.dumps(data), representing='igor/core')
        self.urlCaller.callURL(tocall)
    def run(self):
        """Run the web application (blocking), with ssl configured if enabled."""
        if self.ssl:
            from web.wsgiserver import CherryPyWSGIServer
            # CherryPyWSGIServer picks up ssl config from class attributes.
            CherryPyWSGIServer.ssl_certificate = self.certificateFile
            CherryPyWSGIServer.ssl_private_key = self.privateKeyFile
        self.app.run(port=self.port)
def dump(self):
rv = ''
if self.urlCaller: rv += self.urlCaller.dump() + '\n'
if self.actionHandler: rv += self.actionHandler.dump() + '\n'
if self.eventSources: rv += self.eventSources.dump() + '\n'
return rv
def log(self):
logfn = os.path.join(self.datadir, 'igor.log')
if os.path.exists(logfn):
return open(logfn).read()
raise Web.HTTPError('404 Log file not available')
    def updateStatus(self, subcommand=None, representing=None, alive=None, resultData=None, lastActivity=None, lastSuccess=None):
        """Update status field of some service/sensor/actuator after an action.

        subcommand/representing - path of the entity (subcommand wins if given);
            a leading '/data/' prefix is stripped.
        alive - truthy if the action succeeded.
        resultData - error message or dict of extra status fields on failure.
        lastActivity/lastSuccess - timestamps; default to now.
        """
        if subcommand:
            representing = subcommand
        if representing.startswith('/data/'):
            representing = representing[len('/data/'):]
        if lastActivity == None:
            lastActivity = time.time()
        else:
            lastActivity = float(lastActivity)
        if lastSuccess == None and alive:
            lastSuccess = lastActivity
        # xxxjack this needs to be done differently. Too much spaghetti.
        # NOTE(review): the 'application/x-python-object' mimetype appears to let
        # put_key store native Python values — confirm against webApp.DATABASE_ACCESS.
        dbAccess = webApp.DATABASE_ACCESS
        key = 'status/' + representing
        # Check whether record exists, otherwise create it (empty)
        try:
            _ = dbAccess.get_key(key, 'application/x-python-object', 'content')
        except web.HTTPError:
            web.ctx.status = "200 OK" # Clear error, otherwise it is forwarded from this request
            _ = dbAccess.put_key(key, 'application/x-python-object', None, '', 'text/plain')
        # Fill only entries we want
        _ = dbAccess.put_key(key + '/alive', 'application/x-python-object', None, not not alive, 'application/x-python-object')
        _ = dbAccess.put_key(key + '/lastActivity', 'application/x-python-object', None, lastActivity, 'application/x-python-object')
        if lastSuccess:
            _ = dbAccess.put_key(key + '/lastSuccess', 'application/x-python-object', None, lastSuccess, 'application/x-python-object')
        if alive:
            # Success clears any pending error suppression and error message.
            _ = dbAccess.put_key(key + '/ignoreErrorsUntil', 'application/x-python-object', None, None, 'application/x-python-object')
            resultData = ''
        else:
            _ = dbAccess.put_key(key + '/lastFailure', 'application/x-python-object', None, lastActivity, 'application/x-python-object')
            if not resultData:
                resultData = '%s failed without error message' % representing
        if type(resultData) == type({}):
            # A dict fans out into one status sub-key per entry.
            for k, v in resultData.items():
                _ = dbAccess.put_key(key + '/' + k, 'application/x-python-object', None, v, 'application/x-python-object')
        else:
            _ = dbAccess.put_key(key + '/errorMessage', 'application/x-python-object', None, resultData, 'application/x-python-object')
        return ''
    def updateActions(self):
        """Create any (periodic) event handlers defined in the database"""
        startupActions = self.database.getElements('actions')
        if len(startupActions):
            if len(startupActions) > 1:
                raise web.HTTPError('401 only one <actions> element allowed')
            # Lazily create the handler on first use, then (re)load its actions.
            if not self.actionHandler:
                self.actionHandler = actions.ActionCollection(self.database, self.urlCaller.callURL)
            self.actionHandler.updateActions(startupActions[0])
        elif self.actionHandler:
            # No <actions> element any more: clear out the existing handler.
            self.actionHandler.updateActions([])
        return 'OK'
    def updateEventSources(self):
        """Create any SSE event sources that are defined in the database"""
        eventSources = self.database.getElements('eventSources')
        if len(eventSources):
            if len(eventSources) > 1:
                raise web.HTTPError('401 only one <eventSources> element allowed')
            # Lazily create the collection on first use, then (re)load its sources.
            if not self.eventSources:
                self.eventSources = sseListener.EventSourceCollection(self.database, self.urlCaller.callURL)
            self.eventSources.updateEventSources(eventSources[0])
        elif self.eventSources:
            # No <eventSources> element any more: clear out the existing sources.
            self.eventSources.updateEventSources([])
        return 'OK'
    def updateTriggers(self):
        """Placeholder: trigger support is not implemented yet."""
        pass
    def runAction(self, actionname):
        """Trigger every <action> with the given name; 404 if none exist."""
        if not self.actionHandler:
            raise web.notfound()
        nodes = self.database.getElements('actions/action[name="%s"]'%actionname)
        if not nodes:
            raise web.notfound()
        for node in nodes:
            self.actionHandler.triggerAction(node)
        return 'OK'
    def runTrigger(self, triggername):
        """Not implemented; always raises a 502 error."""
        # NOTE: the raise below deliberately makes the rest of this method
        # unreachable until trigger support is implemented.
        raise web.HTTPError("502 triggers not yet implemented")
        if not self.triggerHandler:
            raise web.notfound()
        triggerNodes = self.database.getElements('triggers/%s' % triggername)
        if not triggerNodes:
            raise web.notfound()
        if len(triggerNodes) > 1:
            raise web.HTTPError("502 multiple triggers %s in database" % triggername)
        triggerNode = triggerNodes[0]
        self.triggerHandler.triggerTrigger(triggerNode)
    def save(self):
        """Saves the database to the filesystem"""
        self.database.saveFile()
        return 'OK'
    def queue(self, subcommand):
        """Queues an internal command through callUrl (used for save/stop/restart)"""
        self.urlCaller.callURL(dict(method='GET', url='/internal/%s' % subcommand))
        return 'OK'
    def started(self):
        """Liveness check: returns a fixed string once the server is up."""
        return "IgorServer started"
    def stop(self):
        """Exits igorServer after saving"""
        global PROFILER_STATS
        # Stop components before saving so no more database writes happen.
        if self.actionHandler:
            self.actionHandler.stop()
            self.actionHandler = None
        if self.eventSources:
            self.eventSources.stop()
            self.eventSources = None
        if self.triggerHandler:
            self.triggerHandler.stop()
            self.triggerHandler = None
        if self.urlCaller:
            self.urlCaller.stop()
            self.urlCaller = None
        self.save()
        if self.profile:
            # Merge the main-thread profile into the pooled stats and write them out.
            self.profile.disable()
            if PROFILER_STATS is None:
                PROFILER_STATS = pstats.Stats(self.profile)
            else:
                PROFILER_STATS.add(self.profile)
            PROFILER_STATS.dump_stats("igor.profile")
        sys.exit(0)
    def restart(self):
        """Saves the database and restarts igorServer"""
        self.save()
        # Close everything except stdin/stdout/stderr, then re-exec ourselves.
        os.closerange(3, subprocess.MAXFD)
        os.execl(sys.executable, sys.executable, *sys.argv)
    def command(self):
        """Return a shell command line that reproduces this Igor invocation."""
        rv = ''
        if 'IGORSERVER_DIR' in os.environ:
            rv = rv + 'export IGORSERVER_DIR=' + repr(os.environ['IGORSERVER_DIR']) + '\n'
        if 'IGORSERVER_PORT' in os.environ:
            rv = rv + 'export IGORSERVER_PORT=%d\n' % int(os.environ['IGORSERVER_PORT'])
        rv = rv + 'exec %s' % repr(sys.executable)
        for a in sys.argv:
            rv += ' ' + repr(a)
        rv += '\n'
        return rv
    def help(self):
        """Return a human-readable list of the internal igor commands."""
        rv = 'Internal igor commands:\n'
        rv += 'help - this help\n'
        rv += 'version - return version number\n'
        rv += 'save - Make sure database is saved to disk\n'
        rv += 'restart - Save and restart this Igor (may appear to fail even when executed correctly)\n'
        rv += 'stop - Save and stop this Igor (may appear to fail even when executed correctly)\n'
        rv += 'command - Show command line that started this Igor instance\n'
        rv += 'dump - Show internal run queue of this Igor instance\n'
        rv += 'log - Show httpd-style log file of this Igor instance\n'
        return rv
    def version(self):
        """Return the Igor version string, newline-terminated."""
        return VERSION + '\n'
def main():
DEFAULTDIR=os.path.join(os.path.expanduser('~'), '.igor')
if 'IGORSERVER_DIR' in os.environ:
DEFAULTDIR = os.environ['IGORSERVER_DIR']
DEFAULTPORT=9333
if 'IGORSERVER_PORT' in os.environ:
DEFAULTDIR = int(os.environ['IGORSERVER_PORT'])
parser = argparse.ArgumentParser(description="Run the Igor home automation server")
parser.add_argument("-d", "--database", metavar="DIR", help="Database and scripts are stored in DIR (default: %s, environment IGORSERVER_DIR)" % DEFAULTDIR, default=DEFAULTDIR)
parser.add_argument("-p", "--port", metavar="PORT", type=int, help="Port to serve on (default: 9333, environment IGORSERVER_PORT)", default=DEFAULTPORT)
parser.add_argument("-s", "--nossl", action="store_true", help="Do no use https (ssl) on the service, even if certificates are available")
parser.add_argument("--debug", action="store_true", help="Enable debug output")
parser.add_argument("--advertise", action="store_true", help="Advertise service through bonjour/zeroconf")
parser.add_argument("--version", action="store_true", help="Print version and exit")
parser.add_argument("--profile", action="store_true", help="Enable Python profiler (debugging Igor only)")
parser.add_argument('--logLevel', metavar='SPEC', help="Set log levels (comma-separated list of [loggername:]LOGLEVEL)")
args = parser.parse_args()
myLogger.install(args.logLevel)
if args.version:
print VERSION
sys.exit(0)
if args.debug:
callUrl.DEBUG = True
sseListener.DEBUG = True
actions.DEBUG = True
xmlDatabase.DEBUG = True
webApp.DEBUG = True
datadir = args.database
print 'igorServer %s running from %s' % (VERSION, sys.argv[0])
try:
igorServer = IgorServer(datadir, args.port, args.advertise, profile=args.profile, nossl=args.nossl)
except IOError, arg:
print >>sys.stderr, '%s: Cannot open database: %s' % (sys.argv[0], arg)
print >>sys.stderr, '%s: Use --help option to see command line arguments' % sys.argv[0]
sys.exit(1)
igorServer.run()
#
# We need to hack the import lock. In case we get here via the easy_install igorServer script
# we are inside an __import__(), and we hold the lock. This means other threads cannot import
# and we hang once a web request comes in. We "work around" this by releasing the lock.
#
hasImportLock = imp.lock_held()
if hasImportLock:
    imp.release_lock()
main()
# Re-acquire so the interpreter's import machinery stays balanced on return.
if hasImportLock:
    imp.acquire_lock()
|
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import time
import logging
import datetime
import hashlib
import binascii
import functools
import weakref
import random
import os
import dateutil.parser
from dateutil.tz import tzlocal, tzutc
import botocore
from botocore.exceptions import InvalidExpressionError, ConfigNotFound
from botocore.exceptions import InvalidDNSNameError, ClientError
from botocore.exceptions import MetadataRetrievalError
from botocore.compat import json, quote, zip_longest, urlsplit, urlunsplit
from botocore.vendored import requests
from botocore.compat import OrderedDict, six
logger = logging.getLogger(__name__)
# Per-request timeout (seconds) for the EC2 instance metadata service.
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
# Link-local address of the EC2 instance metadata credentials listing.
METADATA_SECURITY_CREDENTIALS_URL = (
    'http://169.254.169.254/latest/meta-data/iam/security-credentials/'
)
# These are chars that do not need to be urlencoded.
# Based on RFC 3986, section 2.3
SAFE_CHARS = '-._~'
LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]')
# Transient network errors that warrant retrying a metadata request.
RETRYABLE_HTTP_ERRORS = (requests.Timeout, requests.ConnectionError)
S3_ACCELERATE_WHITELIST = ['dualstack']
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
def is_json_value_header(shape):
    """Determines if the provided shape is the special header type jsonvalue.

    :type shape: botocore.shape
    :param shape: Shape to be inspected for the jsonvalue trait.

    :return: True if this type is a jsonvalue, False otherwise
    :rtype: Bool
    """
    # Guard clauses mirror the original and-chain, including its exact
    # falsy return values.
    if not hasattr(shape, 'serialization'):
        return False
    trait = shape.serialization.get('jsonvalue', False)
    if not trait:
        return trait
    if shape.serialization.get('location') != 'header':
        return False
    return shape.type_name == 'string'
def get_service_module_name(service_model):
    """Returns the module name for a service

    This is the value used in both the documentation and client class name
    """
    metadata = service_model.metadata
    # Prefer the abbreviation, then the full name, then the raw service name.
    name = metadata.get(
        'serviceAbbreviation',
        metadata.get('serviceFullName', service_model.service_name))
    # Strip vendor prefixes and any non-word characters (spaces, punctuation).
    for vendor_prefix in ('Amazon', 'AWS'):
        name = name.replace(vendor_prefix, '')
    return re.sub(r'\W+', '', name)
def normalize_url_path(path):
    """Normalize *path* per RFC 3986: an empty path becomes '/',
    anything else has its dot segments removed."""
    return remove_dot_segments(path) if path else '/'
def remove_dot_segments(url):
# RFC 3986, section 5.2.4 "Remove Dot Segments"
# Also, AWS services require consecutive slashes to be removed,
# so that's done here as well
if not url:
return ''
input_url = url.split('/')
output_list = []
for x in input_url:
if x and x != '.':
if x == '..':
if output_list:
output_list.pop()
else:
output_list.append(x)
if url[0] == '/':
first = '/'
else:
first = ''
if url[-1] == '/' and output_list:
last = '/'
else:
last = ''
return first + '/'.join(output_list) + last
def validate_jmespath_for_set(expression):
    """Reject jmespath expressions we cannot use for setting a value.

    Only simple dotted paths are supported; empty expressions, bare '.',
    and anything containing indexing or wildcards are rejected.
    """
    if not expression or expression == '.':
        raise InvalidExpressionError(expression=expression)
    if any(forbidden in expression for forbidden in ('[', ']', '*')):
        raise InvalidExpressionError(expression=expression)
def set_value_from_jmespath(source, expression, value, is_first=True):
    """Set *value* into nested dict *source* at a (limited) jmespath location.

    Limitations:
      * Only handles dotted lookups
      * No offsets/wildcards/slices/etc.

    Missing intermediate keys are created as empty dicts. ``is_first`` is an
    internal flag: validation only runs on the outermost call.
    """
    if is_first:
        validate_jmespath_for_set(expression)
    target = source
    remaining = expression
    while True:
        current_key, _, rest = remaining.partition('.')
        if not current_key:
            raise InvalidExpressionError(expression=remaining)
        if not rest:
            # Final path component: assign and stop.
            target[current_key] = value
            return
        if current_key not in target:
            # New intermediate key: create an empty dict to descend into.
            target[current_key] = {}
        target = target[current_key]
        remaining = rest
class InstanceMetadataFetcher(object):
    """Retrieves IAM role credentials from the EC2 instance metadata service."""
    def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
                 num_attempts=1, url=METADATA_SECURITY_CREDENTIALS_URL,
                 env=None):
        self._timeout = timeout
        self._num_attempts = num_attempts
        self._url = url
        if env is None:
            env = os.environ.copy()
        # Honor the AWS_EC2_METADATA_DISABLED kill switch.
        self._disabled = env.get('AWS_EC2_METADATA_DISABLED', 'false').lower()
        self._disabled = self._disabled == 'true'

    def _get_request(self, url, timeout, num_attempts=1):
        """GET *url*, retrying transient errors; raise _RetriesExceededError
        when no attempt returns a 200."""
        if self._disabled:
            logger.debug("Access to EC2 metadata has been disabled.")
            raise _RetriesExceededError()
        for i in range(num_attempts):
            try:
                response = requests.get(url, timeout=timeout)
            except RETRYABLE_HTTP_ERRORS as e:
                logger.debug("Caught exception while trying to retrieve "
                             "credentials: %s", e, exc_info=True)
            else:
                if response.status_code == 200:
                    return response
        raise _RetriesExceededError()

    def _build_credentials_map(self, url, timeout, num_attempts):
        """Recursively walk one level of the metadata listing.

        Keys ending in '/' are sub-listings and recurse; other keys are leaf
        values (JSON payloads are decoded). Returns {} when retrieval fails.
        """
        data = {}
        try:
            r = self._get_request(url, timeout, num_attempts)
            if r.content:
                fields = r.content.decode('utf-8').split('\n')
                for field in fields:
                    if field.endswith('/'):
                        # Bugfix: the original called
                        # self.retrieve_iam_role_credentials(url + field, ...)
                        # which takes no arguments, so any nested listing
                        # raised TypeError.
                        data[field[0:-1]] = self._build_credentials_map(
                            url + field, timeout, num_attempts)
                    else:
                        val = self._get_request(
                            url + field,
                            timeout=timeout,
                            num_attempts=num_attempts,
                        ).content.decode('utf-8')
                        if val[0] == '{':
                            val = json.loads(val)
                        data[field] = val
            else:
                logger.debug("Metadata service returned non 200 status code "
                             "of %s for url: %s, content body: %s",
                             r.status_code, url, r.content)
        except _RetriesExceededError:
            logger.debug("Max number of attempts exceeded (%s) when "
                         "attempting to retrieve data from metadata service.",
                         num_attempts)
        return data

    def retrieve_iam_role_credentials(self):
        """Return the credentials dict for the instance's IAM role, or {} on
        failure."""
        data = self._build_credentials_map(self._url, self._timeout,
                                           self._num_attempts)
        # We sort for stable ordering. In practice, this should only consist
        # of one role, but may need revisiting if this expands in the future.
        final_data = {}
        for role_name in sorted(data):
            final_data = {
                'role_name': role_name,
                'access_key': data[role_name]['AccessKeyId'],
                'secret_key': data[role_name]['SecretAccessKey'],
                'token': data[role_name]['Token'],
                'expiry_time': data[role_name]['Expiration'],
            }
        return final_data
def merge_dicts(dict1, dict2, append_lists=False):
    """Given two dict, merge the second dict into the first.

    The dicts can have arbitrary nesting.

    :param append_lists: If true, instead of clobbering a list with the new
        value, append all of the new values onto the original list.
    """
    for key in dict2:
        if isinstance(dict2[key], dict):
            if key in dict1:
                # Bugfix: append_lists must propagate into nested dicts,
                # otherwise nested lists were clobbered even when appending
                # was requested.
                merge_dicts(dict1[key], dict2[key], append_lists=append_lists)
            else:
                dict1[key] = dict2[key]
        # If the value is a list and the ``append_lists`` flag is set,
        # append the new values onto the original list
        elif isinstance(dict2[key], list) and append_lists:
            # The value in dict1 must be a list in order to append new
            # values onto it.
            if key in dict1 and isinstance(dict1[key], list):
                dict1[key].extend(dict2[key])
            else:
                dict1[key] = dict2[key]
        else:
            # At scalar types, we iterate and merge the
            # current dict that we're on.
            dict1[key] = dict2[key]
def parse_key_val_file(filename, _open=open):
    """Read *filename* and parse it as key=value lines into a dict.

    Raises ConfigNotFound when the file cannot be opened. ``_open`` is
    injectable for testing.
    """
    try:
        with _open(filename) as f:
            contents = f.read()
    except OSError:
        raise ConfigNotFound(path=filename)
    return parse_key_val_file_contents(contents)
def parse_key_val_file_contents(contents):
    """Parse key=value lines into a dict, ignoring lines without '='.

    This was originally extracted from the EC2 credential provider, which was
    fairly lenient in its parsing: whitespace around key and value is
    stripped, and only the first '=' on a line splits key from value.
    """
    parsed = {}
    for line in contents.splitlines():
        if '=' not in line:
            continue
        key, _, val = line.partition('=')
        parsed[key.strip()] = val.strip()
    return parsed
def percent_encode_sequence(mapping, safe=SAFE_CHARS):
    """Urlencode a dict or list of pairs into a query string.

    This differs from ``urllib.urlencode`` in two ways: it uses
    ``quote`` rather than ``quote_plus``, and it defaults to the set of
    safe characters AWS services expect.  List values are serialized
    element by element, the equivalent of ``urlencode``'s
    ``doseq=True``.  Prefer this over the stdlib ``urlencode()``.

    :param mapping: Either a dict to urlencode or a list of
        ``(key, value)`` pairs.
    """
    pairs = mapping.items() if hasattr(mapping, 'items') else mapping
    encoded = []
    for key, value in pairs:
        if isinstance(value, list):
            encoded.extend(
                '%s=%s' % (percent_encode(key), percent_encode(element))
                for element in value)
        else:
            encoded.append(
                '%s=%s' % (percent_encode(key), percent_encode(value)))
    return '&'.join(encoded)
def percent_encode(input_str, safe=SAFE_CHARS):
    """Urlencode a single string (not a dict/sequence).

    Binary input is URL encoded as-is.  Text input is UTF-8 encoded
    first; anything else is converted to the text type before encoding.
    """
    # Coerce anything that is neither text nor bytes into text.
    if not isinstance(input_str, (six.binary_type, six.text_type)):
        input_str = six.text_type(input_str)
    # UTF-8 encode text so we always hand bytes to quote().
    if isinstance(input_str, six.text_type):
        input_str = input_str.encode('utf-8')
    return quote(input_str, safe=safe)
def parse_timestamp(value):
    """Parse a timestamp into a ``datetime.datetime`` object.

    Supported formats:

        * iso8601
        * rfc822
        * epoch (value is an integer, float, or numeric string)

    Epoch values are interpreted in the local timezone.

    :raises ValueError: If the value cannot be parsed by any strategy.
    """
    if isinstance(value, (int, float)):
        # Already numeric: treat directly as an epoch time.
        return datetime.datetime.fromtimestamp(value, tzlocal())
    # A string may still hold an epoch time; any failure here
    # (including a value fromtimestamp cannot handle) falls through to
    # the dateutil parser below.
    try:
        return datetime.datetime.fromtimestamp(float(value), tzlocal())
    except (TypeError, ValueError):
        pass
    try:
        # Provide a context enforcing GMT == UTC, because a GMT-marked
        # timestamp could otherwise be parsed into a different zone.
        return dateutil.parser.parse(value, tzinfos={'GMT': tzutc()})
    except (TypeError, ValueError) as e:
        raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_to_aware_datetime(value):
    """Convert the passed-in value to a tz-aware ``datetime.datetime``.

    This normalizes timestamp inputs of several types:

        * A datetime object (both naive and aware)
        * An integer epoch time (or its string form, e.g. '0').
          Epoch times are considered to be UTC.
        * An iso8601 formatted timestamp (the time component may be
          omitted).

    The returned datetime always has tzinfo.  If the input carried no
    timezone information, UTC is assumed — not local time.
    """
    # Datetimes pass straight through; everything else (string/number
    # timestamps) goes through parse_timestamp, which is a bit more
    # flexible than strict iso8601.
    if isinstance(value, datetime.datetime):
        datetime_obj = value
    else:
        datetime_obj = parse_timestamp(value)
    if datetime_obj.tzinfo is None:
        # An argument could be made for local time here, but assuming
        # UTC preserves the historical behavior.
        return datetime_obj.replace(tzinfo=tzutc())
    return datetime_obj.astimezone(tzutc())
def datetime2timestamp(dt, default_timezone=None):
    """Calculate the POSIX timestamp for a datetime instance.

    :type dt: datetime
    :param dt: A datetime object to be converted into timestamp
    :type default_timezone: tzinfo
    :param default_timezone: Timezone assumed when ``dt`` is naive; when
        None, naive datetimes are treated as UTC.
    :returns: The timestamp (seconds since the epoch)
    """
    if dt.tzinfo is None:
        if default_timezone is None:
            default_timezone = tzutc()
        dt = dt.replace(tzinfo=default_timezone)
    epoch = datetime.datetime(1970, 1, 1)
    delta = dt.replace(tzinfo=None) - dt.utcoffset() - epoch
    if hasattr(delta, 'total_seconds'):
        # Available on Python 2.7+.
        return delta.total_seconds()
    # Fallback for interpreters without timedelta.total_seconds().
    micros = delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6
    return micros / 10**6
def calculate_sha256(body, as_hex=False):
    """Calculate a sha256 checksum of a file-like object.

    Reads ``body`` to the end in 1MB chunks.  The caller is responsible
    for the stream's starting position and for ``seek()``-ing it back
    afterwards if other consumers need to read it.

    :param body: Any file like object opened in binary mode (``.read()``
        must return bytes).
    :param as_hex: If True, return the hex digest; otherwise return the
        raw digest bytes.
    :returns: The sha256 checksum
    """
    digest = hashlib.sha256()
    chunk = body.read(1024 * 1024)
    while chunk:
        digest.update(chunk)
        chunk = body.read(1024 * 1024)
    return digest.hexdigest() if as_hex else digest.digest()
def calculate_tree_hash(body):
    """Calculate a tree hash checksum.

    For more information see:

    http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html

    :param body: Any file like object. This has the same constraints as
        the ``body`` param in calculate_sha256

    :rtype: str
    :returns: The hex version of the calculated tree hash
    """
    sha256 = hashlib.sha256
    one_mb = 1024 * 1024
    hashes = [sha256(chunk).digest()
              for chunk in iter(lambda: body.read(one_mb), b'')]
    if not hashes:
        return sha256(b'').hexdigest()
    while len(hashes) > 1:
        # Combine adjacent digests pairwise; a trailing odd digest is
        # carried up to the next level unchanged.
        level = []
        for i in range(0, len(hashes), 2):
            if i + 1 < len(hashes):
                level.append(sha256(hashes[i] + hashes[i + 1]).digest())
            else:
                level.append(hashes[i])
        hashes = level
    return binascii.hexlify(hashes[0]).decode('ascii')
def _in_pairs(iterable):
# Creates iterator that iterates over the list in pairs:
# for a, b in _in_pairs([0, 1, 2, 3, 4]):
# print(a, b)
#
# will print:
# 0, 1
# 2, 3
# 4, None
shared_iter = iter(iterable)
# Note that zip_longest is a compat import that uses
# the itertools izip_longest. This creates an iterator,
# this call below does _not_ immediately create the list
# of pairs.
return zip_longest(shared_iter, shared_iter)
class CachedProperty(object):
    """A read-only property that caches its first computed value.

    The wrapped ``fget`` runs at most once per instance: the result is
    stored in the instance ``__dict__`` under the function's name, so
    subsequent attribute access bypasses this descriptor entirely.
    """

    def __init__(self, fget):
        self._fget = fget

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        value = self._fget(obj)
        obj.__dict__[self._fget.__name__] = value
        return value
class ArgumentGenerator(object):
    """Generate sample input based on a shape model.

    ``generate_skeleton`` takes an input/output shape (created from
    ``botocore.model``) and produces a sample dictionary matching that
    shape, using placeholder values: empty strings (or member names),
    0 / 0.0 for numbers, and so on.  The intent is to show the *shape*
    of a complex input structure so a user can fill in real data
    without worrying about the nesting.

    Example usage::

        s = botocore.session.get_session()
        ddb = s.get_service_model('dynamodb')
        arg_gen = ArgumentGenerator()
        sample_input = arg_gen.generate_skeleton(
            ddb.operation_model('CreateTable').input_shape)
        print("Sample input for dynamodb.CreateTable: %s" % sample_input)
    """
    def __init__(self, use_member_names=False):
        self._use_member_names = use_member_names

    def generate_skeleton(self, shape):
        """Generate a sample input.

        :type shape: ``botocore.model.Shape``
        :param shape: The input shape.

        :return: The generated skeleton input corresponding to the
            provided input shape.
        """
        return self._generate_skeleton(shape, [])

    def _generate_skeleton(self, shape, stack, name=''):
        # ``stack`` tracks shape names along the current path so that
        # recursive structures can be cut off.
        stack.append(shape.name)
        try:
            type_name = shape.type_name
            if type_name == 'structure':
                return self._generate_type_structure(shape, stack)
            if type_name == 'list':
                return self._generate_type_list(shape, stack)
            if type_name == 'map':
                return self._generate_type_map(shape, stack)
            if type_name == 'string':
                if self._use_member_names:
                    return name
                if shape.enum:
                    return random.choice(shape.enum)
                return ''
            if type_name in ('integer', 'long'):
                return 0
            if type_name == 'float':
                return 0.0
            if type_name == 'boolean':
                return True
            if type_name == 'timestamp':
                return datetime.datetime(1970, 1, 1, 0, 0, 0)
        finally:
            stack.pop()

    def _generate_type_structure(self, shape, stack):
        if stack.count(shape.name) > 1:
            # Recursive structure: stop with an empty dict.
            return {}
        return OrderedDict(
            (member_name,
             self._generate_skeleton(member_shape, stack, name=member_name))
            for member_name, member_shape in shape.members.items())

    def _generate_type_list(self, shape, stack):
        # The skeleton list contains a single sample element.
        element_name = shape.member.name if self._use_member_names else ''
        return [self._generate_skeleton(shape.member, stack, element_name)]

    def _generate_type_map(self, shape, stack):
        # Map keys are always strings in the service models this handles.
        assert shape.key.type_name == 'string'
        return OrderedDict([
            ('KeyName', self._generate_skeleton(shape.value, stack)),
        ])
def is_valid_endpoint_url(endpoint_url):
    """Verify the endpoint_url is valid.

    :type endpoint_url: string
    :param endpoint_url: An endpoint_url.  Must have at least a scheme
        and a hostname.

    :return: Truthy if the endpoint url is valid, falsy otherwise.
    """
    hostname = urlsplit(endpoint_url).hostname
    if hostname is None:
        return False
    if len(hostname) > 255:
        return False
    # Allow the fully-qualified form with a single trailing dot.
    if hostname.endswith('.'):
        hostname = hostname[:-1]
    # Each label: 1-63 chars, alphanumeric or '-', not starting or
    # ending with '-'.
    label = r"(?!-)[A-Z\d-]{1,63}(?<!-)"
    allowed = re.compile(
        r"^(%s\.)*(%s)$" % (label, label), re.IGNORECASE)
    return allowed.match(hostname)
def check_dns_name(bucket_name):
    """
    Check to see if the ``bucket_name`` complies with the
    restricted DNS naming conventions necessary to allow
    access via virtual-hosting style.

    Even though "." characters are perfectly valid in this DNS
    naming scheme, we are going to punt on any name containing a
    "." character because these will cause SSL cert validation
    problems if we try to use virtual-hosting style addressing.

    :param bucket_name: The bucket name to validate.
    :return: True if the name is DNS compatible, False otherwise.
    """
    if '.' in bucket_name:
        return False
    n = len(bucket_name)
    if n < 3 or n > 63:
        # Wrong length.  (Note: this also makes any ``n == 1`` special
        # case unreachable, so the old isalnum branch was removed.)
        return False
    match = LABEL_RE.match(bucket_name)
    if match is None or match.end() != len(bucket_name):
        return False
    return True
def fix_s3_host(request, signature_version, region_name,
                default_endpoint_url=None, **kwargs):
    """
    Pre-signing handler that rewrites S3 requests to virtual-host style.

    If there is a bucket name on the path (true for everything except
    ListAllBuckets) and that name conforms to the DNS naming
    conventions, the request is altered from path-style to
    virtual-hosting style addressing.  Non-DNS-compatible buckets are
    left untouched.
    """
    if request.context.get('use_global_endpoint', False):
        default_endpoint_url = 's3.amazonaws.com'
    try:
        switch_to_virtual_host_style(
            request, signature_version, default_endpoint_url)
    except InvalidDNSNameError as e:
        logger.debug('Not changing URI, bucket is not DNS compatible: %s',
                     e.kwargs['bucket_name'])
def switch_to_virtual_host_style(request, signature_version,
                                 default_endpoint_url=None, **kwargs):
    """
    This is a handler to force virtual host style s3 addressing no matter
    the signature version (which is taken in consideration for the default
    case). If the bucket is not DNS compatible an InvalidDNSName is thrown.

    :param request: A AWSRequest object that is about to be sent.
    :param signature_version: The signature version to sign with
    :param default_endpoint_url: The endpoint to use when switching to a
        virtual style. If None is supplied, the virtual host will be
        constructed from the url of the request.
    :raises InvalidDNSNameError: If the bucket name on the path is not
        DNS compatible.
    """
    if request.auth_path is not None:
        # The auth_path has already been applied (this may be a
        # retried request). We don't need to perform this
        # customization again.
        return
    elif _is_get_bucket_location_request(request):
        # For the GetBucketLocation response, we should not be using
        # the virtual host style addressing so we can avoid any sigv4
        # issues.
        logger.debug("Request is GetBucketLocation operation, not checking "
                     "for DNS compatibility.")
        return
    parts = urlsplit(request.url)
    # Preserve the original (path-style) path for signing purposes
    # before the URL itself is rewritten below.
    request.auth_path = parts.path
    path_parts = parts.path.split('/')
    # Retrieve what the endpoint we will be prepending the bucket name to.
    if default_endpoint_url is None:
        default_endpoint_url = parts.netloc
    if len(path_parts) > 1:
        bucket_name = path_parts[1]
        if not bucket_name:
            # If the bucket name is empty we should not be checking for
            # dns compatibility.
            return
        logger.debug('Checking for DNS compatible bucket for: %s',
                     request.url)
        if check_dns_name(bucket_name):
            # If the operation is on a bucket, the auth_path must be
            # terminated with a '/' character.
            if len(path_parts) == 2:
                if request.auth_path[-1] != '/':
                    request.auth_path += '/'
            # NOTE(review): list.remove drops the *first* occurrence of
            # bucket_name; assumes the bucket name only appears as the
            # leading path segment — confirm for keys equal to the
            # bucket name.
            path_parts.remove(bucket_name)
            # At the very least the path must be a '/', such as with the
            # CreateBucket operation when DNS style is being used. If this
            # is not used you will get an empty path which is incorrect.
            path = '/'.join(path_parts) or '/'
            global_endpoint = default_endpoint_url
            # Virtual-host style: the bucket becomes a subdomain of the
            # endpoint, and the path no longer carries the bucket.
            host = bucket_name + '.' + global_endpoint
            new_tuple = (parts.scheme, host, path,
                         parts.query, '')
            new_uri = urlunsplit(new_tuple)
            request.url = new_uri
            logger.debug('URI updated to: %s', new_uri)
        else:
            raise InvalidDNSNameError(bucket_name=bucket_name)
def _is_get_bucket_location_request(request):
return request.url.endswith('?location')
def instance_cache(func):
    """Method decorator caching results in a per-instance dict.

    **This is not a general purpose caching decorator.**  The decorated
    method's instance *must* provide an ``_instance_cache`` dict
    attribute; each instance therefore maintains its own cache.  Cache
    keys are built from the method name plus its positional and keyword
    arguments.
    """
    method_name = func.__name__

    @functools.wraps(func)
    def _cache_guard(self, *args, **kwargs):
        if kwargs:
            key = (method_name, args, tuple(sorted(kwargs.items())))
        else:
            key = (method_name, args)
        cached = self._instance_cache.get(key)
        if cached is None:
            # Note: a legitimately-None result is recomputed each call;
            # this mirrors the original behavior.
            cached = func(self, *args, **kwargs)
            self._instance_cache[key] = cached
        return cached
    return _cache_guard
def switch_host_s3_accelerate(request, operation_name, **kwargs):
    """Switches the current s3 endpoint with an S3 Accelerate endpoint"""
    # When registered, this host switch happens before the virtual-host
    # rewrite, so the Accelerate endpoint can be hard coded without
    # worrying about a bucket name in the netloc.
    netloc_parts = urlsplit(request.url).netloc.split('.')
    # Carry over whitelisted netloc components (e.g. 'dualstack').
    extras = [p for p in netloc_parts if p in S3_ACCELERATE_WHITELIST]
    endpoint = 'https://s3-accelerate.'
    if extras:
        endpoint += '.'.join(extras) + '.'
    endpoint += 'amazonaws.com'
    if operation_name in ['ListBuckets', 'CreateBucket', 'DeleteBucket']:
        # These operations are never routed to the accelerate endpoint.
        return
    _switch_hosts(request, endpoint, use_new_scheme=False)
def switch_host_with_param(request, param_name):
    """Switches the host using a parameter value from a JSON request body"""
    body = json.loads(request.data.decode('utf-8'))
    new_endpoint = body.get(param_name)
    if new_endpoint:
        _switch_hosts(request, new_endpoint)
def _switch_hosts(request, new_endpoint, use_new_scheme=True):
    # Rewrite the request URL in place: path/query come from the
    # original URL, host (and optionally scheme) from ``new_endpoint``.
    request.url = _get_new_endpoint(
        request.url, new_endpoint, use_new_scheme)
def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
    """Combine ``original_endpoint``'s path/query with ``new_endpoint``'s host.

    :param original_endpoint: URL whose path and query are preserved.
    :param new_endpoint: URL supplying the new netloc (and scheme when
        ``use_new_scheme`` is True).
    :param use_new_scheme: Take the scheme from ``new_endpoint`` instead
        of ``original_endpoint``.
    :return: The recombined URL string.
    """
    new_parts = urlsplit(new_endpoint)
    original_parts = urlsplit(original_endpoint)
    scheme = new_parts.scheme if use_new_scheme else original_parts.scheme
    final_endpoint = urlunsplit((
        scheme,
        new_parts.netloc,
        original_parts.path,
        original_parts.query,
        '',
    ))
    # Lazy logger args: the message is only formatted when debug logging
    # is enabled (the previous code formatted eagerly with ``%``).
    logger.debug('Updating URI from %s to %s',
                 original_endpoint, final_endpoint)
    return final_endpoint
def deep_merge(base, extra):
    """Deeply merge two dictionaries, overriding existing keys in the base.

    :param base: The base dictionary which will be merged into.
    :param extra: The dictionary to merge into the base. Keys from this
        dictionary will take precedence.
    """
    for key, value in extra.items():
        existing = base.get(key)
        if isinstance(existing, dict) and isinstance(value, dict):
            # Both sides hold dicts: merge the sub-dicts rather than
            # replacing wholesale.
            deep_merge(existing, value)
        else:
            # Otherwise the extra value wins outright.
            base[key] = value
class S3RegionRedirector(object):
    """Retries S3 requests against the region a bucket actually lives in.

    The corrected region is discovered from the error response (or by
    HEAD-ing the bucket) and cached per bucket so later requests go to
    the right place immediately.  A request is only redirected once; a
    failure after the redirect is not redirected again, preventing a
    retry loop between two regions.
    """
    def __init__(self, endpoint_bridge, client, cache=None):
        # :param endpoint_bridge: resolver with a
        #     ``resolve(service, region)`` method used to look up the
        #     endpoint for the corrected region.
        # :param client: the S3 client the redirector is attached to.
        # :param cache: optional mapping of bucket name -> signing context.
        self._endpoint_resolver = endpoint_bridge
        self._cache = cache
        if self._cache is None:
            self._cache = {}
        # This needs to be a weak ref in order to prevent memory leaks on
        # python 2.6
        self._client = weakref.proxy(client)
    def register(self, event_emitter=None):
        # Subscribe the redirect handlers to the client's event emitter
        # (or an explicitly supplied one).
        emitter = event_emitter or self._client.meta.events
        emitter.register('needs-retry.s3', self.redirect_from_error)
        emitter.register('before-call.s3', self.set_request_url)
        emitter.register('before-parameter-build.s3',
                         self.redirect_from_cache)
    def redirect_from_error(self, request_dict, response, operation, **kwargs):
        """
        An S3 request sent to the wrong region will return an error that
        contains the endpoint the request should be sent to. This handler
        will add the redirect information to the signing context and then
        redirect the request.
        """
        if response is None:
            # This could be none if there was a ConnectionError or other
            # transport error.
            return
        if request_dict.get('context', {}).get('s3_redirected'):
            # Only allow S3 to redirect once: if the redirected request
            # fails again we do not redirect a second time, which would
            # otherwise bounce between regions forever.
            logger.debug(
                'S3 request was previously redirected, not redirecting.')
            return
        error = response[1].get('Error', {})
        error_code = error.get('Code')
        # We have to account for 400 responses because
        # if we sign a Head* request with the wrong region,
        # we'll get a 400 Bad Request but we won't get a
        # body saying it's an "AuthorizationHeaderMalformed".
        is_special_head_object = (
            error_code in ['301', '400'] and
            operation.name in ['HeadObject', 'HeadBucket']
        )
        is_wrong_signing_region = (
            error_code == 'AuthorizationHeaderMalformed' and
            'Region' in error
        )
        is_permanent_redirect = error_code == 'PermanentRedirect'
        if not any([is_special_head_object, is_wrong_signing_region,
                    is_permanent_redirect]):
            return
        bucket = request_dict['context']['signing']['bucket']
        client_region = request_dict['context'].get('client_region')
        new_region = self.get_bucket_region(bucket, response)
        if new_region is None:
            logger.debug(
                "S3 client configured for region %s but the bucket %s is not "
                "in that region and the proper region could not be "
                "automatically determined." % (client_region, bucket))
            return
        logger.debug(
            "S3 client configured for region %s but the bucket %s is in region"
            " %s; Please configure the proper region to avoid multiple "
            "unnecessary redirects and signing attempts." % (
                client_region, bucket, new_region))
        endpoint = self._endpoint_resolver.resolve('s3', new_region)
        endpoint = endpoint['endpoint_url']
        signing_context = {
            'region': new_region,
            'bucket': bucket,
            'endpoint': endpoint
        }
        request_dict['context']['signing'] = signing_context
        # Cache the corrected context so subsequent requests for this
        # bucket skip the failed first attempt entirely.
        self._cache[bucket] = signing_context
        self.set_request_url(request_dict, request_dict['context'])
        # Mark the request so it cannot be redirected again.
        request_dict['context']['s3_redirected'] = True
        # Return 0 so it doesn't wait to retry
        return 0
    def get_bucket_region(self, bucket, response):
        """
        There are multiple potential sources for the new region to redirect to,
        but they aren't all universally available for use. This will try to
        find region from response elements, but will fall back to calling
        HEAD on the bucket if all else fails.

        :param bucket: The bucket to find the region for. This is necessary if
            the region is not available in the error response.
        :param response: A response representing a service request that failed
            due to incorrect region configuration.
        """
        # First try to source the region from the headers.
        service_response = response[1]
        response_headers = service_response['ResponseMetadata']['HTTPHeaders']
        if 'x-amz-bucket-region' in response_headers:
            return response_headers['x-amz-bucket-region']
        # Next, check the error body
        region = service_response.get('Error', {}).get('Region', None)
        if region is not None:
            return region
        # Finally, HEAD the bucket. No other choice sadly.
        try:
            response = self._client.head_bucket(Bucket=bucket)
            headers = response['ResponseMetadata']['HTTPHeaders']
        except ClientError as e:
            # Even a failed HEAD response carries the bucket-region header.
            headers = e.response['ResponseMetadata']['HTTPHeaders']
        region = headers.get('x-amz-bucket-region', None)
        return region
    def set_request_url(self, params, context, **kwargs):
        # Point the request at the cached/corrected endpoint, if any.
        endpoint = context.get('signing', {}).get('endpoint', None)
        if endpoint is not None:
            params['url'] = _get_new_endpoint(params['url'], endpoint, False)
    def redirect_from_cache(self, params, context, **kwargs):
        """
        This handler retrieves a given bucket's signing context from the cache
        and adds it into the request context.
        """
        bucket = params.get('Bucket')
        signing_context = self._cache.get(bucket)
        if signing_context is not None:
            context['signing'] = signing_context
        else:
            context['signing'] = {'bucket': bucket}
class ContainerMetadataFetcher(object):
    # Fetches JSON metadata (e.g. credentials) from the container
    # metadata service, with bounded retries between attempts.

    # Per-request timeout, in seconds.
    TIMEOUT_SECONDS = 2
    # Number of attempts before giving up and re-raising.
    RETRY_ATTEMPTS = 3
    # Seconds slept between attempts.
    SLEEP_TIME = 1
    # Fixed link-local address of the container metadata service.
    IP_ADDRESS = '169.254.170.2'
    # Hosts that retrieve_full_uri() will accept.
    _ALLOWED_HOSTS = [IP_ADDRESS, 'localhost', '127.0.0.1']
    def __init__(self, session=None, sleep=time.sleep):
        # :param session: HTTP session used for requests; a fresh
        #     requests.Session() is created when not provided.
        # :param sleep: Sleep callable; injectable for testing.
        if session is None:
            session = requests.Session()
        self._session = session
        self._sleep = sleep
    def retrieve_full_uri(self, full_url, headers=None):
        """Retrieve JSON metadata from container metadata.

        :type full_url: str
        :param full_url: The full URL of the metadata service.
            This should include the scheme as well, e.g
            "http://localhost:123/foo"

        :param headers: Extra headers merged into the request.
        :raises ValueError: If the URL's host is not whitelisted.
        """
        self._validate_allowed_url(full_url)
        return self._retrieve_credentials(full_url, headers)
    def _validate_allowed_url(self, full_url):
        # Reject URLs whose host is outside the fixed whitelist.
        parsed = botocore.compat.urlparse(full_url)
        is_whitelisted_host = self._check_if_whitelisted_host(
            parsed.hostname)
        if not is_whitelisted_host:
            raise ValueError(
                "Unsupported host '%s'. Can only "
                "retrieve metadata from these hosts: %s" %
                (parsed.hostname, ', '.join(self._ALLOWED_HOSTS)))
    def _check_if_whitelisted_host(self, host):
        # Membership test against _ALLOWED_HOSTS.
        if host in self._ALLOWED_HOSTS:
            return True
        return False
    def retrieve_uri(self, relative_uri):
        """Retrieve JSON metadata from ECS metadata.

        :type relative_uri: str
        :param relative_uri: A relative URI, e.g "/foo/bar?id=123"

        :return: The parsed JSON response.
        """
        full_url = self.full_url(relative_uri)
        return self._retrieve_credentials(full_url)
    def _retrieve_credentials(self, full_url, extra_headers=None):
        # GET with retries: on MetadataRetrievalError, sleep and retry
        # up to RETRY_ATTEMPTS times, then re-raise the last error.
        headers = {'Accept': 'application/json'}
        if extra_headers is not None:
            headers.update(extra_headers)
        attempts = 0
        while True:
            try:
                return self._get_response(full_url, headers, self.TIMEOUT_SECONDS)
            except MetadataRetrievalError as e:
                logger.debug("Received error when attempting to retrieve "
                             "container metadata: %s", e, exc_info=True)
                self._sleep(self.SLEEP_TIME)
                attempts += 1
                if attempts >= self.RETRY_ATTEMPTS:
                    raise
    def _get_response(self, full_url, headers, timeout):
        # Single GET; non-200 status, invalid JSON, and transport errors
        # are all normalized into MetadataRetrievalError.
        try:
            response = self._session.get(full_url, headers=headers,
                                         timeout=timeout)
            if response.status_code != 200:
                raise MetadataRetrievalError(
                    error_msg="Received non 200 response (%s) from ECS metadata: %s"
                    % (response.status_code, response.text))
            try:
                return json.loads(response.text)
            except ValueError:
                raise MetadataRetrievalError(
                    error_msg=("Unable to parse JSON returned from "
                               "ECS metadata: %s" % response.text))
        except RETRYABLE_HTTP_ERRORS as e:
            error_msg = ("Received error when attempting to retrieve "
                         "ECS metadata: %s" % e)
            raise MetadataRetrievalError(error_msg=error_msg)
    def full_url(self, relative_uri):
        # Build an absolute URL on the fixed metadata IP address.
        return 'http://%s%s' % (self.IP_ADDRESS, relative_uri)
# Only allow s3 to redirect once
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import time
import logging
import datetime
import hashlib
import binascii
import functools
import weakref
import random
import os
import dateutil.parser
from dateutil.tz import tzlocal, tzutc
import botocore
from botocore.exceptions import InvalidExpressionError, ConfigNotFound
from botocore.exceptions import InvalidDNSNameError, ClientError
from botocore.exceptions import MetadataRetrievalError
from botocore.compat import json, quote, zip_longest, urlsplit, urlunsplit
from botocore.vendored import requests
from botocore.compat import OrderedDict, six
# Module-level logger for this utils module.
logger = logging.getLogger(__name__)
# Default per-request timeout (seconds) for the EC2 metadata service.
DEFAULT_METADATA_SERVICE_TIMEOUT = 1
# EC2 instance metadata endpoint listing IAM security credentials.
METADATA_SECURITY_CREDENTIALS_URL = (
    'http://169.254.169.254/latest/meta-data/iam/security-credentials/'
)
# These are chars that do not need to be urlencoded.
# Based on rfc2986, section 2.3
SAFE_CHARS = '-._~'
# Single DNS label: alphanumeric/hyphen, no leading or trailing hyphen.
LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]')
# Transport errors that warrant retrying a metadata request.
RETRYABLE_HTTP_ERRORS = (requests.Timeout, requests.ConnectionError)
# Netloc components preserved when switching to the accelerate endpoint.
S3_ACCELERATE_WHITELIST = ['dualstack']
class _RetriesExceededError(Exception):
"""Internal exception used when the number of retries are exceeded."""
pass
def is_json_value_header(shape):
    """Determines if the provided shape is the special header type jsonvalue.

    :type shape: botocore.shape
    :param shape: Shape to be inspected for the jsonvalue trait.

    :return: True if this type is a jsonvalue, False otherwise
    :rtype: Bool
    """
    if not hasattr(shape, 'serialization'):
        return False
    jsonvalue = shape.serialization.get('jsonvalue', False)
    if not jsonvalue:
        # Propagate the falsy trait value itself, exactly as the
        # original ``and`` chain did.
        return jsonvalue
    if shape.serialization.get('location') != 'header':
        return False
    return shape.type_name == 'string'
def get_service_module_name(service_model):
    """Returns the module name for a service

    This is the value used in both the documentation and client class name
    """
    metadata = service_model.metadata
    # Prefer the abbreviation, then the full name, then the raw
    # service name.
    name = metadata.get(
        'serviceAbbreviation',
        metadata.get('serviceFullName', service_model.service_name))
    for prefix in ('Amazon', 'AWS'):
        name = name.replace(prefix, '')
    # Strip every non-word character (spaces, dashes, etc).
    return re.sub(r'\W+', '', name)
def normalize_url_path(path):
    """Normalize ``path`` via RFC 3986 dot-segment removal.

    An empty (or otherwise falsy) path normalizes to ``'/'``.
    """
    if path:
        return remove_dot_segments(path)
    return '/'
def remove_dot_segments(url):
    """Apply RFC 3986, section 5.2.4 "Remove Dot Segments".

    Consecutive slashes are also collapsed, which AWS services require.
    """
    if not url:
        return ''
    output = []
    for segment in url.split('/'):
        if segment == '..':
            # Parent reference: drop the previous segment, if any.
            if output:
                output.pop()
        elif segment and segment != '.':
            # Empty segments (from '//') and '.' are simply dropped.
            output.append(segment)
    leading = '/' if url.startswith('/') else ''
    trailing = '/' if url.endswith('/') and output else ''
    return leading + '/'.join(output) + trailing
def validate_jmespath_for_set(expression):
    # Validates a limited jmespath expression to determine if we can set
    # a value based on it.  Only dotted paths are supported, so the
    # indexing/slicing/wildcard tokens are all rejected.
    if not expression or expression == '.':
        raise InvalidExpressionError(expression=expression)
    if any(token in expression for token in ('[', ']', '*')):
        raise InvalidExpressionError(expression=expression)
def set_value_from_jmespath(source, expression, value, is_first=True):
    # This takes a (limited) jmespath-like expression & can set a value
    # based on it.
    #
    # Limitations:
    # * Only handles dotted lookups
    # * No offsets/wildcards/slices/etc.
    if is_first:
        # Validate only once, at the top of the recursion.
        validate_jmespath_for_set(expression)

    current_key, _, remainder = expression.partition('.')
    if not current_key:
        raise InvalidExpressionError(expression=expression)

    if not remainder:
        # Terminal key: assign directly.
        source[current_key] = value
        return

    # Intermediate key: descend, creating an empty dict for a new key.
    return set_value_from_jmespath(
        source.setdefault(current_key, {}),
        remainder,
        value,
        is_first=False,
    )
class InstanceMetadataFetcher(object):
    """Fetches IAM role credentials from the EC2 instance metadata service."""
    def __init__(self, timeout=DEFAULT_METADATA_SERVICE_TIMEOUT,
                 num_attempts=1, url=METADATA_SECURITY_CREDENTIALS_URL,
                 env=None):
        # :param timeout: Per-request timeout, in seconds.
        # :param num_attempts: Attempts made per metadata request.
        # :param url: Base URL of the security-credentials listing.
        # :param env: Environment mapping (defaults to a copy of
        #     os.environ); honors the AWS_EC2_METADATA_DISABLED switch.
        self._timeout = timeout
        self._num_attempts = num_attempts
        self._url = url
        if env is None:
            env = os.environ.copy()
        self._disabled = env.get('AWS_EC2_METADATA_DISABLED', 'false').lower()
        self._disabled = self._disabled == 'true'
    def _get_request(self, url, timeout, num_attempts=1):
        # Return the first 200 response; raise _RetriesExceededError
        # after num_attempts failures or when metadata access is disabled.
        if self._disabled:
            logger.debug("Access to EC2 metadata has been disabled.")
            raise _RetriesExceededError()
        for i in range(num_attempts):
            try:
                response = requests.get(url, timeout=timeout)
            except RETRYABLE_HTTP_ERRORS as e:
                logger.debug("Caught exception while trying to retrieve "
                             "credentials: %s", e, exc_info=True)
            else:
                if response.status_code == 200:
                    return response
        raise _RetriesExceededError()
    def retrieve_iam_role_credentials(self):
        """Return the instance's IAM role credentials.

        :return: Dict with role_name/access_key/secret_key/token/
            expiry_time keys, or an empty dict when retrieval fails.
        """
        data = self._fetch_metadata(self._url)
        # We sort for stable ordering. In practice, this should only consist
        # of one role, but may need revisiting if this expands in the future.
        final_data = {}
        for role_name in sorted(data):
            final_data = {
                'role_name': role_name,
                'access_key': data[role_name]['AccessKeyId'],
                'secret_key': data[role_name]['SecretAccessKey'],
                'token': data[role_name]['Token'],
                'expiry_time': data[role_name]['Expiration'],
            }
        return final_data
    def _fetch_metadata(self, url):
        # Walk the metadata listing rooted at ``url``: entries ending in
        # '/' are sub-listings that are recursed into; other entries are
        # fetched as values, with JSON bodies parsed.  This replaces the
        # old recursive call that passed arguments to the zero-argument
        # retrieve_iam_role_credentials() and raised a TypeError.
        data = {}
        timeout = self._timeout
        num_attempts = self._num_attempts
        try:
            r = self._get_request(url, timeout, num_attempts)
            if r.content:
                fields = r.content.decode('utf-8').split('\n')
                for field in fields:
                    if field.endswith('/'):
                        data[field[:-1]] = self._fetch_metadata(url + field)
                    else:
                        val = self._get_request(
                            url + field,
                            timeout=timeout,
                            num_attempts=num_attempts,
                        ).content.decode('utf-8')
                        # startswith() also guards against an empty
                        # value (val[0] would raise IndexError).
                        if val.startswith('{'):
                            val = json.loads(val)
                        data[field] = val
            else:
                logger.debug("Metadata service returned non 200 status code "
                             "of %s for url: %s, content body: %s",
                             r.status_code, url, r.content)
        except _RetriesExceededError:
            logger.debug("Max number of attempts exceeded (%s) when "
                         "attempting to retrieve data from metadata service.",
                         num_attempts)
        return data
def merge_dicts(dict1, dict2, append_lists=False):
    """Given two dict, merge the second dict into the first.

    The dicts can have arbitrary nesting.

    :param dict1: The dict merged into; modified in place.
    :param dict2: The dict whose values take precedence.
    :param append_lists: If true, instead of clobbering a list with the new
        value, append all of the new values onto the original list.
    """
    for key in dict2:
        if isinstance(dict2[key], dict):
            if key in dict1:
                # BUG FIX: the recursive call previously dropped
                # ``append_lists``, so the flag only applied to top-level
                # lists; it is now propagated to nested dicts as well.
                merge_dicts(dict1[key], dict2[key], append_lists)
            else:
                dict1[key] = dict2[key]
        # If the value is a list and the ``append_lists`` flag is set,
        # append the new values onto the original list
        elif isinstance(dict2[key], list) and append_lists:
            # The value in dict1 must be a list in order to append new
            # values onto it.
            if key in dict1 and isinstance(dict1[key], list):
                dict1[key].extend(dict2[key])
            else:
                dict1[key] = dict2[key]
        else:
            # At scalar types, we iterate and merge the
            # current dict that we're on.
            dict1[key] = dict2[key]
def parse_key_val_file(filename, _open=open):
    """Read *filename* and parse its ``key=value`` lines into a dict.

    :param _open: Injectable opener, overridable for testing.
    :raises ConfigNotFound: if the file cannot be opened or read.
    """
    try:
        with _open(filename) as f:
            return parse_key_val_file_contents(f.read())
    except OSError:
        raise ConfigNotFound(path=filename)
def parse_key_val_file_contents(contents):
    """Parse ``key = value`` lines into a dict.

    Lines without an ``=`` are silently skipped, and whitespace around
    both key and value is stripped.  The parsing is deliberately lenient:
    this was originally extracted from the EC2 credential provider.
    """
    parsed = {}
    for raw_line in contents.splitlines():
        if '=' not in raw_line:
            continue
        name, _, value = raw_line.partition('=')
        parsed[name.strip()] = value.strip()
    return parsed
def percent_encode_sequence(mapping, safe=SAFE_CHARS):
    """Urlencode a dict or list into a string.

    This is similar to urllib.urlencode except that:

    * It uses quote, and not quote_plus
    * It has a default list of safe chars that don't need
      to be encoded, which matches what AWS services expect.

    If any value in the input ``mapping`` is a list type,
    then each list element will be serialized. This is the equivalent
    to ``urlencode``'s ``doseq=True`` argument.

    This function should be preferred over the stdlib
    ``urlencode()`` function.

    :param mapping: Either a dict to urlencode or a list of
        ``(key, value)`` pairs.
    :param safe: Characters exempt from encoding.  BUG FIX: this parameter
        was previously accepted but silently ignored; it is now forwarded
        to ``percent_encode`` for both keys and values.
    """
    encoded_pairs = []
    if hasattr(mapping, 'items'):
        pairs = mapping.items()
    else:
        pairs = mapping
    for key, value in pairs:
        if isinstance(value, list):
            for element in value:
                encoded_pairs.append('%s=%s' % (
                    percent_encode(key, safe=safe),
                    percent_encode(element, safe=safe)))
        else:
            encoded_pairs.append('%s=%s' % (
                percent_encode(key, safe=safe),
                percent_encode(value, safe=safe)))
    return '&'.join(encoded_pairs)
def percent_encode(input_str, safe=SAFE_CHARS):
    """Urlencodes a single string.

    Binary input is URL-encoded as-is.  Text input is UTF-8 encoded
    first.  Anything else is converted to the text type before encoding.
    """
    text = input_str
    # Coerce non-string inputs (ints, etc.) to text first.
    if not isinstance(text, (six.binary_type, six.text_type)):
        text = six.text_type(text)
    # Text must become bytes before quoting.
    if isinstance(text, six.text_type):
        text = text.encode('utf-8')
    return quote(text, safe=safe)
def parse_timestamp(value):
    """Parse a timestamp into a datetime object.

    Supported formats:

        * iso8601
        * rfc822
        * epoch (value is an integer)

    This will return a ``datetime.datetime`` object.
    """
    # Numeric values are treated as epoch seconds in local time.
    if isinstance(value, (int, float)):
        return datetime.datetime.fromtimestamp(value, tzlocal())
    # Strings holding a plain number are also accepted as epoch times.
    try:
        return datetime.datetime.fromtimestamp(float(value), tzlocal())
    except (TypeError, ValueError):
        pass
    try:
        # In certain cases, a timestamp marked with GMT can be parsed into a
        # different time zone, so here we provide a context which will
        # enforce that GMT == UTC.
        return dateutil.parser.parse(value, tzinfos={'GMT': tzutc()})
    except (TypeError, ValueError) as e:
        raise ValueError('Invalid timestamp "%s": %s' % (value, e))
def parse_to_aware_datetime(value):
    """Convert the passed in value to a timezone-aware datetime.

    Accepts several input types:

        * A datetime object (both naive and aware)
        * An integer representing the epoch time (can also be a string
          of the integer, i.e '0', instead of 0).  The epoch time is
          considered to be UTC.
        * An iso8601 formatted timestamp (the time component may be
          omitted).

    The returned datetime always carries tzinfo.  A naive input is
    assumed to be UTC (not local time), for backwards compatibility.
    """
    # Datetimes pass through; everything else goes via parse_timestamp,
    # which understands iso8601/rfc822/epoch values.
    if isinstance(value, datetime.datetime):
        parsed = value
    else:
        parsed = parse_timestamp(value)
    # Normalize to UTC.
    if parsed.tzinfo is None:
        return parsed.replace(tzinfo=tzutc())
    return parsed.astimezone(tzutc())
def datetime2timestamp(dt, default_timezone=None):
    """Calculate the timestamp based on the given datetime instance.

    :type dt: datetime
    :param dt: A datetime object to be converted into timestamp
    :type default_timezone: tzinfo
    :param default_timezone: Timezone assumed for a naive ``dt``;
        ``None`` means UTC.  Ignored for aware datetimes.
    :returns: The timestamp (seconds since the Unix epoch)
    """
    if dt.tzinfo is None:
        # Attach the assumed timezone to naive inputs.
        tz = default_timezone if default_timezone is not None else tzutc()
        dt = dt.replace(tzinfo=tz)
    epoch = datetime.datetime(1970, 1, 1)
    delta = dt.replace(tzinfo=None) - dt.utcoffset() - epoch
    if hasattr(delta, "total_seconds"):
        return delta.total_seconds()  # Works in Python 2.7+
    # Python 2.6 fallback: compute total seconds manually.
    return (delta.microseconds
            + (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6
def calculate_sha256(body, as_hex=False):
    """Calculate a sha256 checksum of a file like object.

    Reads ``body`` to EOF in 1MB chunks.  The caller is responsible for
    ensuring the proper starting position of the file and ``seek()``'ing
    it back afterwards if other consumers need to read from it.

    :param body: Any file like object opened in binary mode, such that
        ``.read()`` returns bytes.
    :param as_hex: If True, return the hex digest; otherwise return the
        raw digest bytes.
    :returns: The sha256 checksum
    """
    digest = hashlib.sha256()
    while True:
        chunk = body.read(1024 * 1024)
        if not chunk:
            break
        digest.update(chunk)
    return digest.hexdigest() if as_hex else digest.digest()
def calculate_tree_hash(body):
"""Calculate a tree hash checksum.
For more information see:
http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html
:param body: Any file like object. This has the same constraints as
the ``body`` param in calculate_sha256
:rtype: str
:returns: The hex version of the calculated tree hash
"""
chunks = []
required_chunk_size = 1024 * 1024
sha256 = hashlib.sha256
for chunk in iter(lambda: body.read(required_chunk_size), b''):
chunks.append(sha256(chunk).digest())
if not chunks:
return sha256(b'').hexdigest()
while len(chunks) > 1:
new_chunks = []
for first, second in _in_pairs(chunks):
if second is not None:
new_chunks.append(sha256(first + second).digest())
else:
# We're at the end of the list and there's no pair left.
new_chunks.append(first)
chunks = new_chunks
return binascii.hexlify(chunks[0]).decode('ascii')
def _in_pairs(iterable):
# Creates iterator that iterates over the list in pairs:
# for a, b in _in_pairs([0, 1, 2, 3, 4]):
# print(a, b)
#
# will print:
# 0, 1
# 2, 3
# 4, None
shared_iter = iter(iterable)
# Note that zip_longest is a compat import that uses
# the itertools izip_longest. This creates an iterator,
# this call below does _not_ immediately create the list
# of pairs.
return zip_longest(shared_iter, shared_iter)
class CachedProperty(object):
    """A read only property that caches the initially computed value.

    This descriptor will only call the provided ``fget`` function once.
    Subsequent access to this property will return the cached value.
    """

    def __init__(self, fget):
        # The wrapped getter; invoked at most once per instance.
        self._fget = fget

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself (e.g. introspection).
            return self
        # Compute once, then shadow this (non-data) descriptor by storing
        # the result in the instance dict under the getter's name, so
        # later lookups never reach the descriptor again.
        value = self._fget(obj)
        obj.__dict__[self._fget.__name__] = value
        return value
class ArgumentGenerator(object):
    """Generate sample input based on a shape model.

    This class contains a ``generate_skeleton`` method that will take
    an input/output shape (created from ``botocore.model``) and generate
    a sample dictionary corresponding to the input/output shape.

    Placeholder values are used throughout: strings become ``''`` (or the
    member name when ``use_member_names`` is set, or a random enum value
    when the shape declares an enum), integers/longs ``0``, floats
    ``0.0``, booleans ``True``, and timestamps the Unix epoch.

    The intended usage is to generate the *shape* of a complex input
    structure so a user can fill in the data without worrying about the
    exact structure.  Example usage::

        s = botocore.session.get_session()
        ddb = s.get_service_model('dynamodb')
        arg_gen = ArgumentGenerator()
        sample_input = arg_gen.generate_skeleton(
            ddb.operation_model('CreateTable').input_shape)
        print("Sample input for dynamodb.CreateTable: %s" % sample_input)
    """

    def __init__(self, use_member_names=False):
        self._use_member_names = use_member_names

    def generate_skeleton(self, shape):
        """Generate a sample input.

        :type shape: ``botocore.model.Shape``
        :param shape: The input shape.

        :return: The generated skeleton input corresponding to the
            provided input shape.
        """
        return self._generate_skeleton(shape, [])

    def _generate_skeleton(self, shape, stack, name=''):
        # ``stack`` tracks the shape names on the current path so that
        # recursive structures can be terminated.
        stack.append(shape.name)
        try:
            type_name = shape.type_name
            if type_name == 'structure':
                return self._generate_type_structure(shape, stack)
            if type_name == 'list':
                return self._generate_type_list(shape, stack)
            if type_name == 'map':
                return self._generate_type_map(shape, stack)
            if type_name == 'string':
                if self._use_member_names:
                    return name
                if shape.enum:
                    return random.choice(shape.enum)
                return ''
            if type_name in ('integer', 'long'):
                return 0
            if type_name == 'float':
                return 0.0
            if type_name == 'boolean':
                return True
            if type_name == 'timestamp':
                return datetime.datetime(1970, 1, 1, 0, 0, 0)
        finally:
            stack.pop()

    def _generate_type_structure(self, shape, stack):
        # A second occurrence of this shape name on the path means we've
        # recursed into ourselves; cut the recursion off with an empty dict.
        if stack.count(shape.name) > 1:
            return {}
        result = OrderedDict()
        for member_name, member_shape in shape.members.items():
            result[member_name] = self._generate_skeleton(
                member_shape, stack, name=member_name)
        return result

    def _generate_type_list(self, shape, stack):
        # A single placeholder element stands in for the list contents.
        element_name = shape.member.name if self._use_member_names else ''
        return [self._generate_skeleton(shape.member, stack, element_name)]

    def _generate_type_map(self, shape, stack):
        # Map keys in botocore models are always strings.
        assert shape.key.type_name == 'string'
        return OrderedDict([
            ('KeyName', self._generate_skeleton(shape.value, stack)),
        ])
def is_valid_endpoint_url(endpoint_url):
    """Verify the endpoint_url is valid.

    :type endpoint_url: string
    :param endpoint_url: An endpoint_url.  Must have at least a scheme
        and a hostname.

    :return: True if the endpoint url is valid. False otherwise.
    """
    hostname = urlsplit(endpoint_url).hostname
    if hostname is None:
        return False
    if len(hostname) > 255:
        return False
    # A single trailing dot (fully-qualified form) is permitted; strip it
    # before validating the labels.
    if hostname.endswith("."):
        hostname = hostname[:-1]
    # Each dot-separated label: 1-63 alphanumeric/hyphen chars, with no
    # leading or trailing hyphen.
    label_pattern = re.compile(
        r"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$",
        re.IGNORECASE)
    return label_pattern.match(hostname)
def check_dns_name(bucket_name):
    """
    Check to see if the ``bucket_name`` complies with the
    restricted DNS naming conventions necessary to allow
    access via virtual-hosting style.

    Even though "." characters are perfectly valid in this DNS
    naming scheme, we are going to punt on any name containing a
    "." character because these will cause SSL cert validation
    problems if we try to use virtual-hosting style addressing.

    :param bucket_name: The S3 bucket name to validate.
    :return: True when the name can be used virtual-host style.
    """
    if '.' in bucket_name:
        return False
    n = len(bucket_name)
    if n < 3 or n > 63:
        # Wrong length
        return False
    # NOTE: a previous ``if n == 1`` alnum check here was unreachable
    # (``n < 3`` already returned above) and has been removed.
    match = LABEL_RE.match(bucket_name)
    if match is None or match.end() != len(bucket_name):
        return False
    return True
def fix_s3_host(request, signature_version, region_name,
                default_endpoint_url=None, **kwargs):
    """
    This handler looks at S3 requests just before they are signed.
    If there is a bucket name on the path (true for everything except
    ListAllBuckets) it checks to see if that bucket name conforms to
    the DNS naming conventions.  If it does, it alters the request to
    use ``virtual hosting`` style addressing rather than ``path-style``
    addressing.
    """
    # Requests flagged for the global endpoint always go to the classic
    # us-east-1 hostname.
    if request.context.get('use_global_endpoint', False):
        default_endpoint_url = 's3.amazonaws.com'
    try:
        switch_to_virtual_host_style(
            request, signature_version, default_endpoint_url)
    except InvalidDNSNameError as e:
        # Bucket can't be a DNS label; leave the URI as path-style.
        logger.debug('Not changing URI, bucket is not DNS compatible: %s',
                     e.kwargs['bucket_name'])
def switch_to_virtual_host_style(request, signature_version,
                                 default_endpoint_url=None, **kwargs):
    """
    This is a handler to force virtual host style s3 addressing no matter
    the signature version (which is taken in consideration for the default
    case). If the bucket is not DNS compatible an InvalidDNSName is thrown.

    :param request: A AWSRequest object that is about to be sent.
    :param signature_version: The signature version to sign with
    :param default_endpoint_url: The endpoint to use when switching to a
        virtual style. If None is supplied, the virtual host will be
        constructed from the url of the request.
    :raises InvalidDNSNameError: when the bucket name cannot be used as a
        DNS label (note: ``request.auth_path`` has already been set by the
        time this is raised, and that side effect persists).
    """
    if request.auth_path is not None:
        # The auth_path has already been applied (this may be a
        # retried request). We don't need to perform this
        # customization again.
        return
    elif _is_get_bucket_location_request(request):
        # For the GetBucketLocation response, we should not be using
        # the virtual host style addressing so we can avoid any sigv4
        # issues.
        logger.debug("Request is GetBucketLocation operation, not checking "
                     "for DNS compatibility.")
        return
    parts = urlsplit(request.url)
    # Record the path-style path for signing purposes BEFORE any
    # validation; this assignment is visible even if we raise below.
    request.auth_path = parts.path
    path_parts = parts.path.split('/')
    # Retrieve what the endpoint we will be prepending the bucket name to.
    if default_endpoint_url is None:
        default_endpoint_url = parts.netloc
    if len(path_parts) > 1:
        # path starts with '/', so the bucket is the second segment.
        bucket_name = path_parts[1]
        if not bucket_name:
            # If the bucket name is empty we should not be checking for
            # dns compatibility.
            return
        logger.debug('Checking for DNS compatible bucket for: %s',
                     request.url)
        if check_dns_name(bucket_name):
            # If the operation is on a bucket, the auth_path must be
            # terminated with a '/' character.
            if len(path_parts) == 2:
                if request.auth_path[-1] != '/':
                    request.auth_path += '/'
            # Drop the bucket segment; the remaining segments form the
            # new (virtual-host style) path.
            path_parts.remove(bucket_name)
            # At the very least the path must be a '/', such as with the
            # CreateBucket operation when DNS style is being used. If this
            # is not used you will get an empty path which is incorrect.
            path = '/'.join(path_parts) or '/'
            global_endpoint = default_endpoint_url
            # Prepend the bucket to the host: bucket.endpoint.
            host = bucket_name + '.' + global_endpoint
            new_tuple = (parts.scheme, host, path,
                         parts.query, '')
            new_uri = urlunsplit(new_tuple)
            request.url = new_uri
            logger.debug('URI updated to: %s', new_uri)
        else:
            raise InvalidDNSNameError(bucket_name=bucket_name)
def _is_get_bucket_location_request(request):
return request.url.endswith('?location')
def instance_cache(func):
    """Method decorator for caching method calls to a single instance.

    **This is not a general purpose caching decorator.**

    The decorated method's results are memoized per instance in the
    instance's ``_instance_cache`` dict, which the instance *must*
    provide.  Each instance therefore maintains its own cache.  Note
    that a cached ``None`` result is not detected as cached and will be
    recomputed on the next call.
    """
    func_name = func.__name__

    @functools.wraps(func)
    def _cache_guard(self, *args, **kwargs):
        # Keyword arguments participate in the key in sorted order so
        # equivalent calls hit the same entry.
        if kwargs:
            key = (func_name, args, tuple(sorted(kwargs.items())))
        else:
            key = (func_name, args)
        cached = self._instance_cache.get(key)
        if cached is None:
            cached = func(self, *args, **kwargs)
            self._instance_cache[key] = cached
        return cached
    return _cache_guard
def switch_host_s3_accelerate(request, operation_name, **kwargs):
    """Switches the current s3 endpoint with an S3 Accelerate endpoint"""
    # Note that when registered the switching of the s3 host happens
    # before it gets changed to virtual. So we are not concerned with ensuring
    # that the bucket name is translated to the virtual style here and we
    # can hard code the Accelerate endpoint.
    netloc_parts = urlsplit(request.url).netloc.split('.')
    whitelisted = [p for p in netloc_parts if p in S3_ACCELERATE_WHITELIST]
    endpoint = 'https://s3-accelerate.'
    if whitelisted:
        endpoint += '.'.join(whitelisted) + '.'
    endpoint += 'amazonaws.com'
    # Bucket-level operations keep the regular endpoint.
    if operation_name in ['ListBuckets', 'CreateBucket', 'DeleteBucket']:
        return
    _switch_hosts(request, endpoint, use_new_scheme=False)
def switch_host_with_param(request, param_name):
    """Switches the host using a parameter value from a JSON request body"""
    body = json.loads(request.data.decode('utf-8'))
    new_endpoint = body.get(param_name)
    # Only switch when the parameter is present and truthy.
    if new_endpoint:
        _switch_hosts(request, new_endpoint)
def _switch_hosts(request, new_endpoint, use_new_scheme=True):
    # Rewrite the request URL in place: host (and optionally scheme)
    # come from ``new_endpoint``; path and query stay as-is.
    request.url = _get_new_endpoint(
        request.url, new_endpoint, use_new_scheme)
def _get_new_endpoint(original_endpoint, new_endpoint, use_new_scheme=True):
    """Combine ``new_endpoint``'s host with ``original_endpoint``'s
    path and query.

    :param use_new_scheme: When True, the scheme is also taken from
        ``new_endpoint``; otherwise the original scheme is kept.
    :returns: The recombined endpoint URL string.
    """
    new_endpoint_components = urlsplit(new_endpoint)
    original_endpoint_components = urlsplit(original_endpoint)
    scheme = original_endpoint_components.scheme
    if use_new_scheme:
        scheme = new_endpoint_components.scheme
    final_endpoint_components = (
        scheme,
        new_endpoint_components.netloc,
        original_endpoint_components.path,
        original_endpoint_components.query,
        ''
    )
    final_endpoint = urlunsplit(final_endpoint_components)
    # Use lazy %-args so the formatting is skipped when debug logging is
    # disabled (previously the string was formatted eagerly with %).
    logger.debug('Updating URI from %s to %s',
                 original_endpoint, final_endpoint)
    return final_endpoint
def deep_merge(base, extra):
    """Deeply merge two dictionaries, overriding existing keys in the base.

    :param base: The base dictionary which will be merged into.
    :param extra: The dictionary to merge into the base. Keys from this
        dictionary will take precedence.
    """
    for key, value in extra.items():
        # Recurse only when *both* sides hold a dict for this key;
        # otherwise the extra value simply replaces the base value.
        if (key in base and isinstance(base[key], dict)
                and isinstance(value, dict)):
            deep_merge(base[key], value)
        else:
            base[key] = value
class S3RegionRedirector(object):
    """Transparently follow S3 wrong-region redirects.

    Hooks into the client's event system so that a request signed for
    the wrong region is re-signed for the bucket's actual region and
    retried, and so that the discovered region is cached per bucket and
    reused by subsequent requests.
    """

    def __init__(self, endpoint_bridge, client, cache=None):
        # Resolves region names to endpoint URLs.
        self._endpoint_resolver = endpoint_bridge
        # bucket name -> signing context dict (region/bucket/endpoint).
        self._cache = cache
        if self._cache is None:
            self._cache = {}

        # This needs to be a weak ref in order to prevent memory leaks on
        # python 2.6
        self._client = weakref.proxy(client)

    def register(self, event_emitter=None):
        """Subscribe the redirect handlers to the client's S3 events."""
        emitter = event_emitter or self._client.meta.events
        emitter.register('needs-retry.s3', self.redirect_from_error)
        emitter.register('before-call.s3', self.set_request_url)
        emitter.register('before-parameter-build.s3',
                         self.redirect_from_cache)

    def redirect_from_error(self, request_dict, response, operation, **kwargs):
        """
        An S3 request sent to the wrong region will return an error that
        contains the endpoint the request should be sent to. This handler
        will add the redirect information to the signing context and then
        redirect the request.
        """
        if response is None:
            # This could be none if there was a ConnectionError or other
            # transport error.
            return
        # Only redirect once per request, to avoid redirect loops.
        if request_dict.get('context', {}).get('s3_redirected'):
            return
        error = response[1].get('Error', {})
        error_code = error.get('Code')
        # We have to account for 400 responses because
        # if we sign a Head* request with the wrong region,
        # we'll get a 400 Bad Request but we won't get a
        # body saying it's an "AuthorizationHeaderMalformed".
        is_special_head_object = (
            error_code in ['301', '400'] and
            operation.name in ['HeadObject', 'HeadBucket']
        )
        is_wrong_signing_region = (
            error_code == 'AuthorizationHeaderMalformed' and
            'Region' in error
        )
        is_permanent_redirect = error_code == 'PermanentRedirect'
        if not any([is_special_head_object, is_wrong_signing_region,
                    is_permanent_redirect]):
            return
        bucket = request_dict['context']['signing']['bucket']
        client_region = request_dict['context'].get('client_region')
        new_region = self.get_bucket_region(bucket, response)
        if new_region is None:
            # Can't determine where to redirect; let the error propagate.
            logger.debug(
                "S3 client configured for region %s but the bucket %s is not "
                "in that region and the proper region could not be "
                "automatically determined." % (client_region, bucket))
            return
        logger.debug(
            "S3 client configured for region %s but the bucket %s is in region"
            " %s; Please configure the proper region to avoid multiple "
            "unnecessary redirects and signing attempts." % (
                client_region, bucket, new_region))
        endpoint = self._endpoint_resolver.resolve('s3', new_region)
        endpoint = endpoint['endpoint_url']
        signing_context = {
            'region': new_region,
            'bucket': bucket,
            'endpoint': endpoint
        }
        request_dict['context']['signing'] = signing_context
        # Remember the bucket's real region so future requests skip the
        # failed attempt entirely (see redirect_from_cache).
        self._cache[bucket] = signing_context
        self.set_request_url(request_dict, request_dict['context'])
        request_dict['context']['s3_redirected'] = True
        # Return 0 so it doesn't wait to retry
        return 0

    def get_bucket_region(self, bucket, response):
        """
        There are multiple potential sources for the new region to redirect to,
        but they aren't all universally available for use. This will try to
        find region from response elements, but will fall back to calling
        HEAD on the bucket if all else fails.

        :param bucket: The bucket to find the region for. This is necessary if
            the region is not available in the error response.
        :param response: A response representing a service request that failed
            due to incorrect region configuration.
        """
        # First try to source the region from the headers.
        service_response = response[1]
        response_headers = service_response['ResponseMetadata']['HTTPHeaders']
        if 'x-amz-bucket-region' in response_headers:
            return response_headers['x-amz-bucket-region']
        # Next, check the error body
        region = service_response.get('Error', {}).get('Region', None)
        if region is not None:
            return region
        # Finally, HEAD the bucket. No other choice sadly.
        try:
            response = self._client.head_bucket(Bucket=bucket)
            headers = response['ResponseMetadata']['HTTPHeaders']
        except ClientError as e:
            # Even a failed HEAD carries the bucket-region header.
            headers = e.response['ResponseMetadata']['HTTPHeaders']
        region = headers.get('x-amz-bucket-region', None)
        return region

    def set_request_url(self, params, context, **kwargs):
        # Rewrite the request URL to the endpoint recorded in the signing
        # context, if any (keeps path/query, swaps host).
        endpoint = context.get('signing', {}).get('endpoint', None)
        if endpoint is not None:
            params['url'] = _get_new_endpoint(params['url'], endpoint, False)

    def redirect_from_cache(self, params, context, **kwargs):
        """
        This handler retrieves a given bucket's signing context from the cache
        and adds it into the request context.
        """
        bucket = params.get('Bucket')
        signing_context = self._cache.get(bucket)
        if signing_context is not None:
            context['signing'] = signing_context
        else:
            context['signing'] = {'bucket': bucket}
class ContainerMetadataFetcher(object):
    """Fetch JSON metadata documents from the container (ECS) metadata
    service, retrying failed requests and refusing non-local hosts.
    """

    TIMEOUT_SECONDS = 2   # per-request timeout, in seconds
    RETRY_ATTEMPTS = 3    # total attempts before the last error propagates
    SLEEP_TIME = 1        # seconds slept between attempts
    IP_ADDRESS = '169.254.170.2'
    _ALLOWED_HOSTS = [IP_ADDRESS, 'localhost', '127.0.0.1']

    def __init__(self, session=None, sleep=time.sleep):
        # ``sleep`` is injectable so tests can avoid real delays.
        if session is None:
            session = requests.Session()
        self._session = session
        self._sleep = sleep

    def retrieve_full_uri(self, full_url, headers=None):
        """Retrieve JSON metadata from container metadata.

        :type full_url: str
        :param full_url: The full URL of the metadata service.
            This should include the scheme as well, e.g
            "http://localhost:123/foo"
        :raises ValueError: if the URL's host is not in the whitelist.
        """
        self._validate_allowed_url(full_url)
        return self._retrieve_credentials(full_url, headers)

    def _validate_allowed_url(self, full_url):
        # Reject anything that isn't the ECS link-local address or
        # localhost, to avoid leaking credentials requests elsewhere.
        parsed = botocore.compat.urlparse(full_url)
        is_whitelisted_host = self._check_if_whitelisted_host(
            parsed.hostname)
        if not is_whitelisted_host:
            raise ValueError(
                "Unsupported host '%s'.  Can only "
                "retrieve metadata from these hosts: %s" %
                (parsed.hostname, ', '.join(self._ALLOWED_HOSTS)))

    def _check_if_whitelisted_host(self, host):
        # Exact-match check against the allowed host list.
        if host in self._ALLOWED_HOSTS:
            return True
        return False

    def retrieve_uri(self, relative_uri):
        """Retrieve JSON metadata from ECS metadata.

        :type relative_uri: str
        :param relative_uri: A relative URI, e.g "/foo/bar?id=123"

        :return: The parsed JSON response.
        """
        full_url = self.full_url(relative_uri)
        return self._retrieve_credentials(full_url)

    def _retrieve_credentials(self, full_url, extra_headers=None):
        # GET ``full_url`` with retries; returns the parsed JSON body.
        # The last MetadataRetrievalError is re-raised once the attempt
        # budget is exhausted.
        headers = {'Accept': 'application/json'}
        if extra_headers is not None:
            headers.update(extra_headers)
        attempts = 0
        while True:
            try:
                return self._get_response(full_url, headers, self.TIMEOUT_SECONDS)
            except MetadataRetrievalError as e:
                logger.debug("Received error when attempting to retrieve "
                             "container metadata: %s", e, exc_info=True)
                # Sleep before every retry, including the final failed one.
                self._sleep(self.SLEEP_TIME)
                attempts += 1
                if attempts >= self.RETRY_ATTEMPTS:
                    raise

    def _get_response(self, full_url, headers, timeout):
        # Single HTTP attempt.  Every failure mode (transport error,
        # non-200 status, unparseable body) is normalized into
        # MetadataRetrievalError so the retry loop above can handle it.
        try:
            response = self._session.get(full_url, headers=headers,
                                         timeout=timeout)
            if response.status_code != 200:
                raise MetadataRetrievalError(
                    error_msg="Received non 200 response (%s) from ECS metadata: %s"
                    % (response.status_code, response.text))
            try:
                return json.loads(response.text)
            except ValueError:
                raise MetadataRetrievalError(
                    error_msg=("Unable to parse JSON returned from "
                               "ECS metadata: %s" % response.text))
        except RETRYABLE_HTTP_ERRORS as e:
            error_msg = ("Received error when attempting to retrieve "
                         "ECS metadata: %s" % e)
            raise MetadataRetrievalError(error_msg=error_msg)

    def full_url(self, relative_uri):
        # Build the absolute metadata URL from the fixed link-local host.
        return 'http://%s%s' % (self.IP_ADDRESS, relative_uri)
|
import os
from uuid import uuid1 as uuid
from sqlalchemy import create_engine
from sqlalchemy import Column, String
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from flask_sqlalchemy import SQLAlchemy
from flask import Flask
#engine = create_engine(os.environ['SQLALCHEMY_DATABASE_URI'], convert_unicode=True, echo=False)
#db_session = scoped_session(sessionmaker(autocommit=False,
# autoflush=True,
# bind=engine))
# Flask application and shared SQLAlchemy handle for the project.
app = Flask('didactic_spork')
# The database connection string must be supplied via the environment.
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']
# Echo all generated SQL statements (debugging aid).
app.config['SQLALCHEMY_ECHO'] = True
# Sessions do not autoflush; callers flush/commit explicitly.
db = SQLAlchemy(app, session_options={'autoflush':False})
def default_uuid():
    """Return a freshly generated UUID1 as a string (for column defaults)."""
    return '%s' % uuid()
def init_db():
    """Create all tables for every registered model.

    ``models`` is imported lazily (inside the function) so that every
    model class is registered on the metadata before ``create_all`` runs,
    and to avoid a circular import at module load time.
    """
    # import all modules here that might define models so that
    # they will be registered properly on the metadata. Otherwise
    # you will have to import them first before calling init_db()
    import models
    db.create_all()
    db.session.flush()
def clear_db():
    """Drop every table, discarding pending (uncommitted) changes first."""
    import models
    # Roll back so no open transaction blocks the DROP statements.
    db.session.rollback()
    db.drop_all()
Fixed table creation and silenced the track-modifications notification
import os
from uuid import uuid1 as uuid
from sqlalchemy import create_engine
from sqlalchemy import Column, String
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from flask_sqlalchemy import SQLAlchemy
from flask import Flask
#engine = create_engine(os.environ['SQLALCHEMY_DATABASE_URI'], convert_unicode=True, echo=False)
#db_session = scoped_session(sessionmaker(autocommit=False,
# autoflush=True,
# bind=engine))
# Flask application configured from the environment.
app = Flask('didactic_spork')
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']
# Disable the modification-tracking signal machinery; it adds overhead
# and emits a warning when left at its implicit default.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
def default_uuid():
    """Produce a UUID1 rendered as a string (used for default column values)."""
    return '{0}'.format(uuid())
def init_db():
    """Create all model tables on the configured engine.

    ``models`` is imported lazily so every model class is registered on
    the shared declarative metadata before ``create_all`` runs.
    """
    # import all modules here that might define models so that
    # they will be registered properly on the metadata. Otherwise
    # you will have to import them first before calling init_db()
    import models
    models.Base.metadata.create_all(bind=db.engine)
def clear_db():
    """Drop all model tables, rolling back any open transaction first."""
    import models
    # Roll back so pending changes don't hold locks during the drops.
    db.session.rollback()
    models.Base.metadata.drop_all(bind=db.engine)
|
Simplify MailResponse class by not implementing smtp.IMessage.
We aren't using it as an SMTP message delivery class anywhere, so
there's no need to have the extra methods.
* REMOVE bridgedb.email.server.MailResponse.lineReceived()
* REMOVE bridgedb.email.server.MailResponse.eomReceived()
* REMOVE bridgedb.email.server.MailResponse.connectionLost()
* REMOVE zope.interface implementation declaration of
twisted.mail.smtp.IMessage by b.e.s.MailResponse.
|
import logging
import re
import socket
try:
import ssl as _ssl
_hush_pyflakes = [_ssl]
del _hush_pyflakes
except ImportError:
_ssl = None # No SSL support
from kitnirc.events import NUMERIC_EVENTS
from kitnirc.user import User
_log = logging.getLogger(__name__)
class Channel(object):
    """Information about an IRC channel.

    This class keeps track of things like who is in a channel, the channel
    topic, modes, and so on.
    """
    def __init__(self, name):
        self.name = name
        self.topic = None    # Channel topic, when one has been recorded
        self.members = {}    # nick -> User for everyone in the channel
        self.modes = {}      # mode char -> value

    def __str__(self):
        return self.name

    def __repr__(self):
        return "kitnirc.client.Channel(%r)" % self.name

    def add_user(self, user):
        """Adds a user to the channel."""
        if not isinstance(user, User):
            user = User(user)
        if user.nick in self.members:
            _log.warning("Ignoring request to add user '%s' to channel '%s' "
                         "because that user is already in the member list.",
                         user, self.name)
            return
        self.members[user.nick] = user
        _log.debug("Added '%s' to channel '%s'", user, self.name)

    def remove_user(self, user):
        """Removes a user from the channel."""
        if not isinstance(user, User):
            user = User(user)
        if user.nick not in self.members:
            # BUG FIX: the warning previously claimed the user was
            # "already in" the member list, but this branch runs when
            # the user is *not* in it.
            _log.warning("Ignoring request to remove user '%s' from channel "
                         "'%s' because that user is not in the member "
                         "list.", user, self.name)
            return
        del self.members[user.nick]
        _log.debug("Removed '%s' from channel '%s'", user, self.name)
class Host(object):
    """Information about an IRC server.

    Tracks connection details plus client-side state: the channels the
    client has joined, server feature/mode advertisements, the MOTD, and
    miscellaneous server metadata.
    """
    def __init__(self, host, port):
        self.host = host
        # We also keep track of the host we originally connected to - e.g.
        # if we connected to a round-robin alias.
        self.original_host = host
        self.port = port
        self.password = None
        self.motd = ""    # The full text of the MOTD, once received
        self._motd = []   # Receive buffer; do not use for reading
        # Buffer for information from WHOIS response lines
        self._whois = {}
        # The channels we're in, keyed by channel name
        self.channels = {}
        # What features modes are available on the server
        self.features = dict()
        self.user_modes = set()
        self.channel_modes = set()
        # Miscellaneous information about the server
        self.version = None
        self.created = None

    def __str__(self):
        return self.host

    def __repr__(self):
        return "kitnirc.client.Host(%r, %r)" % (self.host, self.port)

    def add_channel(self, channel):
        """Record that we joined *channel* (a name or Channel instance)."""
        if not isinstance(channel, Channel):
            channel = Channel(channel)
        name = channel.name
        if name in self.channels:
            _log.warning("Ignoring request to add a channel that has already "
                         "been added: '%s'", channel)
            return
        self.channels[name] = channel
        _log.info("Entered channel %s.", channel)

    def remove_channel(self, channel):
        """Record that we left *channel* (a name or Channel instance)."""
        name = channel.name if isinstance(channel, Channel) else channel
        if name not in self.channels:
            _log.warning("Ignoring request to remove a channel that hasn't "
                         "been added: '%s'", name)
            return
        del self.channels[name]
        _log.info("Left channel %s.", name)
class Client(object):
    """An IRC client.

    This class wraps a connection to a single IRC network and provides
    additional functionality (e.g. tracking of nicks and channels).
    """
    def __init__(self, host=None, port=6667):
        if host:
            self.server = Host(host, port)
        else:
            # The server may instead be supplied later via connect().
            self.server = None
        self.connected = False
        self.socket = None
        self._stop = False
        self._buffer = ""

        # Queues for event dispatching. Handlers are called as
        # handler(client, *event_args); returning a truthy value stops
        # later handlers for that event.
        self.event_handlers = {
            ###### CONNECTION-LEVEL EVENTS ######

            # Fires while the client is connecting, when a password should be
            # supplied. If nothing supplies a password, the password argument
            # of connect() will be used (if set).
            "PASSWORD": [],

            # Fires after the client's socket connects.
            "CONNECTED": [on_connect],

            # Fires every time a line is received
            "LINE": [on_line],

            # Fires whenever a line isn't handled by LINE
            "RAWLINE": [],

            ###### IRC-LEVEL EVENTS ######

            # Fires when receiving the 001 RPL_WELCOME message upon
            # being recognized as a valid user by the IRC server.
            "WELCOME": [],

            # Fires when a privmsg is received
            "PRIVMSG": [],  # actor, recipient

            # Fires when a notice is received
            "NOTICE": [],

            # Fires when a complete MOTD is received
            "MOTD": [],

            # Fires when a user joins a channel
            "JOIN": [],

            # Fires when a user parts a channel
            "PART": [],

            # Fires when a user quits the server
            "QUIT": [],

            # Fires when a user is kicked from a channel
            "KICK": [],

            # Fires when the list of users in a channel has been updated
            "MEMBERS": [],

            # Fires whenever a mode change occurs
            "MODE": [],

            # Fires when a WHOIS response is complete
            "WHOIS": [],

            # Fires when a channel topic changes
            "TOPIC": [],

            # Fires when someone invites us to a channel
            "INVITE": [],
        }

    def add_handler(self, event, handler):
        """Adds a handler for a particular event.

        Handlers are appended to the list, so a handler added earlier
        will be called before a handler added later. If you wish to
        insert a handler at another position, you should modify the
        event_handlers property directly:

            my_client.event_handlers['PRIVMSG'].insert(0, my_handler)
        """
        if event not in self.event_handlers:
            _log.info("Adding event handler for new event %s.", event)
            self.event_handlers[event] = [handler]
        else:
            self.event_handlers[event].append(handler)

    def dispatch_event(self, event, *args):
        """Dispatches an event.

        Returns a boolean indicating whether or not a handler
        suppressed further handling of the event (even the last).
        """
        if event not in self.event_handlers:
            _log.error("Dispatch requested for unknown event '%s'", event)
            return False
        elif event != "LINE":
            # LINE fires for every inbound line; logging it would be noise.
            _log.debug("Dispatching event %s %r", event, args)

        try:
            for handler in self.event_handlers[event]:
                # Handlers are called as handler(client, *args); args are
                # dependent on the event.
                if handler(self, *args):
                    # Returning a truthy value supresses further handlers
                    # for this event.
                    return True
        except Exception as e:
            # A failing handler aborts the remaining handlers for this
            # event; the error is logged but not re-raised.
            _log.exception("Error while processing event '%s': %r", event, e)

        # Fall back to the RAWLINE event if LINE can't process it.
        if event == "LINE":
            return self.dispatch_event("RAWLINE", *args)

        return False

    def connect(self, nick, username=None, realname=None, password=None,
                host=None, port=6667, ssl=None):
        """Connect to the server using the specified credentials.

        Note: if host is specified here, both the host and port arguments
        passed to Client.__init__ will be ignored.

        If the 'ssl' argument is boolean true, will use SSL. If it is a
        dictionary, will both use SSL and pass the contents as kwargs to
        the ssl.wrap_socket() call.
        """
        if host:
            self.server = Host(host, port)
        if self.server is None:
            _log.error("Can't connect() without a host specified.")
            return
        self.user = User(nick)
        self.user.username = username or nick
        self.user.realname = realname or username or nick

        _log.info("Connecting to %s as %s ...", self.server.host, nick)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if ssl and _ssl:
            ssl_kwargs = ssl if isinstance(ssl, dict) else {}
            self.socket = _ssl.wrap_socket(self.socket, **ssl_kwargs)
        elif ssl:
            _log.error("SSL requested but no SSL support available!")
            return
        self.socket.connect((self.server.host, self.server.port))
        self.connected = True
        _log.info("Connected to %s.", self.server.host)

        # Allow an event handler to supply a password instead, if it wants
        suppress_password = self.dispatch_event("PASSWORD")
        if password and not suppress_password:
            # We bypass our own send() function here to avoid logging passwords
            _log.info("Sending server password.")
            self.socket.send("PASS %s\r\n" % password)
            self.server.password = password

        self.dispatch_event('CONNECTED')

    def disconnect(self, msg="Shutting down..."):
        """Send QUIT and close the socket, stopping the event loop."""
        if not self.connected:
            _log.warning("Disconnect requested from non-connected client (%s)",
                         self.server.host)
            return
        _log.info("Disconnecting from %s ...", self.server.host)
        self._stop = True
        self.send("QUIT", ":" + msg)
        try:
            self.socket.close()
        except socket.error:
            # Closing is best-effort; the socket may already be gone.
            pass

    def run(self):
        """Process events such as incoming data.

        This method blocks indefinitely. It will only return after the
        connection to the server is closed.
        """
        self._stop = False  # Allow re-starting the event loop
        while not self._stop:
            try:
                self._buffer += self.socket.recv(4096)
            except socket.error:
                raise
            # Lines can arrive split across recv() calls, so keep any
            # trailing partial line buffered for the next read.
            lines = self._buffer.split("\n")
            self._buffer = lines.pop()  # Last line may not have been fully read
            for line in lines:
                line = line.rstrip("\r")
                _log.debug("%s --> %s", self.server.host, line)
                self.dispatch_event("LINE", line)

    def send(self, *args):
        """Sends a single raw message to the IRC server.

        Arguments are automatically joined by spaces; User arguments are
        rendered as their nick. No newlines are allowed (they would
        inject extra IRC commands).
        """
        msg = " ".join(a.nick if isinstance(a, User) else str(a) for a in args)
        if "\n" in msg:
            raise ValueError("Cannot send() a newline. Args: %r" % args)
        _log.debug("%s <-- %s", self.server.host, msg)
        self.socket.send(msg + "\r\n")

    def nick(self, nick):
        """Attempt to set the nickname for this connection."""
        _log.info("Requesting nick change to '%s'", nick)
        self.send("NICK", nick)

    def userinfo(self, username, realname=None):
        """Set the username and realname for this connection.

        Note: this should only be called once, on connect. (The default
        on-connect routine calls this automatically.)
        """
        realname = realname or username
        _log.info("Requesting user info update: username=%s realname=%s",
                  username, realname)
        self.send("USER", username, socket.getfqdn(), self.server.host,
                  ":%s" % realname)  # Realname should always be prefixed by a colon
        self.user.username = username
        self.user.realname = realname

    def msg(self, target, message):
        """Send a message to a user or channel."""
        self.send("PRIVMSG", target, ":" + message)

    def reply(self, incoming, user, message):
        """Replies to a user in a given channel or PM.

        If the specified incoming is a user, simply sends a PM to user.
        If the specified incoming is a channel, prefixes the message with the
        user's nick and sends it to the channel.

        This is specifically useful in creating responses to commands that can
        be used in either a channel or in a PM, and responding to the person
        who invoked the command.
        """
        if not isinstance(user, User):
            user = User(user)
        if isinstance(incoming, User):
            self.msg(user, message)
        else:
            self.msg(incoming, "%s: %s" % (user.nick, message))

    def notice(self, target, message):
        """Send a NOTICE to a user or channel."""
        self.send("NOTICE", target, ":" + message)

    def ctcp(self, target, message):
        """Send a CTCP message to a user or channel."""
        self.msg(target, "\x01%s\x01" % message)

    def emote(self, target, message):
        """Sends an emote (/me ...) to a user or channel."""
        self.ctcp(target, "ACTION %s" % message)

    def join(self, target, key=None):
        """Attempt to join a channel.

        The optional second argument is the channel key, if needed.
        """
        chantypes = self.server.features.get("CHANTYPES", "#")
        if not target or target[0] not in chantypes:
            # Among other things, this prevents accidentally sending the
            # "JOIN 0" command which actually removes you from all channels
            _log.warning("Refusing to join channel that does not start "
                         "with one of '%s': %s", chantypes, target)
            return False
        if target in self.server.channels:
            _log.warning("Ignoring request to join channel '%s' because we "
                         "are already in that channel.", target)
            return False
        _log.info("Joining channel %s ...", target)
        self.send("JOIN", target, *([key] if key else []))
        return True

    def invite(self, channel, nick):
        """Attempt to invite a user to a channel."""
        self.send("INVITE", nick, channel)

    def part(self, target, message=None):
        """Part a channel.

        Returns True if a PART command was sent, False if we were not
        in the channel (mirroring join()'s contract).
        """
        if target not in self.server.channels:
            _log.warning("Ignoring request to part channel '%s' because we "
                         "are not in that channel.", target)
            # Fixed: the guard previously returned before an unreachable
            # "return False" / send / "return True" sequence, so PART was
            # never actually sent.
            return False
        self.send("PART", target, *([message] if message else []))
        return True

    def quit(self, message=None):
        """Quit the server (and stop the event loop).

        This actually just calls .disconnect() with the provided message."""
        self.disconnect(message or "Bye")

    def kick(self, channel, nick, message=None):
        """Attempt to kick a user from a channel.

        If a message is not provided, defaults to own nick.
        """
        self.send("KICK", channel, nick, ":%s" % (message or self.user.nick))

    def whois(self, nick):
        """Request WHOIS information about a user."""
        self.send("WHOIS", nick)

    def mode(self, channel, add='', remove=''):
        """Add and/or remove modes for a given channel.

        The 'add' and 'remove' arguments may, if specified, be either
        sequences or dictionaries. If a dictionary is specified, the
        corresponding values will be passed as arguments (with expansion
        if necessary - {'b': ['foo','bar']} will result in two bans:

            MODE <channel> +bb foo bar

        (Values for modes which do not take arguments are ignored.)
        """
        if channel not in self.server.channels:
            _log.warning("Ignoring request to set modes in channel '%s' "
                         "because we are not in that channel.", channel)
            return

        chanmodes = self._get_chanmodes()
        list_modes, always_arg_modes, set_arg_modes, toggle_modes = chanmodes

        def _arg_to_list(arg, argument_modes, toggle_modes):
            # Normalize a sequence-or-dict mode spec into
            # (set of modes without args, list of (mode, arg) pairs).
            if not isinstance(arg, dict):
                modes = set(arg)
                invalid_modes = modes - toggle_modes
                if invalid_modes:
                    _log.warning("Ignoring the mode(s) '%s' because they are "
                                 "missing required arguments.",
                                 "".join(invalid_modes))
                return modes & toggle_modes, []
            # Okay, so arg is a dict
            modes_with_args = []
            modes_without_args = set()
            for k, v in arg.iteritems():
                if isinstance(v, str):
                    v = [v]
                if k in argument_modes:
                    for val in v:
                        modes_with_args.append((k, val))
                elif k in toggle_modes:
                    modes_without_args.add(k)
                else:
                    _log.warning("Ignoring request to set channel mode '%s' "
                                 "because it is not a recognized mode.", k)
            return modes_without_args, modes_with_args

        add_modes, add_modes_args = _arg_to_list(
            add, list_modes | always_arg_modes | set_arg_modes, toggle_modes)
        remove_modes, remove_modes_args = _arg_to_list(
            remove, list_modes | always_arg_modes, set_arg_modes | toggle_modes)

        # Servers cap how many mode arguments fit in one MODE command.
        max_arg = self.server.features.get("MODES") or 3

        def _send_modes(op, toggle_modes, arg_modes):
            # Emit MODE commands in batches of at most max_arg arguments.
            while toggle_modes or arg_modes:
                modes = "".join(toggle_modes)
                toggle_modes = ""
                now_modes, arg_modes = arg_modes[:max_arg], arg_modes[max_arg:]
                modes += "".join(mode for mode, arg in now_modes)
                modes += "".join(" %s" % arg for mode, arg in now_modes)
                self.send("MODE", channel, "%s%s" % (op, modes))

        _send_modes("+", add_modes, add_modes_args)
        _send_modes("-", remove_modes, remove_modes_args)

    def handle(self, event):
        """Decorator for adding a handler function for a particular event.

        Usage:

            my_client = Client()

            @my_client.handle("WELCOME")
            def welcome_handler(client, *params):
                # Do something with the event.
                pass
        """
        def dec(func):
            self.add_handler(event, func)
            return func
        return dec

    def _get_prefixes(self):
        """Get the possible nick prefixes and associated modes for a client."""
        prefixes = {
            "@": "o",
            "+": "v",
        }
        feature_prefixes = self.server.features.get('PREFIX')
        if feature_prefixes:
            # PREFIX is of the form "(ov)@+": mode chars inside the
            # parens, then the matching status symbols.
            modes = feature_prefixes[1:len(feature_prefixes)//2]
            symbols = feature_prefixes[len(feature_prefixes)//2+1:]
            prefixes = dict(zip(symbols, modes))
        return prefixes

    def _get_chanmodes(self):
        """Return (list, always-arg, set-only-arg, toggle) mode char sets."""
        chanmodes = self.server.features.get('CHANMODES')
        if not chanmodes:
            # Defaults from RFC 2811
            list_modes = set("beI")
            always_arg_modes = set()
            set_arg_modes = set("kl")
            toggle_modes = set("aimnqpsrt")
        else:
            chanmodes = chanmodes.split(",")
            list_modes = set(chanmodes[0])
            always_arg_modes = set(chanmodes[1])
            set_arg_modes = set(chanmodes[2])
            toggle_modes = set(chanmodes[3])
        return list_modes, always_arg_modes, set_arg_modes, toggle_modes
################################################################################
# DEFAULT LOW-LEVEL EVENT HANDLERS
################################################################################
def on_connect(client):
    """Default on-connect actions: register with the server.

    Installed as the default CONNECTED handler; sends NICK followed by
    USER using the credentials stored on client.user by connect().
    """
    client.nick(client.user.nick)
    client.userinfo(client.user.username, client.user.realname)
def on_line(client, line):
    """Default handling for incoming lines.

    PING messages are answered immediately with a PONG. All other
    lines are split into (actor, command, args); numeric commands are
    translated to symbolic names, then dispatched to the registered
    parser (if any). Commands registered in PARSERS as False are
    recognized but deliberately ignored.

    Returns True when the line was handled; returns None for unknown
    commands so dispatch_event() falls through to the RAWLINE event.
    """
    if line.startswith("PING"):
        # Answer keepalives immediately, echoing the server's payload.
        client.send("PONG" + line[4:])
        return True

    if line.startswith(":"):
        # Line carries a prefix identifying the actor (sender).
        actor, _, line = line[1:].partition(" ")
    else:
        actor = None
    command, _, args = line.partition(" ")
    # Translate numeric replies (e.g. "001") to symbolic event names.
    command = NUMERIC_EVENTS.get(command, command)

    # Fixed: default is None, not False. With a default of False,
    # unknown commands were swallowed as "explicitly ignored" and the
    # RAWLINE fallback documented on the Client could never fire.
    parser = PARSERS.get(command)
    if parser:
        parser(client, command, actor, args)
        return True
    elif parser is False:
        # Explicitly ignored message
        return True
################################################################################
# COMMAND PARSERS
################################################################################
# Holds a mapping of IRC commands to functions that will parse them and
# take any necessary action. We define some ignored events here as well.
# A value of False means "recognized but deliberately ignored" - no
# parser runs for it and no further handling occurs.
PARSERS = {
    "YOURHOST": False,
}
def parser(*events):
    """Decorator registering a function as the parser for the given events."""
    def register(func):
        # One function may serve several commands (e.g. PRIVMSG/NOTICE).
        for ev in events:
            PARSERS[ev] = func
        return func
    return register
@parser("PRIVMSG", "NOTICE")
def _parse_msg(client, command, actor, args):
"""Parse a PRIVMSG or NOTICE and dispatch the corresponding event."""
recipient, _, message = args.partition(' :')
chantypes = client.server.features.get("CHANTYPES", "#")
if recipient[0] in chantypes:
recipient = client.server.channels.get(recipient) or recipient
else:
recipient = User(recipient)
client.dispatch_event(command, actor, recipient, message)
@parser("MOTDSTART", "ENDOFMOTD", "MOTD")
def _parse_motd(client, command, actor, args):
if command == "MOTDSTART":
client.server._motd = []
if command == "ENDOFMOTD":
client.server.motd = "\n".join(client.server._motd)
client.dispatch_event("MOTD", client.server.motd)
if command == "MOTD": # MOTD line
client.server._motd.append(args.partition(":")[2])
@parser("JOIN")
def _parse_join(client, command, actor, args):
    """Parse a JOIN and update channel states, then dispatch events.

    Note that two events are dispatched here:
      - JOIN, because a user joined the channel
      - MEMBERS, because the channel's members changed
    """
    actor = User(actor)
    # The channel name arrives as the trailing parameter (":#chan").
    channel = args.lstrip(' :')
    if actor.nick == client.user.nick:
        # It's us joining: start tracking the channel, and learn our
        # own host as the server sees it from the JOIN prefix.
        client.server.add_channel(channel)
        client.user.host = actor.host  # now we know our host per the server
    channel = client.server.channels[channel]
    channel.add_user(actor)
    client.dispatch_event("JOIN", actor, channel)
    if actor.nick != client.user.nick:
        # If this is us joining, the namreply will trigger this instead
        client.dispatch_event("MEMBERS", channel)
@parser("PART")
def _parse_part(client, command, actor, args):
    """Parse a PART and update channel states, then dispatch events.

    Note that two events are dispatched here:
      - PART, because a user parted the channel
      - MEMBERS, because the channel's members changed
    """
    actor = User(actor)
    # Format: "<channel> :<part message>" (the message may be absent).
    channel, _, message = args.partition(' :')
    channel = client.server.channels[channel]
    channel.remove_user(actor)
    if actor.nick == client.user.nick:
        # It's us leaving - stop tracking the channel entirely.
        client.server.remove_channel(channel)
    client.dispatch_event("PART", actor, channel, message)
    if actor.nick != client.user.nick:
        client.dispatch_event("MEMBERS", channel)
@parser("QUIT")
def _parse_quit(client, command, actor, args):
    """Parse a QUIT and update channel states, then dispatch events.

    Note that two events are dispatched here:
      - QUIT, because a user quit the server
      - MEMBERS, for each channel the user is no longer in
    """
    actor = User(actor)
    _, _, message = args.partition(':')
    client.dispatch_event("QUIT", actor, message)
    for chan in client.server.channels.itervalues():
        if actor.nick in chan.members:
            # Fixed: Channel has no .remove(); the correct method is
            # .remove_user() (previously raised AttributeError whenever
            # a user in one of our channels quit).
            chan.remove_user(actor)
            client.dispatch_event("MEMBERS", chan)
@parser("KICK")
def _parse_kick(client, command, actor, args):
    """Parse a KICK and update channel states, then dispatch events.

    Note that two events are dispatched here:
      - KICK, because a user was kicked from the channel
      - MEMBERS, because the channel's members changed
    """
    actor = User(actor)
    # Format: "<channel> <target> :<reason>".
    args, _, message = args.partition(' :')
    channel, target = args.split()
    channel = client.server.channels[channel]
    # remove_user() accepts a bare nick and wraps it itself.
    channel.remove_user(target)
    target = User(target)
    if target.nick == client.user.nick:
        # We were the one kicked - stop tracking the channel.
        client.server.remove_channel(channel)
    client.dispatch_event("KICK", actor, target, channel, message)
    client.dispatch_event("MEMBERS", channel)
@parser("TOPIC")
def _parse_topic(client, command, actor, args):
    """Parse a TOPIC and update channel state, then dispatch a TOPIC event."""
    channel, _, topic = args.partition(" :")
    channel = client.server.channels[channel]
    # An empty topic string means the topic was cleared.
    channel.topic = topic or None
    if actor:
        # actor may be None (no prefix on the line); only wrap when present.
        actor = User(actor)
    client.dispatch_event("TOPIC", actor, channel, topic)
@parser("WELCOME")
def _parse_welcome(client, command, actor, args):
    """Parse a WELCOME and update user state, then dispatch a WELCOME event."""
    # The last token of the welcome line is our own hostmask as the
    # server sees it (fed to User.update_from_hostmask below).
    _, _, hostmask = args.rpartition(' ')
    client.user.update_from_hostmask(hostmask)
    client.dispatch_event("WELCOME", hostmask)
@parser("CREATED")
def _parse_created(client, command, actor, args):
"""Parse CREATED and update the Host object."""
m = re.search("This server was created (.+)$", args)
if m:
client.server.created = m.group(1)
@parser("MYINFO")
def _parse_myinfo(client, command, actor, args):
"""Parse MYINFO and update the Host object."""
_, server, version, usermodes, channelmodes = args.split(None, 5)[:5]
s = client.server
s.host = server
s.version = version
s.user_modes = set(usermodes)
s.channel_modes = set(channelmodes)
@parser("FEATURELIST")
def _parse_featurelist(client, command, actor, args):
"""Parse FEATURELIST and update the Host object."""
# Strip off ":are supported by this server"
args = args.rsplit(":", 1)[0]
# Strip off the nick; we know it's addressed to us.
_, _, args = args.partition(' ')
items = args.split()
for item in items:
feature, _, value = item.partition("=")
# Convert integer values to actual integers for convenience
try:
value = int(value)
except (ValueError, TypeError):
pass
client.server.features[feature] = value
@parser("NAMREPLY")
def _parse_namreply(client, command, actor, args):
    """Parse NAMREPLY and update a Channel object."""
    # Map of status symbol (e.g. "@") -> mode character (e.g. "o").
    prefixes = client._get_prefixes()
    channelinfo, _, useritems = args.partition(' :')
    _, _, channel = channelinfo.rpartition(' ')  # channeltype channelname
    c = client.server.channels.get(channel)
    if not c:
        _log.warning("Ignoring NAMREPLY for channel '%s' which we are not in.",
                     channel)
        return
    # We bypass Channel.add_user() here because we just want to sync in any
    # users we don't already have, regardless of if other users exist, and
    # we don't want the warning spam.
    for nick in useritems.split():
        # Strip leading status symbols, collecting the modes they imply.
        modes = set()
        while nick[0] in prefixes:
            modes.add(prefixes[nick[0]])
            nick = nick[1:]
        user = c.members.get(nick)
        if not user:
            user = c.members[nick] = User(nick)
            _log.debug("Added user %s to channel %s", user, channel)
        # Merge rather than replace, keeping any modes we already knew.
        user.modes |= modes
@parser("ENDOFNAMES")
def _parse_endofnames(client, command, actor, args):
"""Parse an ENDOFNAMES and dispatch a NAMES event for the channel."""
args = args.split(" :", 1)[0] # Strip off human-readable message
_, _, channel = args.rpartition(' ')
channel = client.server.channels.get(channel) or channel
client.dispatch_event('MEMBERS', channel)
@parser("MODE")
def _parse_mode(client, command, actor, args):
    """Parse a mode changes, update states, and dispatch MODE events."""
    chantypes = client.server.features.get("CHANTYPES", "#")
    channel, _, args = args.partition(" ")
    args = args.lstrip(":")
    if channel[0] not in chantypes:
        # Personal modes
        for modes in args.split():
            # Each token is an op character followed by mode chars, e.g. "+iw".
            op, modes = modes[0], modes[1:]
            for mode in modes:
                if op == "+":
                    client.user.modes.add(mode)
                else:
                    client.user.modes.discard(mode)
                client.dispatch_event("MODE", actor, client.user, op, mode, None)
        return

    # channel-specific modes
    chan = client.server.channels[channel]
    # Modes that grant channel status to a member (op, voice, ...).
    user_modes = set(client._get_prefixes().itervalues())
    chanmodes = client._get_chanmodes()
    list_modes, always_arg_modes, set_arg_modes, toggle_modes = chanmodes
    argument_modes = list_modes | always_arg_modes | set_arg_modes
    tokens = args.split()
    while tokens:
        modes, tokens = tokens[0], tokens[1:]
        op, modes = modes[0], modes[1:]
        for mode in modes:
            argument = None
            if mode in (user_modes | argument_modes):
                # This mode consumes the next token as its argument.
                argument, tokens = tokens[0], tokens[1:]
            if mode in user_modes:
                # Status change for a channel member; argument is the nick.
                user = client.server.channels[channel].members[argument]
                if op == "+":
                    user.modes.add(mode)
                else:
                    user.modes.discard(mode)
            if op == "+":
                if mode in (always_arg_modes | set_arg_modes):
                    chan.modes[mode] = argument
                elif mode in toggle_modes:
                    chan.modes[mode] = True
            else:
                if mode in (always_arg_modes | set_arg_modes | toggle_modes):
                    if mode in chan.modes:
                        del chan.modes[mode]

            # list-type modes (bans+exceptions, invite masks) aren't stored,
            # but do generate MODE events.
            client.dispatch_event("MODE", actor, chan, op, mode, argument)
@parser("WHOISUSER", "WHOISCHANNELS", "WHOISIDLE", "WHOISSERVER",
"WHOISOPERATOR", "WHOISACCOUNT", "WHOISBOT", "WHOISREGNICK",
"ENDOFWHOIS")
def _parse_whois(client, command, actor, args):
"""Parse the content responses from a WHOIS query.
Individual response lines are parsed and used to fill in data in a buffer,
the full contents of which are then sent as the argument to the WHOIS
event dispatched when an ENDOFWHOIS line is received from the server.
"""
_, _, args = args.partition(" ") # Strip off recipient, we know it"s us
nick, _, args = args.partition(" ")
if client.server._whois.get("nick") != nick:
client.server._whois = {"nick": nick}
response = client.server._whois
if command == "WHOISUSER":
first, _, response["realname"] = args.partition(":")
response["username"], response["host"] = first.split()[:2]
return
if command == "WHOISISSERVER":
response["server"], _, response["serverinfo"] = args.partition(" :")
return
if command == "WHOISOPERATOR":
response["oper"] = True
return
if command == "WHOISIDLE":
response["idle"], _, _ = args.partition(" :")
response["idle"] = int(response["idle"])
return
if command == "WHOISCHANNELS":
modes = "".join(client._get_prefixes())
print repr(modes)
channels = args.lstrip(":").split()
response["channels"] = dict(
(chan.lstrip(modes), chan[0] if chan[0] in modes else "")
for chan in channels)
return
if command == "WHOISACCOUNT":
response["account"], _, _ = args.partition(" :")
return
if command == "WHOISBOT":
response["bot"] = True
return
if command == "WHOISREGNICK":
response["registered"] = True
return
if command == "ENDOFWHOIS":
client.dispatch_event("WHOIS", response)
@parser("NICK")
def _parse_nick(client, command, actor, args):
    """Parse a NICK response, update state, and dispatch events.

    Note: this function dispatches both a NICK event and also one or more
    MEMBERS events for each channel the user that changed nick was in.
    """
    old_nick, _, _ = actor.partition('!')
    new_nick = args
    if old_nick == client.user.nick:
        # It's our own nick that changed.
        client.user.nick = new_nick

    modified_channels = set()
    for channel in client.server.channels.itervalues():
        user = channel.members.get(old_nick)
        if user:
            # Re-key the member entry under the new nick.
            user.nick = new_nick
            channel.members[new_nick] = user
            del channel.members[old_nick]
            modified_channels.add(channel.name)

    client.dispatch_event("NICK", old_nick, new_nick)
    # NOTE(review): MEMBERS is dispatched with channel *names* here,
    # while other call sites pass Channel objects - confirm handlers
    # tolerate both.
    for channel in modified_channels:
        client.dispatch_event("MEMBERS", channel)
@parser("INVITE")
def _parse_invite(client, command, actor, args):
"""Parse an INVITE and dispatch an event."""
target, _, channel = args.rpartition(" ")
client.dispatch_event("INVITE", actor, target, channel)
# vim: set ts=4 sts=4 sw=4 et:
Correct method name.
import logging
import re
import socket
try:
import ssl as _ssl
_hush_pyflakes = [_ssl]
del _hush_pyflakes
except ImportError:
_ssl = None # No SSL support
from kitnirc.events import NUMERIC_EVENTS
from kitnirc.user import User
_log = logging.getLogger(__name__)
class Channel(object):
    """Information about an IRC channel.

    This class keeps track of things like who is in a channel, the channel
    topic, modes, and so on.
    """
    def __init__(self, name):
        self.name = name
        self.topic = None
        # User objects keyed by nick.
        self.members = {}
        # Active channel modes (mode char -> argument, or True).
        self.modes = {}

    def __str__(self):
        return self.name

    def __repr__(self):
        return "kitnirc.client.Channel(%r)" % self.name

    def add_user(self, user):
        """Adds a user to the channel."""
        if not isinstance(user, User):
            user = User(user)
        if user.nick in self.members:
            _log.warning("Ignoring request to add user '%s' to channel '%s' "
                         "because that user is already in the member list.",
                         user, self.name)
            return
        self.members[user.nick] = user
        _log.debug("Added '%s' to channel '%s'", user, self.name)

    def remove_user(self, user):
        """Removes a user from the channel."""
        if not isinstance(user, User):
            user = User(user)
        if user.nick not in self.members:
            # Fixed log text: this branch fires when the user is NOT in
            # the member list (the old message said "already in", copied
            # from add_user()).
            _log.warning("Ignoring request to remove user '%s' from channel "
                         "'%s' because that user is not in the member "
                         "list.", user, self.name)
            return
        del self.members[user.nick]
        _log.debug("Removed '%s' from channel '%s'", user, self.name)
class Host(object):
    """State tracker for a single IRC server.

    Records connection details plus everything learned from the server
    at runtime: MOTD text, joined channels, advertised features and
    mode characters, and miscellaneous server metadata.
    """

    def __init__(self, host, port):
        self.host = host
        # Remember the address originally dialed (e.g. a round-robin
        # alias) even if self.host is later rewritten from MYINFO.
        self.original_host = host
        self.port = port
        self.password = None
        # Full MOTD text once received; _motd is only the line-by-line
        # receive buffer used while it streams in.
        self.motd = ""
        self._motd = []
        # Accumulates WHOIS response fields until ENDOFWHOIS.
        self._whois = {}
        # Channel objects for every channel we are currently in,
        # keyed by channel name.
        self.channels = {}
        # Server-advertised feature list and mode characters.
        self.features = {}
        self.user_modes = set()
        self.channel_modes = set()
        # Assorted server metadata.
        self.version = None
        self.created = None

    def __str__(self):
        return self.host

    def __repr__(self):
        return "kitnirc.client.Host(%r, %r)" % (self.host, self.port)

    def add_channel(self, channel):
        # Track a newly joined channel; duplicate adds are ignored.
        if not isinstance(channel, Channel):
            channel = Channel(channel)
        if channel.name in self.channels:
            _log.warning("Ignoring request to add a channel that has already "
                         "been added: '%s'", channel)
            return
        self.channels[channel.name] = channel
        _log.info("Entered channel %s.", channel)

    def remove_channel(self, channel):
        # Forget a channel we have left; unknown channels are ignored.
        if isinstance(channel, Channel):
            channel = channel.name
        if channel not in self.channels:
            _log.warning("Ignoring request to remove a channel that hasn't "
                         "been added: '%s'", channel)
            return
        del self.channels[channel]
        _log.info("Left channel %s.", channel)
class Client(object):
    """An IRC client.

    This class wraps a connection to a single IRC network and provides
    additional functionality (e.g. tracking of nicks and channels).
    """
    def __init__(self, host=None, port=6667):
        if host:
            self.server = Host(host, port)
        else:
            # The server may instead be supplied later via connect().
            self.server = None
        self.connected = False
        self.socket = None
        self._stop = False
        self._buffer = ""

        # Queues for event dispatching. Handlers are called as
        # handler(client, *event_args); returning a truthy value stops
        # later handlers for that event.
        self.event_handlers = {
            ###### CONNECTION-LEVEL EVENTS ######

            # Fires while the client is connecting, when a password should be
            # supplied. If nothing supplies a password, the password argument
            # of connect() will be used (if set).
            "PASSWORD": [],

            # Fires after the client's socket connects.
            "CONNECTED": [on_connect],

            # Fires every time a line is received
            "LINE": [on_line],

            # Fires whenever a line isn't handled by LINE
            "RAWLINE": [],

            ###### IRC-LEVEL EVENTS ######

            # Fires when receiving the 001 RPL_WELCOME message upon
            # being recognized as a valid user by the IRC server.
            "WELCOME": [],

            # Fires when a privmsg is received
            "PRIVMSG": [],  # actor, recipient

            # Fires when a notice is received
            "NOTICE": [],

            # Fires when a complete MOTD is received
            "MOTD": [],

            # Fires when a user joins a channel
            "JOIN": [],

            # Fires when a user parts a channel
            "PART": [],

            # Fires when a user quits the server
            "QUIT": [],

            # Fires when a user is kicked from a channel
            "KICK": [],

            # Fires when the list of users in a channel has been updated
            "MEMBERS": [],

            # Fires whenever a mode change occurs
            "MODE": [],

            # Fires when a WHOIS response is complete
            "WHOIS": [],

            # Fires when a channel topic changes
            "TOPIC": [],

            # Fires when someone invites us to a channel
            "INVITE": [],
        }
def add_handler(self, event, handler):
"""Adds a handler for a particular event.
Handlers are appended to the list, so a handler added earlier
will be called before a handler added later. If you wish to
insert a handler at another position, you should modify the
event_handlers property directly:
my_client.event_handlers['PRIVMSG'].insert(0, my_handler)
"""
if event not in self.event_handlers:
_log.info("Adding event handler for new event %s.", event)
self.event_handlers[event] = [handler]
else:
self.event_handlers[event].append(handler)
def dispatch_event(self, event, *args):
    """Dispatches an event.

    Returns a boolean indicating whether or not a handler
    suppressed further handling of the event (even the last).
    """
    if event not in self.event_handlers:
        _log.error("Dispatch requested for unknown event '%s'", event)
        return False
    elif event != "LINE":
        # LINE fires for every inbound line; logging it would be noise.
        _log.debug("Dispatching event %s %r", event, args)

    try:
        for handler in self.event_handlers[event]:
            # Handlers are called as handler(client, *args); args are
            # dependent on the event.
            if handler(self, *args):
                # Returning a truthy value supresses further handlers
                # for this event.
                return True
    except Exception as e:
        # NOTE(review): a failing handler aborts the remaining handlers
        # for this event; the error is logged but not re-raised.
        _log.exception("Error while processing event '%s': %r", event, e)

    # Fall back to the RAWLINE event if LINE can't process it.
    if event == "LINE":
        return self.dispatch_event("RAWLINE", *args)

    return False
def connect(self, nick, username=None, realname=None, password=None,
            host=None, port=6667, ssl=None):
    """Connect to the server using the specified credentials.

    Note: if host is specified here, both the host and port arguments
    passed to Client.__init__ will be ignored.

    If the 'ssl' argument is boolean true, will use SSL. If it is a
    dictionary, will both use SSL and pass the contents as kwargs to
    the ssl.wrap_socket() call.
    """
    if host:
        self.server = Host(host, port)
    if self.server is None:
        _log.error("Can't connect() without a host specified.")
        return
    self.user = User(nick)
    self.user.username = username or nick
    self.user.realname = realname or username or nick

    _log.info("Connecting to %s as %s ...", self.server.host, nick)
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if ssl and _ssl:
        ssl_kwargs = ssl if isinstance(ssl, dict) else {}
        # NOTE(review): ssl.wrap_socket() was removed in modern Python 3;
        # an SSLContext would be needed there. This codebase otherwise
        # targets Python 2.
        self.socket = _ssl.wrap_socket(self.socket, **ssl_kwargs)
    elif ssl:
        _log.error("SSL requested but no SSL support available!")
        return
    self.socket.connect((self.server.host, self.server.port))
    self.connected = True
    _log.info("Connected to %s.", self.server.host)

    # Allow an event handler to supply a password instead, if it wants
    suppress_password = self.dispatch_event("PASSWORD")
    if password and not suppress_password:
        # We bypass our own send() function here to avoid logging passwords
        _log.info("Sending server password.")
        self.socket.send("PASS %s\r\n" % password)
        self.server.password = password

    self.dispatch_event('CONNECTED')
    def disconnect(self, msg="Shutting down..."):
        """Send QUIT with `msg`, stop the event loop, and close the socket."""
        if not self.connected:
            _log.warning("Disconnect requested from non-connected client (%s)",
                         self.server.host)
            return
        _log.info("Disconnecting from %s ...", self.server.host)
        # Flag run()'s loop to exit before tearing the socket down.
        self._stop = True
        self.send("QUIT", ":" + msg)
        try:
            self.socket.close()
        except socket.error:
            # Closing is best-effort; the socket may already be gone.
            pass
    def run(self):
        """Process events such as incoming data.
        This method blocks indefinitely. It will only return after the
        connection to the server is closed.
        """
        self._stop = False  # Allow re-starting the event loop
        while not self._stop:
            try:
                self._buffer += self.socket.recv(4096)
            except socket.error:
                raise
            # IRC lines are \r\n-terminated; split on \n and strip \r below.
            lines = self._buffer.split("\n")
            self._buffer = lines.pop()  # Last line may not have been fully read
            for line in lines:
                line = line.rstrip("\r")
                _log.debug("%s --> %s", self.server.host, line)
                self.dispatch_event("LINE", line)
    def send(self, *args):
        """Sends a single raw message to the IRC server.
        Arguments are automatically joined by spaces. No newlines are allowed.
        User arguments are rendered as their nick; everything else via str().
        """
        msg = " ".join(a.nick if isinstance(a, User) else str(a) for a in args)
        if "\n" in msg:
            raise ValueError("Cannot send() a newline. Args: %r" % args)
        _log.debug("%s <-- %s", self.server.host, msg)
        self.socket.send(msg + "\r\n")
    def nick(self, nick):
        """Attempt to set the nickname for this connection."""
        _log.info("Requesting nick change to '%s'", nick)
        # The server confirms (or rejects) via a NICK reply handled elsewhere.
        self.send("NICK", nick)
    def userinfo(self, username, realname=None):
        """Set the username and realname for this connection.
        Note: this should only be called once, on connect. (The default
        on-connect routine calls this automatically.)
        """
        realname = realname or username
        _log.info("Requesting user info update: username=%s realname=%s",
                  username, realname)
        self.send("USER", username, socket.getfqdn(), self.server.host,
                  ":%s" % realname)  # Realname should always be prefixed by a colon
        self.user.username = username
        self.user.realname = realname
    def msg(self, target, message):
        """Send a PRIVMSG `message` to a user or channel `target`."""
        self.send("PRIVMSG", target, ":" + message)
    def reply(self, incoming, user, message):
        """Replies to a user in a given channel or PM.
        If the specified incoming is a user, simply sends a PM to user.
        If the specified incoming is a channel, prefixes the message with the
        user's nick and sends it to the channel.
        This is specifically useful in creating responses to commands that can
        be used in either a channel or in a PM, and responding to the person
        who invoked the command.
        """
        # Normalize a bare nick string into a User object.
        if not isinstance(user, User):
            user = User(user)
        if isinstance(incoming, User):
            self.msg(user, message)
        else:
            self.msg(incoming, "%s: %s" % (user.nick, message))
    def notice(self, target, message):
        """Send a NOTICE to a user or channel."""
        self.send("NOTICE", target, ":" + message)
    def ctcp(self, target, message):
        """Send a CTCP message (\\x01-delimited PRIVMSG) to a user or channel."""
        self.msg(target, "\x01%s\x01" % message)
    def emote(self, target, message):
        """Sends an emote (/me ...) to a user or channel via CTCP ACTION."""
        self.ctcp(target, "ACTION %s" % message)
    def join(self, target, key=None):
        """Attempt to join a channel.
        The optional second argument is the channel key, if needed.
        Returns True if a JOIN was sent, False if the request was refused.
        """
        chantypes = self.server.features.get("CHANTYPES", "#")
        if not target or target[0] not in chantypes:
            # Among other things, this prevents accidentally sending the
            # "JOIN 0" command which actually removes you from all channels
            _log.warning("Refusing to join channel that does not start "
                         "with one of '%s': %s", chantypes, target)
            return False
        if target in self.server.channels:
            _log.warning("Ignoring request to join channel '%s' because we "
                         "are already in that channel.", target)
            return False
        _log.info("Joining channel %s ...", target)
        self.send("JOIN", target, *([key] if key else []))
        return True
    def invite(self, channel, nick):
        """Attempt to invite a user to a channel."""
        self.send("INVITE", nick, channel)
def part(self, target, message=None):
"""Part a channel."""
if target not in self.server.channels:
_log.warning("Ignoring request to part channel '%s' because we "
"are not in that channel.", target)
return
return False
self.send("PART", target, *([message] if message else []))
return True
    def quit(self, message=None):
        """Quit the server (and stop the event loop).
        This actually just calls .disconnect() with the provided message."""
        self.disconnect(message or "Bye")
    def kick(self, channel, nick, message=None):
        """Attempt to kick a user from a channel.
        If a message is not provided, defaults to own nick.
        """
        self.send("KICK", channel, nick, ":%s" % (message or self.user.nick))
    def whois(self, nick):
        """Request WHOIS information about a user.
        The aggregated reply is delivered later via the WHOIS event."""
        self.send("WHOIS", nick)
    def mode(self, channel, add='', remove=''):
        """Add and/or remove modes for a given channel.
        The 'add' and 'remove' arguments may, if specified, be either
        sequences or dictionaries. If a dictionary is specified, the
        corresponding values will be passed as arguments (with expansion
        if necessary - {'b': ['foo','bar']} will result in two bans:
            MODE <channel> +bb foo bar
        (Values for modes which do not take arguments are ignored.)
        """
        if channel not in self.server.channels:
            _log.warning("Ignoring request to set modes in channel '%s' "
                         "because we are not in that channel.", channel)
            return
        chanmodes = self._get_chanmodes()
        list_modes, always_arg_modes, set_arg_modes, toggle_modes = chanmodes
        def _arg_to_list(arg, argument_modes, toggle_modes):
            # Normalize `arg` into (set of argument-less modes,
            # list of (mode, argument) pairs).
            if not isinstance(arg, dict):
                modes = set(arg)
                invalid_modes = modes - toggle_modes
                if invalid_modes:
                    _log.warning("Ignoring the mode(s) '%s' because they are "
                                 "missing required arguments.",
                                 "".join(invalid_modes))
                return modes & toggle_modes, []
            # Okay, so arg is a dict
            modes_with_args = []
            modes_without_args = set()
            for k,v in arg.iteritems():
                if isinstance(v, str):
                    v = [v]
                if k in argument_modes:
                    for val in v:
                        modes_with_args.append((k,val))
                elif k in toggle_modes:
                    modes_without_args.add(k)
                else:
                    _log.warning("Ignoring request to set channel mode '%s' "
                                 "because it is not a recognized mode.", k)
            return modes_without_args, modes_with_args
        add_modes, add_modes_args = _arg_to_list(
            add, list_modes | always_arg_modes | set_arg_modes, toggle_modes)
        remove_modes, remove_modes_args = _arg_to_list(
            remove, list_modes | always_arg_modes, set_arg_modes | toggle_modes)
        # MODES feature (if advertised) caps arguments per MODE command.
        max_arg = self.server.features.get("MODES") or 3
        def _send_modes(op, toggle_modes, arg_modes):
            # Emit MODE commands: all toggles first, then batches of at most
            # max_arg argument-taking modes per command.
            while toggle_modes or arg_modes:
                modes = "".join(toggle_modes)
                toggle_modes = ""
                now_modes, arg_modes = arg_modes[:max_arg], arg_modes[max_arg:]
                modes += "".join(mode for mode,arg in now_modes)
                modes += "".join(" %s" % arg for mode,arg in now_modes)
                self.send("MODE", channel, "%s%s" % (op, modes))
        _send_modes("+", add_modes, add_modes_args)
        _send_modes("-", remove_modes, remove_modes_args)
    def handle(self, event):
        """Decorator for adding a handler function for a particular event.
        Usage:
            my_client = Client()
            @my_client.handle("WELCOME")
            def welcome_handler(client, *params):
                # Do something with the event.
                pass
        """
        def dec(func):
            # Register and return unchanged so stacking/decoration composes.
            self.add_handler(event, func)
            return func
        return dec
def _get_prefixes(self):
"""Get the possible nick prefixes and associated modes for a client."""
prefixes = {
"@": "o",
"+": "v",
}
feature_prefixes = self.server.features.get('PREFIX')
if feature_prefixes:
modes = feature_prefixes[1:len(feature_prefixes)//2]
symbols = feature_prefixes[len(feature_prefixes)//2+1:]
prefixes = dict(zip(symbols, modes))
return prefixes
def _get_chanmodes(self):
chanmodes = self.server.features.get('CHANMODES')
if not chanmodes:
# Defaults from RFC 2811
list_modes = set("beI")
always_arg_modes = set()
set_arg_modes = set("kl")
toggle_modes = set("aimnqpsrt")
else:
chanmodes = chanmodes.split(",")
list_modes = set(chanmodes[0])
always_arg_modes = set(chanmodes[1])
set_arg_modes = set(chanmodes[2])
toggle_modes = set(chanmodes[3])
return list_modes, always_arg_modes, set_arg_modes, toggle_modes
################################################################################
# DEFAULT LOW-LEVEL EVENT HANDLERS
################################################################################
def on_connect(client):
    """Default on-connect actions: identify ourselves to the server."""
    user = client.user
    client.nick(user.nick)
    client.userinfo(user.username, user.realname)
def on_line(client, line):
    """Default handling for incoming lines.
    This handler will automatically manage the following IRC messages:
    PING:
        Responds with a PONG.
    PRIVMSG:
        Dispatches the PRIVMSG event.
    NOTICE:
        Dispatches the NOTICE event.
    MOTDSTART:
        Initializes MOTD receive buffer.
    MOTD:
        Appends a line to the MOTD receive buffer.
    ENDOFMOTD:
        Joins the contents of the MOTD receive buffer, assigns the result
        to the .motd of the server, and dispatches the MOTD event.
    """
    if line.startswith("PING"):
        # Echo the ping payload back: "PING xyz" -> "PONG xyz".
        client.send("PONG" + line[4:])
        return True
    if line.startswith(":"):
        # Prefixed line: ":actor COMMAND args"
        actor, _, line = line[1:].partition(" ")
    else:
        actor = None
    command, _, args = line.partition(" ")
    # Translate numeric replies (e.g. "001") into symbolic names.
    command = NUMERIC_EVENTS.get(command, command)
    parser = PARSERS.get(command, False)
    if parser:
        parser(client, command, actor, args)
        return True
    elif parser is False:
        # Explicitly ignored message
        return True
    # parser is None (unknown command): fall through so RAWLINE can run.
################################################################################
# COMMAND PARSERS
################################################################################
# Maps IRC command names to the callables that parse them. A value of False
# marks a message we deliberately ignore.
PARSERS = {
    "YOURHOST": False,
}
def parser(*events):
    """Decorator for convenience - adds a function as a parser for event(s)."""
    def register(func):
        PARSERS.update((event, func) for event in events)
        return func
    return register
@parser("PRIVMSG", "NOTICE")
def _parse_msg(client, command, actor, args):
    """Parse a PRIVMSG or NOTICE and dispatch the corresponding event."""
    recipient, _, message = args.partition(' :')
    chantypes = client.server.features.get("CHANTYPES", "#")
    if recipient[0] in chantypes:
        # Channel target: prefer the tracked Channel object, else the name.
        recipient = client.server.channels.get(recipient) or recipient
    else:
        recipient = User(recipient)
    client.dispatch_event(command, actor, recipient, message)
@parser("MOTDSTART", "ENDOFMOTD", "MOTD")
def _parse_motd(client, command, actor, args):
    # Accumulate MOTD lines in a buffer, then publish it on ENDOFMOTD.
    if command == "MOTDSTART":
        client.server._motd = []
    if command == "ENDOFMOTD":
        client.server.motd = "\n".join(client.server._motd)
        client.dispatch_event("MOTD", client.server.motd)
    if command == "MOTD":  # MOTD line
        client.server._motd.append(args.partition(":")[2])
@parser("JOIN")
def _parse_join(client, command, actor, args):
    """Parse a JOIN and update channel states, then dispatch events.
    Note that two events are dispatched here:
    - JOIN, because a user joined the channel
    - MEMBERS, because the channel's members changed
    """
    actor = User(actor)
    channel = args.lstrip(' :')
    if actor.nick == client.user.nick:
        # It's us joining: start tracking the channel.
        client.server.add_channel(channel)
        client.user.host = actor.host  # now we know our host per the server
    channel = client.server.channels[channel]
    channel.add_user(actor)
    client.dispatch_event("JOIN", actor, channel)
    if actor.nick != client.user.nick:
        # If this is us joining, the namreply will trigger this instead
        client.dispatch_event("MEMBERS", channel)
@parser("PART")
def _parse_part(client, command, actor, args):
    """Parse a PART and update channel states, then dispatch events.
    Note that two events are dispatched here:
    - PART, because a user parted the channel
    - MEMBERS, because the channel's members changed
    """
    actor = User(actor)
    channel, _, message = args.partition(' :')
    channel = client.server.channels[channel]
    channel.remove_user(actor)
    if actor.nick == client.user.nick:
        # It's us leaving: stop tracking the channel entirely.
        client.server.remove_channel(channel)
    client.dispatch_event("PART", actor, channel, message)
    if actor.nick != client.user.nick:
        client.dispatch_event("MEMBERS", channel)
@parser("QUIT")
def _parse_quit(client, command, actor, args):
    """Parse a QUIT and update channel states, then dispatch events.
    Note that two events are dispatched here:
    - QUIT, because a user quit the server
    - MEMBERS, for each channel the user is no longer in
    """
    actor = User(actor)
    _, _, message = args.partition(':')
    client.dispatch_event("QUIT", actor, message)
    # Purge the user from every channel we share with them.
    for chan in client.server.channels.itervalues():
        if actor.nick in chan.members:
            chan.remove_user(actor)
            client.dispatch_event("MEMBERS", chan)
@parser("KICK")
def _parse_kick(client, command, actor, args):
    """Parse a KICK and update channel states, then dispatch events.
    Note that two events are dispatched here:
    - KICK, because a user was kicked from the channel
    - MEMBERS, because the channel's members changed
    """
    actor = User(actor)
    args, _, message = args.partition(' :')
    channel, target = args.split()
    channel = client.server.channels[channel]
    channel.remove_user(target)
    target = User(target)
    if target.nick == client.user.nick:
        # We were the one kicked: stop tracking the channel.
        client.server.remove_channel(channel)
    client.dispatch_event("KICK", actor, target, channel, message)
    client.dispatch_event("MEMBERS", channel)
@parser("TOPIC")
def _parse_topic(client, command, actor, args):
    """Parse a TOPIC and update channel state, then dispatch a TOPIC event."""
    channel, _, topic = args.partition(" :")
    channel = client.server.channels[channel]
    # An empty topic string means the topic was cleared.
    channel.topic = topic or None
    if actor:
        actor = User(actor)
    client.dispatch_event("TOPIC", actor, channel, topic)
@parser("WELCOME")
def _parse_welcome(client, command, actor, args):
    """Parse a WELCOME and update user state, then dispatch a WELCOME event."""
    # The trailing word of the welcome line is our full hostmask.
    _, _, hostmask = args.rpartition(' ')
    client.user.update_from_hostmask(hostmask)
    client.dispatch_event("WELCOME", hostmask)
@parser("CREATED")
def _parse_created(client, command, actor, args):
    """Parse CREATED and update the Host object."""
    m = re.search("This server was created (.+)$", args)
    if m:
        # Stores the free-form creation date text exactly as sent.
        client.server.created = m.group(1)
@parser("MYINFO")
def _parse_myinfo(client, command, actor, args):
    """Parse MYINFO and update the Host object."""
    # Format: <our-nick> <servername> <version> <usermodes> <chanmodes> [...]
    _, server, version, usermodes, channelmodes = args.split(None, 5)[:5]
    s = client.server
    s.host = server
    s.version = version
    s.user_modes = set(usermodes)
    s.channel_modes = set(channelmodes)
@parser("FEATURELIST")
def _parse_featurelist(client, command, actor, args):
    """Parse FEATURELIST and update the Host object."""
    # Strip off ":are supported by this server"
    args = args.rsplit(":", 1)[0]
    # Strip off the nick; we know it's addressed to us.
    _, _, args = args.partition(' ')
    items = args.split()
    for item in items:
        # Each item is either "FEATURE" (flag) or "FEATURE=value".
        feature, _, value = item.partition("=")
        # Convert integer values to actual integers for convenience
        try:
            value = int(value)
        except (ValueError, TypeError):
            pass
        client.server.features[feature] = value
@parser("NAMREPLY")
def _parse_namreply(client, command, actor, args):
    """Parse NAMREPLY and update a Channel object."""
    prefixes = client._get_prefixes()
    channelinfo, _, useritems = args.partition(' :')
    _, _, channel = channelinfo.rpartition(' ')  # channeltype channelname
    c = client.server.channels.get(channel)
    if not c:
        _log.warning("Ignoring NAMREPLY for channel '%s' which we are not in.",
                     channel)
        return
    # We bypass Channel.add_user() here because we just want to sync in any
    # users we don't already have, regardless of if other users exist, and
    # we don't want the warning spam.
    for nick in useritems.split():
        # Peel off every leading prefix symbol (@, +, ...) and record the
        # corresponding channel-user modes.
        modes = set()
        while nick[0] in prefixes:
            modes.add(prefixes[nick[0]])
            nick = nick[1:]
        user = c.members.get(nick)
        if not user:
            user = c.members[nick] = User(nick)
            _log.debug("Added user %s to channel %s", user, channel)
        user.modes |= modes
@parser("ENDOFNAMES")
def _parse_endofnames(client, command, actor, args):
    """Parse an ENDOFNAMES and dispatch a MEMBERS event for the channel."""
    args = args.split(" :", 1)[0]  # Strip off human-readable message
    _, _, channel = args.rpartition(' ')
    # Prefer the tracked Channel object; fall back to the raw name.
    channel = client.server.channels.get(channel) or channel
    client.dispatch_event('MEMBERS', channel)
@parser("MODE")
def _parse_mode(client, command, actor, args):
    """Parse a mode changes, update states, and dispatch MODE events."""
    chantypes = client.server.features.get("CHANTYPES", "#")
    channel, _, args = args.partition(" ")
    args = args.lstrip(":")
    if channel[0] not in chantypes:
        # Personal modes
        for modes in args.split():
            op, modes = modes[0], modes[1:]
            for mode in modes:
                if op == "+":
                    client.user.modes.add(mode)
                else:
                    client.user.modes.discard(mode)
                client.dispatch_event("MODE", actor, client.user, op, mode, None)
        return
    # channel-specific modes
    chan = client.server.channels[channel]
    user_modes = set(client._get_prefixes().itervalues())
    chanmodes = client._get_chanmodes()
    list_modes, always_arg_modes, set_arg_modes, toggle_modes = chanmodes
    argument_modes = list_modes | always_arg_modes | set_arg_modes
    # Walk the token stream: each mode that takes an argument consumes the
    # next token; toggles consume nothing.
    tokens = args.split()
    while tokens:
        modes, tokens = tokens[0], tokens[1:]
        op, modes = modes[0], modes[1:]
        for mode in modes:
            argument = None
            if mode in (user_modes | argument_modes):
                argument, tokens = tokens[0], tokens[1:]
            if mode in user_modes:
                # Argument is a nick: update that member's modes.
                user = client.server.channels[channel].members[argument]
                if op == "+":
                    user.modes.add(mode)
                else:
                    user.modes.discard(mode)
            if op == "+":
                if mode in (always_arg_modes | set_arg_modes):
                    chan.modes[mode] = argument
                elif mode in toggle_modes:
                    chan.modes[mode] = True
            else:
                if mode in (always_arg_modes | set_arg_modes | toggle_modes):
                    if mode in chan.modes:
                        del chan.modes[mode]
            # list-type modes (bans+exceptions, invite masks) aren't stored,
            # but do generate MODE events.
            client.dispatch_event("MODE", actor, chan, op, mode, argument)
@parser("WHOISUSER", "WHOISCHANNELS", "WHOISIDLE", "WHOISSERVER",
        "WHOISOPERATOR", "WHOISACCOUNT", "WHOISBOT", "WHOISREGNICK",
        "ENDOFWHOIS")
def _parse_whois(client, command, actor, args):
    """Parse the content responses from a WHOIS query.
    Individual response lines are parsed and used to fill in data in a buffer,
    the full contents of which are then sent as the argument to the WHOIS
    event dispatched when an ENDOFWHOIS line is received from the server.
    """
    _, _, args = args.partition(" ")  # Strip off recipient, we know it's us
    nick, _, args = args.partition(" ")
    if client.server._whois.get("nick") != nick:
        # First line of a new WHOIS response; reset the buffer.
        client.server._whois = {"nick": nick}
    response = client.server._whois
    if command == "WHOISUSER":
        first, _, response["realname"] = args.partition(":")
        response["username"], response["host"] = first.split()[:2]
        return
    # BUG FIX: this previously tested "WHOISISSERVER" (typo), which never
    # matched the registered WHOISSERVER event, so server info was dropped.
    if command == "WHOISSERVER":
        response["server"], _, response["serverinfo"] = args.partition(" :")
        return
    if command == "WHOISOPERATOR":
        response["oper"] = True
        return
    if command == "WHOISIDLE":
        response["idle"], _, _ = args.partition(" :")
        response["idle"] = int(response["idle"])
        return
    if command == "WHOISCHANNELS":
        # Map channel name -> the user's prefix symbol there ("" if none).
        modes = "".join(client._get_prefixes())
        # (Removed a leftover debug `print repr(modes)` statement.)
        channels = args.lstrip(":").split()
        response["channels"] = dict(
            (chan.lstrip(modes), chan[0] if chan[0] in modes else "")
            for chan in channels)
        return
    if command == "WHOISACCOUNT":
        response["account"], _, _ = args.partition(" :")
        return
    if command == "WHOISBOT":
        response["bot"] = True
        return
    if command == "WHOISREGNICK":
        response["registered"] = True
        return
    if command == "ENDOFWHOIS":
        client.dispatch_event("WHOIS", response)
@parser("NICK")
def _parse_nick(client, command, actor, args):
    """Parse a NICK response, update state, and dispatch events.
    Note: this function dispatches both a NICK event and also one or more
    MEMBERS events for each channel the user that changed nick was in.
    """
    old_nick, _, _ = actor.partition('!')
    new_nick = args
    if old_nick == client.user.nick:
        client.user.nick = new_nick
    modified_channels = set()
    for channel in client.server.channels.itervalues():
        user = channel.members.get(old_nick)
        if user:
            # Re-key the member entry under the new nick.
            user.nick = new_nick
            channel.members[new_nick] = user
            del channel.members[old_nick]
            modified_channels.add(channel.name)
    client.dispatch_event("NICK", old_nick, new_nick)
    # NOTE(review): MEMBERS is dispatched with channel *names* here, whereas
    # other dispatch sites pass Channel objects — confirm handlers accept both.
    for channel in modified_channels:
        client.dispatch_event("MEMBERS", channel)
@parser("INVITE")
def _parse_invite(client, command, actor, args):
    """Parse an INVITE and dispatch an event."""
    # Format: "<target-nick> <channel>" (channel may be ":"-prefixed).
    target, _, channel = args.rpartition(" ")
    client.dispatch_event("INVITE", actor, target, channel)
# vim: set ts=4 sts=4 sw=4 et:
|
from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
'''
from django.contrib.auth.models import Champion, Player, Item
from rest_framework import viewsets
from tutorial.quickstart.serializers import ChampionSerializer, PlayerSerializer, ItemSerializer
class ChampionViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = Champion.objects.all()
serializer_class = ChampionSerializer
class PlayerViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Player.objects.all()
serializer_class = PlayerSerializer
class ItemViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Item.objects.all()
serializer_class = ItemSerializer
'''
# Create your views here.
def index(request):
    """Render the splash (landing) page."""
    template = loader.get_template('app/splash.html')
    context = RequestContext(request, {})
    return HttpResponse(template.render(context))
def about(request):
    """Render the about page."""
    template = loader.get_template('app/about.html')
    context = RequestContext(request, {})
    return HttpResponse(template.render(context))
def test(request):
    """Render the index template; appears to be a scratch/testing view."""
    template = loader.get_template('app/index.html')
    context = RequestContext(request, {})
    return HttpResponse(template.render(context))
#
# Table Pages
#
def players(request):
    """Render the players table page."""
    template = loader.get_template('app/players.html')
    # NOTE(review): 'playerdata' is an empty-string placeholder — presumably
    # real player data will be wired in later; confirm.
    context = RequestContext(request, {
        'playerdata' : ''
    })
    return HttpResponse(template.render(context))
def items(request):
    """Render the items table page."""
    template = loader.get_template('app/items.html')
    # NOTE(review): 'itemdata' is an empty-string placeholder — confirm.
    context = RequestContext(request, {
        'itemdata' : ''
    })
    return HttpResponse(template.render(context))
def champions(request):
    """Render the champions table page."""
    template = loader.get_template('app/champions.html')
    # NOTE(review): 'championdata' is an empty-string placeholder — confirm.
    context = RequestContext(request, {
        'championdata' : ''
    })
    return HttpResponse(template.render(context))
#
# Champion Pages
#
def champion(request, id):
    """Render the champion detail page for the given champion id."""
    template = loader.get_template('app/champion.html')
    # BUG FIX: the context previously referenced the undefined name `name`,
    # raising NameError on every request; pass the `id` URL parameter instead.
    context = RequestContext(request, {
        'id' : id
    })
    return HttpResponse(template.render(context))
#
# Item Pages
#
def item(request, id):
    """Render the item detail page for the given item id."""
    template = loader.get_template('app/item.html')
    context = RequestContext(request, {
        'id' : id
    })
    return HttpResponse(template.render(context))
#
# Player Pages
#
def player(request, id):
    """Render the player detail page for the given player id."""
    template = loader.get_template('app/player.html')
    context = RequestContext(request, {
        'id' : id
    })
    return HttpResponse(template.render(context))
Updated views: fix the champion view's template context.
from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
'''
from django.contrib.auth.models import Champion, Player, Item
from rest_framework import viewsets
from tutorial.quickstart.serializers import ChampionSerializer, PlayerSerializer, ItemSerializer
class ChampionViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = Champion.objects.all()
serializer_class = ChampionSerializer
class PlayerViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Player.objects.all()
serializer_class = PlayerSerializer
class ItemViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Item.objects.all()
serializer_class = ItemSerializer
'''
# Create your views here.
def index(request):
    """Render the splash (landing) page."""
    template = loader.get_template('app/splash.html')
    context = RequestContext(request, {})
    return HttpResponse(template.render(context))
def about(request):
    """Render the about page."""
    template = loader.get_template('app/about.html')
    context = RequestContext(request, {})
    return HttpResponse(template.render(context))
def test(request):
    """Render the index template; appears to be a scratch/testing view."""
    template = loader.get_template('app/index.html')
    context = RequestContext(request, {})
    return HttpResponse(template.render(context))
#
# Table Pages
#
def players(request):
    """Render the players table page."""
    template = loader.get_template('app/players.html')
    # NOTE(review): 'playerdata' is an empty-string placeholder — confirm.
    context = RequestContext(request, {
        'playerdata' : ''
    })
    return HttpResponse(template.render(context))
def items(request):
    """Render the items table page."""
    template = loader.get_template('app/items.html')
    # NOTE(review): 'itemdata' is an empty-string placeholder — confirm.
    context = RequestContext(request, {
        'itemdata' : ''
    })
    return HttpResponse(template.render(context))
def champions(request):
    """Render the champions table page."""
    template = loader.get_template('app/champions.html')
    # NOTE(review): 'championdata' is an empty-string placeholder — confirm.
    context = RequestContext(request, {
        'championdata' : ''
    })
    return HttpResponse(template.render(context))
#
# Champion Pages
#
def champion(request, id):
    """Render the champion detail page for the given champion id."""
    template = loader.get_template('app/champion.html')
    # BUG FIX: the two dict entries were missing a separating comma, which is
    # a SyntaxError. Both keys carry the URL's id parameter.
    context = RequestContext(request, {
        'id' : id,
        'name' : id
    })
    return HttpResponse(template.render(context))
#
# Item Pages
#
def item(request, id):
    """Render the item detail page for the given item id."""
    template = loader.get_template('app/item.html')
    context = RequestContext(request, {
        'id' : id
    })
    return HttpResponse(template.render(context))
#
# Player Pages
#
def player(request, id):
    """Render the player detail page for the given player id."""
    template = loader.get_template('app/player.html')
    context = RequestContext(request, {
        'id' : id
    })
    return HttpResponse(template.render(context))
|
#
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 23-08-2017 14:40
# ---
# © 2017 Manuel Bernal Llinares <mbdebian@gmail.com>
# All rights reserved.
#
"""
This module models the trackhub registry
"""
import json
import requests
# App imports
import config_manager
import ensembl.service
from . import models as trackhub_models
# Registry request body model
class TrackhubRegistryRequestBodyModel:
    """Payload for a Trackhub Registry registration request: the hub.txt URL
    plus a mapping of assembly names to their accessions."""
    def __init__(self):
        self.logger = config_manager.get_app_config_manager().get_logger_for(
            "{}.{}".format(__name__, type(self).__name__))
        # hub.txt URL
        self.url = None
        # assembly name -> accession
        self.assembly_accession_map = {}
    def add_accession_for_assembly(self, assembly, accession):
        """Record `accession` for `assembly`; duplicate adds are logged and skipped."""
        if assembly in self.assembly_accession_map:
            self.logger.error(
                "DUPLICATED Assembly '{}' add request, existing accession '{}', "
                "accession requested to be added '{}' - SKIPPED".format(assembly, self.assembly_accession_map[assembly],
                                                                        accession))
        else:
            self.assembly_accession_map[assembly] = accession
            self.logger.info("Assembly '{}' entry added to request body with accession '{}'"
                             .format(assembly, accession))
    def __str__(self):
        # JSON in the shape the registry's registration endpoint expects.
        return json.dumps({'url': self.url, 'assemblies': self.assembly_accession_map})
# Visitor to export the trackhub as an instance of TrackhubRegistryRequestBodyModel
class TrackhubRegistryRequestBodyModelExporter(trackhub_models.TrackHubExporter):
    def __init__(self):
        super().__init__()
    def export_simple_trackhub(self, trackhub_builder):
        # In this case, the export summary will be an instance of TrackhubRegistryRequestBodyModelExporter
        if not self.export_summary:
            self.export_summary = TrackhubRegistryRequestBodyModel()
            ensembl_species_service = ensembl.service.get_service().get_species_data_service()
            # Resolve each assembly to its species entry via the Ensembl service.
            for assembly in trackhub_builder.assemblies:
                self.export_summary \
                    .add_accession_for_assembly(assembly,
                                                ensembl_species_service.get_species_entry_for_assembly(assembly))
        return self.export_summary
class TrackhubRegistryService:
    """Thin client for the Trackhub Registry REST API (login/publish stubs)."""
    __TRACKHUB_REGISTRY_API_SUBPATH_LOGIN = '/api/login'
    def __init__(self, username, password):
        self.username = username
        self.password = password
        self.trackhub_registry_base_url = 'https://www.trackhubregistry.org'
    def __login(self):
        # TODO
        pass
    def __logout(self):
        # TODO
        pass
    def publish_trackhub(self, hub_url, trackhub_registry_model):
        # TODO
        pass
# This module is library code; guard against direct execution.
if __name__ == '__main__':
    print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
Make a login request to the Trackhub Registry API.
#
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 23-08-2017 14:40
# ---
# © 2017 Manuel Bernal Llinares <mbdebian@gmail.com>
# All rights reserved.
#
"""
This module models the trackhub registry
"""
import json
import requests
# App imports
import config_manager
import ensembl.service
from . import models as trackhub_models
# Registry request body model
class TrackhubRegistryRequestBodyModel:
    """Payload for a Trackhub Registry registration request: the hub.txt URL
    plus a mapping of assembly names to their accessions."""

    def __init__(self):
        self.logger = config_manager.get_app_config_manager().get_logger_for(
            "{}.{}".format(__name__, type(self).__name__))
        # hub.txt URL
        self.url = None
        # assembly name -> accession
        self.assembly_accession_map = {}

    def add_accession_for_assembly(self, assembly, accession):
        """Record `accession` for `assembly`; duplicate adds are logged and skipped."""
        if assembly in self.assembly_accession_map:
            self.logger.error(
                "DUPLICATED Assembly '{}' add request, existing accession '{}', "
                "accession requested to be added '{}' - SKIPPED".format(assembly, self.assembly_accession_map[assembly],
                                                                        accession))
            return
        self.assembly_accession_map[assembly] = accession
        self.logger.info("Assembly '{}' entry added to request body with accession '{}'"
                         .format(assembly, accession))

    def __str__(self):
        # JSON in the shape the registry's registration endpoint expects.
        payload = {'url': self.url, 'assemblies': self.assembly_accession_map}
        return json.dumps(payload)
# Visitor to export the trackhub as an instance of TrackhubRegistryRequestBodyModel
class TrackhubRegistryRequestBodyModelExporter(trackhub_models.TrackHubExporter):
    def __init__(self):
        super().__init__()
    def export_simple_trackhub(self, trackhub_builder):
        # In this case, the export summary will be an instance of TrackhubRegistryRequestBodyModelExporter
        if not self.export_summary:
            self.export_summary = TrackhubRegistryRequestBodyModel()
            ensembl_species_service = ensembl.service.get_service().get_species_data_service()
            # Resolve each assembly to its species entry via the Ensembl service.
            for assembly in trackhub_builder.assemblies:
                self.export_summary \
                    .add_accession_for_assembly(assembly,
                                                ensembl_species_service.get_species_entry_for_assembly(assembly))
        return self.export_summary
class TrackhubRegistryService:
    """Thin client for the Trackhub Registry REST API."""
    __TRACKHUB_REGISTRY_API_SUBPATH_LOGIN = '/api/login'

    def __init__(self, username, password):
        self.username = username
        self.password = password
        self.trackhub_registry_base_url = 'https://www.trackhubregistry.org'

    def __login(self):
        # Authenticate against the registry login endpoint using HTTP Basic
        # auth over verified TLS.
        response = requests.get("{}{}"
                                .format(self.trackhub_registry_base_url,
                                        self.__TRACKHUB_REGISTRY_API_SUBPATH_LOGIN),
                                auth=(self.username, self.password),
                                verify=True)
        # BUG FIX: the response was previously discarded. Surface HTTP errors
        # and hand the response back so callers can read the session token.
        response.raise_for_status()
        return response

    def __logout(self):
        # TODO: call the logout endpoint with the token obtained by __login
        pass

    def publish_trackhub(self, hub_url, trackhub_registry_model):
        # TODO: POST the registration body built from trackhub_registry_model
        pass
# This module is library code; guard against direct execution.
if __name__ == '__main__':
    print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
|
import json
from figment import Component
from figment.utils import indent
from theworldfoundry.modes import ActionMode
# Marker component: presence alone gates the meta commands below.
class Meta(Component):
    """Enables an entity to use meta commands."""
class Admin(Component):
    """Enables an entity to use admin commands.

    `aliases` maps short names to entity-id strings for use in admin
    commands (see the entity_id lookups below).
    """
    def __init__(self, aliases=None):
        # BUG FIX: `aliases={}` was a mutable default argument, so every Admin
        # created without aliases shared a single dict; use a None sentinel.
        self.aliases = {} if aliases is None else aliases

    def to_dict(self):
        """Serialize this component to a plain dict."""
        return {
            'aliases': self.aliases,
        }

    @classmethod
    def from_dict(cls, dict_):
        """Reconstruct an Admin component from its serialized dict form."""
        return cls(
            aliases=dict_['aliases'],
        )
@ActionMode.action(r'^!connect$')
def connect(actor):
    """!connect — greet the connecting entity; requires the Meta component."""
    if not actor.is_(Meta):
        actor.tell("You're unable to do that.")
        return
    actor.tell("Welcome to The High Street, {0.Named.Name}.".format(actor))
@ActionMode.action(r'^!q(?:uery)?(?: (?P<query>.+))?$')
def query(actor, query=None):
    """!query [text] — list entity ids, optionally filtered by name substring."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    for entity in actor.zone.all():
        if query is None:
            # No filter: dump every entity id in the zone.
            actor.tell('[{0.id}]'.format(entity))
        elif entity.is_('Named') and query.lower() in entity.Named.name.lower():
            actor.tell('[{0.id}] {0.Named.name}'.format(entity))
@ActionMode.action(r'^!i(?:nspect)? (?P<entity_id>.+)$')
def inspect(actor, entity_id):
    """!inspect <id|alias> — dump an entity's components and their state."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    # Resolve an admin alias first; fall back to the literal id.
    entity_id = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        entity_id = int(entity_id)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    entity = actor.zone.get(entity_id)
    if entity is None:
        actor.tell('No such entity.')
        return
    actor.tell('[{0.id}]'.format(entity))
    actor.tell(indent('hearing: {0}', depth=1).format(json.dumps(entity.hearing)))
    for component in entity.components:
        actor.tell(indent('{0}').format(component.__class__.__name__))
        for key, value in component.to_dict().items():
            actor.tell(indent('{0}: {1}', depth=2).format(key, json.dumps(value)))
@ActionMode.action(r'^!e(?:dit)? (?P<entity_id>.+) detach (?P<component_class_name>.+)$')
def detach(actor, entity_id, component_class_name, arguments=None):
    """!edit <id|alias> detach <Component> — remove a component from an entity."""
    # NOTE(review): `arguments` is never used and has no matching regex group
    # (unlike attach) — presumably copy/paste leftover; confirm before removing.
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    # Resolve an admin alias first; fall back to the literal id.
    entity_id = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        entity_id = int(entity_id)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    entity = actor.zone.get(entity_id)
    if entity is None:
        actor.tell('No such entity "{0}".'.format(entity_id))
        return
    component_class = entity.zone.components.get(component_class_name)
    if component_class is None:
        actor.tell('No such component "{0}".'.format(component_class_name))
        return
    if not entity.is_(component_class):
        actor.tell('[{0.id}] has no component "{1}".'.format(entity, component_class_name))
        return
    entity.components.remove(component_class)
    actor.tell('[{0.id}] is no longer "{1}".'.format(entity, component_class_name))
@ActionMode.action(r'^!e(?:dit)? (?P<entity_id>.+) attach (?P<component_class_name>.+?)(?: (?P<arguments>.+))?$')
def attach(actor, entity_id, component_class_name, arguments=None):
    """Attach a component to an entity, built from optional JSON arguments."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    resolved = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        resolved = int(resolved)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    target = actor.zone.get(resolved)
    if target is None:
        actor.tell('No such entity "{0}".'.format(resolved))
        return
    comp_cls = target.zone.components.get(component_class_name)
    if comp_cls is None:
        actor.tell('No such component "{0}".'.format(component_class_name))
        return
    if target.is_(comp_cls):
        actor.tell('[{0.id}] already has component "{1}".'.format(target, component_class_name))
        return
    # No arguments given -> construct the component from an empty JSON object.
    payload = '{}' if arguments is None else arguments
    try:
        component = comp_cls.from_dict(json.loads(payload))
    except Exception as e:
        actor.tell('[{0.id}] failed to attach "{1}":'.format(target, component_class_name))
        actor.tell(indent(str(e)))
        return
    target.components.add(component)
    actor.tell('[{0.id}] is now "{1}".'.format(target, component_class_name))
@ActionMode.action(r'^!e(?:dit)? (?P<entity_id>.+) set (?P<attribute>.+?) (?P<value>.+)$')
def set_attribute(actor, entity_id, attribute, value):
    """Set a raw attribute on an entity from a JSON-encoded value."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    resolved = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        resolved = int(resolved)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    target = actor.zone.get(resolved)
    if target is None:
        actor.tell('No such entity "{0}".'.format(resolved))
        return
    try:
        decoded = json.loads(value)
    except Exception as e:
        actor.tell('[{0.id}] failed to set "{1}":'.format(target, attribute))
        actor.tell(indent(str(e)))
        return
    if attribute == 'hearing':
        target.hearing = decoded
    # TODO: Components — split attribute on dot. Any attribute other than
    # 'hearing' is currently ignored, though 'OK.' is still reported.
    actor.tell('OK.')
@ActionMode.action(r'^!s(?:pawn)?(?: (?P<template>.+))?$')
def spawn(actor, template=None):
    """Spawn a fresh entity and record it under the '!s' alias."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    # TODO: handle templates
    spawned = actor.zone.spawn()
    actor.perform(add_alias, alias='!s', entity_id=spawned.id)
    actor.tell('Spawned [{0.id}].'.format(spawned))
@ActionMode.action(r'^!d(?:estroy)? (?P<entity_id>.+)$')
def destroy(actor, entity_id):
    """Destroy an entity addressed by numeric ID or alias."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    resolved = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        resolved = int(resolved)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    target = actor.zone.get(resolved)
    if target is None:
        actor.tell('No such entity.')
        return
    target.zone.destroy(target)
    actor.tell('Destroyed [{0.id}].'.format(target))
@ActionMode.action(r'^!a(?:lias)? (?:add|create|set) (?P<alias>.+) (?P<entity_id>.+)$')
def add_alias(actor, alias, entity_id):
    """Map a short alias to a numeric entity ID (aliases may chain once)."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    resolved = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        resolved = int(resolved)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    actor.Admin.aliases[alias] = resolved
@ActionMode.action(r'^!a(?:lias)? (rm|remove|del(ete)?|unset) (?P<alias>.+)$')
def remove_alias(actor, alias):
    """Drop an alias; unknown aliases are ignored silently."""
    if actor.is_(Admin):
        actor.Admin.aliases.pop(alias, None)
    else:
        actor.tell("You're unable to do that.")
@ActionMode.action(r'^!a(?:lias)?(?: list)?$')
def list_aliases(actor):
    """List all of the actor's aliases in alphabetical order."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    for alias, entity_id in sorted(actor.Admin.aliases.items()):
        # Fix: the previous body fetched actor.zone.get(entity_id) into a
        # local that was never used — the dead lookup has been removed.
        actor.tell('{0}: {1}'.format(alias, entity_id))
Show name in Admin query if entity is Named
import json
from figment import Component
from figment.utils import indent
from theworldfoundry.modes import ActionMode
class Meta(Component):
    """Enables an entity to use meta commands."""
    # Marker component: its mere presence (checked via actor.is_(Meta)) grants
    # access to meta commands such as !connect; it carries no state of its own.
class Admin(Component):
    """Enables an entity to use admin commands.

    Holds a per-entity ``aliases`` mapping from short alias strings to
    numeric entity IDs, used by the !alias / !spawn commands.
    """
    def __init__(self, aliases=None):
        # Fix: the previous default of ``aliases={}`` was a shared mutable
        # default argument — every Admin created without an explicit mapping
        # shared (and mutated) the same dict. Passing None keeps the call
        # signature backward-compatible.
        self.aliases = {} if aliases is None else aliases
    def to_dict(self):
        """Serialize this component to a JSON-compatible dict."""
        return {
            'aliases': self.aliases,
        }
    @classmethod
    def from_dict(cls, dict_):
        """Rebuild an Admin component from the output of ``to_dict``."""
        return cls(
            aliases=dict_['aliases'],
        )
@ActionMode.action(r'^!connect$')
def connect(actor):
    """Greet a Meta-enabled entity when it connects."""
    if not actor.is_(Meta):
        actor.tell("You're unable to do that.")
        return
    # Fix: the Named component exposes a lowercase ``name`` attribute
    # (see entity.Named.name in query()); the previous placeholder
    # ``{0.Named.Name}`` would raise AttributeError at format time.
    actor.tell("Welcome to The High Street, {0.Named.name}.".format(actor))
@ActionMode.action(r'^!q(?:uery)?(?: (?P<query>.+))?$')
def query(actor, query=None):
    """List every entity (ID plus name), optionally filtered by name substring."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    # Lowercase the needle once instead of on every iteration.
    needle = None if query is None else query.lower()
    for entity in actor.zone.all():
        label = entity.Named.name if entity.is_('Named') else 'something unnamed'
        if needle is None or needle in label.lower():
            actor.tell('[{0.id}] {1}'.format(entity, label))
@ActionMode.action(r'^!i(?:nspect)? (?P<entity_id>.+)$')
def inspect(actor, entity_id):
    """Dump an entity's hearing flag and the state of each of its components."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    # Aliases take precedence; whatever remains must parse as an integer ID.
    resolved = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        resolved = int(resolved)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    target = actor.zone.get(resolved)
    if target is None:
        actor.tell('No such entity.')
        return
    actor.tell('[{0.id}]'.format(target))
    actor.tell(indent('hearing: {0}', depth=1).format(json.dumps(target.hearing)))
    for comp in target.components:
        actor.tell(indent('{0}').format(type(comp).__name__))
        for field, val in comp.to_dict().items():
            actor.tell(indent('{0}: {1}', depth=2).format(field, json.dumps(val)))
@ActionMode.action(r'^!e(?:dit)? (?P<entity_id>.+) detach (?P<component_class_name>.+)$')
def detach(actor, entity_id, component_class_name, arguments=None):
    """Remove a component from an entity, looked up by class name."""
    # NOTE(review): ``arguments`` is never used (the detach pattern has no
    # arguments group); it looks copied from attach() — confirm before removing.
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    resolved = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        resolved = int(resolved)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    target = actor.zone.get(resolved)
    if target is None:
        actor.tell('No such entity "{0}".'.format(resolved))
        return
    comp_cls = target.zone.components.get(component_class_name)
    if comp_cls is None:
        actor.tell('No such component "{0}".'.format(component_class_name))
        return
    if not target.is_(comp_cls):
        actor.tell('[{0.id}] has no component "{1}".'.format(target, component_class_name))
        return
    target.components.remove(comp_cls)
    actor.tell('[{0.id}] is no longer "{1}".'.format(target, component_class_name))
@ActionMode.action(r'^!e(?:dit)? (?P<entity_id>.+) attach (?P<component_class_name>.+?)(?: (?P<arguments>.+))?$')
def attach(actor, entity_id, component_class_name, arguments=None):
    """Attach a component to an entity, built from optional JSON arguments."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    resolved = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        resolved = int(resolved)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    target = actor.zone.get(resolved)
    if target is None:
        actor.tell('No such entity "{0}".'.format(resolved))
        return
    comp_cls = target.zone.components.get(component_class_name)
    if comp_cls is None:
        actor.tell('No such component "{0}".'.format(component_class_name))
        return
    if target.is_(comp_cls):
        actor.tell('[{0.id}] already has component "{1}".'.format(target, component_class_name))
        return
    # No arguments given -> construct the component from an empty JSON object.
    payload = '{}' if arguments is None else arguments
    try:
        component = comp_cls.from_dict(json.loads(payload))
    except Exception as e:
        actor.tell('[{0.id}] failed to attach "{1}":'.format(target, component_class_name))
        actor.tell(indent(str(e)))
        return
    target.components.add(component)
    actor.tell('[{0.id}] is now "{1}".'.format(target, component_class_name))
@ActionMode.action(r'^!e(?:dit)? (?P<entity_id>.+) set (?P<attribute>.+?) (?P<value>.+)$')
def set_attribute(actor, entity_id, attribute, value):
    """Set a raw attribute on an entity from a JSON-encoded value."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    resolved = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        resolved = int(resolved)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    target = actor.zone.get(resolved)
    if target is None:
        actor.tell('No such entity "{0}".'.format(resolved))
        return
    try:
        decoded = json.loads(value)
    except Exception as e:
        actor.tell('[{0.id}] failed to set "{1}":'.format(target, attribute))
        actor.tell(indent(str(e)))
        return
    if attribute == 'hearing':
        target.hearing = decoded
    # TODO: Components — split attribute on dot. Any attribute other than
    # 'hearing' is currently ignored, though 'OK.' is still reported.
    actor.tell('OK.')
@ActionMode.action(r'^!s(?:pawn)?(?: (?P<template>.+))?$')
def spawn(actor, template=None):
    """Spawn a fresh entity and record it under the '!s' alias."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    # TODO: handle templates
    spawned = actor.zone.spawn()
    actor.perform(add_alias, alias='!s', entity_id=spawned.id)
    actor.tell('Spawned [{0.id}].'.format(spawned))
@ActionMode.action(r'^!d(?:estroy)? (?P<entity_id>.+)$')
def destroy(actor, entity_id):
    """Destroy an entity addressed by numeric ID or alias."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    resolved = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        resolved = int(resolved)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    target = actor.zone.get(resolved)
    if target is None:
        actor.tell('No such entity.')
        return
    target.zone.destroy(target)
    actor.tell('Destroyed [{0.id}].'.format(target))
@ActionMode.action(r'^!a(?:lias)? (?:add|create|set) (?P<alias>.+) (?P<entity_id>.+)$')
def add_alias(actor, alias, entity_id):
    """Map a short alias to a numeric entity ID (aliases may chain once)."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    resolved = actor.Admin.aliases.get(entity_id, entity_id)
    try:
        resolved = int(resolved)
    except ValueError:
        actor.tell('Entity ID must be numeric.')
        return
    actor.Admin.aliases[alias] = resolved
@ActionMode.action(r'^!a(?:lias)? (rm|remove|del(ete)?|unset) (?P<alias>.+)$')
def remove_alias(actor, alias):
    """Drop an alias; unknown aliases are ignored silently."""
    if actor.is_(Admin):
        actor.Admin.aliases.pop(alias, None)
    else:
        actor.tell("You're unable to do that.")
@ActionMode.action(r'^!a(?:lias)?(?: list)?$')
def list_aliases(actor):
    """List all of the actor's aliases in alphabetical order."""
    if not actor.is_(Admin):
        actor.tell("You're unable to do that.")
        return
    for alias, entity_id in sorted(actor.Admin.aliases.items()):
        # Fix: the previous body fetched actor.zone.get(entity_id) into a
        # local that was never used — the dead lookup has been removed.
        actor.tell('{0}: {1}'.format(alias, entity_id))
|
from datetime import datetime, timedelta
from django.test import TestCase
from devilry.apps.core.testhelper import TestHelper
from devilry.utils.rest_testclient import RestClient
from devilry.apps.core.models import Deadline
from devilry.apps.core.models import AssignmentGroup
from devilry.utils.restformat import format_datetime
from devilry_subjectadmin.rest.deadlinesbulk import encode_bulkdeadline_id
from devilry_subjectadmin.rest.deadlinesbulk import decode_bulkdeadline_id
from devilry_subjectadmin.rest.deadlinesbulk import sha1hash
class TestRestDeadlinesBulkList(TestCase):
    """Tests for listing bulk deadlines via the deadlinesbulk REST API.

    NOTE: Python 2 code (xrange); fixtures are built with the devilry
    TestHelper path DSL, e.g. 'uni;sub.p1.a1.g0.d1:ends(5)'.
    """
    def setUp(self):
        self.testhelper = TestHelper()
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-2)'], # 2 months ago
                            assignments=['a1:admin(adm):pub(0)']) # 0 days after period begins
        self.client = RestClient()
        self.url = '/devilry_subjectadmin/rest/deadlinesbulk/{0}/'.format(self.testhelper.sub_p1_a1.id)
    def _listas(self, username, **data):
        # GET the list URL while logged in as ``username``.
        self.client.login(username=username, password='test')
        return self.client.rest_get(self.url, **data)
    def test_get_empty(self):
        content, response = self._listas('adm')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content), 0)
    def test_get_simple(self):
        # Three groups sharing the same deadline+text collapse to ONE entry.
        for groupnum in xrange(3):
            # deadline 5 days after assignment starts
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}.d1:ends(5)'.format(groupnum))
        content, response = self._listas('adm')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content), 1)
        d1 = content[0]
        self.assertEquals(set(d1.keys()),
                          set(['deadline', 'text', 'groups', 'offset_from_now',
                               'in_the_future', 'bulkdeadline_id', 'url', 'groups']))
        self.assertEquals(len(d1['groups']), 3)
        self.assertEquals(d1['in_the_future'], False)
        self.assertEquals(d1['text'], None)
    def test_get_textdifference(self):
        for groupnum in xrange(3):
            # deadline 5 days after assignment starts
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}.d1:ends(5)'.format(groupnum))
        # Change text on g1_d1, which should make it a separate entry in the list
        self.testhelper.sub_p1_a1_g1_d1.text = 'Test'
        self.testhelper.sub_p1_a1_g1_d1.save()
        content, response = self._listas('adm')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content), 2)
        g2_g3_d1 = content[0]
        g1_d1 = content[1]
        self.assertEquals(g1_d1['text'], 'Test')
        self.assertEquals(len(g1_d1['groups']), 1)
        self.assertEquals(g2_g3_d1['text'], None)
        self.assertEquals(len(g2_g3_d1['groups']), 2)
    def test_get_multiple_and_order(self):
        # Entries are ordered with the latest deadline first.
        for groupnum in xrange(3):
            # deadline 5 days after assignment starts
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}.d1:ends(5)'.format(groupnum))
        for groupnum in xrange(2):
            # deadline 70 days after assignment starts, which should be in the future
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}.d2:ends(70)'.format(groupnum))
        content, response = self._listas('adm')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content), 2)
        d1 = content[1]
        d2 = content[0]
        self.assertEquals(len(d1['groups']), 3)
        self.assertEquals(d1['in_the_future'], False)
        self.assertEquals(len(d2['groups']), 2)
        self.assertEquals(d2['in_the_future'], True)
    def test_get_nobody(self):
        # A user with no admin rights gets 403.
        self.testhelper.create_user('nobody')
        content, response = self._listas('nobody')
        self.assertEquals(response.status_code, 403)
class TestRestDeadlinesBulkCreate(TestCase):
    """Tests for bulk-creating deadlines (POST) through the deadlinesbulk API.

    Covers the ``createmode`` options: 'failed', 'failed-or-no-feedback',
    'no-deadlines' and 'specific-groups'. NOTE: Python 2 code (xrange).
    """
    def setUp(self):
        self.testhelper = TestHelper()
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-2)']) # 2 months ago
        self.testhelper.sub_p1.start_time = datetime(2000, 1, 1, 22, 30, 49)
        self.testhelper.sub_p1.save()
        self.testhelper.add_to_path('uni;sub.p1.a1:admin(adm):pub(0)') # 0 days after period begins + 2 sec
        self.client = RestClient()
        for groupnum in xrange(3):
            # deadline 5 days after assignment starts
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}:candidate(cand1):examiner(exam1).d1:ends(5)'.format(groupnum))
    def _geturl(self):
        # URL for the bulk-deadlines resource of the a1 assignment.
        return '/devilry_subjectadmin/rest/deadlinesbulk/{0}/'.format(self.testhelper.sub_p1_a1.id)
    def _postas(self, username, data):
        # POST ``data`` while logged in as ``username``.
        self.client.login(username=username, password='test')
        return self.client.rest_post(self._geturl(), data)
    def _itergroups(self):
        # Yield the three fixture groups, re-fetched from the database.
        for groupnum in xrange(3):
            group_id = getattr(self.testhelper, 'sub_p1_a1_g{0}'.format(groupnum)).id
            group = AssignmentGroup.objects.get(id=group_id)
            yield group
    def test_post_no_matching_groups(self):
        # Sanity: none of the groups have feedback, so createmode='failed'
        # matches nothing and must yield a 400 with a field error.
        for group in self._itergroups():
            # NOTE(review): ``deadlines`` is computed but never used here.
            deadlines = group.deadlines.all().order_by('-deadline')
            self.assertEquals(group.feedback, None)
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'createmode': 'failed'})
        self.assertEquals(response.status_code, 400)
        self.assertEquals(content['field_errors']['createmode'], ['The given option did not match any groups.'])
    def test_post_createmode_failed(self):
        # Only the group with a failing grade (g1) gets the new deadline.
        self.testhelper.add_delivery('sub.p1.a1.g1', {'bad.py': ['print ', 'bah']})
        self.testhelper.add_feedback('sub.p1.a1.g1', verdict={'grade': 'F', 'points': 30, 'is_passing_grade': False})
        g1 = AssignmentGroup.objects.get(id=self.testhelper.sub_p1_a1_g1.id)
        self.assertFalse(g1.is_open)
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 0)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'createmode': 'failed'})
        # Check response
        self.assertEquals(response.status_code, 201)
        self.assertEquals(decode_bulkdeadline_id(content['bulkdeadline_id'])[0],
                          new_deadline)
        self.assertEquals(len(content['groups']), 1)
        self.assertEquals(content['text'], 'Created')
        self.assertEquals(content['deadline'], format_datetime(new_deadline))
        # Check actual data
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 1)
        g1 = AssignmentGroup.objects.get(id=self.testhelper.sub_p1_a1_g1.id)
        deadlines = g1.deadlines.all().order_by('-deadline')
        self.assertEquals(len(deadlines), 2)
        self.assertEquals(deadlines[0].deadline, new_deadline)
        self.assertEquals(deadlines[0].text, 'Created')
        self.assertTrue(g1.is_open) # Group was automatically opened in devilry.apps.core.models.Deadline.save()
    def test_post_createmode_failed_or_no_feedback(self):
        # Failed (g0) and no-feedback (g2) groups get a deadline; passed (g1) does not.
        # Fail g0
        self.testhelper.add_delivery('sub.p1.a1.g0', {'bad.py': ['print ', 'bah']})
        self.testhelper.add_feedback('sub.p1.a1.g0', verdict={'grade': 'F', 'points': 30, 'is_passing_grade': False})
        # Pass g1
        self.testhelper.add_delivery('sub.p1.a1.g1', {'good.py': ['print ', 'bah']})
        self.testhelper.add_feedback('sub.p1.a1.g1', verdict={'grade': 'A', 'points': 100, 'is_passing_grade': True})
        # g2 has no feedback
        self.assertEquals(self.testhelper.sub_p1_a1_g2.feedback, None)
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 0)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'createmode': 'failed-or-no-feedback'})
        # Check response
        self.assertEquals(response.status_code, 201)
        self.assertEquals(len(content['groups']), 2)
        # Check actual data
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 2)
        g0 = self.testhelper.sub_p1_a1_g0
        g2 = self.testhelper.sub_p1_a1_g2
        for group in (g0, g2):
            deadlines = group.deadlines.all().order_by('-deadline')
            self.assertEquals(len(deadlines), 2)
            self.assertEquals(deadlines[0].deadline, new_deadline)
            self.assertEquals(deadlines[0].text, 'Created')
        g1 = self.testhelper.sub_p1_a1_g1
        self.assertEquals(g1.deadlines.count(), 1) # We did not a deadline to g1 because they have passing grade
    def test_post_createmode_no_deadlines(self):
        # Only groups without any deadline (the extragroup) are targeted.
        self.assertEquals(self.testhelper.sub_p1_a1_g1.deadlines.count(), 1)
        self.testhelper.add_to_path('uni;sub.p1.a1.extragroup')
        self.assertEquals(self.testhelper.sub_p1_a1_extragroup.deadlines.count(), 0)
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 0)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'createmode': 'no-deadlines'})
        # Check response
        self.assertEquals(response.status_code, 201)
        self.assertEquals(decode_bulkdeadline_id(content['bulkdeadline_id'])[0],
                          new_deadline)
        self.assertEquals(len(content['groups']), 1)
        self.assertEquals(content['text'], 'Created')
        self.assertEquals(content['deadline'], format_datetime(new_deadline))
        # Check actual data
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 1)
        extragroup = AssignmentGroup.objects.get(id=self.testhelper.sub_p1_a1_extragroup.id)
        deadlines = extragroup.deadlines.all()
        self.assertEquals(len(deadlines), 1)
        self.assertEquals(deadlines[0].deadline, new_deadline)
        self.assertEquals(deadlines[0].text, 'Created')
        self.assertTrue(extragroup.is_open) # Group was automatically opened in devilry.apps.core.models.Deadline.save()
    def test_post_createmode_specific_groups(self):
        # Explicit group_ids selects exactly the listed groups.
        self.assertEquals(self.testhelper.sub_p1_a1_g1.deadlines.count(), 1)
        self.assertEquals(self.testhelper.sub_p1_a1_g2.deadlines.count(), 1)
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 0)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'group_ids': [self.testhelper.sub_p1_a1_g1.id,
                                                               self.testhelper.sub_p1_a1_g2.id],
                                                 'createmode': 'specific-groups'})
        # Check response
        self.assertEquals(response.status_code, 201)
        self.assertEquals(decode_bulkdeadline_id(content['bulkdeadline_id'])[0],
                          new_deadline)
        self.assertEquals(len(content['groups']), 2)
        self.assertEquals(content['text'], 'Created')
        self.assertEquals(content['deadline'], format_datetime(new_deadline))
        # Check actual data
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 2)
        g1 = self.testhelper.reload_from_db(self.testhelper.sub_p1_a1_g1)
        deadlines = g1.deadlines.all()
        self.assertEquals(len(deadlines), 2)
        self.assertEquals(deadlines[0].deadline, new_deadline)
        self.assertEquals(deadlines[0].text, 'Created')
    def test_post_createmode_specific_groups_nogroups(self):
        # 'specific-groups' without group_ids is a validation error.
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 0)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'createmode': 'specific-groups'})
        self.assertEquals(response.status_code, 400)
        self.assertEquals(content['field_errors']['group_ids'][0],
                          '``group_ids`` is required when ``createmode=="specific-groups"``.')
    def test_post_nobody(self):
        # A user with no admin rights gets 403.
        self.testhelper.create_user('nobody')
        content, response = self._postas('nobody', {})
        self.assertEquals(response.status_code, 403)
class TestRestDeadlinesBulkUpdateReadOrDelete(TestCase):
    """Tests for GET/PUT/DELETE on a single bulk deadline, plus the
    encode/decode helpers for bulkdeadline IDs.

    NOTE: Python 2 code (xrange, ``u''`` literals).
    """
    def setUp(self):
        self.testhelper = TestHelper()
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-2)']) # 2 months ago
        self.testhelper.sub_p1.start_time = datetime(2000, 1, 1, 22, 30, 49)
        self.testhelper.sub_p1.save()
        self.testhelper.add_to_path('uni;sub.p1.a1:admin(adm):pub(0)') # 0 days after period begins + 2 sec
        self.client = RestClient()
        for groupnum in xrange(3):
            # deadline 5 days after assignment starts
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}:candidate(cand1):examiner(exam1).d1:ends(5)'.format(groupnum))
    def _geturl(self, deadline=None):
        # URL for one bulk deadline; defaults to the shared d1 deadline.
        deadline = deadline or self.testhelper.sub_p1_a1_g1_d1
        bulkdeadline_id = encode_bulkdeadline_id(deadline)
        return '/devilry_subjectadmin/rest/deadlinesbulk/{0}/{1}'.format(self.testhelper.sub_p1_a1.id,
                                                                         bulkdeadline_id)
    def test_encode_unique_bulkdeadline_id(self):
        # Encoded form: ISO datetime (':' -> '_') + '--' + sha1 of the text.
        d = Deadline(deadline=datetime(2000, 12, 24, 22, 30, 49))
        self.assertEquals(encode_bulkdeadline_id(d),
                          '2000-12-24T22_30_49--')
        d.text = 'Hello world'
        self.assertEquals(encode_bulkdeadline_id(d),
                          '2000-12-24T22_30_49--{0}'.format(sha1hash('Hello world')))
        # Ensure unicode works
        d.text = u'\u00e5ello world'
        self.assertEquals(encode_bulkdeadline_id(d),
                          '2000-12-24T22_30_49--{0}'.format(sha1hash(u'\u00e5ello world')))
    def test_decode_unique_bulkdeadline_id(self):
        self.assertEquals(decode_bulkdeadline_id('2000-12-24T22_30_49--'),
                          (datetime(2000, 12, 24, 22, 30, 49), ''))
        self.assertEquals(decode_bulkdeadline_id('2000-12-24T22_30_49--{0}'.format(sha1hash('Hello world'))),
                          (datetime(2000, 12, 24, 22, 30, 49), sha1hash('Hello world')))
    def _putas(self, username, data):
        # PUT ``data`` to the default bulk deadline while logged in as ``username``.
        self.client.login(username=username, password='test')
        return self.client.rest_put(self._geturl(), data)
    def test_put(self):
        # PUT moves the deadline and sets the text for all three groups.
        self.assertEquals(self.testhelper.sub_p1_a1.publishing_time, datetime(2000, 1, 1, 22, 30, 51))
        self.assertEquals(self.testhelper.sub_p1_a1_g1_d1.deadline,
                          datetime(2000, 1, 1, 22, 30, 51) + timedelta(days=5))
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        content, response = self._putas('adm', {'deadline': format_datetime(new_deadline),
                                                'text': 'Hello'})
        self.assertEquals(response.status_code, 200)
        self.assertEquals(decode_bulkdeadline_id(content['bulkdeadline_id'])[0],
                          new_deadline)
        self.assertEquals(len(content['groups']), 3)
        self.assertEquals(content['text'], 'Hello')
        self.assertEquals(content['deadline'], format_datetime(new_deadline))
        for groupnum in xrange(3):
            deadline_id = getattr(self.testhelper, 'sub_p1_a1_g{0}_d1'.format(groupnum)).id
            deadline = Deadline.objects.get(id=deadline_id)
            self.assertEquals(deadline.deadline, new_deadline)
            self.assertEquals(deadline.text, 'Hello')
    def test_put_nonetext(self):
        # A null text is stored as the empty string.
        self.assertEquals(self.testhelper.sub_p1_a1.publishing_time, datetime(2000, 1, 1, 22, 30, 51))
        self.assertEquals(self.testhelper.sub_p1_a1_g1_d1.deadline,
                          datetime(2000, 1, 1, 22, 30, 51) + timedelta(days=5))
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        content, response = self._putas('adm', {'deadline': format_datetime(new_deadline),
                                                'text': None})
        self.assertEquals(response.status_code, 200)
        for groupnum in xrange(3):
            deadline_id = getattr(self.testhelper, 'sub_p1_a1_g{0}_d1'.format(groupnum)).id
            deadline = Deadline.objects.get(id=deadline_id)
            self.assertEquals(deadline.deadline, new_deadline)
            self.assertEquals(deadline.text, '')
    def test_put_before_publishingtime(self):
        new_deadline = datetime(1990, 1, 1, 20, 30, 40)
        content, response = self._putas('adm', {'deadline': format_datetime(new_deadline),
                                                'text': None})
        self.assertEquals(response.status_code, 400)
        self.assertEquals(content['errors'], ['Deadline cannot be before publishing time.'])
    def test_put_nobody(self):
        self.testhelper.create_user('nobody')
        content, response = self._putas('nobody', {})
        self.assertEquals(response.status_code, 403)
    def test_put_with_bulkdeadline_id(self):
        # A client-supplied bulkdeadline_id in the payload is ignored.
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        content, response = self._putas('adm', {'deadline': format_datetime(new_deadline),
                                                'bulkdeadline_id': 'ignored'})
        self.assertEquals(response.status_code, 200)
    def test_put_group_ids(self):
        # group_ids restricts the update to the listed groups only.
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        g1 = self.testhelper.sub_p1_a1_g1
        g2 = self.testhelper.sub_p1_a1_g2
        self.assertEquals(g1.deadlines.count(), 1)
        content, response = self._putas('adm', {'deadline': format_datetime(new_deadline),
                                                'text': 'Updated',
                                                'group_ids': [g1.id, g2.id]})
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content['groups']), 2)
        group_ids = set([g['id'] for g in content['groups']])
        self.assertEquals(group_ids, set([g1.id, g2.id]))
        for group in g1, g2:
            group = self.testhelper.reload_from_db(group)
            self.assertEquals(group.deadlines.count(), 1)
            deadline = group.deadlines.all()[0]
            self.assertEquals(deadline.deadline, new_deadline)
            self.assertEquals(deadline.text, 'Updated')
    def _getas(self, username, **data):
        # GET the default bulk deadline while logged in as ``username``.
        self.client.login(username=username, password='test')
        return self.client.rest_get(self._geturl(), **data)
    def test_get(self):
        content, response = self._getas('adm')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(decode_bulkdeadline_id(content['bulkdeadline_id'])[0],
                          self.testhelper.sub_p1_a1_g1_d1.deadline)
        self.assertEquals(content['deadline'],
                          format_datetime(self.testhelper.sub_p1_a1_g1_d1.deadline))
        self.assertEquals(content['text'], None)
        self.assertEquals(len(content['groups']), 3)
    def test_get_nobody(self):
        self.testhelper.create_user('nobody')
        content, response = self._getas('nobody')
        self.assertEquals(response.status_code, 403)
    def _deleteas(self, username, deadline=None):
        # DELETE a bulk deadline (defaults to d1) while logged in as ``username``.
        self.client.login(username=username, password='test')
        return self.client.rest_delete(self._geturl(deadline))
    def test_delete_sanity(self):
        # Test that the deadline method actually does what it is supposed to do (only deletes what it should delete)
        new_deadline = datetime(2004, 12, 24, 20, 30, 40)
        created_deadline = None
        for groupnum in xrange(2):
            group = getattr(self.testhelper, 'sub_p1_a1_g{0}'.format(groupnum))
            self.assertEquals(1, group.deadlines.count())
            created_deadline = group.deadlines.create(deadline=new_deadline) # NOTE: Does not matter which deadline object we user, since we only need the datetime and text to generate bulkdeadline_id
            self.assertEquals(2, group.deadlines.count())
        self.testhelper.sub_p1_a1_g2.deadlines.create(deadline=datetime(2006, 12, 24, 20, 30, 40))
        self.testhelper.create_superuser('superuser')
        content, response = self._deleteas('superuser', deadline=created_deadline)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content['deleted_deadline_ids']), 2)
        self.assertEquals(1, self.testhelper.sub_p1_a1_g0.deadlines.count())
        self.assertEquals(1, self.testhelper.sub_p1_a1_g1.deadlines.count())
        self.assertEquals(2, self.testhelper.sub_p1_a1_g2.deadlines.count())
    def test_delete_with_content_as_superuser(self):
        # Superusers may delete deadlines that have deliveries/feedback.
        self.testhelper.create_superuser('superuser')
        self.testhelper.add_delivery('sub.p1.a1.g1', {'bad.py': ['print ', 'bah']})
        self.testhelper.add_feedback('sub.p1.a1.g1', verdict={'grade': 'F', 'points': 30, 'is_passing_grade': False})
        content, response = self._deleteas('superuser')
        self.assertEquals(response.status_code, 200)
    def test_delete_with_content_as_assignmentadm(self):
        # Plain assignment admins may NOT delete deadlines with content.
        self.testhelper.add_delivery('sub.p1.a1.g1', {'bad.py': ['print ', 'bah']})
        self.testhelper.add_feedback('sub.p1.a1.g1', verdict={'grade': 'F', 'points': 30, 'is_passing_grade': False})
        content, response = self._deleteas('adm')
        self.assertEquals(response.status_code, 403)
    def test_delete_without_content_as_assignmentadm(self):
        content, response = self._deleteas('adm')
        self.assertEquals(response.status_code, 200)
    def test_delete_as_nobody(self):
        self.testhelper.create_user('nobody')
        content, response = self._deleteas('nobody')
        self.assertEquals(response.status_code, 403)
devilry_subjectadmin...deadlinesbulk: Fixed most of the tests.
from datetime import datetime, timedelta
from django.test import TestCase
from devilry.apps.core.testhelper import TestHelper
from devilry.utils.rest_testclient import RestClient
from devilry.apps.core.models import Deadline
from devilry.apps.core.models import AssignmentGroup
from devilry.utils.restformat import format_datetime
from devilry_subjectadmin.rest.deadlinesbulk import encode_bulkdeadline_id
from devilry_subjectadmin.rest.deadlinesbulk import decode_bulkdeadline_id
from devilry_subjectadmin.rest.deadlinesbulk import sha1hash
class TestRestDeadlinesBulkList(TestCase):
    """Tests for listing bulk deadlines via the deadlinesbulk REST API.

    NOTE: Python 2 code (xrange); fixtures are built with the devilry
    TestHelper path DSL, e.g. 'uni;sub.p1.a1.g0.d1:ends(5)'.
    """
    def setUp(self):
        self.testhelper = TestHelper()
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-2)'], # 2 months ago
                            assignments=['a1:admin(adm):pub(0)']) # 0 days after period begins
        self.client = RestClient()
        self.url = '/devilry_subjectadmin/rest/deadlinesbulk/{0}/'.format(self.testhelper.sub_p1_a1.id)
    def _listas(self, username, **data):
        # GET the list URL while logged in as ``username``.
        self.client.login(username=username, password='test')
        return self.client.rest_get(self.url, **data)
    def test_get_empty(self):
        content, response = self._listas('adm')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content), 0)
    def test_get_simple(self):
        # Three groups sharing the same deadline+text collapse to ONE entry.
        for groupnum in xrange(3):
            # deadline 5 days after assignment starts
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}.d1:ends(5)'.format(groupnum))
        content, response = self._listas('adm')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content), 1)
        d1 = content[0]
        self.assertEquals(set(d1.keys()),
                          set(['deadline', 'text', 'groups', 'offset_from_now',
                               'in_the_future', 'bulkdeadline_id', 'url', 'groups']))
        self.assertEquals(len(d1['groups']), 3)
        self.assertEquals(d1['in_the_future'], False)
        self.assertEquals(d1['text'], None)
    def test_get_textdifference(self):
        for groupnum in xrange(3):
            # deadline 5 days after assignment starts
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}.d1:ends(5)'.format(groupnum))
        # Change text on g1_d1, which should make it a separate entry in the list
        self.testhelper.sub_p1_a1_g1_d1.text = 'Test'
        self.testhelper.sub_p1_a1_g1_d1.save()
        content, response = self._listas('adm')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content), 2)
        g2_g3_d1 = content[0]
        g1_d1 = content[1]
        self.assertEquals(g1_d1['text'], 'Test')
        self.assertEquals(len(g1_d1['groups']), 1)
        self.assertEquals(g2_g3_d1['text'], None)
        self.assertEquals(len(g2_g3_d1['groups']), 2)
    def test_get_multiple_and_order(self):
        # Entries are ordered with the latest deadline first.
        for groupnum in xrange(3):
            # deadline 5 days after assignment starts
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}.d1:ends(5)'.format(groupnum))
        for groupnum in xrange(2):
            # deadline 70 days after assignment starts, which should be in the future
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}.d2:ends(70)'.format(groupnum))
        content, response = self._listas('adm')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content), 2)
        d1 = content[1]
        d2 = content[0]
        self.assertEquals(len(d1['groups']), 3)
        self.assertEquals(d1['in_the_future'], False)
        self.assertEquals(len(d2['groups']), 2)
        self.assertEquals(d2['in_the_future'], True)
    def test_get_nobody(self):
        # A user with no admin rights gets 403.
        self.testhelper.create_user('nobody')
        content, response = self._listas('nobody')
        self.assertEquals(response.status_code, 403)
class TestRestDeadlinesBulkCreate(TestCase):
    """Tests for POST (bulk create) on the deadlinesbulk REST API."""

    def setUp(self):
        # uni -> sub -> p1 -> a1 with three groups, each with one deadline
        # 5 days after the assignment starts.
        self.testhelper = TestHelper()
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-2)'])  # 2 months ago
        self.testhelper.sub_p1.start_time = datetime(2000, 1, 1, 22, 30, 49)
        self.testhelper.sub_p1.save()
        self.testhelper.add_to_path('uni;sub.p1.a1:admin(adm):pub(0)')  # 0 days after period begins + 2 sec
        self.client = RestClient()
        for groupnum in xrange(3):
            # deadline 5 days after assignment starts
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}:candidate(cand1):examiner(exam1).d1:ends(5)'.format(groupnum))

    def _geturl(self):
        return '/devilry_subjectadmin/rest/deadlinesbulk/{0}/'.format(self.testhelper.sub_p1_a1.id)

    def _postas(self, username, data):
        # Helper: log in as ``username`` and POST ``data`` to the bulk URL.
        self.client.login(username=username, password='test')
        return self.client.rest_post(self._geturl(), data)

    def _itergroups(self):
        # Yield fresh-from-db copies of the three groups created in setUp().
        for groupnum in xrange(3):
            group_id = getattr(self.testhelper, 'sub_p1_a1_g{0}'.format(groupnum)).id
            group = AssignmentGroup.objects.get(id=group_id)
            yield group

    def test_post_no_matching_groups(self):
        # 'failed' matches no groups when none of them has feedback.
        for group in self._itergroups():
            deadlines = group.deadlines.all().order_by('-deadline')
            self.assertEquals(group.feedback, None)
        new_deadline = datetime(2004, 12, 24, 20, 30)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'createmode': 'failed'})
        self.assertEquals(response.status_code, 400)
        self.assertEquals(content['field_errors']['createmode'], ['The given option did not match any groups.'])

    def test_post_createmode_failed(self):
        self.testhelper.add_delivery('sub.p1.a1.g1', {'bad.py': ['print ', 'bah']})
        self.testhelper.add_feedback('sub.p1.a1.g1', verdict={'grade': 'F', 'points': 30, 'is_passing_grade': False})
        g1 = AssignmentGroup.objects.get(id=self.testhelper.sub_p1_a1_g1.id)
        self.assertFalse(g1.is_open)
        new_deadline = datetime(2004, 12, 24, 20, 30)
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 0)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'createmode': 'failed'})
        # Check response
        self.assertEquals(response.status_code, 201)
        self.assertEquals(decode_bulkdeadline_id(content['bulkdeadline_id'])[0],
                          new_deadline)
        self.assertEquals(len(content['groups']), 1)
        self.assertEquals(content['text'], 'Created')
        self.assertEquals(content['deadline'], format_datetime(new_deadline))
        # Check actual data
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 1)
        g1 = AssignmentGroup.objects.get(id=self.testhelper.sub_p1_a1_g1.id)
        deadlines = g1.deadlines.all().order_by('-deadline')
        self.assertEquals(len(deadlines), 2)
        self.assertEquals(deadlines[0].deadline, new_deadline)
        self.assertEquals(deadlines[0].text, 'Created')
        self.assertTrue(g1.is_open)  # Group was automatically opened in devilry.apps.core.models.Deadline.save()

    def test_post_createmode_failed_or_no_feedback(self):
        # Fail g0
        self.testhelper.add_delivery('sub.p1.a1.g0', {'bad.py': ['print ', 'bah']})
        self.testhelper.add_feedback('sub.p1.a1.g0', verdict={'grade': 'F', 'points': 30, 'is_passing_grade': False})
        # Pass g1
        self.testhelper.add_delivery('sub.p1.a1.g1', {'good.py': ['print ', 'bah']})
        self.testhelper.add_feedback('sub.p1.a1.g1', verdict={'grade': 'A', 'points': 100, 'is_passing_grade': True})
        # g2 has no feedback
        self.assertEquals(self.testhelper.sub_p1_a1_g2.feedback, None)
        new_deadline = datetime(2004, 12, 24, 20, 30)
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 0)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'createmode': 'failed-or-no-feedback'})
        # Check response
        self.assertEquals(response.status_code, 201)
        self.assertEquals(len(content['groups']), 2)
        # Check actual data
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 2)
        g0 = self.testhelper.sub_p1_a1_g0
        g2 = self.testhelper.sub_p1_a1_g2
        for group in (g0, g2):
            deadlines = group.deadlines.all().order_by('-deadline')
            self.assertEquals(len(deadlines), 2)
            self.assertEquals(deadlines[0].deadline, new_deadline)
            self.assertEquals(deadlines[0].text, 'Created')
        g1 = self.testhelper.sub_p1_a1_g1
        self.assertEquals(g1.deadlines.count(), 1)  # We did not add a deadline to g1 because they have a passing grade

    def test_post_createmode_no_deadlines(self):
        self.assertEquals(self.testhelper.sub_p1_a1_g1.deadlines.count(), 1)
        self.testhelper.add_to_path('uni;sub.p1.a1.extragroup')
        self.assertEquals(self.testhelper.sub_p1_a1_extragroup.deadlines.count(), 0)
        new_deadline = datetime(2004, 12, 24, 20, 30)
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 0)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'createmode': 'no-deadlines'})
        # Check response
        self.assertEquals(response.status_code, 201)
        self.assertEquals(decode_bulkdeadline_id(content['bulkdeadline_id'])[0],
                          new_deadline)
        self.assertEquals(len(content['groups']), 1)
        self.assertEquals(content['text'], 'Created')
        self.assertEquals(content['deadline'], format_datetime(new_deadline))
        # Check actual data
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 1)
        extragroup = AssignmentGroup.objects.get(id=self.testhelper.sub_p1_a1_extragroup.id)
        deadlines = extragroup.deadlines.all()
        self.assertEquals(len(deadlines), 1)
        self.assertEquals(deadlines[0].deadline, new_deadline)
        self.assertEquals(deadlines[0].text, 'Created')
        self.assertTrue(extragroup.is_open)  # Group was automatically opened in devilry.apps.core.models.Deadline.save()

    def test_post_createmode_specific_groups(self):
        self.assertEquals(self.testhelper.sub_p1_a1_g1.deadlines.count(), 1)
        self.assertEquals(self.testhelper.sub_p1_a1_g2.deadlines.count(), 1)
        new_deadline = datetime(2004, 12, 24, 20, 30)
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 0)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'group_ids': [self.testhelper.sub_p1_a1_g1.id,
                                                               self.testhelper.sub_p1_a1_g2.id],
                                                 'createmode': 'specific-groups'})
        # Check response
        self.assertEquals(response.status_code, 201)
        self.assertEquals(decode_bulkdeadline_id(content['bulkdeadline_id'])[0],
                          new_deadline)
        self.assertEquals(len(content['groups']), 2)
        self.assertEquals(content['text'], 'Created')
        self.assertEquals(content['deadline'], format_datetime(new_deadline))
        # Check actual data
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 2)
        g1 = self.testhelper.reload_from_db(self.testhelper.sub_p1_a1_g1)
        deadlines = g1.deadlines.all()
        self.assertEquals(len(deadlines), 2)
        self.assertEquals(deadlines[0].deadline, new_deadline)
        self.assertEquals(deadlines[0].text, 'Created')

    def test_post_createmode_specific_groups_nogroups(self):
        # 'specific-groups' without group_ids is a validation error.
        new_deadline = datetime(2004, 12, 24, 20, 30)
        self.assertEquals(Deadline.objects.filter(deadline=new_deadline).count(), 0)
        content, response = self._postas('adm', {'deadline': format_datetime(new_deadline),
                                                 'text': 'Created',
                                                 'createmode': 'specific-groups'})
        self.assertEquals(response.status_code, 400)
        self.assertEquals(content['field_errors']['group_ids'][0],
                          '``group_ids`` is required when ``createmode=="specific-groups"``.')

    def test_post_nobody(self):
        # Users without admin rights on the assignment get 403.
        self.testhelper.create_user('nobody')
        content, response = self._postas('nobody', {})
        self.assertEquals(response.status_code, 403)
class TestRestDeadlinesBulkUpdateReadOrDelete(TestCase):
    """Tests for GET/PUT/DELETE on a single bulkdeadline resource, plus the
    bulkdeadline-id encode/decode helpers."""

    def setUp(self):
        # uni -> sub -> p1 -> a1 with three groups, each with one deadline
        # 5 days after the assignment starts.
        self.testhelper = TestHelper()
        self.testhelper.add(nodes='uni',
                            subjects=['sub'],
                            periods=['p1:begins(-2)'])  # 2 months ago
        self.testhelper.sub_p1.start_time = datetime(2000, 1, 1, 22, 30, 49)
        self.testhelper.sub_p1.save()
        self.testhelper.add_to_path('uni;sub.p1.a1:admin(adm):pub(0)')  # 0 days after period begins + 2 sec
        self.client = RestClient()
        for groupnum in xrange(3):
            # deadline 5 days after assignment starts
            self.testhelper.add_to_path('uni;sub.p1.a1.g{0}:candidate(cand1):examiner(exam1).d1:ends(5)'.format(groupnum))

    def _geturl(self, deadline=None):
        # URL for a single bulkdeadline; defaults to g1's first deadline.
        deadline = deadline or self.testhelper.sub_p1_a1_g1_d1
        bulkdeadline_id = encode_bulkdeadline_id(deadline)
        return '/devilry_subjectadmin/rest/deadlinesbulk/{0}/{1}'.format(self.testhelper.sub_p1_a1.id,
                                                                         bulkdeadline_id)

    def test_encode_unique_bulkdeadline_id(self):
        # The id is '<iso datetime with _ separators>--<sha1 of text>'.
        d = Deadline(deadline=datetime(2000, 12, 24, 22, 30, 49))
        self.assertEquals(encode_bulkdeadline_id(d),
                          '2000-12-24T22_30_49--')
        d.text = 'Hello world'
        self.assertEquals(encode_bulkdeadline_id(d),
                          '2000-12-24T22_30_49--{0}'.format(sha1hash('Hello world')))
        # Ensure unicode works
        d.text = u'\u00e5ello world'
        self.assertEquals(encode_bulkdeadline_id(d),
                          '2000-12-24T22_30_49--{0}'.format(sha1hash(u'\u00e5ello world')))

    def test_decode_unique_bulkdeadline_id(self):
        self.assertEquals(decode_bulkdeadline_id('2000-12-24T22_30_49--'),
                          (datetime(2000, 12, 24, 22, 30, 49), ''))
        self.assertEquals(decode_bulkdeadline_id('2000-12-24T22_30_49--{0}'.format(sha1hash('Hello world'))),
                          (datetime(2000, 12, 24, 22, 30, 49), sha1hash('Hello world')))

    def _putas(self, username, data):
        # Helper: log in as ``username`` and PUT ``data``.
        self.client.login(username=username, password='test')
        return self.client.rest_put(self._geturl(), data)

    def test_put(self):
        # PUT updates deadline datetime and text on all three groups.
        self.assertEquals(self.testhelper.sub_p1_a1.publishing_time, datetime(2000, 1, 1, 22, 30, 51))
        self.assertEquals(self.testhelper.sub_p1_a1_g1_d1.deadline,
                          datetime(2000, 1, 1, 22, 30) + timedelta(days=5))
        new_deadline = datetime(2004, 12, 24, 20, 30)
        content, response = self._putas('adm', {'deadline': format_datetime(new_deadline),
                                                'text': 'Hello'})
        self.assertEquals(response.status_code, 200)
        self.assertEquals(decode_bulkdeadline_id(content['bulkdeadline_id'])[0],
                          new_deadline)
        self.assertEquals(len(content['groups']), 3)
        self.assertEquals(content['text'], 'Hello')
        self.assertEquals(content['deadline'], format_datetime(new_deadline))
        for groupnum in xrange(3):
            deadline_id = getattr(self.testhelper, 'sub_p1_a1_g{0}_d1'.format(groupnum)).id
            deadline = Deadline.objects.get(id=deadline_id)
            self.assertEquals(deadline.deadline, new_deadline)
            self.assertEquals(deadline.text, 'Hello')

    def test_put_nonetext(self):
        # PUT with text=None stores the empty string.
        self.assertEquals(self.testhelper.sub_p1_a1.publishing_time, datetime(2000, 1, 1, 22, 30, 51))
        self.assertEquals(self.testhelper.sub_p1_a1_g1_d1.deadline,
                          datetime(2000, 1, 1, 22, 30) + timedelta(days=5))
        new_deadline = datetime(2004, 12, 24, 20, 30)
        content, response = self._putas('adm', {'deadline': format_datetime(new_deadline),
                                                'text': None})
        self.assertEquals(response.status_code, 200)
        for groupnum in xrange(3):
            deadline_id = getattr(self.testhelper, 'sub_p1_a1_g{0}_d1'.format(groupnum)).id
            deadline = Deadline.objects.get(id=deadline_id)
            self.assertEquals(deadline.deadline, new_deadline)
            self.assertEquals(deadline.text, '')

    def test_put_before_publishingtime(self):
        new_deadline = datetime(1990, 1, 1, 20, 30, 40)
        content, response = self._putas('adm', {'deadline': format_datetime(new_deadline),
                                                'text': None})
        self.assertEquals(response.status_code, 400)
        self.assertEquals(content['errors'], ['Deadline cannot be before publishing time.'])

    def test_put_nobody(self):
        self.testhelper.create_user('nobody')
        content, response = self._putas('nobody', {})
        self.assertEquals(response.status_code, 403)

    def test_put_with_bulkdeadline_id(self):
        # A client-supplied bulkdeadline_id in the payload is ignored.
        new_deadline = datetime(2004, 12, 24, 20, 30)
        content, response = self._putas('adm', {'deadline': format_datetime(new_deadline),
                                                'bulkdeadline_id': 'ignored'})
        self.assertEquals(response.status_code, 200)

    def test_put_group_ids(self):
        # With group_ids, only the listed groups are updated.
        new_deadline = datetime(2004, 12, 24, 20, 30)
        g1 = self.testhelper.sub_p1_a1_g1
        g2 = self.testhelper.sub_p1_a1_g2
        self.assertEquals(g1.deadlines.count(), 1)
        content, response = self._putas('adm', {'deadline': format_datetime(new_deadline),
                                                'text': 'Updated',
                                                'group_ids': [g1.id, g2.id]})
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content['groups']), 2)
        group_ids = set([g['id'] for g in content['groups']])
        self.assertEquals(group_ids, set([g1.id, g2.id]))
        for group in g1, g2:
            group = self.testhelper.reload_from_db(group)
            self.assertEquals(group.deadlines.count(), 1)
            deadline = group.deadlines.all()[0]
            self.assertEquals(deadline.deadline, new_deadline)
            self.assertEquals(deadline.text, 'Updated')

    def _getas(self, username, **data):
        # Helper: log in as ``username`` and GET the single-deadline URL.
        self.client.login(username=username, password='test')
        return self.client.rest_get(self._geturl(), **data)

    def test_get(self):
        content, response = self._getas('adm')
        self.assertEquals(response.status_code, 200)
        self.assertEquals(decode_bulkdeadline_id(content['bulkdeadline_id'])[0],
                          self.testhelper.sub_p1_a1_g1_d1.deadline)
        self.assertEquals(content['deadline'],
                          format_datetime(self.testhelper.sub_p1_a1_g1_d1.deadline))
        self.assertEquals(content['text'], None)
        self.assertEquals(len(content['groups']), 3)

    def test_get_nobody(self):
        self.testhelper.create_user('nobody')
        content, response = self._getas('nobody')
        self.assertEquals(response.status_code, 403)

    def _deleteas(self, username, deadline=None):
        # Helper: log in as ``username`` and DELETE the given deadline.
        self.client.login(username=username, password='test')
        return self.client.rest_delete(self._geturl(deadline))

    def test_delete_sanity(self):
        # Test that the deadline method actually does what it is supposed to do (only deletes what it should delete)
        new_deadline = datetime(2004, 12, 24, 20, 30)
        created_deadline = None
        for groupnum in xrange(2):
            group = getattr(self.testhelper, 'sub_p1_a1_g{0}'.format(groupnum))
            self.assertEquals(1, group.deadlines.count())
            created_deadline = group.deadlines.create(deadline=new_deadline)  # NOTE: Does not matter which deadline object we use, since we only need the datetime and text to generate bulkdeadline_id
            self.assertEquals(2, group.deadlines.count())
        self.testhelper.sub_p1_a1_g2.deadlines.create(deadline=datetime(2006, 12, 24, 20, 30))
        self.testhelper.create_superuser('superuser')
        content, response = self._deleteas('superuser', deadline=created_deadline)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(content['deleted_deadline_ids']), 2)
        self.assertEquals(1, self.testhelper.sub_p1_a1_g0.deadlines.count())
        self.assertEquals(1, self.testhelper.sub_p1_a1_g1.deadlines.count())
        self.assertEquals(2, self.testhelper.sub_p1_a1_g2.deadlines.count())

    def test_delete_with_content_as_superuser(self):
        # Deadlines with deliveries/feedback can be deleted by superusers.
        self.testhelper.create_superuser('superuser')
        self.testhelper.add_delivery('sub.p1.a1.g1', {'bad.py': ['print ', 'bah']})
        self.testhelper.add_feedback('sub.p1.a1.g1', verdict={'grade': 'F', 'points': 30, 'is_passing_grade': False})
        content, response = self._deleteas('superuser')
        self.assertEquals(response.status_code, 200)

    def test_delete_with_content_as_assignmentadm(self):
        # ... but not by plain assignment admins.
        self.testhelper.add_delivery('sub.p1.a1.g1', {'bad.py': ['print ', 'bah']})
        self.testhelper.add_feedback('sub.p1.a1.g1', verdict={'grade': 'F', 'points': 30, 'is_passing_grade': False})
        content, response = self._deleteas('adm')
        self.assertEquals(response.status_code, 403)

    def test_delete_without_content_as_assignmentadm(self):
        content, response = self._deleteas('adm')
        self.assertEquals(response.status_code, 200)

    def test_delete_as_nobody(self):
        self.testhelper.create_user('nobody')
        content, response = self._deleteas('nobody')
        self.assertEquals(response.status_code, 403)
|
#!/usr/bin/env python
import re
import sys
import copy
import argparse
from BCBio import GFF
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(name='blastxml2gff3')
__author__ = "Eric Rasche"
__version__ = "0.4.0"
__maintainer__ = "Eric Rasche"
__email__ = "esr@tamu.edu"
__doc__ = """
BlastXML files, when transformed to GFF3, do not normally show gaps in the
blast hits. This tool aims to fill that "gap".
"""
def blastxml2gff3(blastxml, min_gap=3, trim=False, trim_end=False):
    """Convert BLAST XML results into SeqRecords carrying GFF3-style
    ``protein_match`` features with ``match_part`` children.

    Parameters:
    - blastxml: open file-like handle with BLAST XML output
    - min_gap: mismatch run length below which hit parts stay merged
    - trim: clamp match coordinates to the parent feature's bounds
    - trim_end: clamp only the end coordinate

    Returns a list of Bio.SeqRecord objects (one per query).
    """
    from Bio.Blast import NCBIXML
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    from Bio.SeqFeature import SeqFeature, FeatureLocation

    blast_records = NCBIXML.parse(blastxml)
    records = []
    for record in blast_records:
        # Placeholder sequence; only the record id matters for GFF output.
        rec = SeqRecord(Seq("ACTG"), id=record.query)
        for hit in record.alignments:
            for hsp in hit.hsps:
                qualifiers = {
                    "source": "blast",
                    "score": hsp.expect,
                    "accession": hit.accession,
                    "hit_id": hit.hit_id,
                    "length": hit.length,
                    "hit_titles": hit.title.split(' >')
                }
                desc = hit.title.split(' >')[0]
                # NOTE(review): raises ValueError when the title has no
                # space — presumably titles are always 'id description'.
                qualifiers['description'] = desc[desc.index(' '):]

                # This required a fair bit of sketching out/match to figure out
                # the first time.
                #
                # the match_start location must account for queries and
                # subjecst that start at locations other than 1
                parent_match_start = hsp.query_start - hsp.sbjct_start

                # The end is the start + hit.length because the match itself
                # may be longer than the parent feature, so we use the supplied
                # subject/hit length to calculate the real ending of the target
                # protein.
                parent_match_end = hsp.query_start + hit.length + hsp.query.count('-')

                # However, if the user requests that we trim the feature, then
                # we need to cut the ``match`` start to 0 to match the parent feature.
                # We'll also need to cut the end to match the query's end. It (maybe)
                # should be the feature end? But we don't have access to that data, so
                # We settle for this.
                if trim:
                    if parent_match_start < 1:
                        parent_match_start = 0

                if trim or trim_end:
                    if parent_match_end > hsp.query_end:
                        parent_match_end = hsp.query_end + 1

                # The ``protein_match`` feature will hold one or more ``match_part``s
                top_feature = SeqFeature(
                    FeatureLocation(parent_match_start, parent_match_end),
                    type="protein_match", strand=0,
                    qualifiers=qualifiers
                )

                # Unlike the parent feature, ``match_part``s have sources.
                part_qualifiers = {
                    "source": "blast",
                }
                # NOTE(review): .sub_features is removed in modern Biopython;
                # assumes the Biopython version BCBio.GFF supports — confirm.
                top_feature.sub_features = []
                for start, end, cigar in generate_parts(hsp.query, hsp.match,
                                                        hsp.sbjct,
                                                        ignore_under=min_gap):
                    part_qualifiers['Gap'] = cigar
                    part_qualifiers['ID'] = hit.hit_id

                    if trim:
                        # If trimming, then we start relative to the
                        # protein_match's start
                        match_part_start = parent_match_start + start
                    else:
                        # Otherwise, we have to account for the subject start's location
                        match_part_start = parent_match_start + hsp.sbjct_start + start - 1

                    # We used to use hsp.align_length here, but that includes
                    # gaps in the parent sequence
                    #
                    # Furthermore align_length will give calculation errors in weird places
                    # So we just use (end-start) for simplicity
                    match_part_end = match_part_start + (end - start)

                    top_feature.sub_features.append(
                        SeqFeature(
                            FeatureLocation(match_part_start, match_part_end),
                            type="match_part", strand=0,
                            qualifiers=copy.deepcopy(part_qualifiers))
                    )

                rec.features.append(top_feature)
        rec.annotations = {}
        records.append(rec)
    return records
def __remove_query_gaps(query, match, subject):
    """remove positions in all three based on gaps in query

    In order to simplify math and calculations...we remove all of the gaps
    based on gap locations in the query sequence::

        Q:ACTG-ACTGACTG
        S:ACTGAAC---CTG

    will become::

        Q:ACTGACTGACTG
        S:ACTGAC---CTG

    which greatly simplifies the process of identifying the correct location
    for a match_part
    """
    pieces_q = []
    pieces_m = []
    pieces_s = []
    cursor = 0
    # Walk the gap positions in the query, copying the text between them
    # from all three strings.
    for gap in re.finditer('-', query):
        pieces_q.append(query[cursor:gap.start()])
        pieces_m.append(match[cursor:gap.start()])
        pieces_s.append(subject[cursor:gap.start()])
        cursor = gap.start() + 1
    pieces_q.append(query[cursor:])
    pieces_m.append(match[cursor:])
    pieces_s.append(subject[cursor:])
    return (''.join(pieces_q), ''.join(pieces_m), ''.join(pieces_s))
def generate_parts(query, match, subject, ignore_under=3):
    """Split one HSP alignment into contiguous matching regions.

    Yields ``(start, end, cigar)`` tuples with half-open coordinates
    relative to the gap-stripped query. A region is closed (dropping its
    trailing mismatch columns) whenever ``ignore_under`` or more consecutive
    non-matching columns are seen; the final, possibly open, region is
    always emitted.
    """
    region_q = []
    region_m = []
    region_s = []
    # Strip query gaps first so indices map directly onto query coordinates.
    (query, match, subject) = __remove_query_gaps(query, match, subject)

    region_start = -1
    region_end = -1
    mismatch_count = 0
    for i, (q, m, s) in enumerate(zip(query, match, subject)):
        # Any non-space in the BLAST match line (letter, '|' or '+') marks
        # a matching/similar column. (The original ``m != ' ' or m == '+'``
        # was redundant: the second clause could never change the result.)
        if m != ' ':
            if region_start == -1:
                region_start = i
                # It's a new region, we need to reset or it's pre-seeded with
                # spaces
                region_q = []
                region_m = []
                region_s = []
            region_end = i
            mismatch_count = 0
        else:
            mismatch_count += 1

        region_q.append(q)
        region_m.append(m)
        region_s.append(s)

        # Too many consecutive mismatches: close the current region and
        # drop the trailing mismatch columns from the CIGAR.
        if mismatch_count >= ignore_under and region_start != -1 and region_end != -1:
            region_q = region_q[0:-ignore_under]
            region_m = region_m[0:-ignore_under]
            region_s = region_s[0:-ignore_under]
            yield region_start, region_end + 1, \
                cigar_from_string(region_q, region_m, region_s, strict_m=True)
            region_q = []
            region_m = []
            region_s = []
            region_start = -1
            region_end = -1
            mismatch_count = 0
    # Emit the final (possibly still open) region.
    yield region_start, region_end + 1, \
        cigar_from_string(region_q, region_m, region_s, strict_m=True)
def _qms_to_matches(query, match, subject, strict_m=True):
    """Classify each alignment column into a CIGAR-style operation code.

    Returns a list with one code per column: '=' match/similar, 'X'
    mismatch, 'D' deletion (gap in query), 'I' insertion (gap in subject).
    With ``strict_m`` both '=' and 'X' collapse into 'M', as GFF3 Gap
    attributes require.
    """
    matchline = []
    for (q, m, s) in zip(query, match, subject):
        # Any non-space in the BLAST match line (letter, '|' or '+') means
        # the column aligns. (The original ``m != ' ' or m == '+'`` was
        # redundant, and its unreachable else/log.warn branch is removed:
        # the if/elif pair already covered every value of ``m``.)
        if m != ' ':
            ret = '='
        elif q == '-':
            ret = 'D'
        elif s == '-':
            ret = 'I'
        else:
            ret = 'X'

        if strict_m and ret in ('=', 'X'):
            ret = 'M'
        matchline.append(ret)
    return matchline


def _matchline_to_cigar(matchline):
    """Run-length encode operation codes into a CIGAR string,
    e.g. ['M', 'M', 'D'] -> 'M2 D1'. Requires a non-empty list."""
    cigar_line = []
    last_char = matchline[0]
    count = 0
    for char in matchline:
        if char == last_char:
            count += 1
        else:
            cigar_line.append("%s%s" % (last_char, count))
            count = 1
            last_char = char
    # Flush the final run.
    cigar_line.append("%s%s" % (last_char, count))
    return ' '.join(cigar_line)


def cigar_from_string(query, match, subject, strict_m=True):
    """Return the CIGAR string for one aligned region ('' when empty)."""
    matchline = _qms_to_matches(query, match, subject, strict_m=strict_m)
    if len(matchline) > 0:
        return _matchline_to_cigar(matchline)
    else:
        return ""
if __name__ == '__main__':
    # CLI entry point: parse arguments and write gapped GFF3 to stdout.
    # NOTE(review): ``type=file`` relies on the Python 2 ``file`` builtin;
    # Python 3 would need argparse.FileType('r') — confirm target interpreter.
    parser = argparse.ArgumentParser(description='Convert Blast XML to gapped GFF3', epilog='')
    parser.add_argument('blastxml', type=file, help='Blast XML Output')
    parser.add_argument('--min_gap', type=int, help='Maximum gap size before generating a new match_part', default=3)
    parser.add_argument('--trim', action='store_true', help='Trim blast hits to be only as long as the parent feature')
    parser.add_argument('--trim_end', action='store_true', help='Cut blast results off at end of gene')
    args = parser.parse_args()

    result = blastxml2gff3(**vars(args))
    GFF.write(result, sys.stdout)
Vary match name based on reported application
#!/usr/bin/env python
import re
import sys
import copy
import argparse
from BCBio import GFF
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(name='blastxml2gff3')
__author__ = "Eric Rasche"
__version__ = "0.4.0"
__maintainer__ = "Eric Rasche"
__email__ = "esr@tamu.edu"
__doc__ = """
BlastXML files, when transformed to GFF3, do not normally show gaps in the
blast hits. This tool aims to fill that "gap".
"""
def blastxml2gff3(blastxml, min_gap=3, trim=False, trim_end=False):
    """Convert BLAST XML results into SeqRecords carrying GFF3-style match
    features (typed per BLAST flavour) with ``match_part`` children.

    Parameters:
    - blastxml: open file-like handle with BLAST XML output
    - min_gap: mismatch run length below which hit parts stay merged
    - trim: clamp match coordinates to the parent feature's bounds
    - trim_end: clamp only the end coordinate

    Returns a list of Bio.SeqRecord objects (one per query).
    """
    from Bio.Blast import NCBIXML
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    from Bio.SeqFeature import SeqFeature, FeatureLocation

    blast_records = NCBIXML.parse(blastxml)
    records = []
    for record in blast_records:
        # Pick the SO feature type from the reported application; anything
        # other than BLASTN/BLASTP falls back to the generic 'match'.
        match_type = {  # Currently we can only handle BLASTN, BLASTP
            'BLASTN': 'nucleotide_match',
            'BLASTP': 'protein_match',
        }.get(record.application, 'match')

        # Placeholder sequence; only the record id matters for GFF output.
        rec = SeqRecord(Seq("ACTG"), id=record.query)
        for hit in record.alignments:
            for hsp in hit.hsps:
                qualifiers = {
                    "source": "blast",
                    "score": hsp.expect,
                    "accession": hit.accession,
                    "hit_id": hit.hit_id,
                    "length": hit.length,
                    "hit_titles": hit.title.split(' >')
                }
                desc = hit.title.split(' >')[0]
                # NOTE(review): raises ValueError when the title has no
                # space — presumably titles are always 'id description'.
                qualifiers['description'] = desc[desc.index(' '):]

                # This required a fair bit of sketching out/match to figure out
                # the first time.
                #
                # the match_start location must account for queries and
                # subjecst that start at locations other than 1
                parent_match_start = hsp.query_start - hsp.sbjct_start

                # The end is the start + hit.length because the match itself
                # may be longer than the parent feature, so we use the supplied
                # subject/hit length to calculate the real ending of the target
                # protein.
                parent_match_end = hsp.query_start + hit.length + hsp.query.count('-')

                # However, if the user requests that we trim the feature, then
                # we need to cut the ``match`` start to 0 to match the parent feature.
                # We'll also need to cut the end to match the query's end. It (maybe)
                # should be the feature end? But we don't have access to that data, so
                # We settle for this.
                if trim:
                    if parent_match_start < 1:
                        parent_match_start = 0

                if trim or trim_end:
                    if parent_match_end > hsp.query_end:
                        parent_match_end = hsp.query_end + 1

                # The ``match`` feature will hold one or more ``match_part``s
                top_feature = SeqFeature(
                    FeatureLocation(parent_match_start, parent_match_end),
                    type=match_type, strand=0,
                    qualifiers=qualifiers
                )

                # Unlike the parent feature, ``match_part``s have sources.
                part_qualifiers = {
                    "source": "blast",
                }
                # NOTE(review): .sub_features is removed in modern Biopython;
                # assumes the Biopython version BCBio.GFF supports — confirm.
                top_feature.sub_features = []
                for start, end, cigar in generate_parts(hsp.query, hsp.match,
                                                        hsp.sbjct,
                                                        ignore_under=min_gap):
                    part_qualifiers['Gap'] = cigar
                    part_qualifiers['ID'] = hit.hit_id

                    if trim:
                        # If trimming, then we start relative to the
                        # match's start
                        match_part_start = parent_match_start + start
                    else:
                        # Otherwise, we have to account for the subject start's location
                        match_part_start = parent_match_start + hsp.sbjct_start + start - 1

                    # We used to use hsp.align_length here, but that includes
                    # gaps in the parent sequence
                    #
                    # Furthermore align_length will give calculation errors in weird places
                    # So we just use (end-start) for simplicity
                    match_part_end = match_part_start + (end - start)

                    top_feature.sub_features.append(
                        SeqFeature(
                            FeatureLocation(match_part_start, match_part_end),
                            type="match_part", strand=0,
                            qualifiers=copy.deepcopy(part_qualifiers))
                    )

                rec.features.append(top_feature)
        rec.annotations = {}
        records.append(rec)
    return records
def __remove_query_gaps(query, match, subject):
    """remove positions in all three based on gaps in query

    In order to simplify math and calculations...we remove all of the gaps
    based on gap locations in the query sequence::

        Q:ACTG-ACTGACTG
        S:ACTGAAC---CTG

    will become::

        Q:ACTGACTGACTG
        S:ACTGAC---CTG

    which greatly simplifies the process of identifying the correct location
    for a match_part
    """
    pieces_q = []
    pieces_m = []
    pieces_s = []
    cursor = 0
    # Walk the gap positions in the query, copying the text between them
    # from all three strings.
    for gap in re.finditer('-', query):
        pieces_q.append(query[cursor:gap.start()])
        pieces_m.append(match[cursor:gap.start()])
        pieces_s.append(subject[cursor:gap.start()])
        cursor = gap.start() + 1
    pieces_q.append(query[cursor:])
    pieces_m.append(match[cursor:])
    pieces_s.append(subject[cursor:])
    return (''.join(pieces_q), ''.join(pieces_m), ''.join(pieces_s))
def generate_parts(query, match, subject, ignore_under=3):
    """Split one HSP alignment into contiguous matching regions.

    Yields ``(start, end, cigar)`` tuples with half-open coordinates
    relative to the gap-stripped query. A region is closed (dropping its
    trailing mismatch columns) whenever ``ignore_under`` or more consecutive
    non-matching columns are seen; the final, possibly open, region is
    always emitted.
    """
    region_q = []
    region_m = []
    region_s = []
    # Strip query gaps first so indices map directly onto query coordinates.
    (query, match, subject) = __remove_query_gaps(query, match, subject)

    region_start = -1
    region_end = -1
    mismatch_count = 0
    for i, (q, m, s) in enumerate(zip(query, match, subject)):
        # Any non-space in the BLAST match line (letter, '|' or '+') marks
        # a matching/similar column. (The original ``m != ' ' or m == '+'``
        # was redundant: the second clause could never change the result.)
        if m != ' ':
            if region_start == -1:
                region_start = i
                # It's a new region, we need to reset or it's pre-seeded with
                # spaces
                region_q = []
                region_m = []
                region_s = []
            region_end = i
            mismatch_count = 0
        else:
            mismatch_count += 1

        region_q.append(q)
        region_m.append(m)
        region_s.append(s)

        # Too many consecutive mismatches: close the current region and
        # drop the trailing mismatch columns from the CIGAR.
        if mismatch_count >= ignore_under and region_start != -1 and region_end != -1:
            region_q = region_q[0:-ignore_under]
            region_m = region_m[0:-ignore_under]
            region_s = region_s[0:-ignore_under]
            yield region_start, region_end + 1, \
                cigar_from_string(region_q, region_m, region_s, strict_m=True)
            region_q = []
            region_m = []
            region_s = []
            region_start = -1
            region_end = -1
            mismatch_count = 0
    # Emit the final (possibly still open) region.
    yield region_start, region_end + 1, \
        cigar_from_string(region_q, region_m, region_s, strict_m=True)
def _qms_to_matches(query, match, subject, strict_m=True):
    """Classify each alignment column into a CIGAR-style operation code.

    Returns a list with one code per column: '=' match/similar, 'X'
    mismatch, 'D' deletion (gap in query), 'I' insertion (gap in subject).
    With ``strict_m`` both '=' and 'X' collapse into 'M', as GFF3 Gap
    attributes require.
    """
    matchline = []
    for (q, m, s) in zip(query, match, subject):
        # Any non-space in the BLAST match line (letter, '|' or '+') means
        # the column aligns. (The original ``m != ' ' or m == '+'`` was
        # redundant, and its unreachable else/log.warn branch is removed:
        # the if/elif pair already covered every value of ``m``.)
        if m != ' ':
            ret = '='
        elif q == '-':
            ret = 'D'
        elif s == '-':
            ret = 'I'
        else:
            ret = 'X'

        if strict_m and ret in ('=', 'X'):
            ret = 'M'
        matchline.append(ret)
    return matchline


def _matchline_to_cigar(matchline):
    """Run-length encode operation codes into a CIGAR string,
    e.g. ['M', 'M', 'D'] -> 'M2 D1'. Requires a non-empty list."""
    cigar_line = []
    last_char = matchline[0]
    count = 0
    for char in matchline:
        if char == last_char:
            count += 1
        else:
            cigar_line.append("%s%s" % (last_char, count))
            count = 1
            last_char = char
    # Flush the final run.
    cigar_line.append("%s%s" % (last_char, count))
    return ' '.join(cigar_line)


def cigar_from_string(query, match, subject, strict_m=True):
    """Return the CIGAR string for one aligned region ('' when empty)."""
    matchline = _qms_to_matches(query, match, subject, strict_m=strict_m)
    if len(matchline) > 0:
        return _matchline_to_cigar(matchline)
    else:
        return ""
if __name__ == '__main__':
    # CLI entry point: parse arguments and write gapped GFF3 to stdout.
    # NOTE(review): ``type=file`` relies on the Python 2 ``file`` builtin;
    # Python 3 would need argparse.FileType('r') — confirm target interpreter.
    parser = argparse.ArgumentParser(description='Convert Blast XML to gapped GFF3', epilog='')
    parser.add_argument('blastxml', type=file, help='Blast XML Output')
    parser.add_argument('--min_gap', type=int, help='Maximum gap size before generating a new match_part', default=3)
    parser.add_argument('--trim', action='store_true', help='Trim blast hits to be only as long as the parent feature')
    parser.add_argument('--trim_end', action='store_true', help='Cut blast results off at end of gene')
    args = parser.parse_args()

    result = blastxml2gff3(**vars(args))
    GFF.write(result, sys.stdout)
|
try:
from xml.etree import cElementTree as ET
except ImportError as e:
from xml.etree import ElementTree as ET
# Highway tag values that we treat as roads.
roadtypes = {'motorway', 'trunk', 'primary', 'secondary', 'tertiary',
             'unclassified', 'residential', 'minor', 'service'
             }


def roadp(element):
    """Return True when ``element`` is a <way> whose first highway tag
    names a known road type."""
    if element.tag != 'way':
        return False
    for child in element:
        if child.tag == 'tag' and child.attrib['k'] == "highway":
            return child.attrib['v'] in roadtypes
    return False
def namep(element):
    """Return True when ``element`` is a <way> carrying a 'name' tag whose
    value is at least two characters long."""
    if element.tag != 'way':
        return False
    return any(
        child.tag == 'tag'
        and child.attrib['k'] == "name"
        and len(child.attrib['v']) > 1
        for child in element
    )
def node_increment(node_id, way):
    """Weight a node's contribution to a crossing.

    Returns 1 when ``node_id`` is the first or last <nd> of ``way``
    (a way ending at the node), 2 when it lies in the middle or is not
    referenced at all (the not-found case preserves the original quirk).
    """
    refs = [nd.attrib['ref'] for nd in way if nd.tag == 'nd']
    position = -1
    for idx, ref in enumerate(refs):
        if ref == node_id:
            position = idx
    if position in (0, len(refs) - 1):
        return 1
    return 2
# Class for storing the detected crossings prior to clustering
class Crossing:
    """One candidate road crossing: a shared node plus the ways meeting
    there, weighted so endpoints count less than through-roads."""
    counter = 0; # Number of crossing ways weighted with endpoints
    node = None   # the <node> XML element carrying lat/lon (set later)
    ways = []     # shadowed per-instance in __init__

    def __init__(self, id, way1, way2):
        # A Crossing is only created once a node is seen in two ways.
        self.ways = []
        self.addroad(way1, id)
        self.addroad(way2, id)

    def addroad(self, way, id):
        # Register another way passing through node ``id``; endpoints
        # add 1 to the weight, mid-way nodes add 2.
        self.ways.append(way)
        self.counter += node_increment(id, way)

    def addnode(self, node):
        # Attach the coordinate-bearing <node> element.
        self.node = node

    def lat(self):
        return self.node.attrib['lat']

    def long(self):
        # print self.node.attrib['long']
        return self.node.attrib['lon']

    def coordinate_string(self):
        # 'lat,lon' as used by common plotting tools.
        return self.lat() + ',' + self.long()

    def properCrossing(self):
        # Weighted count > 2 rules out a simple two-way join.
        return self.counter > 2

    def closeBy(self, other, e = 0.0001):
        # True when the other crossing lies within ``e`` degrees on both axes.
        return abs(float(self.lat()) - float(other.lat())) < e and abs(float(self.long()) - float(other.long())) < e

    def closeByCrossings(self, crossings):
        # Return all other crossings in the dict that are close to this one.
        result = []
        for c in crossings:
            cr = crossings[c]
            if cr != self and self.closeBy(cr):
                result.append(cr)
        return result
def extract_intersections(osm, verbose=True):
    """Find road intersections in an OSM XML file.

    Goes through each xml element and collects nodes that are shared by two
    or more named roads, then keeps those whose endpoint-weighted way count
    exceeds 2 (i.e. proper crossings).

    Parameters:
    - osm: An xml file (path or file object) with OpenStreetMap map data
    - verbose: If true, print the number of crossings found.

    Returns a dict mapping node id -> Crossing.

    Ex) extract_intersections('WashingtonDC.osm')
    """
    tree = ET.parse(osm)
    root = tree.getroot()
    encountered = {}  # node id -> first way seen referencing it
    crossings = {}    # node id -> Crossing (node referenced by >= 2 ways)
    for child in root:
        if roadp(child) and namep(child):
            for item in child:
                if item.tag == 'nd':
                    nd_ref = item.attrib['ref']
                    if not nd_ref in encountered:  # Encountered the first time
                        encountered[nd_ref] = child
                    elif not nd_ref in crossings:  # Encountered the second time
                        crossings[nd_ref] = Crossing(nd_ref, child, encountered[nd_ref])
                    else:  # After the second time
                        # BUGFIX: pass the node id string (not the <nd>
                        # element) so node_increment() can compare it against
                        # the way's 'ref' attributes; the element could never
                        # match and always counted as 2.
                        crossings[nd_ref].addroad(child, nd_ref)
    # Attach the coordinate-bearing <node> element to each crossing.
    for child in root:
        if child.tag == "node" and child.attrib['id'] in crossings:
            crossings[child.attrib['id']].addnode(child)

    # Keep only nodes shared by enough ways (weighted by endpoints) to
    # count as proper intersections.
    crossings = dict(filter(lambda k: crossings[k[0]].properCrossing(), crossings.items()))

    # You can plot the result using this url.
    # http://www.darrinward.com/lat-long/
    if verbose:
        print(len(crossings))
    return crossings
def print_coordinates(osm):
    """Print one "lat,lon" line per detected crossing in *osm*."""
    for crossing in extract_intersections(osm).values():
        print(crossing.coordinate_string())
def print_crossings(osm):
    """Debug dump: each crossing's coordinates, way count, weighted counter,
    its road names, and the same details for each nearby crossing."""
    c = extract_intersections(osm)
    for id in c:
        # "<lat,lon> - <way count>(<weighted counter>)"
        print(c[id].coordinate_string() + ' - ' + str(len(c[id].ways)) + '(' +str(c[id].counter) + ')')
        # Road names of the ways meeting at this crossing.
        for w in c[id].ways:
            for t in w:
                if t.tag == 'tag' and t.attrib['k'] == 'name':
                    print(' %r' % t.attrib['v'])
        # Nearby crossings and their road names (indented deeper).
        for n in c[id].closeByCrossings(c):
            print(' %r' % n)
            for w in n.ways:
                for t in w:
                    if t.tag == 'tag' and t.attrib['k'] == 'name':
                        print('    %r' % t.attrib['v'])
print_coordinates("../data/otaniemi.osm")
Added clustering of crossings
try:
from xml.etree import cElementTree as ET
except ImportError as e:
from xml.etree import ElementTree as ET
import math
import copy
roadtypes = {'motorway', 'trunk', 'primary', 'secondary', 'tertiary',
'unclassified', 'residential', 'minor', 'service'
}
def isRoad(element):
    """True when *element* is a <way> whose highway tag names a road type."""
    if element.tag != 'way':
        return False
    for child in element:
        if child.tag == 'tag' and child.attrib['k'] == "highway":
            return child.attrib['v'] in roadtypes
    return False
def wayName(way):
    """Return the value of a way's "name" tag, or None when absent."""
    if way.tag != 'way':
        return None
    for tag in way:
        if tag.tag == 'tag' and tag.attrib['k'] == "name":
            return tag.attrib['v']
    return None
def wayHasName(element):
    """True when the way carries a name tag longer than one character."""
    name = wayName(element)
    return name is not None and len(name) > 1
def node_increment(node_id, way):
    """Weight of *way* at node *node_id*: 1 when the node is the first or
    last <nd> member of the way (a road endpoint), 2 otherwise."""
    refs = [child.attrib['ref'] for child in way if child.tag == 'nd']
    position = -1
    for idx, ref in enumerate(refs):
        if ref == node_id:
            position = idx  # keep the LAST occurrence, as the original did
    return 1 if position == 0 or position == len(refs) - 1 else 2
def pointId(lat, lon):
    """Stable id string for a coordinate, from lat/lon floored at 1e-4 degrees."""
    lat_part = math.floor(10000 * lat)
    lon_part = math.floor(10000 * lon)
    return '{0}{1}'.format(lat_part, lon_part)
# Class for storing the detected crossings prior to clustering
class Crossing:
    """A detected road crossing: an OSM node shared by several named ways.

    Instances start out at one OSM node; clustering may later merge two
    crossings into a new synthetic midpoint crossing (clusterCrossingWith),
    in which case both originals point at the merger via ``obsoletedBy``.
    """
    id = None   # node ref, or a synthetic pointId() for merged crossings
    lat = 0.0
    lon = 0.0
    ways = {}   # re-bound to a per-instance set in __init__
    counter = 0; # Number of crossing ways weighted with endpoints
    node = None  # backing <node> XML element, attached via addnode()
    fake = False # Crossing with only one road name (continuation of same road, division into two unidirectionals)
    obsoletedBy = None  # Crossing that replaced this one after clustering
    replacesCrossings = []  # NOTE(review): never assigned anywhere visible

    def __init__(self, id, lat, lon, ways):
        self.id = id
        self.lat = lat
        self.lon = lon
        self.ways = set()
        for w in ways:
            self.addroad(w, id)
        # Cached flag; note most methods re-evaluate fakeCrossing() instead.
        self.fake = self.fakeCrossing()

    def addroad(self, way, id):
        # Endpoint membership weighs 1, a way passing through weighs 2.
        self.ways.add(way)
        self.counter += node_increment(id, way)

    def addnode(self, node):
        # Attach the <node> element and adopt its real coordinates.
        self.node = node
        self.lat = node.attrib['lat']
        self.lon = node.attrib['lon']

    def coordinate_string(self):
        return str(self.lat) + ',' + str(self.lon)

    def properCrossing(self):
        # return self.counter > 2
        # return self.counter > 1 and not self.fakeCrossing()
        return self.counter > 1

    def closeBy(self, other, e = 0.00018):
        # Per-axis proximity test in degrees (~20 m in latitude).
        return abs(float(self.lat) - float(other.lat)) < e and abs(float(self.lon) - float(other.lon)) < e

    def closeByCrossings(self, crossings):
        """Live (not obsoleted), non-fake crossings near this one."""
        result = []
        for c in crossings:
            cr = crossings[c]
            if cr != self and cr.obsoletedBy == None and not cr.fakeCrossing() and self.closeBy(cr):
                result.append(cr)
        return result

    def wayNames(self):
        # Set of distinct road names meeting at this crossing.
        return set(map(wayName, self.ways))

    def printCoordinates(self, e="\n"):
        print(self.coordinate_string(), end=e)

    def printCrossing(self, crossings):
        # *crossings* is currently unused; kept for call-site symmetry.
        if self.fakeCrossing():
            print("Fake crossing: ", end='')
        self.printCoordinates(e='')
        print(self.wayNames())

    def fakeCrossing(self):
        # One road name only: the "crossing" is the same road continuing.
        return len(self.wayNames()) == 1

    def clusterableWith(self, crossing):
        # Mergeable when the crossings share at least two road names.
        return len(self.wayNames() & crossing.wayNames()) > 1

    def clusterCrossingWith(self, crossing, crossingMap, newCrossingMap):
        """Merge self and *crossing* into a new midpoint Crossing.

        Returns the merged crossing, or self when either side is already
        obsoleted or fake. The merged crossing is stored in newCrossingMap
        and both originals are marked obsoletedBy it.
        """
        print('Trying to cluster: ', end='')
        self.printCrossing(crossingMap)
        print('with the crossing: ', end='')
        crossing.printCrossing(crossingMap)
        if self.obsoletedBy == None and not self.fakeCrossing() and crossing.obsoletedBy == None and not crossing.fakeCrossing():
            # Place the merged crossing at the midpoint of the two originals.
            lat = (float(self.lat) + float(crossing.lat)) / 2
            lon = (float(self.lon) + float(crossing.lon)) / 2
            id = pointId(lat, lon)
            c = Crossing(id, lat, lon, self.ways.union(crossing.ways))
            print('Clustering result: ', end='')
            c.printCrossing(crossingMap)
            print('--')
            newCrossingMap[id] = c
            self.obsoletedBy = c
            crossing.obsoletedBy = c
            return c
        return self

    def clusterCrossing(self, crossingMap, newCrossingMap):
        """Recursively merge this crossing with clusterable close-by ones.

        NOTE(review): when the crossing is live but no neighbour is
        clusterable, the for-loop falls through and the method implicitly
        returns None (only the else-branch returns self); callers currently
        ignore the return value, so this is a latent inconsistency only.
        """
        if self.obsoletedBy == None and not self.fakeCrossing():
            for n in self.closeByCrossings(crossingMap):
                if self.clusterableWith(n):
                    c = self.clusterCrossingWith(n, crossingMap, newCrossingMap)
                    # Keep clustering the merged result until stable.
                    return c.clusterCrossing(crossingMap, newCrossingMap)
        else:
            return self
def extract_intersections(osm, verbose=True):
    """Parse an .osm XML file and return {node_ref: Crossing} for nodes
    shared by two or more named roads.

    Parameters:
    - osm: path to an OpenStreetMap XML export
    - verbose: if true, print some outputs to terminal

    Ex) extract_intersections('WashingtonDC.osm')
    """
    tree = ET.parse(osm)
    root = tree.getroot()
    encountered = {}  # node ref -> first way seen containing that node
    crossings = {}    # node ref -> Crossing (node present in >= 2 ways)
    for child in root:
        if isRoad(child) and wayHasName(child):
            for item in child:
                if item.tag == 'nd':
                    nd_ref = item.attrib['ref']
                    if not nd_ref in encountered:  # Encountered the first time
                        encountered[nd_ref] = child
                    elif not nd_ref in crossings:  # Encountered the second time
                        crossings[nd_ref] = Crossing(nd_ref, 0.0, 0.0, {child, encountered[nd_ref]})
                    else:  # After the second time
                        # BUGFIX: pass the node ref (string), not the <nd> element.
                        # node_increment() compares this id against way member refs,
                        # so passing the Element made every later way count as a
                        # mid-way hit (weight 2) even at endpoints.
                        crossings[nd_ref].addroad(child, nd_ref)
    # Attach real coordinates from the corresponding <node> elements.
    for child in root:
        if child.tag == "node" and child.attrib['id'] in crossings:
            crossings[child.attrib['id']].addnode(child)
    # Keep only nodes whose weighted way count indicates a real intersection.
    crossings = dict(filter(lambda k: crossings[k[0]].properCrossing(), crossings.items()))
    # print(len(crossings))
    return crossings
def cluster_crossings(crossings):
    """Merge close-by crossings sharing road names; return the merged map."""
    merged = {}
    for crossing in crossings.values():
        crossing.clusterCrossing(crossings, merged)
    return merged
def print_coordinates(osm):
    """Print raw crossing coordinates, then the surviving clustered ones."""
    c = extract_intersections(osm)
    clusters = cluster_crossings(c)
    print('Crossings')
    for id in c:
        c[id].printCoordinates()
    print('Clustered crossings')
    for id in clusters:
        # Skip merged crossings that were themselves merged again.
        if clusters[id].obsoletedBy == None:
            clusters[id].printCoordinates()
def print_crossings(osm):
    """Dump every detected crossing followed by its close-by neighbours."""
    crossings = extract_intersections(osm)
    for key in crossings:
        crossings[key].printCrossing(crossings)
        for neighbour in crossings[key].closeByCrossings(crossings):
            neighbour.printCrossing(crossings)
print_coordinates("../data/otaniemi.osm")
|
# Standard Library
import datetime
import logging
import requests
# Django
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.contrib.auth import get_user_model
from django.apps import apps
from django.conf import settings
from apps.bhs.tasks import update_person_from_membercenter
from apps.bhs.tasks import update_group_from_membercenter
from apps.bhs.tasks import update_group_owners_from_membercenter
# First-Party
User = get_user_model()
Person = apps.get_model('bhs.person')
Group = apps.get_model('bhs.group')
log = logging.getLogger('updater')
class Command(BaseCommand):
    """Sync Persons, Groups and Officers from the Member Center API.

    The three sync passes were previously three near-identical inline
    request/pagination loops; they now share _sync_endpoint(), which also
    unifies the progress labels (the Officers pass formerly mixed "Roles"
    and "Officers").
    """
    help = "Command to sync with Member Center database."

    def add_arguments(self, parser):
        parser.add_argument(
            '--days',
            type=int,
            dest='days',
            nargs='?',
            const=1,
            help='Number of days to update.',
        )
        parser.add_argument(
            '--hours',
            type=int,
            dest='hours',
            nargs='?',
            const=1,
            help='Number of hours to update.',
        )
        parser.add_argument(
            '--minutes',
            type=int,
            dest='minutes',
            nargs='?',
            const=1,
            # BUGFIX: help text previously said "hours".
            help='Number of minutes to update.',
        )

    def handle(self, *args, **options):
        # Cursor: only records modified after this moment are synced.
        # The small overlap (1h / 5min / 5s) guards against clock skew and
        # records modified while the previous run was in flight.
        if options['days']:
            cursor = timezone.now() - datetime.timedelta(days=options['days'], hours=1)
        elif options['hours']:
            cursor = timezone.now() - datetime.timedelta(hours=options['hours'], minutes=5)
        elif options['minutes']:
            cursor = timezone.now() - datetime.timedelta(minutes=options['minutes'], seconds=5)
        else:
            cursor = None  # full sync
        # Sync Persons
        self._sync_endpoint(
            label='Persons',
            path='bhs/person',
            extra_params={},
            task=update_person_from_membercenter,
            cursor=cursor,
        )
        # if not cursor:
        #     humans = list(Human.objects.values_list('id', flat=True))
        #     self.stdout.write("Deleting Person orphans...")
        #     t = Person.objects.delete_orphans(humans)
        #     self.stdout.write("Deleted {0} Person orphans.".format(t))
        # Sync Groups
        self._sync_endpoint(
            label='Groups',
            path='bhs/group',
            extra_params={'kind__gt': 30},
            task=update_group_from_membercenter,
            cursor=cursor,
        )
        # Sync Roles (officers)
        self._sync_endpoint(
            label='Officers',
            path='bhs/officer',
            extra_params={'group__kind__gt': 30},
            task=update_group_owners_from_membercenter,
            cursor=cursor,
        )
        self.stdout.write("Complete.")

    def _sync_endpoint(self, label, path, extra_params, task, cursor):
        """Page through one Member Center endpoint, fanning each item out to
        the Celery *task*.

        NOTE: page 1 is fetched twice (once for the pagination metadata and
        again inside the loop), preserving the original behavior.
        """
        self.stdout.write("Fetching {0} from Member Center...".format(label))
        # MEMBERCENTER_URL has the form "<endpoint>@<token>".
        endpoint, _, token = settings.MEMBERCENTER_URL.partition('@')
        url = "{0}/{1}".format(endpoint, path)
        headers = {
            'Authorization': 'Token {0}'.format(token)
        }
        page = 1
        params = dict(extra_params)
        params['modified__gt'] = cursor  # None is dropped by requests
        params['page'] = page
        response = requests.get(
            url,
            headers=headers,
            params=params,
        ).json()
        total = response['meta']['pagination']['count']
        if total:
            i = 0
            pages = response['meta']['pagination']['pages']
            while page <= pages:
                response = requests.get(
                    url,
                    headers=headers,
                    params=params,
                ).json()
                for item in response['data']:
                    i += 1
                    self.stdout.flush()
                    self.stdout.write("Updating {0} of {1} {2}...".format(i, total, label), ending='\r')
                    task.delay(item)
                page += 1
                params['page'] = page
            self.stdout.write("")
            self.stdout.write("Updated {0} {1}.".format(total, label))
Only update active records
# Standard Library
import datetime
import logging
import requests
# Django
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.contrib.auth import get_user_model
from django.apps import apps
from django.conf import settings
from apps.bhs.tasks import update_person_from_membercenter
from apps.bhs.tasks import update_group_from_membercenter
from apps.bhs.tasks import update_group_owners_from_membercenter
# First-Party
User = get_user_model()
Person = apps.get_model('bhs.person')
Group = apps.get_model('bhs.group')
log = logging.getLogger('updater')
class Command(BaseCommand):
    """Sync active Persons, Groups and Officers from the Member Center API.

    The three sync passes were previously three near-identical inline
    request/pagination loops; they now share _sync_endpoint(), which also
    unifies the progress labels (the Officers pass formerly mixed "Roles"
    and "Officers").
    """
    help = "Command to sync with Member Center database."

    def add_arguments(self, parser):
        parser.add_argument(
            '--days',
            type=int,
            dest='days',
            nargs='?',
            const=1,
            help='Number of days to update.',
        )
        parser.add_argument(
            '--hours',
            type=int,
            dest='hours',
            nargs='?',
            const=1,
            help='Number of hours to update.',
        )
        parser.add_argument(
            '--minutes',
            type=int,
            dest='minutes',
            nargs='?',
            const=1,
            # BUGFIX: help text previously said "hours".
            help='Number of minutes to update.',
        )

    def handle(self, *args, **options):
        # Cursor: only records modified after this moment are synced.
        # The small overlap (1h / 5min / 5s) guards against clock skew and
        # records modified while the previous run was in flight.
        if options['days']:
            cursor = timezone.now() - datetime.timedelta(days=options['days'], hours=1)
        elif options['hours']:
            cursor = timezone.now() - datetime.timedelta(hours=options['hours'], minutes=5)
        elif options['minutes']:
            cursor = timezone.now() - datetime.timedelta(minutes=options['minutes'], seconds=5)
        else:
            cursor = None  # full sync
        # Sync Persons (active records only)
        self._sync_endpoint(
            label='Persons',
            path='bhs/person',
            extra_params={'status': Person.STATUS.active},
            task=update_person_from_membercenter,
            cursor=cursor,
        )
        # if not cursor:
        #     humans = list(Human.objects.values_list('id', flat=True))
        #     self.stdout.write("Deleting Person orphans...")
        #     t = Person.objects.delete_orphans(humans)
        #     self.stdout.write("Deleted {0} Person orphans.".format(t))
        # Sync Groups (active, above chapter level)
        self._sync_endpoint(
            label='Groups',
            path='bhs/group',
            extra_params={
                'status': Group.STATUS.active,
                'kind__gt': Group.KIND.chapter,
            },
            task=update_group_from_membercenter,
            cursor=cursor,
        )
        # Sync Roles (officers of active, above-chapter groups)
        self._sync_endpoint(
            label='Officers',
            path='bhs/officer',
            extra_params={
                'group__status': Group.STATUS.active,
                'group__kind__gt': Group.KIND.chapter,
            },
            task=update_group_owners_from_membercenter,
            cursor=cursor,
        )
        self.stdout.write("Complete.")

    def _sync_endpoint(self, label, path, extra_params, task, cursor):
        """Page through one Member Center endpoint, fanning each item out to
        the Celery *task*.

        NOTE: page 1 is fetched twice (once for the pagination metadata and
        again inside the loop), preserving the original behavior.
        """
        self.stdout.write("Fetching {0} from Member Center...".format(label))
        # MEMBERCENTER_URL has the form "<endpoint>@<token>".
        endpoint, _, token = settings.MEMBERCENTER_URL.partition('@')
        url = "{0}/{1}".format(endpoint, path)
        headers = {
            'Authorization': 'Token {0}'.format(token)
        }
        page = 1
        params = dict(extra_params)
        params['modified__gt'] = cursor  # None is dropped by requests
        params['page'] = page
        response = requests.get(
            url,
            headers=headers,
            params=params,
        ).json()
        total = response['meta']['pagination']['count']
        if total:
            i = 0
            pages = response['meta']['pagination']['pages']
            while page <= pages:
                response = requests.get(
                    url,
                    headers=headers,
                    params=params,
                ).json()
                for item in response['data']:
                    i += 1
                    self.stdout.flush()
                    self.stdout.write("Updating {0} of {1} {2}...".format(i, total, label), ending='\r')
                    task.delay(item)
                page += 1
                params['page'] = page
            self.stdout.write("")
            self.stdout.write("Updated {0} {1}.".format(total, label))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2013+ James Shubin
# Written by James Shubin <james@shubin.ca>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# EXAMPLE:
# $ gluster peer status --xml | ./xml.py connected <PEER1> <PEER2> <PEERn>
# <BOOL>
# EXAMPLE:
# $ gluster volume info --xml <VOLNAME> | ./xml.py property --key <KEY>
# <VALUE>
# EXAMPLE:
# $ gluster volume status --xml [<VOLNAME>] | ./xml.py port --volume <VOLUME> --host <HOST> --path <PATH>
# <PORT>
# EXAMPLE:
# $ gluster volume status --xml [<VOLNAME>] | ./xml.py ports [--volume <VOLUME>] [--host <HOST>]
# <PORT1>[,<PORT2>[,<PORTn>]]
import sys
import argparse
import lxml.etree as etree
parser = argparse.ArgumentParser(description='gluster xml parsing tools')
#parser.add_argument('--debug', dest='debug', action='store_true', default=False)
subparsers = parser.add_subparsers(dest='mode')
#
# 'connected' parser
#
parser_connected = subparsers.add_parser('connected')
parser_connected.add_argument('peers', type=str, nargs='*', action='store')
#
# 'property' parser
#
parser_property = subparsers.add_parser('property')
parser_property.add_argument('--key', dest='key', action='store')
#
# 'port' parser
#
parser_port = subparsers.add_parser('port')
parser_port.add_argument('--volume', dest='volume', action='store', required=True)
parser_port.add_argument('--host', dest='host', action='store', required=True)
parser_port.add_argument('--path', dest='path', action='store', required=True)
#
# 'ports' parser
#
parser_ports = subparsers.add_parser('ports')
parser_ports.add_argument('--volume', dest='volume', action='store', required=False)
parser_ports.add_argument('--host', dest='host', action='store', required=False)
#
# final setup...
#
args = parser.parse_args()
tree = etree.parse(sys.stdin)
root = tree.getroot()
# are all the hostnames in argv connected ?
if args.mode == 'connected':
    # Exit 0 when every requested peer is connected, 1 when any is
    # disconnected, 2 when a requested peer is not in the peer list at all.
    store = {}  # hostname -> connected (bool)
    peers = args.peers
    for i in root.findall('.//peerStatus'):
        p = i.find('peer')
        h = p.find('hostname').text
        c = (str(p.find('connected').text) == '1') # connected
        store[h] = c # save for later...
    # if no peers specified, assume we should check all...
    if len(peers) == 0:
        peers = store.keys()
    for i in peers:
        if i in store.keys():
            if not store[i]:
                # someone is unconnected
                sys.exit(1)
        else:
            # we're looking for a peer that isn't peered yet
            sys.exit(2)
    # must be good!
    sys.exit(0)
elif args.mode == 'property':
    # Print the single <option> value whose name matches --key.
    store = []
    for i in root.findall('.//option'):
        if str(i.find('name').text) == args.key:
            store.append(i.find('value').text)
    if len(store) == 1:
        print(store[0])
        sys.exit(0)
    else: # zero or more than one value found
        sys.exit(1)
elif args.mode == 'port':
    # Print the brick port for the exact (volume, host, path) triple.
    port = 0
    found = False
    #print args.volume # volume
    #print args.host # hostname
    #print args.path # path
    for i in root.findall('.//volumes'):
        for j in i.findall('.//volume'):
            v = str(j.find('volName').text)
            #print v
            for k in j.findall('.//node'):
                h = str(k.find('hostname').text)
                p = str(k.find('path').text)
                #print h, p
                #if v == args.volume and h == args.host and p == args.path:
                if (v, h, p) == (args.volume, args.host, args.path):
                    if found:
                        # we have already found a match.
                        # there's a bug somewhere...
                        sys.exit(2)
                    found = True
                    port = int(k.find('port').text)
    if found and port > 0:
        print(port)
        sys.exit(0)
    else: # no value found
        sys.exit(1)
# list all the ports used by one volume
elif args.mode == 'ports':
ports = []
found = False
#print args.volume # volume (optional)
for i in root.findall('.//volumes'):
for j in i.findall('.//volume'):
v = str(j.find('volName').text)
#print v
# if no volume is specified, we use all of them...
if args.volume is None or args.volume == v:
for k in j.findall('.//node'):
h = str(k.find('hostname').text)
p = str(k.find('path').text)
#print h, p
if args.host is None or args.host == h:
try:
ports.append(int(k.find('port').text))
found = True
except ValueError, e:
pass
if found and len(ports) > 0:
# NOTE: you may get duplicates if you lookup multiple hosts...
# here we remove any duplicates and convert each int to strings
print(','.join([str(x) for x in list(set(ports))]))
sys.exit(0)
else: # no value found
sys.exit(1)
# else:
sys.exit(3)
# vim: ts=8
Also verify peer state for connection tool.
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2013+ James Shubin
# Written by James Shubin <james@shubin.ca>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# EXAMPLE:
# $ gluster peer status --xml | ./xml.py connected <PEER1> <PEER2> <PEERn>
# <BOOL>
# EXAMPLE:
# $ gluster volume info --xml <VOLNAME> | ./xml.py property --key <KEY>
# <VALUE>
# EXAMPLE:
# $ gluster volume status --xml [<VOLNAME>] | ./xml.py port --volume <VOLUME> --host <HOST> --path <PATH>
# <PORT>
# EXAMPLE:
# $ gluster volume status --xml [<VOLNAME>] | ./xml.py ports [--volume <VOLUME>] [--host <HOST>]
# <PORT1>[,<PORT2>[,<PORTn>]]
import sys
import argparse
import lxml.etree as etree
# List of state codes:
# <MESSAGE> <CODE>
# static char *glusterd_friend_sm_state_names[] = { # glusterd-sm.c
# "Establishing Connection", # 0
# "Probe Sent to Peer", # 1
# "Probe Received from Peer", # 2
# "Peer in Cluster", # 3 (verified)
# "Accepted peer request", # 4
# "Sent and Received peer request", # 5
# "Peer Rejected", # 6 (verified)
# "Peer detach in progress", # 7
# "Probe Received from peer", # 8
# "Connected to Peer", # 9
# "Peer is connected and Accepted", # 10
# "Invalid State" # 11
# };
valid_peered = ['3']
parser = argparse.ArgumentParser(description='gluster xml parsing tools')
#parser.add_argument('--debug', dest='debug', action='store_true', default=False)
subparsers = parser.add_subparsers(dest='mode')
#
# 'connected' parser
#
parser_connected = subparsers.add_parser('connected')
parser_connected.add_argument('peers', type=str, nargs='*', action='store')
#
# 'property' parser
#
parser_property = subparsers.add_parser('property')
parser_property.add_argument('--key', dest='key', action='store')
#
# 'port' parser
#
parser_port = subparsers.add_parser('port')
parser_port.add_argument('--volume', dest='volume', action='store', required=True)
parser_port.add_argument('--host', dest='host', action='store', required=True)
parser_port.add_argument('--path', dest='path', action='store', required=True)
#
# 'ports' parser
#
parser_ports = subparsers.add_parser('ports')
parser_ports.add_argument('--volume', dest='volume', action='store', required=False)
parser_ports.add_argument('--host', dest='host', action='store', required=False)
#
# final setup...
#
args = parser.parse_args()
tree = etree.parse(sys.stdin)
root = tree.getroot()
# are all the hostnames in argv connected ?
if args.mode == 'connected':
    # Exit 0 when every requested peer is connected AND fully peered
    # ("Peer in Cluster", see valid_peered above); 1 when any peer fails
    # either check; 2 when a requested peer is missing from the peer list.
    store = {}  # hostname -> connected-and-peered (bool)
    peers = args.peers
    for i in root.findall('.//peerStatus'):
        p = i.find('peer')
        h = p.find('hostname').text
        c = (str(p.find('connected').text) == '1') # connected...?
        s = (str(p.find('state').text) in valid_peered) # valid peering
        store[h] = c and s # save for later...
    # if no peers specified, assume we should check all...
    if len(peers) == 0:
        peers = store.keys()
    for i in peers:
        if i in store.keys():
            if not store[i]:
                # someone is unconnected
                sys.exit(1)
        else:
            # we're looking for a peer that isn't peered yet
            sys.exit(2)
    # must be good!
    sys.exit(0)
elif args.mode == 'property':
store = []
for i in root.findall('.//option'):
if str(i.find('name').text) == args.key:
store.append(i.find('value').text)
if len(store) == 1:
print(store[0])
sys.exit(0)
else: # more than one value found
sys.exit(1)
elif args.mode == 'port':
port = 0
found = False
#print args.volume # volume
#print args.host # hostname
#print args.path # path
for i in root.findall('.//volumes'):
for j in i.findall('.//volume'):
v = str(j.find('volName').text)
#print v
for k in j.findall('.//node'):
h = str(k.find('hostname').text)
p = str(k.find('path').text)
#print h, p
#if v == args.volume and h == args.host and p == args.path:
if (v, h, p) == (args.volume, args.host, args.path):
if found:
# we have already found a match.
# there's a bug somewhere...
sys.exit(2)
found = True
port = int(k.find('port').text)
if found and port > 0:
print(port)
sys.exit(0)
else: # no value found
sys.exit(1)
# list all the ports used by one volume
elif args.mode == 'ports':
ports = []
found = False
#print args.volume # volume (optional)
for i in root.findall('.//volumes'):
for j in i.findall('.//volume'):
v = str(j.find('volName').text)
#print v
# if no volume is specified, we use all of them...
if args.volume is None or args.volume == v:
for k in j.findall('.//node'):
h = str(k.find('hostname').text)
p = str(k.find('path').text)
#print h, p
if args.host is None or args.host == h:
try:
ports.append(int(k.find('port').text))
found = True
except ValueError, e:
pass
if found and len(ports) > 0:
# NOTE: you may get duplicates if you lookup multiple hosts...
# here we remove any duplicates and convert each int to strings
print(','.join([str(x) for x in list(set(ports))]))
sys.exit(0)
else: # no value found
sys.exit(1)
# else:
sys.exit(3)
# vim: ts=8
|
# TODO: code review of whole module
from collections import defaultdict
from datetime import timedelta
from django.db.models import Q, Sum
from kitabu.search.utils import Timeline
from kitabu.exceptions import ReservationValidationError
class Subjects(object):
    """Searcher for subjects available in certain time period.

    Constructor is used to set subject model and possibly limit subjects on
    which search should be performed.

    Finite availablity means only certain number of reservations at a time is
    possible.
    """
    def __init__(self, subject_model, subject_manager=None):
        self.subject_model = subject_model
        self.reservation_model = subject_model.get_reservation_model()
        if subject_manager:
            self._subject_manager = subject_manager

    def _get_subject_manager(self):
        # Lazily default to the subject model's default manager.
        if not hasattr(self, '_subject_manager'):
            self._subject_manager = self.subject_model.objects
        return self._subject_manager

    def _set_subject_manager(self, value):
        self._subject_manager = value

    subject_manager = property(_get_subject_manager, _set_subject_manager)

    def search(self, start, end, required_size):
        """Return subjects with at least *required_size* capacity free for
        the whole [start, end] period."""
        colliding_reservations = self.reservation_model.colliding_reservations_in_subjects(
            start=start,
            end=end,
            subjects=self.subject_manager.all()
        ).select_related('subject')
        # Sweep line per subject: +size at the (clamped) reservation start,
        # -size at the reservation end (when it falls inside the window).
        timelines = defaultdict(lambda: defaultdict(lambda: 0))
        for reservation in colliding_reservations:
            reservation_start = max(start, reservation.start)
            timelines[reservation.subject][reservation_start] += reservation.size
            if reservation.end < end:
                timelines[reservation.subject][reservation.end] -= reservation.size
        disqualified_subjects = []
        for subject, timeline in timelines.iteritems():
            # Running occupancy; the subject is out as soon as the occupancy
            # plus the requested size exceeds its capacity at any moment.
            # (A dead "max_reservations" accumulator was removed here — it
            # was computed but never read.)
            reservations_cnt = 0
            for moment in sorted(timeline.keys()):
                reservations_cnt += timeline[moment]
                if reservations_cnt + required_size > subject.size:
                    disqualified_subjects.append(subject.id)
                    break
        return self.subject_manager.exclude(id__in=disqualified_subjects).filter(size__gte=required_size)

    def valid_search(self, *args, **kwargs):
        """Like search(), but additionally run reservation validators and
        drop subjects whose validation fails."""
        pre_results = self.search(*args, **kwargs)
        final_results = []
        for subject in pre_results:
            reservation = subject.reservation_model(subject=subject, **kwargs)
            try:
                subject._validate_reservation(reservation)
                if kwargs.get('exclusive') or subject._only_exclusive_reservations():
                    subject._validate_exclusive(reservation)
            except ReservationValidationError:
                pass  # subject is simply excluded from the results
            else:
                final_results.append(subject)
        return final_results
class ExclusivelyAvailableSubjects(Subjects):
    """Searcher to find subject available for exclusive reservation."""

    def search(self, start, end):
        # For an exclusive reservation, any colliding reservation at all
        # disqualifies the subject.
        colliding = self.reservation_model.colliding_reservations_in_subjects(
            start=start,
            end=end,
            subjects=self.subject_manager.all()
        )
        taken_subject_ids = colliding.values('subject_id').distinct()
        return self.subject_manager.exclude(id__in=taken_subject_ids)
class FindPeriod(object):
    """Searcher for subperiod with possible reservations.

    To search on certain subject for a period when it is available.
    E.g. to search for 7 days availability during May 2012.
    """
    def search(self,
               start,
               end,
               required_duration=timedelta(1),
               subject=None,
               required_size=0,
               reservations=None
               ):
        """Return a list of (start, end) pairs, each at least
        *required_duration* long, during which *required_size* capacity is
        free on *subject* (or on the supplied *reservations*)."""
        timeline = Timeline(start, end, subject, reservations)
        # Without a subject, capacity defaults to a single exclusive slot.
        available_size = subject.size if subject else 1
        if not required_size:
            required_size = available_size
        available_dates = []
        # potential_start marks the beginning of the current free stretch;
        # None while we are inside an over-booked stretch.
        potential_start = timeline.start
        current_date = timeline.end  # fallback when the timeline is empty
        current_size = 0  # running occupied size while sweeping the timeline
        for current_date, delta in timeline:
            current_size += delta
            if current_size + required_size <= available_size:
                if potential_start is None:
                    potential_start = current_date
            elif potential_start:
                # The free stretch just ended; keep it when long enough.
                if current_date - potential_start >= required_duration:
                    available_dates.append((potential_start, current_date))
                potential_start = None
        # Flush a free stretch still open at the end of the search window.
        if (
            potential_start is not None
            and current_size + required_size <= available_size
            and end - potential_start >= required_duration
        ):
            available_dates.append((potential_start, end))
        return available_dates
class Clusters(Subjects):
    """Searcher for clusters available in certain time period."""
    def __init__(self, subject_model, cluster_model, subject_related_name='subjects', *args, **kwargs):
        # NOTE(review): Subjects.__init__ is not called and subject_model /
        # _subject_manager are never set, so inherited members that rely on
        # them (subject_manager, valid_search) would fail on this class —
        # confirm whether they are ever used through a Clusters instance.
        self.reservation_model = subject_model.get_reservation_model()
        self.cluster_model = cluster_model
        self.subject_related_name = subject_related_name

    def _get_cluster_manager(self):
        # Lazily default to the cluster model's default manager.
        if not hasattr(self, '_cluster_model_manager'):
            self._cluster_model_manager = self.cluster_model.objects
        return self._cluster_model_manager

    def _set_cluster_manager(self, value):
        self._cluster_model_manager = value

    cluster_manager = property(_get_cluster_manager, _set_cluster_manager)

    def search(self, start, end, required_size):
        """Return clusters whose pooled subject capacity leaves at least
        *required_size* free during the whole [start, end] period."""
        # Total capacity per cluster = sum of its subjects' sizes.
        clusters_with_size = self.cluster_manager.annotate(size=Sum(self.subject_related_name + '__size'))
        clusters_with_size_dict = dict((cluster.id, cluster) for cluster in clusters_with_size)
        colliding_reservations = self.reservation_model.colliding_reservations_in_clusters(
            start=start,
            end=end,
            clusters=self.cluster_manager.all()
        ).select_related('subject')
        # Sweep line per subject: +size at the (clamped) reservation start,
        # -size at the reservation end (when inside the window).
        timelines = defaultdict(lambda: defaultdict(lambda: 0))
        disqualified_clusters = []
        for reservation in colliding_reservations:
            # Equivalent to max(start, reservation.start).
            reservation_start = start if reservation.start < start else reservation.start
            timelines[reservation.subject][reservation_start] += reservation.size
            if reservation.end < end:
                timelines[reservation.subject][reservation.end] -= reservation.size
        for subject, timeline in timelines.iteritems():
            reservations_cnt = 0
            max_reservations = 0
            for moment in sorted(timeline.keys()):
                reservations_cnt += timeline[moment]
                if reservations_cnt > max_reservations:
                    max_reservations = reservations_cnt
            # Subtract the subject's peak occupancy from its cluster's pool.
            cluster = clusters_with_size_dict[subject.cluster_id]
            cluster.size -= max_reservations
            if cluster.size < required_size:
                disqualified_clusters.append(subject.cluster_id)
        return clusters_with_size.filter(~Q(id__in=disqualified_clusters), size__gte=required_size)
add extra size param to available subjects searcher
# TODO: code review of whole module
from collections import defaultdict
from datetime import timedelta
from django.db.models import Q, Sum
from kitabu.search.utils import Timeline
from kitabu.exceptions import ReservationValidationError
class Subjects(object):
    """Searcher for subjects available in certain time period.

    Constructor is used to set subject model and possibly limit subjects on
    which search should be performed.

    Finite availablity means only certain number of reservations at a time is
    possible.
    """
    def __init__(self, subject_model, subject_manager=None):
        self.subject_model = subject_model
        self.reservation_model = subject_model.get_reservation_model()
        if subject_manager:
            self._subject_manager = subject_manager

    def _get_subject_manager(self):
        # Lazily default to the subject model's default manager.
        if not hasattr(self, '_subject_manager'):
            self._subject_manager = self.subject_model.objects
        return self._subject_manager

    def _set_subject_manager(self, value):
        self._subject_manager = value

    subject_manager = property(_get_subject_manager, _set_subject_manager)

    def search(self, start, end, required_size=None, size=None):
        """Return subjects with at least *required_size* capacity (alias:
        *size*) free for the whole [start, end] period.

        Raises TypeError when neither required_size nor size is given.
        """
        if required_size is None:
            required_size = size
        if required_size is None:
            # TypeError is the conventional error for a missing argument;
            # it is still caught by any pre-existing `except Exception`.
            raise TypeError('required_size or size must be provided')
        colliding_reservations = self.reservation_model.colliding_reservations_in_subjects(
            start=start,
            end=end,
            subjects=self.subject_manager.all()
        ).select_related('subject')
        # Sweep line per subject: +size at the (clamped) reservation start,
        # -size at the reservation end (when it falls inside the window).
        timelines = defaultdict(lambda: defaultdict(lambda: 0))
        for reservation in colliding_reservations:
            reservation_start = max(start, reservation.start)
            timelines[reservation.subject][reservation_start] += reservation.size
            if reservation.end < end:
                timelines[reservation.subject][reservation.end] -= reservation.size
        disqualified_subjects = []
        for subject, timeline in timelines.iteritems():
            # Running occupancy; the subject is out as soon as the occupancy
            # plus the requested size exceeds its capacity at any moment.
            # (A dead "max_reservations" accumulator was removed here — it
            # was computed but never read.)
            reservations_cnt = 0
            for moment in sorted(timeline.keys()):
                reservations_cnt += timeline[moment]
                if reservations_cnt + required_size > subject.size:
                    disqualified_subjects.append(subject.id)
                    break
        return self.subject_manager.exclude(id__in=disqualified_subjects).filter(size__gte=required_size)

    def valid_search(self, *args, **kwargs):
        """Like search(), but additionally run reservation validators and
        drop subjects whose validation fails."""
        pre_results = self.search(*args, **kwargs)
        final_results = []
        for subject in pre_results:
            reservation = subject.reservation_model(subject=subject, **kwargs)
            try:
                subject._validate_reservation(reservation)
                if kwargs.get('exclusive') or subject._only_exclusive_reservations():
                    subject._validate_exclusive(reservation)
            except ReservationValidationError:
                pass  # subject is simply excluded from the results
            else:
                final_results.append(subject)
        return final_results
class ExclusivelyAvailableSubjects(Subjects):
    """Searcher for subjects that can be reserved exclusively.

    A subject qualifies only when it has no reservation at all that
    collides with the requested period.
    """
    def search(self, start, end):
        # Any colliding reservation disqualifies its subject outright.
        taken = self.reservation_model.colliding_reservations_in_subjects(
            start=start,
            end=end,
            subjects=self.subject_manager.all()
        )
        busy_subject_ids = taken.values('subject_id').distinct()
        return self.subject_manager.exclude(id__in=busy_subject_ids)
class FindPeriod(object):
    """Searcher for subperiod with possible reservations.
    To search on certain subject for a period when it is available.
    E.g. to search for 7 days availability during May 2012.
    """
    def search(self,
               start,
               end,
               required_duration=timedelta(1),
               subject=None,
               required_size=0,
               reservations=None
               ):
        """Return a list of (start, end) windows inside [start, end] that
        are at least ``required_duration`` long and have ``required_size``
        units free for their whole length.
        """
        # Timeline is assumed to iterate chronologically, yielding
        # (moment, occupancy delta) pairs -- TODO confirm against
        # kitabu.search.utils.Timeline.
        timeline = Timeline(start, end, subject, reservations)
        # Without a subject, capacity defaults to a single unit.
        available_size = subject.size if subject else 1
        if not required_size:
            required_size = available_size
        available_dates = []
        # potential_start marks the opening of a candidate free window;
        # None means we are currently inside a too-busy stretch.
        potential_start = timeline.start
        current_date = timeline.end
        current_size = 0
        for current_date, delta in timeline:
            current_size += delta
            if current_size + required_size <= available_size:
                # Enough room from this moment on -- open a window if one
                # is not already open.
                if potential_start is None:
                    potential_start = current_date
            elif potential_start:
                # Capacity exceeded: close the window; keep it only if it
                # was long enough.
                if current_date - potential_start >= required_duration:
                    available_dates.append((potential_start, current_date))
                potential_start = None
        # Handle a window still open at the end of the searched period.
        if (
            potential_start is not None
            and current_size + required_size <= available_size
            and end - potential_start >= required_duration
        ):
            available_dates.append((potential_start, end))
        return available_dates
class Clusters(Subjects):
    """Searcher for clusters available in certain time period."""
    def __init__(self, subject_model, cluster_model, subject_related_name='subjects', *args, **kwargs):
        # NOTE(review): Subjects.__init__ is not called here, so
        # self.subject_model / self.subject_manager are never set and
        # *args/**kwargs are silently ignored -- confirm this is intended.
        self.reservation_model = subject_model.get_reservation_model()
        self.cluster_model = cluster_model
        # Name of the reverse relation from cluster to its subjects, used to
        # aggregate cluster capacity.
        self.subject_related_name = subject_related_name
    def _get_cluster_manager(self):
        # Lazily fall back to the cluster model's default manager.
        if not hasattr(self, '_cluster_model_manager'):
            self._cluster_model_manager = self.cluster_model.objects
        return self._cluster_model_manager
    def _set_cluster_manager(self, value):
        self._cluster_model_manager = value
    cluster_manager = property(_get_cluster_manager, _set_cluster_manager)
    def search(self, start, end, required_size):
        """Return a queryset of clusters whose total capacity, minus each
        member subject's peak occupancy during [start, end], is at least
        ``required_size``.
        """
        # Annotate each cluster with its total capacity (sum of its
        # subjects' sizes).
        clusters_with_size = self.cluster_manager.annotate(size=Sum(self.subject_related_name + '__size'))
        clusters_with_size_dict = dict((cluster.id, cluster) for cluster in clusters_with_size)
        colliding_reservations = self.reservation_model.colliding_reservations_in_clusters(
            start=start,
            end=end,
            clusters=self.cluster_manager.all()
        ).select_related('subject')
        # Per-subject sparse timeline: moment -> occupancy delta
        # (see Subjects.search for the same construction).
        timelines = defaultdict(lambda: defaultdict(lambda: 0))
        disqualified_clusters = []
        for reservation in colliding_reservations:
            # Clip the reservation start to the searched period.
            reservation_start = start if reservation.start < start else reservation.start
            timelines[reservation.subject][reservation_start] += reservation.size
            if reservation.end < end:
                timelines[reservation.subject][reservation.end] -= reservation.size
        # NOTE: iteritems() is Python 2 only.
        for subject, timeline in timelines.iteritems():
            reservations_cnt = 0
            max_reservations = 0
            for moment in sorted(timeline.keys()):
                reservations_cnt += timeline[moment]
                if reservations_cnt > max_reservations:
                    max_reservations = reservations_cnt
            # Subtract this subject's peak occupancy from its cluster's
            # remaining capacity; the subtraction accumulates across all of
            # the cluster's subjects.
            cluster = clusters_with_size_dict[subject.cluster_id]
            cluster.size -= max_reservations
            if cluster.size < required_size:
                disqualified_clusters.append(subject.cluster_id)
        return clusters_with_size.filter(~Q(id__in=disqualified_clusters), size__gte=required_size)
|
#
# Copyright (c) 2008 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Types used internally by the rBuild implementation.
As with all internal components, these interfaces are subject to
change.
"""
import weakref
def findPropCaller(descr, othercls):
    """
    Figure out what attribute a descriptor was accessed as.
    NOTE: When using a descriptor that utilizes this function, never
    use the same instance of the descriptor for multiple attributes,
    or this function will return the wrong name!
    @param descr: Descriptor to find the binding for
    @param othercls: Class whose MRO will be searched
    @return: attribute name under which C{descr} is stored
    @raise AssertionError: if no class in the MRO references C{descr}
    """
    for cls in othercls.__mro__:
        # items() instead of the Python-2-only iteritems() so this also
        # works on Python 3; identity comparison finds the exact instance.
        for key, value in cls.__dict__.items():
            if value is descr:
                return key
    # Nothing in the object's MRO references this descriptor.
    raise AssertionError("invalid descriptor call")
class AttributeHook(object):
    """
    A property descriptor that "hooks" all assignments.
    Before an assignment, a method is invoked on the new value (if it
    is not C{None}), with the parent object as the only argument.
    Attribute fetches are passed through. Deletions will assign C{None}.
    This is a data descriptor.
    @param attribute: The attribute to invoke on the new value before
        the assignment is performed.
    @type attribute: C{basestring}
    """
    __slots__ = ['attribute']
    def __init__(self, attribute):
        self.attribute = attribute
    def _attr(self, cls):
        """
        Get the called attribute name.
        @param cls: Owner class
        """
        # The storage key is the attribute name this descriptor is bound
        # to, discovered by scanning the owner's MRO.
        return findPropCaller(self, cls)
    def __get__(self, obj, cls):
        """
        Pass-through; should behave identically to a descriptorless fetch.
        """
        prop = self._attr(cls)
        if obj:
            # Instance access: read the shadowed slot, defaulting to None.
            return obj.__dict__.get(prop, None)
        else:
            # Class-level access returns the descriptor itself.
            return self
    def __set__(self, obj, value):
        """
        If C{value} is not C{None}, invoke a pre-hook on that value.
        """
        if value is not None:
            # Notify the new value of the object it is being attached to.
            getattr(value, self.attribute)(obj)
        prop = self._attr(type(obj))
        obj.__dict__[prop] = value
    def __delete__(self, obj):
        """
        Assign C{None}.
        """
        # Deletion stores None rather than removing the slot.
        prop = self._attr(type(obj))
        obj.__dict__[prop] = None
class WeakReference(object):
    """
    A property descriptor that transparently weak references its value.
    Upon assignment, a weak reference is created, and further accesses
    dereference it automatically. The default value is always C{None}.
    The generated weak reference can be accessed by appending "_ref" to
    the name this descriptor is assigned to, though only after an
    assignment has been performed. Consequently, if slots are in use
    in the owning class, a slot will need to be added with this name.
    This is a data descriptor.
    """
    __slots__ = []
    def _attr(self, cls):
        """
        Return the name used to stow the reference itself in the
        owner's dictionary.
        @param cls: Owner class
        """
        # The raw weakref is stored under "<attribute name>_ref".
        return findPropCaller(self, cls) + '_ref'
    def __get__(self, obj, cls):
        """
        If the stored reference exists and is not C{None}, de-reference
        it and return that value. Otherwise, return C{None}.
        """
        if obj:
            prop = self._attr(cls)
            ref = obj.__dict__.get(prop, None)
            if ref:
                # Dereference; yields None if the referent was collected.
                return ref()
            else:
                return None
        else:
            # Pass-through
            return self
    def __set__(self, obj, value):
        """
        Store a weak reference to C{value}, or C{None} if C{value} is
        C{None}.
        """
        if value is not None:
            value = weakref.ref(value)
        prop = self._attr(type(obj))
        obj.__dict__[prop] = value
    def __delete__(self, obj):
        """
        Assign C{None}.
        """
        prop = self._attr(type(obj))
        obj.__dict__[prop] = None
More docstr improvements to findPropCaller
#
# Copyright (c) 2008 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Types used internally by the rBuild implementation.
As with all internal components, these interfaces are subject to
change.
"""
import weakref
def findPropCaller(descr, othercls):
    """
    Figure out what attribute of class C{othercls} a descriptor
    C{descr} is stored in.
    C{othercls} and its entire method-resolution order will be
    searched.
    NOTE: Never store multiple copies of the same instance of any
    descriptor, especially ones that use this function. If you do,
    this function will probably return the wrong name!
    @param descr: Descriptor to locate
    @type descr: C{object}
    @param othercls: Class in which to find C{descr}
    @type othercls: C{type}
    @return: attribute name under which C{descr} is stored
    @raise AssertionError: if no class in the MRO references C{descr}
    """
    for cls in othercls.mro():
        # items() instead of the Python-2-only iteritems() so this also
        # works on Python 3; identity comparison finds the exact instance.
        for key, value in cls.__dict__.items():
            if value is descr:
                return key
    # Nothing in the object's MRO references this descriptor.
    raise AssertionError("invalid descriptor call")
class AttributeHook(object):
    """
    Data descriptor that intercepts every assignment to an attribute.

    When a non-C{None} value is assigned, the method named by
    C{attribute} is first invoked on that value with the owning object
    as its only argument. Reads pass straight through to the instance
    dictionary, and deletion stores C{None}.

    @param attribute: Name of the method to invoke on an assigned value
        before the assignment is performed.
    @type attribute: C{basestring}
    """
    __slots__ = ['attribute']
    def __init__(self, attribute):
        self.attribute = attribute
    def _attr(self, cls):
        """
        Resolve which attribute of C{cls} this descriptor is bound to.
        @param cls: Owner class
        """
        return findPropCaller(self, cls)
    def __get__(self, instance, owner):
        """
        Behave like a plain attribute fetch; class-level access returns
        the descriptor itself.
        """
        if not instance:
            return self
        return instance.__dict__.get(self._attr(owner), None)
    def __set__(self, instance, value):
        """
        Fire the hook on the incoming value (unless C{None}), then store it.
        """
        if value is not None:
            getattr(value, self.attribute)(instance)
        instance.__dict__[self._attr(type(instance))] = value
    def __delete__(self, instance):
        """
        Reset the stored slot to C{None} rather than removing it.
        """
        instance.__dict__[self._attr(type(instance))] = None
class WeakReference(object):
    """
    Data descriptor that keeps its value behind a weak reference.

    Assignment wraps the value in C{weakref.ref}; reads dereference it
    transparently and yield C{None} once the referent is gone or when
    nothing has been assigned. The raw reference is kept in the
    instance dictionary under "<name>_ref", so slotted owners must
    declare that slot themselves.
    This is a data descriptor.
    """
    __slots__ = []
    def _attr(self, cls):
        """
        Return the key used to stow the raw reference in the owner's
        dictionary: the bound attribute name suffixed with "_ref".
        @param cls: Owner class
        """
        return findPropCaller(self, cls) + '_ref'
    def __get__(self, instance, owner):
        """
        Dereference and return the stored value, or C{None} if nothing
        is stored or the referent has been collected.
        """
        if not instance:
            # Class-level access returns the descriptor itself.
            return self
        ref = instance.__dict__.get(self._attr(owner), None)
        return ref() if ref else None
    def __set__(self, instance, value):
        """
        Store a weak reference to C{value}; C{None} is stored as-is.
        """
        wrapped = weakref.ref(value) if value is not None else None
        instance.__dict__[self._attr(type(instance))] = wrapped
    def __delete__(self, instance):
        """
        Assign C{None}.
        """
        instance.__dict__[self._attr(type(instance))] = None
|
from datetime import datetime
import logging
import time
import requests
__author__ = "Michaël Hompus"
__copyright__ = "Copyright 2018, Michaël Hompus"
__license__ = "MIT"
__email__ = "michael@hompus.nl"
class DarkSkyApi:
    """Thin client for the Dark Sky forecast API.

    Each request is attempted up to three times with a cubic back-off
    between attempts before giving up.
    """
    def __init__(self, api_key):
        # Dark Sky secret key, embedded in every request URL.
        self.api_key = api_key
    def get_temperature(self, latitude, longitude):
        """Return the current temperature (SI units) at the coordinates,
        or None when a coordinate is missing or every attempt fails.
        """
        if latitude is None or longitude is None:
            return None
        data = {
            'apiKey' : self.api_key,
            'latitude' : latitude,
            'longitude' : longitude
        }
        url = "https://api.darksky.net/forecast/{apiKey}/{latitude},{longitude}?units=si&exclude=minutely,hourly,daily,alerts,flags".format(**data)
        for i in range(1, 4):
            try:
                r = requests.get(url, timeout=10)
                r.raise_for_status()
                result = r.json()
                return result['currently']['temperature']
            except requests.exceptions.RequestException as arg:
                logging.warning(arg)
                # NOTE(review): this also sleeps (27 s) after the final
                # failed attempt, just before giving up.
                time.sleep(i ** 3)
        else:
            # for/else: reached only when all three attempts failed; the
            # success path returns from inside the loop. Falls through,
            # implicitly returning None.
            logging.error("Failed to call DarkSky API")
    def get_temperature_for_day(self, latitude, longitude, date):
        """Return the hourly data for the day containing ``date``
        (time-machine request), or None when a coordinate is missing or
        every attempt fails.
        """
        if latitude is None or longitude is None:
            return None
        data = {
            'apiKey' : self.api_key,
            'latitude' : latitude,
            'longitude' : longitude,
            # Dark Sky expects an ISO-8601 timestamp; the date is shifted
            # into the local timezone first.
            'date' : date.astimezone(datetime.now().tzinfo).isoformat()
        }
        url = "https://api.darksky.net/forecast/{apiKey}/{latitude},{longitude},{date}?units=si&exclude=minutely,currently,daily,alerts,flags".format(**data)
        for i in range(1, 4):
            try:
                r = requests.get(url, timeout=10)
                r.raise_for_status()
                result = r.json()
                return result['hourly']['data']
            except requests.exceptions.RequestException as arg:
                logging.warning(arg)
                # NOTE(review): same post-final-attempt sleep as above.
                time.sleep(i ** 3)
        else:
            # Reached only when all three attempts failed.
            logging.error("Failed to call DarkSky API")
Convert file to Unix format; add newline to end of file
from datetime import datetime
import logging
import time
import requests
__author__ = "Michaël Hompus"
__copyright__ = "Copyright 2018, Michaël Hompus"
__license__ = "MIT"
__email__ = "michael@hompus.nl"
class DarkSkyApi:
    """Thin client for the Dark Sky forecast API.

    Each request is attempted up to three times; transient HTTP errors are
    logged and retried with a cubic back-off (1 s, then 8 s).
    """
    def __init__(self, api_key):
        """
        Args:
            api_key: Dark Sky secret key, embedded in every request URL.
        """
        self.api_key = api_key
    def get_temperature(self, latitude, longitude):
        """Return the current temperature (SI units) at the coordinates,
        or None when a coordinate is missing or the API cannot be reached.
        """
        if latitude is None or longitude is None:
            return None
        data = {
            'apiKey' : self.api_key,
            'latitude' : latitude,
            'longitude' : longitude
        }
        url = "https://api.darksky.net/forecast/{apiKey}/{latitude},{longitude}?units=si&exclude=minutely,hourly,daily,alerts,flags".format(**data)
        result = self._request_json(url)
        if result is None:
            return None
        return result['currently']['temperature']
    def get_temperature_for_day(self, latitude, longitude, date):
        """Return the hourly data for the day containing ``date``
        (time-machine request), or None when a coordinate is missing or the
        API cannot be reached.
        """
        if latitude is None or longitude is None:
            return None
        data = {
            'apiKey' : self.api_key,
            'latitude' : latitude,
            'longitude' : longitude,
            # Dark Sky expects an ISO-8601 timestamp; shift into local tz.
            'date' : date.astimezone(datetime.now().tzinfo).isoformat()
        }
        url = "https://api.darksky.net/forecast/{apiKey}/{latitude},{longitude},{date}?units=si&exclude=minutely,currently,daily,alerts,flags".format(**data)
        result = self._request_json(url)
        if result is None:
            return None
        return result['hourly']['data']
    def _request_json(self, url):
        """GET ``url`` and return the parsed JSON body.

        Consolidates the retry loop previously duplicated in both public
        methods. Retries up to three times on request errors; unlike the
        original loops, it no longer sleeps after the final failed attempt.
        Returns None when all attempts fail.
        """
        for attempt in range(1, 4):
            try:
                r = requests.get(url, timeout=10)
                r.raise_for_status()
                return r.json()
            except requests.exceptions.RequestException as err:
                logging.warning(err)
                if attempt < 3:
                    # Cubic back-off: 1 s after the first failure, 8 s
                    # after the second.
                    time.sleep(attempt ** 3)
        logging.error("Failed to call DarkSky API")
        return None
|
""" Karr Lab build utilities
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2016-08-02
:Copyright: 2016, Karr Lab
:License: MIT
"""
from codeclimate_test_reporter.components.runner import Runner as CodeClimateRunner
from datetime import datetime
from jinja2 import Template
from pylint import epylint
from sphinx.cmd.build import main as sphinx_build
from sphinx.apidoc import main as sphinx_apidoc
from mock import patch
from six.moves import configparser
from xml.dom import minidom
import abduct
import attrdict
import capturer
import click
import coverage
import coveralls
import dateutil.parser
import email
import email.header
import email.message
import email.utils
import enum
import fnmatch
import ftputil
import github
import glob
import graphviz
# import instrumental.api
import json
import karr_lab_build_utils
import logging
import mock
import networkx
import nose
import os
import pip
import pip_check_reqs
import pip_check_reqs.find_extra_reqs
import pip_check_reqs.find_missing_reqs
# import pkg_utils
# pkg_utils is not imported globally so that we can use karr_lab_build_utils to properly calculate its coverage
# :todo: figure out how to fix this
import pkg_resources
import pytest
import re
import requests
import shutil
import smtplib
import subprocess
import sys
import tempfile
import time
import twine.commands.upload
import yaml
import warnings
import whichcraft
class CoverageType(enum.Enum):
    """ Types of coverage """
    statement = 0
    branch = 1
    multiple_condition = 2
    # NOTE(review): same value as multiple_condition, so enum aliasing makes
    # CoverageType.decision identical to CoverageType.multiple_condition --
    # confirm this aliasing is intentional (vs. an intended value of 3).
    decision = 2
class Environment(enum.Enum):
    """ Environments to run tests """
    local = 0     # directly on the local machine
    docker = 1    # inside a Docker container
    circleci = 2  # on CircleCI
class BuildHelper(object):
""" Utility class to help build projects:
* Run tests
* Archive reports to test history server, Coveralls, and Code Climate
Attributes:
test_runner (:obj:`str`): name of test runner {pytest, nose}
repo_name (:obj:`str`): repository name
repo_owner (:obj:`str`): name of the repository owner
repo_branch (:obj:`str`): repository branch name
repo_revision (:obj:`str`): sha of repository revision
build_num (:obj:`int`): CircleCI build number
proj_tests_dir (:obj:`str`): local directory with test code
proj_tests_xml_dir (:obj:`str`): local directory to store latest XML test report
proj_tests_xml_latest_filename (:obj:`str`): file name to store latest XML test report
proj_docs_dir (:obj:`str`): local directory with Sphinx configuration
proj_docs_static_dir (:obj:`str`): local directory of static documentation files
proj_docs_source_dir (:obj:`str`): local directory of source documentation files created by sphinx-apidoc
proj_docs_build_doctrees_dir (:obj:`str`): local directory where doc trees should be saved
proj_docs_build_html_dir (:obj:`str`): local directory where generated HTML documentation should be saved
proj_docs_build_spelling_dir (:obj:`str`): local directory where spell check results should be saved
build_image (:obj:`str`): Docker image to use to run tests
coveralls_token (:obj:`str`): Coveralls token
code_climate_token (:obj:`str`): Code Climate token
github_username (obj:`str`): GitHub username
github_password (obj:`str`): GitHub password
circleci_api_token (:obj:`str`): CircleCI API token
test_server_token (:obj:`str`): test history report server token
karr_lab_daemon_gmail_password (:obj:`obj:`str`): password for karr.lab.daemon@gmail.com
INITIAL_PACKAGE_VERSION (:obj:`str`): initial package version
DEFAULT_BUILD_IMAGE_VERSION (:obj:`str`): default build image version
DEFAULT_TEST_RUNNER (:obj:`str`): default test runner {pytest, nose}
DEFAULT_PROJ_TESTS_DIR (:obj:`str`): default local directory with test code
DEFAULT_PROJ_TESTS_XML_DIR (:obj:`str`): default local directory where the test reports generated should be saved
DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME (:obj:`str`): default file name to store latest XML test report
DEFAULT_PROJ_DOCS_DIR (:obj:`str`): default local directory with Sphinx configuration
DEFAULT_PROJ_DOCS_STATIC_DIR (:obj:`str`): default local directory of static documentation files
DEFAULT_PROJ_DOCS_SOURCE_DIR (:obj:`str`): default local directory of source documentation files created by sphinx-apidoc
DEFAULT_PROJ_DOCS_SPELLING_DIR (:obj:`str`): default local directory where spell check results should be saved
DEFAULT_PROJ_DOCS_BUILD_HTML_DIR (:obj:`str`): default local directory where generated HTML documentation should be saved
DEFAULT_BUILD_IMAGE (:obj:`str`): default Docker image to use to run tests
GITHUB_API_ENDPOINT (:obj:`str`): GitHub API endpoint
CIRCLE_API_ENDPOINT (:obj:`str`): CircleCI API endpoint
COVERALLS_ENABLED (:obj:`bool`): if :obj:`True`, upload coverage reports to Coveralls
CODE_CLIMATE_ENABLED (:obj:`bool`): if :obj:`True`, upload coverage reports to Code Climate
"""
INITIAL_PACKAGE_VERSION = '0.0.1'
DEFAULT_BUILD_IMAGE_VERSION = '0.0.19'
DEFAULT_TEST_RUNNER = 'pytest'
DEFAULT_PROJ_TESTS_DIR = 'tests'
DEFAULT_PROJ_TESTS_XML_DIR = 'tests/reports'
DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME = 'latest'
DEFAULT_PROJ_DOCS_DIR = 'docs'
DEFAULT_PROJ_DOCS_STATIC_DIR = 'docs/_static'
DEFAULT_PROJ_DOCS_SOURCE_DIR = 'docs/source'
DEFAULT_PROJ_DOCS_BUILD_DOCTREES_DIR = 'docs/_build/doctrees'
DEFAULT_PROJ_DOCS_BUILD_HTML_DIR = 'docs/_build/html'
DEFAULT_PROJ_DOCS_BUILD_SPELLING_DIR = 'docs/_build/spelling'
DEFAULT_BUILD_IMAGE = 'karrlab/build:latest'
GITHUB_API_ENDPOINT = 'https://api.github.com'
CIRCLE_API_ENDPOINT = 'https://circleci.com/api/v1.1'
COVERALLS_ENABLED = True
CODE_CLIMATE_ENABLED = True
    def __init__(self):
        """ Construct build helper """
        # get settings from environment variables
        self.test_runner = os.getenv('TEST_RUNNER', self.DEFAULT_TEST_RUNNER)
        if self.test_runner not in ['pytest', 'nose']:
            raise BuildHelperError('Unsupported test runner {}'.format(self.test_runner))
        # repository coordinates come from CircleCI's standard environment
        # variables; owner falls back to 'KarrLab' outside CircleCI
        self.repo_type = 'github'
        self.repo_name = os.getenv('CIRCLE_PROJECT_REPONAME')
        self.repo_owner = os.getenv('CIRCLE_PROJECT_USERNAME') or 'KarrLab'
        self.repo_branch = os.getenv('CIRCLE_BRANCH')
        self.repo_revision = os.getenv('CIRCLE_SHA1')
        try:
            # float() first so values such as '12.0' are tolerated; a
            # missing or invalid build number defaults to 0
            self.build_num = int(float(os.getenv('CIRCLE_BUILD_NUM')))
        except (TypeError, ValueError, ):
            self.build_num = 0
        # project layout defaults (see class-level DEFAULT_* constants)
        self.proj_tests_dir = self.DEFAULT_PROJ_TESTS_DIR
        self.proj_tests_xml_dir = self.DEFAULT_PROJ_TESTS_XML_DIR
        self.proj_tests_xml_latest_filename = self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME
        self.proj_docs_dir = self.DEFAULT_PROJ_DOCS_DIR
        self.proj_docs_static_dir = self.DEFAULT_PROJ_DOCS_STATIC_DIR
        self.proj_docs_source_dir = self.DEFAULT_PROJ_DOCS_SOURCE_DIR
        self.proj_docs_build_doctrees_dir = self.DEFAULT_PROJ_DOCS_BUILD_DOCTREES_DIR
        self.proj_docs_build_html_dir = self.DEFAULT_PROJ_DOCS_BUILD_HTML_DIR
        self.proj_docs_build_spelling_dir = self.DEFAULT_PROJ_DOCS_BUILD_SPELLING_DIR
        self.build_image = self.DEFAULT_BUILD_IMAGE
        # credentials/tokens for external services; None when the
        # corresponding environment variable is unset
        self.coveralls_token = os.getenv('COVERALLS_REPO_TOKEN')
        self.code_climate_token = os.getenv('CODECLIMATE_REPO_TOKEN')
        self.github_username = os.getenv('GITHUB_USERNAME')
        self.github_password = os.getenv('GITHUB_PASSWORD')
        self.circleci_api_token = os.getenv('CIRCLECI_API_TOKEN')
        self.test_server_token = os.getenv('TEST_SERVER_TOKEN')
        self.karr_lab_daemon_gmail_password = os.getenv('KARR_LAB_DAEMON_GMAIL_PASSWORD')
        # FTP access to the code.karrlab.org package index
        self.code_server_hostname = 'code.karrlab.org'
        self.code_server_username = 'karrlab_code'
        self.code_server_password = os.getenv('CODE_SERVER_PASSWORD')
        self.code_server_directory = '/code.karrlab.org/repo'
#####################
# Create a package
#####################
    def create_package(self):
        """ Create a package

        * Create a local Git repository
        * Create a remote GitHub repository
        * Add the repository to Code Climate
        * Add the repository to Coveralls
        * Add the repository to CircleCI project (by following the GitHub repository)
        * Add environment variable for tokens for code.karrlab.org, Coveralls, Code Climate, and CircleCI
        * Add environment variable for password for karr.lab.daemon@gmail.com
        * Generate API token for status badge
        * If the repository is not private, add the repository to Read the Docs
        * Add the package to code.karrlab.org

          * Add JSON-formatted file to ``ssh://code.karrlab.org:/home/karrlab_code/code.karrlab.org/repo/{{ name }}.json``

        * Add badges for Code Climate, Coveralls, CircleCI, and Read the Docs to README.md
        * Add package name to ``.circleci/downstream_dependencies.yml`` files of all dependencies
        """
        # print introductory message
        print('This program will guide you through creating a new package.')
        click.confirm('Continue?', default=True, abort=True)
        # gather basic information
        name = click.prompt('Enter the name of the new package', type=str)
        description = click.prompt('Enter a brief description of the new package', type=str)
        keywords = click.prompt('Enter a comma-separated list of keywords for the new package', type=str, default=' ')
        keywords = [kw.strip() for kw in keywords.strip().split(',') if kw.strip()]
        dependencies = click.prompt(
            'Enter a comma-separated list of Karr Lab packages that the new package depends on', type=str, default=' ')
        dependencies = [dep.strip() for dep in dependencies.strip().split(',') if dep.strip()]
        private = click.confirm('Should the repository be private?', default=True)
        dirname = click.prompt('Enter the directory for the new package', type=str, default=os.path.join('.', name))
        build_image_version = click.prompt('Enter the build image version to test the package',
                                           type=str, default=self.DEFAULT_BUILD_IMAGE_VERSION)
        github_username = click.prompt('Enter your GitHub username', type=str, default=self.github_username)
        # secrets default to a masked ('***') placeholder; leaving the
        # placeholder unchanged means "reuse the value from the environment"
        github_password = click.prompt('Enter your GitHub password', type=str, hide_input=True,
                                       default='*' * len(self.github_password or ''))
        if github_password == '*' * len(self.github_password or ''):
            github_password = self.github_password
        circleci_api_token = click.prompt('Enter the CircleCI API token for the karr-lab-daemon GitHub account',
                                          type=str, hide_input=True, default='*' * len(self.circleci_api_token or ''))
        if circleci_api_token == '*' * len(self.circleci_api_token or ''):
            circleci_api_token = self.circleci_api_token
        test_server_token = click.prompt('Enter the token for tests.karrlab.org', type=str,
                                         hide_input=True, default='*' * len(self.test_server_token or ''))
        if test_server_token == '*' * len(self.test_server_token or ''):
            test_server_token = self.test_server_token
        karr_lab_daemon_gmail_password = click.prompt('Enter the password for karr.lab.daemon@gmail.com',
                                                      type=str, hide_input=True,
                                                      default='*' * len(self.karr_lab_daemon_gmail_password or ''))
        if karr_lab_daemon_gmail_password == '*' * len(self.karr_lab_daemon_gmail_password or ''):
            karr_lab_daemon_gmail_password = self.karr_lab_daemon_gmail_password
        code_server_username = click.prompt('Enter your username for ftp://' + self.code_server_hostname,
                                            type=str, default=self.code_server_username)
        code_server_password = click.prompt('Enter your password for ftp://' + self.code_server_hostname,
                                            type=str, hide_input=True, default='*' * len(self.code_server_password or ''))
        if code_server_password == '*' * len(self.code_server_password or ''):
            code_server_password = self.code_server_password
        # create local and GitHub Git repositories
        print('Creating {} remote Git repository "{}/{}" on GitHub and cloning this repository to "{}"'.format(
            'private' if private else 'public', self.repo_owner, name, dirname))
        self.create_repository(name, description=description, private=private, dirname=dirname,
                               github_username=github_username, github_password=github_password)
        # Code Climate
        # :todo: programmatically add repo to Code Climate and generate tokens
        print('Visit "https://codeclimate.com/dashboard" and click on the "{}" organization.'.format(
            self.repo_owner if private else 'Open source'))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Sync now" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Add a repository" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Add repo" button for the "{}" repository'.format(name))
        click.confirm('Continue?', default=True, abort=True)
        # NOTE(review): .format(name) below has no placeholder in the string
        print('Click the "settings" link'.format(name))
        click.confirm('Continue?', default=True, abort=True)
        print('Cick the "Test coverage" menu item')
        click.confirm('Continue?', default=True, abort=True)
        codeclimate_repo_token = click.prompt('Enter the "test reporter id"')
        print('Cick the "Badges" menu item')
        click.confirm('Continue?', default=True, abort=True)
        codeclimate_repo_id = click.prompt('Enter the repository ID (ID in the URL https://codeclimate.com/repos/<id>/maintainability)')
        codeclimate_repo_badge_token = click.prompt(
            'Enter the badge token (token in the URL https://api.codeclimate.com/v1/badges/<token>/maintainability)')
        # Coveralls
        # :todo: programmatically add repo to Coveralls and generate tokens
        print('Visit "https://coveralls.io/repos/new"')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "SYNC REPOS" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Search for the "{}/{}" repository and click its "OFF" button'.format(self.repo_owner, name))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the details button for the "{}/{}" repository'.format(self.repo_owner, name))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Settings" menu item')
        click.confirm('Continue?', default=True, abort=True)
        coveralls_repo_token = click.prompt('Enter the "REPO TOKEN"')
        # NOTE(review): mismatched quotes in the next message
        print('Click the "README BADGE" EMBED" button')
        click.confirm('Continue?', default=True, abort=True)
        coveralls_repo_badge_token = click.prompt(
            'Enter the badge token (token in the URL https://coveralls.io/repos/github/KarrLab/test_a/badge.svg?t=<token>')
        # CircleCI
        # :todo: programmatically create CircleCI build
        # :todo: programmatically create CircleCI token for status badges
        # a checkout SSH key is only needed when any dependency repo is private
        has_private_dependencies = False
        g = github.Github(github_username, github_password)
        org = g.get_organization('KarrLab')
        for dependency in dependencies:
            try:
                repo = org.get_repo(dependency)
                has_private_dependencies = has_private_dependencies or repo.private
            except github.UnknownObjectException:
                # dependency has no GitHub repository; ignore
                pass
        print('Visit "https://circleci.com/add-projects/gh/KarrLab"')
        click.confirm('Continue?', default=True, abort=True)
        print('Search for the "{}" repository and click its "Follow project" button'.format(name))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Project settings" icon')
        click.confirm('Continue?', default=True, abort=True)
        if has_private_dependencies:
            print('Click the "Checkout SSH keys" button')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Authorize with GitHub" button')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Create and add ... user key" button')
            click.confirm('Continue?', default=True, abort=True)
        print('Click the "API permissions" menu item')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Create Token" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Select "All", enter a label, and click the "Add Token" button')
        click.confirm('Continue?', default=True, abort=True)
        circleci_repo_token = click.prompt('Enter the new token')
        # push the collected secrets into the CircleCI build environment
        vars = {
            'CIRCLECI_API_TOKEN': circleci_api_token,
            'COVERALLS_REPO_TOKEN': coveralls_repo_token,
            'CODECLIMATE_REPO_TOKEN': codeclimate_repo_token,
            'KARR_LAB_DAEMON_GMAIL_PASSWORD': karr_lab_daemon_gmail_password,
            'TEST_SERVER_TOKEN': test_server_token,
        }
        self.set_circleci_environment_variables(vars, repo_name=name, circleci_api_token=circleci_api_token)
        # Read the Docs
        if not private:
            # :todo: programmatically add repo to Read the Docs
            print('Visit "https://readthedocs.org/dashboard/import/?"')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "refresh" icon')
            click.confirm('Continue?', default=True, abort=True)
            print('Find the "{}" repository and click its "+" button'.format(name))
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Next" button')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Admin" menu item')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Advanced settings" menu item')
            click.confirm('Continue?', default=True, abort=True)
            print('Set the "Requirements file" to "docs/requirements.txt"')
            click.confirm('Continue?', default=True, abort=True)
            print('Set the "Python configuration file" to "docs/conf.py"')
            click.confirm('Continue?', default=True, abort=True)
            print('Set the "Python interpreter" to "CPython 3.x"')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Maintainers" menu item')
            click.confirm('Continue?', default=True, abort=True)
            print('Add "jonrkarr" to the maintainers')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Notifications" menu item')
            click.confirm('Continue?', default=True, abort=True)
            print('Add your email address and click submit')
            click.confirm('Continue?', default=True, abort=True)
            print('Add "jonrkarr@gmail.com" and click submit')
            click.confirm('Continue?', default=True, abort=True)
        # add package to code.karrlab.org
        # render the package-metadata template to a temp file, then upload it
        with open(pkg_resources.resource_filename('karr_lab_build_utils',
                                                  os.path.join('templates', 'code_server', '_package_.json')), 'r') as file:
            template = Template(file.read())
        fid, local_filename = tempfile.mkstemp()
        os.close(fid)
        context = {
            'name': name,
            'description': description,
            'private': private,
            'circleci_repo_token': circleci_repo_token,
            'coveralls_repo_token': coveralls_repo_token,
            'codeclimate_repo_id': codeclimate_repo_id,
        }
        template.stream(**context).dump(local_filename)
        with ftputil.FTPHost(self.code_server_hostname, code_server_username, code_server_password) as ftp:
            remote_filename = ftp.path.join(self.code_server_directory, '{}.json'.format(name))
            ftp.upload(local_filename, remote_filename)
        os.remove(local_filename)
        # setup repository
        self.setup_repository(name, description=description, keywords=keywords, dependencies=dependencies,
                              private=private, build_image_version=build_image_version, dirname=dirname,
                              circleci_repo_token=circleci_repo_token, coveralls_repo_badge_token=coveralls_repo_badge_token,
                              codeclimate_repo_id=codeclimate_repo_id, codeclimate_repo_badge_token=codeclimate_repo_badge_token)
        # append package to downstream dependencies of dependencies
        parent_dirname = os.path.dirname(dirname)
        for dependency in dependencies:
            downstream_deps_filename = os.path.join(parent_dirname, dependency, '.circleci', 'downstream_dependencies.yml')
            if os.path.isfile(downstream_deps_filename):
                with open(downstream_deps_filename, 'r') as file:
                    # NOTE(review): yaml.load without an explicit Loader is
                    # deprecated and unsafe on untrusted input; consider
                    # yaml.safe_load
                    downstream_deps = yaml.load(file)
                downstream_deps.append(name)
                with open(downstream_deps_filename, 'w') as file:
                    yaml.dump(downstream_deps, file, default_flow_style=False)
            else:
                warnings.warn(('Unable to append package to downstream dependency {} because the '
                               'downstream dependency is not available').format(dependency),
                              UserWarning)
def create_repository(self, name, description='', private=True, dirname=None, github_username=None, github_password=None):
    """ Create a Git repository with the default directory structure.

    Creates the repository under the KarrLab GitHub organization, then clones
    it locally into `dirname`.

    Args:
        name (:obj:`str`): package name
        description (:obj:`str`, optional): package description
        private (:obj:`bool`, optional): if :obj:`False`, make the GitHub repository public and set
            up documentation generation with Read the Docs
        dirname (:obj:`str`, optional): directory name for repository
        github_username (:obj:`str`, optional): GitHub username
        github_password (:obj:`str`, optional): GitHub password

    Raises:
        :obj:`BuildHelperError`: if the package name is not a valid repository name
    """
    # validate the repository name before touching GitHub
    if not re.match('^[a-z][a-z0-9_]*$', name):
        raise BuildHelperError("'{}' not valid: Repository names should start with a letter and only include lower "
                               "case letters, numbers, and underscores".format(name))

    # fall back to configured defaults for unset arguments
    dirname = dirname or os.path.join('.', name)
    github_username = self.github_username if github_username is None else github_username
    github_password = self.github_password if github_password is None else github_password

    # create the repository under the KarrLab GitHub organization
    gh = github.Github(github_username, github_password)
    gh.get_organization('KarrLab').create_repo(name=name, description=description, private=private, auto_init=True)

    # temporarily hide ~/.gitconfig so the clone uses only the supplied credentials
    gitconfig_filename = os.path.expanduser('~/.gitconfig')
    has_gitconfig = os.path.isfile(gitconfig_filename)
    if has_gitconfig:
        os.rename(gitconfig_filename, gitconfig_filename + '.ignore')

    import pygit2
    remote_callbacks = pygit2.RemoteCallbacks(credentials=pygit2.UserPass(github_username, github_password))
    pygit2.clone_repository('https://github.com/KarrLab/{}.git'.format(name), dirname, callbacks=remote_callbacks)

    # restore ~/.gitconfig
    if has_gitconfig:
        os.rename(gitconfig_filename + '.ignore', gitconfig_filename)
def setup_repository(self, name, description='', keywords=None, dependencies=None, private=True, build_image_version=None,
                     dirname=None, circleci_repo_token=None, coveralls_repo_badge_token=None, codeclimate_repo_id=None,
                     codeclimate_repo_badge_token=None):
    """ Setup Git repository with the default directory structure.

    Renders the package skeleton (setup files, tests, CircleCI config, package
    directory) from the bundled templates into `dirname`, then creates the
    documentation template.

    Args:
        name (:obj:`str`): package name
        description (:obj:`str`, optional): package description
        keywords (:obj:`list` of :obj:`str`, optional): list of keywords
        dependencies (:obj:`list` of :obj:`str`, optional): list of Karr Lab packages that the package depends on
        private (:obj:`bool`, optional): if :obj:`False`, make the GitHub repository public and set
            up documentation generation with Read the Docs
        build_image_version (:obj:`str`, optional): build image version
        dirname (:obj:`str`, optional): directory name
        circleci_repo_token (:obj:`str`, optional): CircleCI API token (e.g. for badges) for the repository
        coveralls_repo_badge_token (:obj:`str`, optional): Coveralls badge token for the repository
        codeclimate_repo_id (:obj:`str`, optional): Code Climate ID for the repository
        codeclimate_repo_badge_token (:obj:`str`, optional): Code Climate badge token for the repository

    Raises:
        :obj:`BuildHelperError`: if the package name is not a valid repository name
    """
    # validate the repository name
    if not re.match('^[a-z][a-z0-9_]*$', name):
        raise BuildHelperError("'{}' not valid: Repository names should start with a letter and only include lower "
                               "case letters, numbers, and underscores".format(name))

    # fall back to defaults for unset arguments
    keywords = keywords or []
    dependencies = dependencies or []
    build_image_version = build_image_version or self.DEFAULT_BUILD_IMAGE_VERSION
    dirname = dirname or os.path.join('.', name)

    # make sure the repository directory exists
    if not os.path.isdir(dirname):
        os.makedirs(dirname)

    # templates to render; `_package_` paths are renamed to the package name below
    filenames = (
        '.gitignore',
        'LICENSE',
        'MANIFEST.in',
        'README.md',
        'requirements.txt',
        'requirements.optional.txt',
        'setup.py',
        'setup.cfg',
        'tests/requirements.txt',
        'tests/test_core.py',
        'tests/test_main.py',
        '.circleci/config.yml',
        '.circleci/downstream_dependencies.yml',
        '.readthedocs.yml',
        '_package_/__init__.py',
        '_package_/VERSION',
        '_package_/core.py',
        '_package_/__main__.py',
    )

    # context shared by all templates
    now = datetime.now()
    context = {
        'name': name,
        'description': description,
        'keywords': keywords,
        'version': self.INITIAL_PACKAGE_VERSION,
        'year': now.year,
        'date': '{}-{}-{}'.format(now.year, now.month, now.day),
        'dependencies': dependencies,
        'build_image_version': build_image_version,
        'private': private,
        'circleci_repo_token': circleci_repo_token,
        'coveralls_repo_badge_token': coveralls_repo_badge_token,
        'codeclimate_repo_id': codeclimate_repo_id,
        'codeclimate_repo_badge_token': codeclimate_repo_badge_token,
    }

    # render each template into the repository, creating parent directories as needed
    for filename in filenames:
        subdirname = os.path.dirname(filename)
        if subdirname and not os.path.isdir(os.path.join(dirname, subdirname)):
            os.makedirs(os.path.join(dirname, subdirname))
        with open(pkg_resources.resource_filename(
                'karr_lab_build_utils',
                os.path.join('templates', filename)), 'r') as file:
            template = Template(file.read())
        template.stream(**context).dump(os.path.join(dirname, filename))

    # rename the placeholder package directory to the real package name
    os.rename(os.path.join(dirname, '_package_'), os.path.join(dirname, name))

    self.create_documentation_template(dirname)
###########################
# Register repo on CircleCI
###########################
def follow_circleci_build(self, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None,
                          has_private_dependencies=False):
    """ Follow CircleCI build for a repository.

    Args:
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token
        has_private_dependencies (:obj:`bool`, optional): if :obj:`True`, add a GitHub SSH key for the Karr Lab machine user to the build

    Raises:
        :obj:`ValueError`: if a CircleCI build wasn't followed and didn't already exist
    """
    # default unset arguments to the configured repository/credentials
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    # ask CircleCI to follow the repository and confirm it succeeded
    result = self.run_circleci_api('/follow',
                                   method='post', repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
                                   circleci_api_token=circleci_api_token)
    if not result.get('following'):
        raise ValueError(
            'Unable to follow CircleCI build for repository {}/{}'.format(repo_owner, repo_name))

    # add checkout key
    if has_private_dependencies:
        # :todo: add a GitHub SSH key for the Karr Lab machine user to the build
        pass  # pragma: no cover
def get_circleci_environment_variables(self, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
    """ Get the CircleCI environment variables for a repository and their partial values.

    Note: CircleCI only returns partially-masked values, not the full secrets.

    Args:
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token

    Returns:
        :obj:`dict`: dictionary of environment variables and their partial values
    """
    # default unset arguments to the configured repository/credentials
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    env_vars = self.run_circleci_api('/envvar',
                                     repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
                                     circleci_api_token=circleci_api_token)
    return {env_var['name']: env_var['value'] for env_var in env_vars}
def set_circleci_environment_variables(self, vars, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
    """ Set the CircleCI environment variables for a repository.

    Variables which already exist on CircleCI are deleted first, because the
    CircleCI API does not overwrite existing variables in place.

    Args:
        vars (:obj:`dict`): dictionary of environment variables to set
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token
    """
    # default unset arguments to the configured repository/credentials
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    # determine which variables already exist
    old_vars = self.get_circleci_environment_variables(
        repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
        circleci_api_token=circleci_api_token)

    for name, value in vars.items():
        # delete the existing variable so the new value can be set
        if name in old_vars:
            self.delete_circleci_environment_variable(name,
                                                      repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
                                                      circleci_api_token=circleci_api_token)

        # create the variable with its new value
        self.run_circleci_api('/envvar',
                              method='post', repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
                              circleci_api_token=circleci_api_token, data={'name': name, 'value': value})
def delete_circleci_environment_variable(self, var, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
    """ Delete a CircleCI environment variable for a repository.

    Args:
        var (:obj:`str`): name of variable to delete
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        circleci_api_token (:obj:`str`, optional): CircleCI API token
    """
    # default unset arguments to the configured repository/credentials
    repo_type = self.repo_type if repo_type is None else repo_type
    repo_owner = self.repo_owner if repo_owner is None else repo_owner
    repo_name = self.repo_name if repo_name is None else repo_name
    circleci_api_token = self.circleci_api_token if circleci_api_token is None else circleci_api_token

    self.run_circleci_api('/envvar/{}'.format(var),
                          method='delete', repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
                          circleci_api_token=circleci_api_token)
def create_codeclimate_github_webhook(self, repo_type=None, repo_owner=None, repo_name=None,
                                      github_username=None, github_password=None):
    """ Create GitHub webhook for Code Climate.

    Args:
        repo_type (:obj:`str`, optional): repository type
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        github_username (:obj:`str`, optional): GitHub username
        github_password (:obj:`str`, optional): GitHub password

    Raises:
        :obj:`ValueError`: if webhook wasn't created and didn't already exist
    """
    # default unset arguments to the configured repository/credentials
    if repo_type is None:
        repo_type = self.repo_type
    if repo_owner is None:
        repo_owner = self.repo_owner
    if repo_name is None:
        repo_name = self.repo_name
    if github_username is None:
        github_username = self.github_username
    if github_password is None:
        github_password = self.github_password

    # register the Code Climate webhook with the GitHub hooks API
    url = '{}/repos/{}/{}/hooks'.format(self.GITHUB_API_ENDPOINT, repo_owner, repo_name)
    response = requests.post(url, auth=(github_username, github_password), json={
        'name': 'web',
        'config': {
            'url': 'https://codeclimate.com/webhooks',
            'content_type': 'form',
        },
        'events': [
            'push',
            'pull_request'
        ],
        'active': True,
    })

    # GitHub returns 201 on successful creation
    if response.status_code != 201:
        # parse the response body once rather than re-parsing it for each access
        body = response.json()
        if 'errors' in body:
            msg = body['errors'][0]['message']
        else:
            msg = body['message']
        raise ValueError('Unable to create webhook for {}/{}: {}'.format(repo_owner, repo_name, msg))
#########################
# Installing dependencies
#########################
def install_requirements(self):
    """ Install the package's required and optional dependencies.

    Upgrades pip/setuptools, installs the requirements files for the package,
    its tests, and its docs, and upgrades the CircleCI CLI if present.
    """
    # upgrade setuptools and pip first so subsequent installs use current tooling
    for tool in ('setuptools', 'pip'):
        self.run_method_and_capture_stderr(pip.main, ['install', '-U', tool])

    # install the package's requirements, including optional ones
    self._install_requirements_helper('requirements.txt')
    self._install_requirements_helper('requirements.optional.txt', ignore_options=True)

    # install the requirements for the tests and the documentation
    for reqs_dir in (self.proj_tests_dir, self.proj_docs_dir):
        self._install_requirements_helper(os.path.join(reqs_dir, 'requirements.txt'))

    # upgrade the CircleCI CLI if it is installed
    if whichcraft.which('circleci'):
        subprocess.check_call(['circleci', 'update'])
def _install_requirements_helper(self, filename, ignore_options=False):
    """ Install the packages in a requirements.txt file, including all optional dependencies.

    Args:
        filename (:obj:`str`): path to requirements file
        ignore_options (:obj:`bool`, optional): if :obj:`True`, ignore option headings
            (e.g. for requirements.optional.txt)
    """
    # silently skip missing requirements files (not every repo has all of them)
    if not os.path.isfile(filename):
        return

    sanitized_filename = None
    try:
        if ignore_options:
            # create a temporary copy with option headings (lines beginning with '[') removed,
            # since pip cannot parse them
            fid, sanitized_filename = tempfile.mkstemp(suffix='.txt')
            os.close(fid)
            with open(filename, 'r') as file, open(sanitized_filename, 'w') as sanitized_file:
                for line in file:
                    line = line.strip()
                    if line.startswith('['):
                        continue
                    sanitized_file.write(line + '\n')
            filename = sanitized_filename

        self.run_method_and_capture_stderr(pip.main, ['install', '-U', '--process-dependency-links', '-r', filename])
    finally:
        # remove the temporary file even if the install fails
        # (the original implementation leaked it on error)
        if sanitized_filename is not None:
            os.remove(sanitized_filename)
def upgrade_requirements(self):
    """ Upgrade requirements from the Karr Lab's GitHub organization.

    Returns:
        :obj:`list` of :obj:`str`: upgraded requirements from the Karr Lab's GitHub organization
    """
    # list the installed, non-editable packages from `pip freeze`
    lines = self.run_method_and_capture_stdout(pip.main, ['freeze'])
    pkgs = []
    for line in lines.split('\n'):
        if not line.startswith('-e') and '==' in line:
            pkgs.append(line.partition('==')[0])

    # find the installed packages whose home page is in the Karr Lab GitHub organization
    # and build pip git requirement specifiers for them
    infos = self.run_method_and_capture_stdout(pip.main, ['show'] + pkgs)
    reqs = []
    for info in infos.split('---\n'):
        if 'github.com/KarrLab/' in info:
            name = info.partition('Name: ')[2].partition('\n')[0].replace('-', '_')
            url = info.partition('Home-page: ')[2].partition('\n')[0]
            reqs.append('git+{}.git#egg={}[all]'.format(url, name))

    # upgrade the GitHub-hosted requirements; skip the install when nothing was found
    # because `pip install` errors when invoked without any requirements
    if reqs:
        self.run_method_and_capture_stderr(pip.main, ['install', '-U', '--process-dependency-links'] + reqs)

    # upgrade the CircleCI CLI if it is installed
    if whichcraft.which('circleci'):
        subprocess.check_call(['circleci', 'update'])

    return reqs
########################
# Running tests
########################
def run_tests(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
              coverage_type=CoverageType.statement, environment=Environment.local, exit_on_failure=True,
              ssh_key_filename='~/.ssh/id_rsa'):
    """ Run unit tests located at `test_path`.

    Optionally, generate a coverage report.
    Optionally, save the results to a file

    To configure coverage, place a .coveragerc configuration file in the root directory
    of the repository - the same directory that holds .coverage. Documentation of coverage
    configuration is in https://coverage.readthedocs.io/en/coverage-4.2/config.html

    Args:
        dirname (:obj:`str`, optional): path to package that should be tested
        test_path (:obj:`str`, optional): path to tests that should be run
        verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
        with_xunit (:obj:`bool`, optional): whether or not to save test results
        with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
        coverage_dirname (:obj:`str`, optional): directory to save coverage data
        coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
        environment (:obj:`str`, optional): environment to run tests (local, docker, or circleci-local-executor)
        exit_on_failure (:obj:`bool`, optional): whether or not to exit on test failure
        ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key; needed for Docker environment

    Raises:
        :obj:`BuildHelperError`: If the environment is not supported or the package directory not set
    """
    # dispatch to the environment-specific runner
    if environment == Environment.local:
        self._run_tests_local(dirname=dirname, test_path=test_path, verbose=verbose, with_xunit=with_xunit,
                              with_coverage=with_coverage, coverage_dirname=coverage_dirname,
                              coverage_type=coverage_type, exit_on_failure=exit_on_failure)
        return

    if environment == Environment.docker:
        self._run_tests_docker(dirname=dirname, test_path=test_path, verbose=verbose, with_xunit=with_xunit,
                               with_coverage=with_coverage, coverage_dirname=coverage_dirname,
                               coverage_type=coverage_type, ssh_key_filename=ssh_key_filename)
        return

    if environment == Environment.circleci:
        self._run_tests_circleci(dirname=dirname, test_path=test_path, verbose=verbose, ssh_key_filename=ssh_key_filename)
        return

    raise BuildHelperError('Unsupported environment: {}'.format(environment))
def _run_tests_local(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
                     coverage_type=CoverageType.statement, exit_on_failure=True):
    """ Run unit tests located at `test_path` locally

    Optionally, generate a coverage report.
    Optionally, save the results to a file

    To configure coverage, place a .coveragerc configuration file in the root directory
    of the repository - the same directory that holds .coverage. Documentation of coverage
    configuration is in https://coverage.readthedocs.io/en/coverage-4.2/config.html

    Args:
        dirname (:obj:`str`, optional): path to package that should be tested
        test_path (:obj:`str`, optional): path to tests that should be run
        verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
        with_xunit (:obj:`bool`, optional): whether or not to save test results
        with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
        coverage_dirname (:obj:`str`, optional): directory to save coverage data
        coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
        exit_on_failure (:obj:`bool`, optional): whether or not to exit on test failure

    Raises:
        :obj:`BuildHelperError`: If the package directory not set
    """
    # one xunit results file per Python version, e.g. `<latest>.<py_version>.xml`
    py_v = self.get_python_version()
    abs_xml_latest_filename = os.path.join(
        self.proj_tests_xml_dir, '{0}.{1}.xml'.format(self.proj_tests_xml_latest_filename, py_v))

    # coverage collection must start BEFORE the test runner imports the package,
    # so `cov.start()` precedes the pytest/nose invocation below
    if with_coverage:
        if coverage_type == CoverageType.statement:
            cov = coverage.coverage(data_file=os.path.join(coverage_dirname, '.coverage'),
                                    data_suffix=py_v, config_file=True)
            cov.start()
        elif coverage_type == CoverageType.branch:
            cov = coverage.coverage(data_file=os.path.join(coverage_dirname, '.coverage'),
                                    data_suffix=py_v, config_file=True, branch=True)
            cov.start()

        # elif coverage_type == CoverageType.multiple_condition:
        #    # :todo: support instrumental once its dependency astkit is updated for Python 3
        #    parser = configparser.ConfigParser()
        #    parser.read(os.path.join(dirname, 'setup.cfg'))
        #    targets = parser.get('coverage:run', 'source').strip().split('\n')
        #    targets = [target.strip() for target in targets]
        #
        #    opts = attrdict.AttrDict({
        #        'file': os.path.join(coverage_dirname, '.coverage.' + py_v),
        #        'report': False,
        #        'label': False,
        #        'summary': False,
        #        'statements': False,
        #        'xml': False,
        #        'html': False,
        #        'all': False,
        #        'targets': targets,
        #        'ignores': [],
        #        'report_conditions_with_literals': False,
        #        'instrument_assertions': True,
        #        'use_metadata_cache': False,
        #        'instrument_comparisons': True,
        #    })
        #    cov = instrumental.api.Coverage(opts, os.getcwd())
        #    cov.start(opts.targets, opts.ignores)
        else:
            raise BuildHelperError('Unsupported coverage type: {}'.format(coverage_type))

    # make sure the xunit output directory exists before the runner writes to it
    if with_xunit and not os.path.isdir(self.proj_tests_xml_dir):
        os.makedirs(self.proj_tests_xml_dir)

    if self.test_runner == 'pytest':
        # convert nose-style test addresses (`file:Class.method`) to pytest-style
        # (`file::Class::method`)
        test_path = test_path.replace(':', '::')
        test_path = re.sub('::(.+?)(\.)', r'::\1::', test_path)

        argv = [test_path]

        if verbose:
            argv.append('--capture=no')

        if with_xunit:
            argv.append('--junitxml=' + abs_xml_latest_filename)

        # pytest returns a non-zero exit status on failure
        result = pytest.main(argv)
    elif self.test_runner == 'nose':
        # convert pytest-style test addresses back to nose-style
        test_path = test_path.replace('::', ':', 1)
        test_path = test_path.replace('::', '.', 1)

        argv = ['nosetests', test_path]

        if verbose:
            argv.append('--nocapture')

        if with_xunit:
            argv += ['--with-xunit', '--xunit-file', abs_xml_latest_filename]

        # nose.run returns True on success; normalize to 0/1 like pytest
        result = int(not nose.run(argv=argv))
    else:
        raise BuildHelperError('Unsupported test runner {}'.format(self.test_runner))

    if with_coverage:
        # persist the coverage data collected while the tests ran
        cov.stop()  # pragma: no cover # this line can't be covered
        cov.save()

    if exit_on_failure and result != 0:
        sys.exit(1)
def _run_tests_docker(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
                      coverage_type=CoverageType.statement, ssh_key_filename='~/.ssh/id_rsa'):
    """ Run unit tests located at `test_path` using a Docker image:

    #. Create a container based on the build image (e.g, karrlab/build:latest)
    #. Copy your GitHub SSH key to the container
    #. Remove Python cache directories (``__pycache__``) from the package
    #. Copy the package to the container at ``/root/projects``
    #. Install the Karr Lab build utilities into the container
    #. Install the requirements for the package in the container
    #. Run the tests inside the container using the same version of Python that called this method
    #. Delete the container

    Args:
        dirname (:obj:`str`, optional): path to package that should be tested
        test_path (:obj:`str`, optional): path to tests that should be run
        verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
        with_xunit (:obj:`bool`, optional): whether or not to save test results
        with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
        coverage_dirname (:obj:`str`, optional): directory to save coverage data
        coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
        ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key
    """
    ssh_key_filename = os.path.expanduser(ssh_key_filename)

    # pick a container name made unique by a timestamp, e.g. `build-pkg-2018-1-2-3-4-5`
    basename = os.path.basename(os.path.abspath(dirname))
    now = datetime.now()
    container = 'build-{0}-{1.year}-{1.month}-{1.day}-{1.hour}-{1.minute}-{1.second}'.format(basename, now)

    # get the host's Python major.minor version so the container uses the same one
    py_v = '{}.{}'.format(sys.version_info[0], sys.version_info[1])

    # create container (detached, interactive, running bash so it stays alive)
    print('\n\n')
    print('=====================================')
    print('== Creating container')
    print('=====================================')
    self._run_docker_command(['run', '-it', '-d', '--name', container, self.build_image, 'bash'])

    # copy GitHub SSH key to container so it can clone private KarrLab repos
    print('\n\n')
    print('=====================================')
    print('== Copying SSH key to container')
    print('=====================================')
    self._run_docker_command(['cp', ssh_key_filename, container + ':/root/.ssh/'])

    # delete __pycache__ directories so host bytecode doesn't shadow the container's
    print('\n\n')
    print('=====================================')
    print('== Deleting __pycache__ directories')
    print('=====================================')
    for root, rel_dirnames, rel_filenames in os.walk(dirname):
        for rel_dirname in fnmatch.filter(rel_dirnames, '__pycache__'):
            shutil.rmtree(os.path.join(root, rel_dirname))

    # copy package to container
    print('\n\n')
    print('=====================================')
    print('== Copying package to container')
    print('=====================================')
    self._run_docker_command(['cp', os.path.abspath(dirname), container + ':/root/project'])

    # install pkg_utils from GitHub into the container
    print('\n\n')
    print('=====================================')
    print('== Install pkg_utils')
    print('=====================================')
    build_utils_uri = 'git+https://github.com/KarrLab/pkg_utils.git#egg=pkg_utils'
    self._run_docker_command(['exec', container, 'bash', '-c',
                              'pip{} install -U --process-dependency-links {}'.format(py_v, build_utils_uri)])

    # install Karr Lab build utils from GitHub into the container
    print('\n\n')
    print('=====================================')
    print('== Install karr_lab_build_utils')
    print('=====================================')
    build_utils_uri = 'git+https://github.com/KarrLab/karr_lab_build_utils.git#egg=karr_lab_build_utils'
    self._run_docker_command(['exec', container, 'bash', '-c',
                              'pip{} install -U --process-dependency-links {}'.format(py_v, build_utils_uri)])

    # install the package itself in editable mode
    print('\n\n')
    print('=====================================')
    print('== Install package')
    print('=====================================')
    self._run_docker_command(['exec', container, 'bash', '-c',
                              'cd /root/project && pip{} install --process-dependency-links -e .'.format(py_v)])

    # upgrade the package's KarrLab dependencies inside the container
    print('\n\n')
    print('=====================================')
    print('== Install dependencies')
    print('=====================================')
    self._run_docker_command(['exec', container, 'bash', '-c',
                              'cd /root/project && karr_lab_build_utils{} upgrade-requirements'.format(py_v)])

    # run the tests inside the container, forwarding this method's options;
    # `raise_error=False` so result files are still copied out after a failure
    print('\n\n')
    print('=====================================')
    print('== Running tests')
    print('=====================================')
    options = []
    options += ['--test-path', test_path]
    if with_coverage:
        options += ['--with-coverage', '--coverage-type', coverage_type.name]
    if with_xunit:
        options.append('--with-xunit')
    if verbose:
        options.append('--verbose')
    self._run_docker_command(['exec', container, 'bash', '-c',
                              'cd /root/project && karr_lab_build_utils{} run-tests {}'.format(py_v, ' '.join(options))],
                             raise_error=False)

    # copy the coverage data file (`.coverage.<py_version>`) back to the host
    if with_coverage:
        out = self._run_docker_command(['exec', container, 'bash', '-c', 'ls -la ' +
                                        os.path.join('/root', 'project', '.coverage.{}.*'.format(py_v))])
        match = re.search('/root/project/(\.coverage\.\d+\.\d+\.\d+)', out)
        self._run_docker_command(['cp', container + ':' + match.group(0), os.path.join(coverage_dirname, match.group(1))])

    # copy the xunit results file back to the host
    if with_xunit:
        out = self._run_docker_command(['exec', container, 'bash', '-c', 'ls -la ' +
                                        os.path.join('/root', 'project', self.DEFAULT_PROJ_TESTS_XML_DIR,
                                                     '{}.{}.*.xml'.format(self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME, py_v))])
        match = re.search('/root/project/{}/({}\.\d+\.\d+\.\d+.xml)'.format(self.DEFAULT_PROJ_TESTS_XML_DIR,
                                                                            self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME), out)
        self._run_docker_command(['cp', container + ':' + match.group(0), os.path.join(self.proj_tests_xml_dir, match.group(1))])

    # stop and remove container
    print('\n\n')
    print('=====================================')
    print('== Removing container')
    print('=====================================')
    self._run_docker_command(['rm', '-f', container])
def _run_docker_command(self, cmd, cwd=None, raise_error=True):
    """ Run a docker command and capture its output.

    Args:
        cmd (:obj:`list`): docker command to run
        cwd (:obj:`str`, optional): directory from which to run :obj:`cmd`
        raise_error (:obj:`bool`, optional): if true, raise errors

    Returns:
        :obj:`str`: standard output

    Raises:
        :obj:`BuildHelperError`: if the docker command fails
    """
    with capturer.CaptureOutput() as captured:
        proc = subprocess.Popen(['docker'] + cmd, cwd=cwd)
        # poll in half-second steps (rather than blocking on wait) while
        # `capturer` streams the command's output
        while proc.poll() is None:
            time.sleep(0.5)
        out = captured.get_text()

    if raise_error and proc.returncode != 0:
        raise BuildHelperError(out)

    return out
def _run_tests_circleci(self, dirname='.', test_path='tests', verbose=False, ssh_key_filename='~/.ssh/id_rsa'):
    """ Run unit tests located at `test_path` using the CircleCI local executor. This will run the same commands defined in
    ``.circle/config.yml`` as the cloud version of CircleCI.

    Args:
        dirname (:obj:`str`, optional): path to package that should be tested
        test_path (:obj:`str`, optional): path to tests that should be run
        verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
        ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key

    Raises:
        :obj:`BuildHelperError`: if the tests fail
    """
    ssh_key_filename = os.path.expanduser(ssh_key_filename)
    # NOTE(review): assumes karr_lab_build_utils is checked out at this fixed path — confirm
    karr_lab_build_utils_dirname = os.path.expanduser('~/Documents/karr_lab_build_utils')

    # delete __pycache__ directories so stale host bytecode isn't copied into the build
    for root, rel_dirnames, rel_filenames in os.walk(dirname):
        for rel_dirname in fnmatch.filter(rel_dirnames, '__pycache__'):
            shutil.rmtree(os.path.join(root, rel_dirname))

    # rewrite the CircleCI config to use a build image variant that contains the SSH key;
    # the original config is backed up and restored below
    circleci_config_filename = os.path.join(dirname, '.circleci', 'config.yml')
    backup_circleci_config_filename = os.path.join(dirname, '.circleci', 'config.yml.save')

    with open(circleci_config_filename, 'r') as file:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
        # input — the config is assumed to be trusted here; confirm
        config = yaml.load(file)

    image_name = config['jobs']['build']['docker'][0]['image']
    if image_name.endswith('.with_ssh_key'):
        image_with_ssh_key_name = image_name
        # strip the '.with_ssh_key' suffix (13 characters) to recover the base image name
        image_name = image_name[:-13]
    else:
        image_with_ssh_key_name = image_name + '.with_ssh_key'

    shutil.copyfile(circleci_config_filename, backup_circleci_config_filename)

    config['jobs']['build']['docker'][0]['image'] = image_with_ssh_key_name
    with open(circleci_config_filename, 'w') as file:
        yaml.dump(config, file, default_flow_style=False)

    # build a docker image layered on the base build image that bakes in the
    # GitHub SSH key and the test server token
    circleci_context_dirname = os.path.join(karr_lab_build_utils_dirname, 'circleci_docker_context')
    if not os.path.isdir(circleci_context_dirname):
        os.makedirs(circleci_context_dirname)

    shutil.copy(ssh_key_filename, os.path.join(circleci_context_dirname, 'GITHUB_SSH_KEY'))

    dockerfile_filename = os.path.join(circleci_context_dirname, 'Dockerfile_Circleci')
    with open(dockerfile_filename, 'w') as file:
        file.write('FROM {}\n'.format(image_name))
        file.write('COPY circleci_docker_context/GITHUB_SSH_KEY /root/.ssh/id_rsa\n')
        file.write('ENV TEST_SERVER_TOKEN={}\n'.format(self.test_server_token or ''))
        file.write('RUN eval `ssh-agent` && ssh-add /root/.ssh/id_rsa\n')
        file.write('CMD bash\n')

    self._run_docker_command(['build',
                              '--tag', image_with_ssh_key_name,
                              '-f', os.path.join('circleci_docker_context', 'Dockerfile_Circleci'),
                              '.'],
                             cwd=karr_lab_build_utils_dirname)

    # run the CircleCI local executor against the package, capturing its output;
    # poll (rather than wait) so `capturer` keeps streaming
    with capturer.CaptureOutput() as captured:
        process = subprocess.Popen(['circleci',
                                    '--env', 'test_path={}'.format(test_path),
                                    '--env', 'verbose={:d}'.format(verbose),
                                    '--env', 'dry_run=1',
                                    'build'], cwd=dirname)
        while process.poll() is None:
            time.sleep(0.5)
        out = captured.get_text()

    # restore the original CircleCI config file from the backup
    os.remove(circleci_config_filename)
    shutil.move(backup_circleci_config_filename, circleci_config_filename)

    # delete the temporary docker image (best-effort; don't fail cleanup)
    self._run_docker_command(['rmi', image_with_ssh_key_name], raise_error=False)

    # remove the docker build context (including the copied SSH key)
    shutil.rmtree(circleci_context_dirname)

    # the local executor can exit 0 even when a task fails, so also scan the output
    if process.returncode != 0 or 'Task failed' in out:
        raise BuildHelperError(out.encode('utf-8'))
def get_test_results(self):
    """ Load test results from the set of XML report files for the latest build

    Parses each ``{proj_tests_xml_latest_filename}.{python version}.xml`` file in
    :obj:`proj_tests_xml_dir` and collects one :obj:`TestCaseResult` per test case.

    Returns:
        :obj:`TestResults`: test results
    """
    test_results = TestResults()

    filename_pattern = os.path.join(self.proj_tests_xml_dir,
                                    '{0}.*.xml'.format(self.proj_tests_xml_latest_filename))
    for filename in glob.glob(filename_pattern):
        # the Python version is encoded in the report file name;
        # use a raw string so `\.` is a regex escape, not a (deprecated) string escape
        match = re.match(r'^{}\.(.*?)\.xml$'.format(self.proj_tests_xml_latest_filename), os.path.basename(filename))
        python_version = match.group(1)

        doc = minidom.parse(filename)
        suite = doc.getElementsByTagName('testsuite')[0]
        for case in suite.getElementsByTagName('testcase'):
            case_result = TestCaseResult()
            case_result.classname = case.getAttribute('classname')
            case_result.name = case.getAttribute('name')
            case_result.python_version = python_version
            case_result.time = float(case.getAttribute('time'))

            if case.hasAttribute('file'):
                case_result.file = case.getAttribute('file')

            if case.hasAttribute('line'):
                # 'line' may be serialized as a float (e.g. "10.0"), so parse via float
                case_result.line = int(float(case.getAttribute('line')))

            stdout = case.getElementsByTagName('system-out')
            if stdout:
                case_result.stdout = ''.join([child.nodeValue for child in stdout[0].childNodes])

            stderr = case.getElementsByTagName('system-err')
            if stderr:
                case_result.stderr = ''.join([child.nodeValue for child in stderr[0].childNodes])

            skip = case.getElementsByTagName('skipped')
            error = case.getElementsByTagName('error')
            failure = case.getElementsByTagName('failure')

            if skip:
                case_result.type = TestCaseResultType.skipped
            elif error:
                case_result.type = TestCaseResultType.error
            elif failure:
                case_result.type = TestCaseResultType.failure
            else:
                case_result.type = TestCaseResultType.passed

            # skipped/error/failure elements carry extra diagnostic detail
            not_pass = skip or error or failure
            if not_pass:
                case_result.subtype = not_pass[0].getAttribute('type')
                case_result.message = not_pass[0].getAttribute('message')
                case_result.details = ''.join([child.nodeValue for child in not_pass[0].childNodes])

            test_results.cases.append(case_result)

    return test_results
def get_test_results_status(self, test_results, installation_error, tests_error, other_error, dry_run=False):
    """ Classify the status of a set of test results

    Possible statuses:

    * Fixed: the previous build failed and this one passed
    * Old error: the previous build failed and this one also failed
    * New error: the previous build passed and this one failed
    * Other error: something failed but there are no test results at all
    * New downstream error: a new error in a build triggered by an upstream package

    Args:
        test_results (:obj:`TestResults`): test results
        installation_error (:obj:`bool`): :obj:`True` if there were errors during the installation
        tests_error (:obj:`bool`): :obj:`True` if the tests did not pass
        other_error (:obj:`bool`): :obj:`True` if there were other errors during the build such as in generating and/or
            archiving the reports
        dry_run (:obj:`bool`, optional): if true, report that nothing changed

    Returns:
        :obj:`dict`: status of a set of results
    """
    if dry_run:
        # a dry run uploads and compares nothing, so there is nothing to report
        return {
            'is_fixed': False,
            'is_old_error': False,
            'is_new_error': False,
            'is_other_error': False,
            'is_new_downstream_error': False,
        }

    # an error with no test results at all is an "other" error
    if (installation_error or tests_error or other_error) and test_results.get_num_tests() == 0:
        is_other_error = True
        is_new_error = False
        is_old_error = False
        is_fixed = False
    else:
        is_other_error = False
        passed = test_results.get_num_errors() == 0 and test_results.get_num_failures() == 0

        # compare with the previous CircleCI build, if any, to decide whether a failure is new
        if self.build_num <= 1:
            if passed:
                is_old_error = False
                is_new_error = False
                is_fixed = True
            else:
                is_old_error = False
                is_new_error = True
                is_fixed = False
        else:
            prev_result = self.run_circleci_api('/' + str(self.build_num - 1))
            if passed:
                is_old_error = False
                is_new_error = False
                is_fixed = prev_result['status'] not in ['success', 'fixed']
            else:
                is_old_error = prev_result['status'] not in ['success', 'fixed']
                is_new_error = prev_result['status'] in ['success', 'fixed']
                is_fixed = False

    # a new error in a build that an upstream package triggered is a downstream error
    # (the unused UPSTREAM_BUILD_NUM lookup from the original was removed)
    upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
    is_new_downstream_error = bool(upstream_repo_name and is_new_error and self.build_num > 1 and not is_other_error)

    return {
        'is_fixed': is_fixed,
        'is_old_error': is_old_error,
        'is_new_error': is_new_error,
        'is_other_error': is_other_error,
        'is_new_downstream_error': is_new_downstream_error,
    }
def do_post_test_tasks(self, installation_error, tests_error, dry_run=False):
    """ Run all of the post-test tasks for a CircleCI build

    * Make test and coverage reports
    * Compile documentation
    * Archive test and coverage reports to the Karr Lab test history server, Coveralls, and Code Climate
    * Trigger tests of downstream dependencies
    * Notify authors of new failures in downstream packages

    Args:
        installation_error (:obj:`bool`): :obj:`True` if there were other errors during the installation
        tests_error (:obj:`bool`): obj:`False` if the tests passes
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers

    Returns:
        :obj:`list` of :obj:`str`: names of triggered packages
        :obj:`dict`: status of a set of results
    """
    other_error = False
    try:
        self.make_and_archive_reports(dry_run=dry_run)
    except Exception:
        # a report-building failure is folded into the notification status
        other_error = True

    triggered = self.trigger_tests_of_downstream_dependencies(dry_run=dry_run)
    status = self.send_email_notifications(installation_error, tests_error, other_error, dry_run=dry_run)
    return (triggered, status)
def send_email_notifications(self, installation_error, tests_error, other_error, dry_run=False):
    """ Send email notifications of failures, fixes, and downstream failures

    Args:
        installation_error (:obj:`bool`): :obj:`True` if there were errors during the installation
        tests_error (:obj:`bool`): :obj:`True` if the tests did not pass
        other_error (:obj:`bool`): :obj:`True` if there were other errors during the build such as in generating and/or
            archiving the reports
        dry_run (:obj:`bool`, optional): if true, determine the status but don't send any notifications

    Returns:
        :obj:`dict`: status of a set of results
    """
    test_results = self.get_test_results()
    status = self.get_test_results_status(test_results, installation_error, tests_error, other_error, dry_run=dry_run)

    # stop if this is a dry run
    if dry_run:
        return status

    # build the template context for the email from CircleCI's record of this build
    result = self.run_circleci_api('/' + str(self.build_num))
    context = {
        'repo_name': self.repo_name,
        'commit': result['all_commit_details'][0]['commit'],
        'committer_name': result['all_commit_details'][0]['committer_name'],
        'committer_email': result['all_commit_details'][0]['committer_email'],
        'commit_subject': result['all_commit_details'][0]['subject'],
        'commit_url': result['all_commit_details'][0]['commit_url'],
        'build_num': self.build_num,
        'build_url': result['build_url'],
        'test_results': test_results,
    }

    if status['is_new_downstream_error']:
        # also describe the upstream build which triggered this build
        upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
        upstream_build_num = int(os.getenv('UPSTREAM_BUILD_NUM', '0'))
        result = self.run_circleci_api('/' + str(upstream_build_num), repo_name=upstream_repo_name)
        context['upstream'] = {
            'repo_name': upstream_repo_name,
            'commit': result['all_commit_details'][0]['commit'],
            'committer_name': result['all_commit_details'][0]['committer_name'],
            'committer_email': result['all_commit_details'][0]['committer_email'],
            'commit_subject': result['all_commit_details'][0]['subject'],
            'commit_url': result['all_commit_details'][0]['commit_url'],
            'build_num': upstream_build_num,
            'build_url': result['build_url'],
        }

    recipients = [{'name': 'Whole-Cell Modeling Developers', 'email': 'wholecell-developers@googlegroups.com'}]

    # send at most one of the fixed/still-broken/newly-broken/other notifications
    if status['is_fixed']:
        subject = '[Builds] [{0}] {0} is fixed!'.format(context['repo_name'])
        self._send_notification_email(recipients, subject, 'fixed.html', context)
    elif status['is_old_error']:
        subject = '[Builds] [{0}] {0} is still broken!'.format(context['repo_name'])
        self._send_notification_email(recipients, subject, 'old_error.html', context)
    elif status['is_new_error']:
        subject = '[Builds] [{0}] {0} has been broken!'.format(context['repo_name'])
        self._send_notification_email(recipients, subject, 'new_error.html', context)
    elif status['is_other_error']:
        subject = '[Builds] [{0}] {0} is broken!'.format(context['repo_name'])
        self._send_notification_email(recipients, subject, 'other_error.html', context)

    if status['is_new_downstream_error']:
        # notify the upstream committer in addition to the developers list
        recipients.append({'name': context['upstream']['committer_name'], 'email': context['upstream']['committer_email']})
        subject = '[Builds] [{1}] commit {0} to {1} may have broken {2}'.format(
            context['upstream']['commit'], context['upstream']['repo_name'], context['repo_name'])
        self._send_notification_email(recipients, subject, 'new_downstream_error.html', context)

    return status
def _send_notification_email(self, recipients, subject, template_filename, context, dry_run=False):
    """ Send an email notification of test results

    Args:
        recipients (:obj:`list` of :obj:`dict`): recipient names and email addresses
        subject (:obj:`str`): subject
        template_filename (:obj:`str`): path to template
        context (:obj:`dict`): context for template
        dry_run (:obj:`bool`, optional): if true, build the message but don't send it
    """
    # render the message body from the packaged Jinja template
    full_template_filename = pkg_resources.resource_filename(
        'karr_lab_build_utils', os.path.join('templates', 'email_notifications', template_filename))
    with open(full_template_filename, 'r') as file:
        template = Template(file.read())
        body = template.render(**context)

    msg = email.message.Message()
    msg['From'] = email.utils.formataddr((str(email.header.Header('Karr Lab Build System', 'utf-8')), 'noreply@karrlab.org'))
    tos = []
    for recipient in recipients:
        tos.append(email.utils.formataddr((str(email.header.Header(recipient['name'], 'utf-8')), recipient['email'])))
    msg['To'] = ', '.join(tos)
    msg['Subject'] = subject
    msg.add_header('Content-Type', 'text/html')
    msg.set_payload(body)

    if not dry_run:
        # use a context manager so the SMTP connection is closed even if sending fails
        # (the original leaked the connection when login/sendmail raised)
        with smtplib.SMTP('smtp.gmail.com:587') as smtp:
            smtp.ehlo()
            smtp.starttls()
            smtp.login('karr.lab.daemon', os.getenv('KARR_LAB_DAEMON_GMAIL_PASSWORD'))
            smtp.sendmail('noreply@karrlab.org', [recipient['email'] for recipient in recipients], msg.as_string())
def make_and_archive_reports(self, coverage_dirname='.', dry_run=False):
    """ Make and archive the test, coverage, and documentation reports

    * Uploads the test report to the test history server
    * Merges the coverage files, then uploads the merged report to Coveralls and Code Climate
    * Compiles the HTML documentation

    Args:
        coverage_dirname (:obj:`str`, optional): directory to merge coverage files
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
    """
    # test reports: upload to the test history server
    self.archive_test_report()

    # coverage: merge the per-environment files, then upload the combined report
    self.combine_coverage_reports(coverage_dirname=coverage_dirname)
    self.archive_coverage_report(coverage_dirname=coverage_dirname, dry_run=dry_run)

    # documentation
    self.make_documentation()
########################
# Test reports
########################
def archive_test_report(self):
    """ Upload the latest test reports to the test history server

    Does nothing unless the test server token and the repository name, owner,
    branch, and revision are all configured.

    Raises:
        :obj:`BuildHelperError`: if there is an error uploading the report to the test history server
    """
    if not self.test_server_token or \
            self.repo_name is None or \
            self.repo_owner is None or \
            self.repo_branch is None or \
            self.repo_revision is None:
        return

    abs_xml_latest_filename_pattern = os.path.join(
        self.proj_tests_xml_dir, '{0}.*.xml'.format(self.proj_tests_xml_latest_filename))
    for abs_xml_latest_filename in glob.glob(abs_xml_latest_filename_pattern):
        # the report name is the Python version embedded in the file name;
        # use a raw string so `\.` and `\d` are regex escapes, not string escapes
        match = re.match(r'^.*?\.(\d+\.\d+\.\d+)\.xml$', abs_xml_latest_filename)
        pyv = match.group(1)

        # `with` ensures the report file is closed even if the request fails
        # (the original never closed the handle passed to `files`)
        with open(abs_xml_latest_filename, 'rb') as report_file:
            r = requests.post('http://tests.karrlab.org/rest/submit_report',
                              data={
                                  'token': self.test_server_token,
                                  'repo_name': self.repo_name,
                                  'repo_owner': self.repo_owner,
                                  'repo_branch': self.repo_branch,
                                  'repo_revision': self.repo_revision,
                                  'build_num': self.build_num,
                                  'report_name': pyv,
                              },
                              files={
                                  'report': report_file,
                              })
        r.raise_for_status()
        r_json = r.json()
        if 'success' not in r_json or not r_json['success']:
            raise BuildHelperError('Error uploading report to test history server: {}'.format(r_json['message']))
########################
# Coverage reports
########################
def combine_coverage_reports(self, coverage_dirname='.'):
    """ Merge the discrete ``.coverage.*`` files in :obj:`coverage_dirname` into a single ``.coverage`` file

    Args:
        coverage_dirname (:obj:`str`, optional): directory to merge coverage files
    """
    # copy each .coverage.* file to a temporary path because `combine` consumes its inputs;
    # mkstemp avoids the race/security problem of the deprecated tempfile.mktemp
    data_paths = []
    for name in glob.glob(os.path.join(coverage_dirname, '.coverage.*')):
        handle, data_path = tempfile.mkstemp()
        os.close(handle)
        shutil.copyfile(name, data_path)
        data_paths.append(data_path)

    # stop if there are no files to combine
    if not data_paths:
        warnings.warn('No coverage files exist to combine', UserWarning)
        return

    coverage_doc = coverage.coverage(data_file=os.path.join(coverage_dirname, '.coverage'))
    coverage_doc.combine(data_paths=data_paths)
    coverage_doc.save()
def archive_coverage_report(self, coverage_dirname='.', dry_run=False):
    """ Archive the merged coverage report

    Uploads the report to each enabled coverage service (Coveralls and/or Code Climate).

    Args:
        coverage_dirname (:obj:`str`, optional): directory containing the coverage data
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
    """
    if self.COVERALLS_ENABLED:
        self.upload_coverage_report_to_coveralls(coverage_dirname=coverage_dirname, dry_run=dry_run)
    if self.CODE_CLIMATE_ENABLED:
        self.upload_coverage_report_to_code_climate(coverage_dirname=coverage_dirname, dry_run=dry_run)
def upload_coverage_report_to_coveralls(self, coverage_dirname='.', dry_run=False):
    """ Upload coverage report to Coveralls

    Does nothing when there is no merged ``.coverage`` file or no Coveralls token.

    Args:
        coverage_dirname (:obj:`str`, optional): directory containing the coverage data
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls server
    """
    # don't upload if there is no coverage file
    if not os.path.isfile(os.path.join(coverage_dirname, '.coverage')):
        warnings.warn('No coverage file exists to upload to Coveralls', UserWarning)
        return

    if self.coveralls_token:
        runner = coveralls.Coveralls(True, repo_token=self.coveralls_token,
                                     service_name='circle-ci', service_job_id=self.build_num)

        def get_coverage():
            # build the Coveralls report from the merged `.coverage` file in
            # `coverage_dirname` instead of the current working directory
            workman = coverage.coverage(data_file=os.path.join(coverage_dirname, '.coverage'))
            workman.load()
            workman.get_data()
            return coveralls.reporter.CoverallReporter(workman, workman.config).report()

        # NOTE(review): `get_coverage()` is evaluated eagerly here and its return value
        # patched in, so `runner.wear` reads our report rather than re-collecting one
        with patch.object(coveralls.Coveralls, 'get_coverage', return_value=get_coverage()):
            runner.wear(dry_run=dry_run)
def upload_coverage_report_to_code_climate(self, coverage_dirname='.', dry_run=False):
    """ Upload coverage report to Code Climate

    Args:
        coverage_dirname (:obj:`str`, optional): directory containing the coverage data
        dry_run (:obj:`bool`, optional): if true, don't upload to the Code Climate server

    Raises:
        :obj:`BuildHelperError`: if there is an error uploading code coverage to Code Climate
    """
    coverage_filename = os.path.join(coverage_dirname, '.coverage')

    # nothing to upload without a merged coverage file
    if not os.path.isfile(coverage_filename):
        warnings.warn('No coverage file exists to upload to Code Climate', UserWarning)
        return

    if self.code_climate_token:
        runner = CodeClimateRunner([
            '--token', self.code_climate_token,
            '--file', coverage_filename,
        ])
        if not dry_run:
            self.run_method_and_capture_stderr(runner.run)
########################
# Documentation
########################
def create_documentation_template(self, dirname='.'):
    """ Create a Sphinx documentation template for a package

    Reads the package name from the `sphinx-apidocs` section of `setup.cfg` and
    renders the standard documentation files into the project docs directory.

    Args:
        dirname (:obj:`str`, optional): path to package

    Raises:
        :obj:`ValueError`: if no package or more than one package is specified
    """
    parser = configparser.ConfigParser()
    parser.read(os.path.join(dirname, 'setup.cfg'))
    packages = parser.get('sphinx-apidocs', 'packages').strip().split('\n')
    if len(packages) != 1:
        raise ValueError('Sphinx configuration auto-generation only supports 1 package')

    # ensure the docs directory exists
    if not os.path.isdir(os.path.join(dirname, self.proj_docs_dir)):
        os.mkdir(os.path.join(dirname, self.proj_docs_dir))

    template_names = (
        'conf.py',
        'requirements.txt',
        'conda.environment.yml',
        'spelling_wordlist.txt',
        'index.rst',
        'overview.rst',
        'installation.rst',
        'about.rst',
        'references.rst',
        'references.bib',
    )
    for package in packages:
        render_context = {
            'package': package,
            'version': self.INITIAL_PACKAGE_VERSION,
            'year': datetime.now().year,
            'package_underline': '=' * len(package),
        }
        for template_name in template_names:
            # render each packaged template into the docs directory
            template_filename = pkg_resources.resource_filename(
                'karr_lab_build_utils', os.path.join('templates', 'docs', template_name))
            with open(template_filename, 'r') as file:
                template = Template(file.read())
            template.stream(**render_context).dump(os.path.join(dirname, self.proj_docs_dir, template_name))
def make_documentation(self, spell_check=False):
    """ Build HTML documentation with Sphinx for one or more packages

    Saves the generated documentation to `proj_docs_build_html_dir`.

    Args:
        spell_check (:obj:`bool`): if :obj:`True`, run spell checking

    Raises:
        :obj:`BuildHelperError`: If project name not set
    """
    # make sure the static assets directory exists before Sphinx runs
    if not os.path.isdir(self.proj_docs_static_dir):
        os.mkdir(self.proj_docs_static_dir)

    # build the HTML documentation
    self.run_method_and_capture_stderr(sphinx_build, [self.proj_docs_dir, self.proj_docs_build_html_dir])

    # optionally run the Sphinx spelling builder
    if spell_check:
        spelling_args = [
            '-b', 'spelling',
            '-d', self.proj_docs_build_doctrees_dir,
            self.proj_docs_dir,
            self.proj_docs_build_spelling_dir,
        ]
        self.run_method_and_capture_stderr(sphinx_build, spelling_args)
def compile_downstream_dependencies(self, dirname='.', packages_parent_dir='..', downstream_dependencies_filename=None):
    """ Compile the downstream dependencies of a package and, optionally, save them to a YAML file

    Args:
        dirname (:obj:`str`, optional): path to package
        packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages
        downstream_dependencies_filename (:obj:`str`, optional): path to save list of downstream dependencies in YAML format

    Returns:
        :obj:`list` of :obj:`str`: downstream dependencies

    Raises:
        :obj:`BuildHelperError`: if the package has more than one module
    """
    import pkg_utils
    # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
    # :todo: figure out how to fix this

    packages_parent_dir = os.path.abspath(packages_parent_dir)

    # get the name of the current package from the coverage configuration
    parser = configparser.ConfigParser()
    parser.read(os.path.join(dirname, 'setup.cfg'))
    tmp = parser.get('coverage:run', 'source').strip().split('\n')
    if len(tmp) != 1:
        raise BuildHelperError('Package should have only one module')
    this_pkg_name = tmp[0]

    # collect the downstream dependencies by analyzing the requirements files of other packages
    # :todo: support branches
    downstream_dependencies = []
    # use a distinct loop variable so the `dirname` argument is not clobbered
    for other_dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
        if os.path.isdir(other_dirname) and os.path.isfile(os.path.join(other_dirname, '.circleci/config.yml')):
            other_pkg_name = other_dirname[len(packages_parent_dir) + 1:]
            install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
                other_dirname, include_extras=False, include_specs=False, include_markers=False)
            # `.get` guards against packages that declare no 'all' extras group
            if this_pkg_name in install_requires or this_pkg_name in extras_require.get('all', []):
                downstream_dependencies.append(other_pkg_name)

    # save the downstream dependencies to a file
    if downstream_dependencies_filename:
        with open(downstream_dependencies_filename, 'w') as file:
            yaml.dump(downstream_dependencies, file, default_flow_style=False)

    return downstream_dependencies
def are_package_dependencies_acyclic(self, packages_parent_dir='..'):
    """ Check if the package dependency graph is acyclic (and so supported by CircleCI)

    Args:
        packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages

    Returns:
        :obj:`bool`: :obj:`True` if the package dependencies are acyclic
    """
    graph = networkx.DiGraph()

    for dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
        if os.path.isdir(dirname) and os.path.isfile(os.path.join(dirname, '.circleci/config.yml')):
            # get package name
            pkg = dirname[len(packages_parent_dir) + 1:]

            # create node for package
            graph.add_node(pkg)

            # create edges for dependencies
            dep_filename = os.path.join(dirname, '.circleci/downstream_dependencies.yml')
            if os.path.isfile(dep_filename):
                with open(dep_filename, 'r') as file:
                    # safe_load avoids constructing arbitrary objects and is required
                    # by PyYAML >= 6, which removed loader-less `yaml.load`
                    deps = yaml.safe_load(file)
                # an empty YAML file loads as None
                for other_pkg in deps or []:
                    graph.add_edge(pkg, other_pkg)

    try:
        networkx.algorithms.cycles.find_cycle(graph)
        return False
    except networkx.NetworkXNoCycle:
        return True
def visualize_package_dependencies(self, packages_parent_dir='..', out_filename='../package_dependencies.pdf'):
    """ Visualize downstream package dependencies as a graph

    Args:
        packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages
        out_filename (:obj:`str`, optional): path to save the visualization; the extension selects the format
    """
    # infer the graphviz output format from the extension (renamed from `format`,
    # which shadowed the builtin)
    basename, extension = os.path.splitext(out_filename)
    dot = graphviz.Digraph(format=extension[1:])

    for dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
        if os.path.isdir(dirname) and os.path.isfile(os.path.join(dirname, '.circleci/config.yml')):
            # get package name
            pkg = dirname[len(packages_parent_dir) + 1:]

            # create node for package
            dot.node(pkg, pkg)

            # create edges for dependencies
            dep_filename = os.path.join(dirname, '.circleci/downstream_dependencies.yml')
            if os.path.isfile(dep_filename):
                with open(dep_filename, 'r') as file:
                    # safe_load: safer than full load and required by PyYAML >= 6
                    deps = yaml.safe_load(file)
                # an empty YAML file loads as None
                for other_pkg in deps or []:
                    dot.edge(pkg, other_pkg)

    dot.render(filename=basename, cleanup=True)
def trigger_tests_of_downstream_dependencies(self, downstream_dependencies_filename='.circleci/downstream_dependencies.yml',
                                             dry_run=False):
    """ Trigger CircleCI to test the downstream dependencies listed in :obj:`downstream_dependencies_filename`

    Builds are only triggered when this build's tests passed, and each downstream
    package is triggered at most once per upstream commit to avoid duplicate builds
    and infinite trigger loops.

    Args:
        downstream_dependencies_filename (:obj:`str`, optional): path to YAML file which contains a list of downstream dependencies
        dry_run (:obj:`bool`, optional): if true, don't trigger any builds

    Returns:
        :obj:`list` of :obj:`str`: names of triggered packages

    :todo: support branches
    """
    # stop if this is a dry run
    if dry_run:
        return []

    # stop if the tests didn't pass
    test_results = self.get_test_results()
    if test_results.get_num_errors() > 0 or test_results.get_num_failures() > 0:
        return []

    # read downstream dependencies; safe_load avoids constructing arbitrary objects
    # and is required by PyYAML >= 6, which removed loader-less `yaml.load`
    with open(downstream_dependencies_filename, 'r') as file:
        packages = yaml.safe_load(file)

    # stop if there are no downstream dependencies
    if not packages:
        return []

    # identify the build at the root of this trigger cascade
    upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
    upstream_build_num = os.getenv('UPSTREAM_BUILD_NUM', '0')
    if not upstream_repo_name:
        upstream_repo_name = self.repo_name
        upstream_build_num = str(self.build_num)

    result = self.run_circleci_api('/' + str(upstream_build_num), repo_name=upstream_repo_name)
    upstream_build_time = dateutil.parser.parse(result['all_commit_details'][0]['committer_date'])

    triggered_packages = []
    for package in packages:
        branch = 'master'

        # get summary of recent builds
        builds = self.run_circleci_api('', repo_name=package)

        # don't trigger a build if one has already been triggered from the same upstream build;
        # this prevents building the same project multiple times, including infinite looping
        already_queued = False
        for build in builds:
            # don't trigger a build if this is the same package which triggered the cascade
            if package == upstream_repo_name and \
                    str(build['build_num']) == upstream_build_num and \
                    build['build_num'] != self.build_num:
                already_queued = True
                break

            # don't trigger a build if the package has already been triggered from the same upstream commit
            build_parameters = build['build_parameters']
            if build_parameters and 'UPSTREAM_REPONAME' in build_parameters and \
                    build_parameters['UPSTREAM_REPONAME'] == upstream_repo_name and \
                    build_parameters['UPSTREAM_BUILD_NUM'] == upstream_build_num:
                already_queued = True
                break

            # don't trigger a build if the package has been tested more recently than the upstream commit
            build_start_time = build['start_time']
            if build_start_time is None or dateutil.parser.parse(build_start_time) > upstream_build_time:
                already_queued = True
                break

        if already_queued:
            continue

        # trigger build
        self.run_circleci_api('/tree/{}'.format(branch), method='post', repo_name=package, data={
            'build_parameters': {
                'UPSTREAM_REPONAME': upstream_repo_name,
                'UPSTREAM_BUILD_NUM': upstream_build_num,
            }
        })
        triggered_packages.append(package)

    return triggered_packages
def get_version(self):
    """ Get the version of this package

    Returns:
        :obj:`str`: the package version, annotated with the running Python version
    """
    py = sys.version_info
    return '{} (Python {}.{}.{})'.format(karr_lab_build_utils.__version__, py[0], py[1], py[2])
@staticmethod
def get_python_version():
""" Get the Python version
Returns:
:obj:`str`: the Python version
"""
return '{0[0]:d}.{0[1]:d}.{0[2]:d}'.format(sys.version_info)
def run_method_and_capture_stdout(self, func, *args, **kwargs):
    """ Run a method that returns a numerical error code, capturing its output

    If the return code is non-zero, forward the captured stderr and exit the
    process with status 1; otherwise return the captured stdout.

    Args:
        func (:obj:`function`): function to run
        *args (:obj:`list`): arguments to :obj:`func`
        **kwargs (:obj:`dict`): keyword arguments to :obj:`func`

    Returns:
        :obj:`str`: stdout
    """
    with abduct.captured(abduct.out(), abduct.err()) as (captured_out, captured_err):
        return_code = func(*args, **kwargs)
        out_msg = captured_out.getvalue()
        err_msg = captured_err.getvalue()

    if return_code != 0:
        sys.stderr.write(err_msg)
        sys.stderr.flush()
        sys.exit(1)

    return out_msg
def run_method_and_capture_stderr(self, func, *args, **kwargs):
    """ Run a method that returns a numerical error code, capturing its stderr

    If the return code is non-zero, forward the captured stderr and exit the
    process with status 1.

    Args:
        func (:obj:`function`): function to run
        *args (:obj:`list`): arguments to :obj:`func`
        **kwargs (:obj:`dict`): keyword arguments to :obj:`func`
    """
    with abduct.captured(abduct.err()) as captured_err:
        return_code = func(*args, **kwargs)
        err_msg = captured_err.getvalue()

    if return_code != 0:
        sys.stderr.write(err_msg)
        sys.stderr.flush()
        sys.exit(1)
def analyze_package(self, package_name, messages=None):
    """ Perform static analyses of a package using Pylint.

    The default options will identify the following issues:

    * Unused imported modules, classes, functions, and variables
    * Reimported modules, classes, functions, and variables
    * Wild card imports outside of __init__.py
    * Duplicate arguments and keys
    * Missing requirements

    Args:
        package_name (:obj:`str`): name of the package to analyze
        messages (:obj:`list` of :obj:`str`, optional): list of Pylint checks to perform
    """
    if messages is None:
        # default to checks for unused/reimported names, wildcard imports,
        # and duplicate arguments/keys
        messages = [
            # variables
            'W0611',  # unused-import
            'W0614',  # unused-wildcard-import
            'W0613',  # unused-argument
            'W0612',  # unused-variable

            # imports
            'W0404',  # reimported
            'W0401',  # wildcard-import

            # similarities
            'E0108',  # duplicate-argument-name
            'W0109',  # duplicate-key
        ]

    # disable every check, then re-enable only the requested messages
    msg_opts = [
        '--disable=all',
        '--enable=' + ','.join(messages),
    ]

    # suppress Pylint's summary report and score
    report_opts = [
        '--reports=n',
        '--score=n',
    ]

    # TODO: debug, does not work:
    epylint.lint(package_name, msg_opts + report_opts)
def find_missing_requirements(self, package_name, dirname='.', ignore_files=None):
    """ Find requirements that the package imports but does not declare

    Args:
        package_name (:obj:`str`): name of the package to analyze
        dirname (:obj:`str`, optional): path to package
        ignore_files (:obj:`list`, optional): files to ignore

    Returns:
        :obj:`list`: list of missing dependencies and their occurences in the code
    """
    import pkg_utils
    # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
    # :todo: figure out how to fix this

    # configure pip_check_reqs
    options = attrdict.AttrDict()
    options.paths = [package_name]
    options.ignore_files = pip_check_reqs.common.ignorer(ignore_files or [])
    options.ignore_mods = pip_check_reqs.common.ignorer([])
    options.verbose = False
    options.debug = False
    options.version = False

    pip_check_reqs.find_missing_reqs.log.setLevel(logging.ERROR)
    missing = pip_check_reqs.find_missing_reqs.find_missing_reqs(options)

    # collect the declared dependencies, excluding the optional groups
    install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
        dirname, include_extras=False, include_specs=False, include_markers=False)
    all_deps = install_requires
    for option, opt_deps in extras_require.items():
        if option not in ['all', 'tests', 'docs']:
            all_deps += opt_deps

    # drop reported modules that are actually declared (normalizing '-' to '_')
    return [entry for entry in missing if entry[0].replace('-', '_') not in all_deps]
def find_unused_requirements(self, package_name, dirname='.', ignore_files=None):
    """ Find declared requirements that the package does not import

    Args:
        package_name (:obj:`str`): name of the package to analyze
        dirname (:obj:`str`, optional): path to package
        ignore_files (:obj:`list`, optional): files to ignore

    Returns:
        :obj:`list`: name of the unused dependencies
    """
    import pkg_utils
    # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
    # :todo: figure out how to fix this

    # configure pip_check_reqs
    options = attrdict.AttrDict()
    options.paths = [package_name]
    options.ignore_files = pip_check_reqs.common.ignorer(ignore_files or [])
    options.ignore_mods = pip_check_reqs.common.ignorer([])
    options.ignore_reqs = pip_check_reqs.common.ignorer([])
    options.verbose = False
    options.debug = False
    options.version = False
    pip_check_reqs.find_extra_reqs.log.setLevel(logging.ERROR)

    # get all requirements, excluding the optional 'all'/'tests'/'docs' groups
    install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
        dirname, include_extras=False, include_specs=False, include_markers=False)
    all_deps = set(install_requires)
    for option, opt_deps in extras_require.items():
        if option not in ['all', 'tests', 'docs']:
            all_deps = all_deps | set(opt_deps)
    all_deps = [dep.replace('_', '-') for dep in all_deps]

    # find unused requirements
    # NOTE(review): find_required_modules is patched so the declared requirements,
    # rather than the installed distributions, are treated as the requirement set
    with mock.patch('pip_check_reqs.common.find_required_modules', return_value=all_deps):
        unuseds = pip_check_reqs.find_extra_reqs.find_extra_reqs(options)

    # correct for editably-installed packages
    useds = pip_check_reqs.common.find_imported_modules(options).keys()
    useds = [used.partition('.')[0].replace('_', '-') for used in useds]
    unuseds = list(set(unuseds).difference(set(useds)))

    # return canonical names (normalize '-' back to '_')
    unuseds = [unused.replace('-', '_') for unused in unuseds]
    return unuseds
def upload_package_to_pypi(self, dirname='.', repository='pypi', pypi_config_filename='~/.pypirc'):
    """ Upload a package to PyPI

    Args:
        dirname (:obj:`str`, optional): path to package to upload
        repository (:obj:`str`, optional): repository to upload code to (section in .pypirc or a full URL)
        pypi_config_filename (:obj:`str`, optional): path to .pypirc
    """
    build_dirname = os.path.join(dirname, 'build')
    dist_dirname = os.path.join(dirname, 'dist')

    # remove artifacts of any previous builds
    for artifact_dirname in (build_dirname, dist_dirname):
        if os.path.isdir(artifact_dirname):
            shutil.rmtree(artifact_dirname)

    # build source and wheel distributions
    subprocess.check_call([sys.executable, os.path.join(os.path.abspath(dirname), 'setup.py'), 'sdist', 'bdist_wheel'],
                          cwd=dirname)

    # upload the distributions with twine
    options = []
    if repository:
        options += ['--repository', repository]
    if pypi_config_filename:
        options += ['--config-file', os.path.abspath(os.path.expanduser(pypi_config_filename))]
    uploads = glob.glob(os.path.join(dist_dirname, '*'))
    twine.commands.upload.main(options + uploads)

    # clean up the build artifacts
    shutil.rmtree(build_dirname)
    shutil.rmtree(dist_dirname)
def run_circleci_api(self, command, method='get', repo_type=None, repo_owner=None, repo_name=None,
                     data=None, circleci_api_token=None):
    """ Call the CircleCI REST API

    Args:
        command (:obj:`str`): API command
        method (:obj:`str`): type of HTTP request (get, post, delete)
        repo_type (:obj:`str`, optional): repository type (e.g., github); defaults to this build's repository type
        repo_owner (:obj:`str`, optional): repository owner; defaults to this build's repository owner
        repo_name (:obj:`str`, optional): repository name; defaults to this build's repository name
        data (:obj:`str`, optional): data
        circleci_api_token (:obj:`str`, optional): CircleCI API token; defaults to this build's token

    Returns:
        :obj:`dict`: CircleCI result

    Raises:
        :obj:`requests.exceptions.HTTPError`: if the HTTP request to CircleCI does not succeed
    """
    # fall back to the helper's configured repository and credentials
    repo_type = repo_type or self.repo_type
    repo_owner = repo_owner or self.repo_owner
    repo_name = repo_name or self.repo_name
    circleci_api_token = circleci_api_token or self.circleci_api_token

    url = '{}/project/{}/{}/{}{}?circle-token={}'.format(
        self.CIRCLE_API_ENDPOINT, repo_type, repo_owner, repo_name, command, circleci_api_token)

    # dispatch to requests.get/post/delete according to `method`
    response = getattr(requests, method)(url, json=data)
    response.raise_for_status()
    return response.json()
class TestResults(object):
    """ A collection of unit test case results

    Attributes:
        cases (:obj:`list` of :obj:`TestCaseResult`): test case results
    """

    def __init__(self):
        self.cases = []

    @property
    def num_tests(self):
        return self.get_num_tests()

    @property
    def num_passed(self):
        return self.get_num_passed()

    @property
    def num_skipped(self):
        return self.get_num_skipped()

    @property
    def num_errors(self):
        return self.get_num_errors()

    @property
    def num_failures(self):
        return self.get_num_failures()

    def _count_cases_of_type(self, result_type):
        """ Count the cases whose result type is :obj:`result_type`

        Args:
            result_type (:obj:`TestCaseResultType`): result type to count

        Returns:
            :obj:`int`: number of cases with that result type
        """
        return sum(1 for case in self.cases if case.type == result_type)

    def get_num_tests(self):
        """ Get the number of tests

        Returns:
            :obj:`int`: number of tests
        """
        return len(self.cases)

    def get_num_passed(self):
        """ Get the number of tests that passed

        Returns:
            :obj:`int`: number of tests that passed
        """
        return self._count_cases_of_type(TestCaseResultType.passed)

    def get_num_skipped(self):
        """ Get the number of skipped tests

        Returns:
            :obj:`int`: number of skipped tests
        """
        return self._count_cases_of_type(TestCaseResultType.skipped)

    def get_num_errors(self):
        """ Get the number of tests with errors

        Returns:
            :obj:`int`: number of tests with errors
        """
        return self._count_cases_of_type(TestCaseResultType.error)

    def get_num_failures(self):
        """ Get the number of tests with failures

        Returns:
            :obj:`int`: number of tests with failures
        """
        return self._count_cases_of_type(TestCaseResultType.failure)
class TestCaseResult(object):
    """ The result of a single test case

    Attributes:
        classname (obj:`str`): name of the class of the test case
        name (obj:`str`): name of the test case
        filename (obj:`str`): file where the test was defined
        line (obj:`int`): line where the test was defined
        python_version (obj:`str`): python version which ran the test
        type (obj:`TestCaseResultType`): type of the result (pass, skip, error, failure)
        subtype (obj:`str`): detailed type of the result
        message (obj:`str`): message from the result
        details (obj:`str`): detailed message from the result
        time (obj:`float`): duration of the time in seconds
        stdout (obj:`str`): standard output
        stderr (obj:`str`): standard error
    """

    def __init__(self):
        # every field starts out unset; callers populate them while parsing a report
        for attr in ('classname', 'name', 'filename', 'line', 'python_version',
                     'time', 'stdout', 'stderr', 'type', 'subtype', 'message',
                     'details'):
            setattr(self, attr, None)
class TestCaseResultType(enum.Enum):
    """ Type of test case result """
    passed = 0  # test ran and succeeded
    skipped = 1  # test was not run
    error = 2  # test raised an unexpected exception
    failure = 3  # test ran and an assertion failed
class BuildHelperError(Exception):
    """ Base exception raised for :obj:`BuildHelper` errors """
# debug circleci updating
""" Karr Lab build utilities
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2016-08-02
:Copyright: 2016, Karr Lab
:License: MIT
"""
from codeclimate_test_reporter.components.runner import Runner as CodeClimateRunner
from datetime import datetime
from jinja2 import Template
from pylint import epylint
from sphinx.cmd.build import main as sphinx_build
from sphinx.apidoc import main as sphinx_apidoc
from mock import patch
from six.moves import configparser
from xml.dom import minidom
import abduct
import attrdict
import capturer
import click
import coverage
import coveralls
import dateutil.parser
import email
import email.header
import email.message
import email.utils
import enum
import fnmatch
import ftputil
import github
import glob
import graphviz
# import instrumental.api
import json
import karr_lab_build_utils
import logging
import mock
import networkx
import nose
import os
import pip
import pip_check_reqs
import pip_check_reqs.find_extra_reqs
import pip_check_reqs.find_missing_reqs
# import pkg_utils
# pkg_utils is not imported globally so that we can use karr_lab_build_utils to properly calculate its coverage
# :todo: figure out how to fix this
import pkg_resources
import pytest
import re
import requests
import shutil
import smtplib
import subprocess
import sys
import tempfile
import time
import twine.commands.upload
import yaml
import warnings
import whichcraft
class CoverageType(enum.Enum):
    """ Types of coverage """
    statement = 0  # line/statement coverage
    branch = 1  # branch coverage
    multiple_condition = 2  # multiple-condition coverage
    decision = 2  # NOTE(review): alias of multiple_condition; decision coverage is conventionally synonymous with branch coverage — confirm the intended value
class Environment(enum.Enum):
    """ Environments to run tests """
    local = 0  # directly on the local machine
    docker = 1  # inside a Docker container
    circleci = 2  # via CircleCI
class BuildHelper(object):
""" Utility class to help build projects:
* Run tests
* Archive reports to test history server, Coveralls, and Code Climate
Attributes:
test_runner (:obj:`str`): name of test runner {pytest, nose}
repo_name (:obj:`str`): repository name
repo_owner (:obj:`str`): name of the repository owner
repo_branch (:obj:`str`): repository branch name
repo_revision (:obj:`str`): sha of repository revision
build_num (:obj:`int`): CircleCI build number
proj_tests_dir (:obj:`str`): local directory with test code
proj_tests_xml_dir (:obj:`str`): local directory to store latest XML test report
proj_tests_xml_latest_filename (:obj:`str`): file name to store latest XML test report
proj_docs_dir (:obj:`str`): local directory with Sphinx configuration
proj_docs_static_dir (:obj:`str`): local directory of static documentation files
proj_docs_source_dir (:obj:`str`): local directory of source documentation files created by sphinx-apidoc
proj_docs_build_doctrees_dir (:obj:`str`): local directory where doc trees should be saved
proj_docs_build_html_dir (:obj:`str`): local directory where generated HTML documentation should be saved
proj_docs_build_spelling_dir (:obj:`str`): local directory where spell check results should be saved
build_image (:obj:`str`): Docker image to use to run tests
coveralls_token (:obj:`str`): Coveralls token
code_climate_token (:obj:`str`): Code Climate token
github_username (obj:`str`): GitHub username
github_password (obj:`str`): GitHub password
circleci_api_token (:obj:`str`): CircleCI API token
test_server_token (:obj:`str`): test history report server token
karr_lab_daemon_gmail_password (:obj:`obj:`str`): password for karr.lab.daemon@gmail.com
INITIAL_PACKAGE_VERSION (:obj:`str`): initial package version
DEFAULT_BUILD_IMAGE_VERSION (:obj:`str`): default build image version
DEFAULT_TEST_RUNNER (:obj:`str`): default test runner {pytest, nose}
DEFAULT_PROJ_TESTS_DIR (:obj:`str`): default local directory with test code
DEFAULT_PROJ_TESTS_XML_DIR (:obj:`str`): default local directory where the test reports generated should be saved
DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME (:obj:`str`): default file name to store latest XML test report
DEFAULT_PROJ_DOCS_DIR (:obj:`str`): default local directory with Sphinx configuration
DEFAULT_PROJ_DOCS_STATIC_DIR (:obj:`str`): default local directory of static documentation files
DEFAULT_PROJ_DOCS_SOURCE_DIR (:obj:`str`): default local directory of source documentation files created by sphinx-apidoc
DEFAULT_PROJ_DOCS_SPELLING_DIR (:obj:`str`): default local directory where spell check results should be saved
DEFAULT_PROJ_DOCS_BUILD_HTML_DIR (:obj:`str`): default local directory where generated HTML documentation should be saved
DEFAULT_BUILD_IMAGE (:obj:`str`): default Docker image to use to run tests
GITHUB_API_ENDPOINT (:obj:`str`): GitHub API endpoint
CIRCLE_API_ENDPOINT (:obj:`str`): CircleCI API endpoint
COVERALLS_ENABLED (:obj:`bool`): if :obj:`True`, upload coverage reports to Coveralls
CODE_CLIMATE_ENABLED (:obj:`bool`): if :obj:`True`, upload coverage reports to Code Climate
"""
INITIAL_PACKAGE_VERSION = '0.0.1'
DEFAULT_BUILD_IMAGE_VERSION = '0.0.19'
DEFAULT_TEST_RUNNER = 'pytest'
DEFAULT_PROJ_TESTS_DIR = 'tests'
DEFAULT_PROJ_TESTS_XML_DIR = 'tests/reports'
DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME = 'latest'
DEFAULT_PROJ_DOCS_DIR = 'docs'
DEFAULT_PROJ_DOCS_STATIC_DIR = 'docs/_static'
DEFAULT_PROJ_DOCS_SOURCE_DIR = 'docs/source'
DEFAULT_PROJ_DOCS_BUILD_DOCTREES_DIR = 'docs/_build/doctrees'
DEFAULT_PROJ_DOCS_BUILD_HTML_DIR = 'docs/_build/html'
DEFAULT_PROJ_DOCS_BUILD_SPELLING_DIR = 'docs/_build/spelling'
DEFAULT_BUILD_IMAGE = 'karrlab/build:latest'
GITHUB_API_ENDPOINT = 'https://api.github.com'
CIRCLE_API_ENDPOINT = 'https://circleci.com/api/v1.1'
COVERALLS_ENABLED = True
CODE_CLIMATE_ENABLED = True
    def __init__(self):
        """ Construct build helper

        Reads its configuration from environment variables (CircleCI's built-in
        ``CIRCLE_*`` variables plus service tokens) and falls back to the class
        defaults for everything else.
        """
        # get settings from environment variables
        self.test_runner = os.getenv('TEST_RUNNER', self.DEFAULT_TEST_RUNNER)
        if self.test_runner not in ['pytest', 'nose']:
            raise BuildHelperError('Unsupported test runner {}'.format(self.test_runner))
        self.repo_type = 'github'
        self.repo_name = os.getenv('CIRCLE_PROJECT_REPONAME')
        self.repo_owner = os.getenv('CIRCLE_PROJECT_USERNAME') or 'KarrLab'
        self.repo_branch = os.getenv('CIRCLE_BRANCH')
        self.repo_revision = os.getenv('CIRCLE_SHA1')
        try:
            # int(float(...)) — presumably to tolerate values like "12.0"; falls back
            # to 0 when the variable is unset (TypeError) or non-numeric (ValueError)
            self.build_num = int(float(os.getenv('CIRCLE_BUILD_NUM')))
        except (TypeError, ValueError, ):
            self.build_num = 0
        # project directory layout; all defaults defined as class constants
        self.proj_tests_dir = self.DEFAULT_PROJ_TESTS_DIR
        self.proj_tests_xml_dir = self.DEFAULT_PROJ_TESTS_XML_DIR
        self.proj_tests_xml_latest_filename = self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME
        self.proj_docs_dir = self.DEFAULT_PROJ_DOCS_DIR
        self.proj_docs_static_dir = self.DEFAULT_PROJ_DOCS_STATIC_DIR
        self.proj_docs_source_dir = self.DEFAULT_PROJ_DOCS_SOURCE_DIR
        self.proj_docs_build_doctrees_dir = self.DEFAULT_PROJ_DOCS_BUILD_DOCTREES_DIR
        self.proj_docs_build_html_dir = self.DEFAULT_PROJ_DOCS_BUILD_HTML_DIR
        self.proj_docs_build_spelling_dir = self.DEFAULT_PROJ_DOCS_BUILD_SPELLING_DIR
        self.build_image = self.DEFAULT_BUILD_IMAGE
        # service credentials; each is None when its environment variable is unset
        self.coveralls_token = os.getenv('COVERALLS_REPO_TOKEN')
        self.code_climate_token = os.getenv('CODECLIMATE_REPO_TOKEN')
        self.github_username = os.getenv('GITHUB_USERNAME')
        self.github_password = os.getenv('GITHUB_PASSWORD')
        self.circleci_api_token = os.getenv('CIRCLECI_API_TOKEN')
        self.test_server_token = os.getenv('TEST_SERVER_TOKEN')
        self.karr_lab_daemon_gmail_password = os.getenv('KARR_LAB_DAEMON_GMAIL_PASSWORD')
        # code.karrlab.org FTP settings; only the password comes from the environment
        self.code_server_hostname = 'code.karrlab.org'
        self.code_server_username = 'karrlab_code'
        self.code_server_password = os.getenv('CODE_SERVER_PASSWORD')
        self.code_server_directory = '/code.karrlab.org/repo'
#####################
# Create a package
#####################
    def create_package(self):
        """ Interactively create a new Karr Lab package

        * Create a local Git repository
        * Create a remote GitHub repository
        * Add the repository to Code Climate
        * Add the repository to Coveralls
        * Add the repository to CircleCI project (by following the GitHub repository)
        * Add environment variable for tokens for code.karrlab.org, Coveralls, Code Climate, and CircleCI
        * Add environment variable for password for karr.lab.daemon@gmail.com
        * Generate API token for status badge
        * If the repository is not private, add the repository to Read the Docs
        * Add the package to code.karrlab.org
          * Add JSON-formatted file to ``ssh://code.karrlab.org:/home/karrlab_code/code.karrlab.org/repo/{{ name }}.json``
        * Add badges for Code Climate, Coveralls, CircleCI, and Read the Docs to README.md
        * Add package name to ``.circleci/downstream_dependencies.yml`` files of all dependencies

        Several services lack automation APIs here, so the method walks the user
        through the web UIs with prompts and confirmation steps.
        """
        # print introductory message
        print('This program will guide you through creating a new package.')
        click.confirm('Continue?', default=True, abort=True)
        # gather basic information
        name = click.prompt('Enter the name of the new package', type=str)
        description = click.prompt('Enter a brief description of the new package', type=str)
        keywords = click.prompt('Enter a comma-separated list of keywords for the new package', type=str, default=' ')
        keywords = [kw.strip() for kw in keywords.strip().split(',') if kw.strip()]
        dependencies = click.prompt(
            'Enter a comma-separated list of Karr Lab packages that the new package depends on', type=str, default=' ')
        dependencies = [dep.strip() for dep in dependencies.strip().split(',') if dep.strip()]
        private = click.confirm('Should the repository be private?', default=True)
        dirname = click.prompt('Enter the directory for the new package', type=str, default=os.path.join('.', name))
        build_image_version = click.prompt('Enter the build image version to test the package',
                                           type=str, default=self.DEFAULT_BUILD_IMAGE_VERSION)
        # secrets are prompted with a masked default ('*' * length); if the user
        # accepts the masked default unchanged, the stored instance value is used
        github_username = click.prompt('Enter your GitHub username', type=str, default=self.github_username)
        github_password = click.prompt('Enter your GitHub password', type=str, hide_input=True,
                                       default='*' * len(self.github_password or ''))
        if github_password == '*' * len(self.github_password or ''):
            github_password = self.github_password
        circleci_api_token = click.prompt('Enter the CircleCI API token for the karr-lab-daemon GitHub account',
                                          type=str, hide_input=True, default='*' * len(self.circleci_api_token or ''))
        if circleci_api_token == '*' * len(self.circleci_api_token or ''):
            circleci_api_token = self.circleci_api_token
        test_server_token = click.prompt('Enter the token for tests.karrlab.org', type=str,
                                         hide_input=True, default='*' * len(self.test_server_token or ''))
        if test_server_token == '*' * len(self.test_server_token or ''):
            test_server_token = self.test_server_token
        karr_lab_daemon_gmail_password = click.prompt('Enter the password for karr.lab.daemon@gmail.com',
                                                      type=str, hide_input=True,
                                                      default='*' * len(self.karr_lab_daemon_gmail_password or ''))
        if karr_lab_daemon_gmail_password == '*' * len(self.karr_lab_daemon_gmail_password or ''):
            karr_lab_daemon_gmail_password = self.karr_lab_daemon_gmail_password
        code_server_username = click.prompt('Enter your username for ftp://' + self.code_server_hostname,
                                            type=str, default=self.code_server_username)
        code_server_password = click.prompt('Enter your password for ftp://' + self.code_server_hostname,
                                            type=str, hide_input=True, default='*' * len(self.code_server_password or ''))
        if code_server_password == '*' * len(self.code_server_password or ''):
            code_server_password = self.code_server_password
        # create local and GitHub Git repositories
        print('Creating {} remote Git repository "{}/{}" on GitHub and cloning this repository to "{}"'.format(
            'private' if private else 'public', self.repo_owner, name, dirname))
        self.create_repository(name, description=description, private=private, dirname=dirname,
                               github_username=github_username, github_password=github_password)
        # Code Climate: walk the user through the web UI
        # :todo: programmatically add repo to Code Climate and generate tokens
        # NOTE(review): the 'Cick' typos below are in runtime prompt strings and are
        # intentionally left unchanged here
        print('Visit "https://codeclimate.com/dashboard" and click on the "{}" organization.'.format(
            self.repo_owner if private else 'Open source'))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Sync now" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Add a repository" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Add repo" button for the "{}" repository'.format(name))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "settings" link'.format(name))
        click.confirm('Continue?', default=True, abort=True)
        print('Cick the "Test coverage" menu item')
        click.confirm('Continue?', default=True, abort=True)
        codeclimate_repo_token = click.prompt('Enter the "test reporter id"')
        print('Cick the "Badges" menu item')
        click.confirm('Continue?', default=True, abort=True)
        codeclimate_repo_id = click.prompt('Enter the repository ID (ID in the URL https://codeclimate.com/repos/<id>/maintainability)')
        codeclimate_repo_badge_token = click.prompt(
            'Enter the badge token (token in the URL https://api.codeclimate.com/v1/badges/<token>/maintainability)')
        # Coveralls: walk the user through the web UI
        # :todo: programmatically add repo to Coveralls and generate tokens
        print('Visit "https://coveralls.io/repos/new"')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "SYNC REPOS" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Search for the "{}/{}" repository and click its "OFF" button'.format(self.repo_owner, name))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the details button for the "{}/{}" repository'.format(self.repo_owner, name))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Settings" menu item')
        click.confirm('Continue?', default=True, abort=True)
        coveralls_repo_token = click.prompt('Enter the "REPO TOKEN"')
        print('Click the "README BADGE" EMBED" button')
        click.confirm('Continue?', default=True, abort=True)
        coveralls_repo_badge_token = click.prompt(
            'Enter the badge token (token in the URL https://coveralls.io/repos/github/KarrLab/test_a/badge.svg?t=<token>')
        # CircleCI: the build needs a machine-user SSH key if any dependency is private
        # :todo: programmatically create CircleCI build
        # :todo: programmatically create CircleCI token for status badges
        has_private_dependencies = False
        g = github.Github(github_username, github_password)
        org = g.get_organization('KarrLab')
        for dependency in dependencies:
            try:
                repo = org.get_repo(dependency)
                has_private_dependencies = has_private_dependencies or repo.private
            except github.UnknownObjectException:
                # dependency repository not found on GitHub; skip it
                pass
        print('Visit "https://circleci.com/add-projects/gh/KarrLab"')
        click.confirm('Continue?', default=True, abort=True)
        print('Search for the "{}" repository and click its "Follow project" button'.format(name))
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Project settings" icon')
        click.confirm('Continue?', default=True, abort=True)
        if has_private_dependencies:
            print('Click the "Checkout SSH keys" button')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Authorize with GitHub" button')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Create and add ... user key" button')
            click.confirm('Continue?', default=True, abort=True)
        print('Click the "API permissions" menu item')
        click.confirm('Continue?', default=True, abort=True)
        print('Click the "Create Token" button')
        click.confirm('Continue?', default=True, abort=True)
        print('Select "All", enter a label, and click the "Add Token" button')
        click.confirm('Continue?', default=True, abort=True)
        circleci_repo_token = click.prompt('Enter the new token')
        # store the collected service tokens as CircleCI environment variables
        vars = {
            'CIRCLECI_API_TOKEN': circleci_api_token,
            'COVERALLS_REPO_TOKEN': coveralls_repo_token,
            'CODECLIMATE_REPO_TOKEN': codeclimate_repo_token,
            'KARR_LAB_DAEMON_GMAIL_PASSWORD': karr_lab_daemon_gmail_password,
            'TEST_SERVER_TOKEN': test_server_token,
        }
        self.set_circleci_environment_variables(vars, repo_name=name, circleci_api_token=circleci_api_token)
        # Read the Docs: only public repositories get hosted documentation
        if not private:
            # :todo: programmatically add repo to Read the Docs
            print('Visit "https://readthedocs.org/dashboard/import/?"')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "refresh" icon')
            click.confirm('Continue?', default=True, abort=True)
            print('Find the "{}" repository and click its "+" button'.format(name))
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Next" button')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Admin" menu item')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Advanced settings" menu item')
            click.confirm('Continue?', default=True, abort=True)
            print('Set the "Requirements file" to "docs/requirements.txt"')
            click.confirm('Continue?', default=True, abort=True)
            print('Set the "Python configuration file" to "docs/conf.py"')
            click.confirm('Continue?', default=True, abort=True)
            print('Set the "Python interpreter" to "CPython 3.x"')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Maintainers" menu item')
            click.confirm('Continue?', default=True, abort=True)
            print('Add "jonrkarr" to the maintainers')
            click.confirm('Continue?', default=True, abort=True)
            print('Click the "Notifications" menu item')
            click.confirm('Continue?', default=True, abort=True)
            print('Add your email address and click submit')
            click.confirm('Continue?', default=True, abort=True)
            print('Add "jonrkarr@gmail.com" and click submit')
            click.confirm('Continue?', default=True, abort=True)
        # add package to code.karrlab.org: render the JSON metadata template to a
        # temporary file and upload it over FTP
        with open(pkg_resources.resource_filename('karr_lab_build_utils',
                                                  os.path.join('templates', 'code_server', '_package_.json')), 'r') as file:
            template = Template(file.read())
        fid, local_filename = tempfile.mkstemp()
        os.close(fid)
        context = {
            'name': name,
            'description': description,
            'private': private,
            'circleci_repo_token': circleci_repo_token,
            'coveralls_repo_token': coveralls_repo_token,
            'codeclimate_repo_id': codeclimate_repo_id,
        }
        template.stream(**context).dump(local_filename)
        with ftputil.FTPHost(self.code_server_hostname, code_server_username, code_server_password) as ftp:
            remote_filename = ftp.path.join(self.code_server_directory, '{}.json'.format(name))
            ftp.upload(local_filename, remote_filename)
        os.remove(local_filename)
        # setup repository: render the file skeleton with the collected tokens
        self.setup_repository(name, description=description, keywords=keywords, dependencies=dependencies,
                              private=private, build_image_version=build_image_version, dirname=dirname,
                              circleci_repo_token=circleci_repo_token, coveralls_repo_badge_token=coveralls_repo_badge_token,
                              codeclimate_repo_id=codeclimate_repo_id, codeclimate_repo_badge_token=codeclimate_repo_badge_token)
        # append package to downstream dependencies of dependencies; assumes the
        # dependencies are checked out as siblings of the new package's directory
        parent_dirname = os.path.dirname(dirname)
        for dependency in dependencies:
            downstream_deps_filename = os.path.join(parent_dirname, dependency, '.circleci', 'downstream_dependencies.yml')
            if os.path.isfile(downstream_deps_filename):
                with open(downstream_deps_filename, 'r') as file:
                    downstream_deps = yaml.load(file)
                downstream_deps.append(name)
                with open(downstream_deps_filename, 'w') as file:
                    yaml.dump(downstream_deps, file, default_flow_style=False)
            else:
                warnings.warn(('Unable to append package to downstream dependency {} because the '
                               'downstream dependency is not available').format(dependency),
                              UserWarning)
def create_repository(self, name, description='', private=True, dirname=None, github_username=None, github_password=None):
""" Create a Git repository with the default directory structure
Args:
name (:obj`str`): package name
description (:obj:`str`, optional): package description
private (:obj:`bool`, optional): if :obj:`False`, make the GitHub repository public and set
up documentation generation with Read the Docs
dirname (:obj:`str`, optional): directory name for repository
github_username (:obj:`str`, optional): GitHub username
github_password (:obj:`str`, optional): GitHub password
"""
# process arguments
if not re.match('^[a-z][a-z0-9_]*$', name):
raise BuildHelperError("'{}' not valid: Repository names should start with a letter and only include lower "
"case letters, numbers, and underscores".format(name))
dirname = dirname or os.path.join('.', name)
if github_username is None:
github_username = self.github_username
if github_password is None:
github_password = self.github_password
# create GitHub repository
g = github.Github(github_username, github_password)
org = g.get_organization('KarrLab')
org.create_repo(name=name, description=description, private=private, auto_init=True)
# initialize Git
gitconfig_filename = os.path.expanduser('~/.gitconfig')
has_gitconfig = os.path.isfile(gitconfig_filename)
if has_gitconfig:
os.rename(gitconfig_filename, gitconfig_filename + '.ignore')
import pygit2
credentials = pygit2.UserPass(github_username, github_password)
callbacks = pygit2.RemoteCallbacks(credentials=credentials)
pygit2.clone_repository('https://github.com/KarrLab/{}.git'.format(name), dirname, callbacks=callbacks)
if has_gitconfig:
os.rename(gitconfig_filename + '.ignore', gitconfig_filename)
    def setup_repository(self, name, description='', keywords=None, dependencies=None, private=True, build_image_version=None,
                         dirname=None, circleci_repo_token=None, coveralls_repo_badge_token=None, codeclimate_repo_id=None,
                         codeclimate_repo_badge_token=None):
        """ Setup Git repository with the default directory structure

        Renders the packaged file templates into :obj:`dirname`, renames the
        ``_package_`` placeholder directory to the package name, and generates the
        documentation skeleton.

        Args:
            name (:obj`str`): package name
            description (:obj:`str`, optional): package description
            keywords (:obj:`list` of :obj:`str`, optional): list of keywords
            dependencies (:obj:`list` of :obj:`str`, optional): list of Karr Lab packages that the package depends on
            private (:obj:`bool`, optional): if :obj:`False`, make the GitHub repository public and set
                up documentation generation with Read the Docs
            build_image_version (:obj:`str`, optional): build image version
            dirname (:obj:`str`, optional): directory name
            circleci_repo_token (:obj:`str`, optional): CircleCI API token (e.g. for badges) for the repository
            coveralls_repo_badge_token (:obj:`str`, optional): Coveralls badge token for the repository
            codeclimate_repo_id (:obj:`str`, optional): Code Climate ID for the repository
            codeclimate_repo_badge_token (:obj:`str`, optional): Code Climate badge token for the repository

        Raises:
            :obj:`BuildHelperError`: if :obj:`name` is not a lower-case identifier
        """
        if not re.match('^[a-z][a-z0-9_]*$', name):
            raise BuildHelperError("'{}' not valid: Repository names should start with a letter and only include lower "
                                   "case letters, numbers, and underscores".format(name))
        keywords = keywords or []
        dependencies = dependencies or []
        if not build_image_version:
            build_image_version = self.DEFAULT_BUILD_IMAGE_VERSION
        dirname = dirname or os.path.join('.', name)
        # create a directory for the repository
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        # create files: each template path is rendered relative to dirname;
        # the '_package_' directory is a placeholder renamed to the package name below
        filenames = (
            '.gitignore',
            'LICENSE',
            'MANIFEST.in',
            'README.md',
            'requirements.txt',
            'requirements.optional.txt',
            'setup.py',
            'setup.cfg',
            'tests/requirements.txt',
            'tests/test_core.py',
            'tests/test_main.py',
            '.circleci/config.yml',
            '.circleci/downstream_dependencies.yml',
            '.readthedocs.yml',
            '_package_/__init__.py',
            '_package_/VERSION',
            '_package_/core.py',
            '_package_/__main__.py',
        )
        now = datetime.now()
        # template context shared by all rendered files
        context = {
            'name': name,
            'description': description,
            'keywords': keywords,
            'version': self.INITIAL_PACKAGE_VERSION,
            'year': now.year,
            'date': '{}-{}-{}'.format(now.year, now.month, now.day),
            'dependencies': dependencies,
            'build_image_version': build_image_version,
            'private': private,
            'circleci_repo_token': circleci_repo_token,
            'coveralls_repo_badge_token': coveralls_repo_badge_token,
            'codeclimate_repo_id': codeclimate_repo_id,
            'codeclimate_repo_badge_token': codeclimate_repo_badge_token,
        }
        for filename in filenames:
            # create intermediate directories (e.g. tests/, .circleci/) as needed
            if os.path.dirname(filename) and not os.path.isdir(os.path.join(dirname, os.path.dirname(filename))):
                os.makedirs(os.path.join(dirname, os.path.dirname(filename)))
            with open(pkg_resources.resource_filename(
                    'karr_lab_build_utils',
                    os.path.join('templates', filename)), 'r') as file:
                template = Template(file.read())
            template.stream(**context).dump(os.path.join(dirname, filename))
        # replace the placeholder package directory with the real package name
        os.rename(os.path.join(dirname, '_package_'), os.path.join(dirname, name))
        self.create_documentation_template(dirname)
###########################
# Register repo on CircleCI
###########################
def follow_circleci_build(self, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None,
has_private_dependencies=False):
""" Follow CircleCI build for a repository
Args:
repo_type (:obj:`str`, optional): repository type
repo_owner (:obj:`str`, optional): repository owner
repo_name (:obj:`str`, optional): repository name
circleci_api_token (:obj:`str`, optional): CircleCI API token
has_private_dependencies (:obj:`bool`, optional): if :obj:`True`, add a GitHub SSH key for the Karr Lab machine user to the build
Raises:
:obj:`ValueError`: if a CircleCI build wasn't followed and didn't already exist
"""
if repo_type is None:
repo_type = self.repo_type
if repo_owner is None:
repo_owner = self.repo_owner
if repo_name is None:
repo_name = self.repo_name
if circleci_api_token is None:
circleci_api_token = self.circleci_api_token
# follow repo
result = self.run_circleci_api('/follow',
method='post', repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
circleci_api_token=circleci_api_token)
if 'following' not in result or not result['following']:
raise ValueError(
'Unable to follow CircleCI build for repository {}/{}'.format(repo_owner, repo_name))
# add checkout key
if has_private_dependencies:
# :todo: add a GitHub SSH key for the Karr Lab machine user to the build
pass # pragma: no cover
def get_circleci_environment_variables(self, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
""" Get the CircleCI environment variables for a repository and their partial values
Args:
repo_type (:obj:`str`, optional): repository type
repo_owner (:obj:`str`, optional): repository owner
repo_name (:obj:`str`, optional): repository name
circleci_api_token (:obj:`str`, optional): CircleCI API token
Returns:
:obj:`dict`: dictionary of environment variables and their partial values
"""
if repo_type is None:
repo_type = self.repo_type
if repo_owner is None:
repo_owner = self.repo_owner
if repo_name is None:
repo_name = self.repo_name
if circleci_api_token is None:
circleci_api_token = self.circleci_api_token
vars = self.run_circleci_api('/envvar',
repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
circleci_api_token=circleci_api_token)
return {var['name']: var['value'] for var in vars}
def set_circleci_environment_variables(self, vars, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
""" Set the CircleCI environment variables for a repository
Args:
vars (:obj:`dict`): dictionary of environment variables to set
repo_type (:obj:`str`, optional): repository type
repo_owner (:obj:`str`, optional): repository owner
repo_name (:obj:`str`, optional): repository name
circleci_api_token (:obj:`str`, optional): CircleCI API token
Returns:
:obj:`dict`: dictionary of environment variables and their values
"""
if repo_type is None:
repo_type = self.repo_type
if repo_owner is None:
repo_owner = self.repo_owner
if repo_name is None:
repo_name = self.repo_name
if circleci_api_token is None:
circleci_api_token = self.circleci_api_token
# get current environment variables
old_vars = self.get_circleci_environment_variables(
repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
circleci_api_token=circleci_api_token)
# update environment variables
for name, value in vars.items():
# delete environment variables which we want to overwrite
if name in old_vars:
self.delete_circleci_environment_variable(name,
repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
circleci_api_token=circleci_api_token)
# add environment variable
self.run_circleci_api('/envvar',
method='post', repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
circleci_api_token=circleci_api_token, data={'name': name, 'value': value})
def delete_circleci_environment_variable(self, var, repo_type=None, repo_owner=None, repo_name=None, circleci_api_token=None):
""" Delete a CircleCI environment variable for a repository
Args:
var (:obj:`str`): name of variable to delete
repo_type (:obj:`str`, optional): repository type
repo_owner (:obj:`str`, optional): repository owner
repo_name (:obj:`str`, optional): repository name
circleci_api_token (:obj:`str`, optional): CircleCI API token
"""
if repo_type is None:
repo_type = self.repo_type
if repo_owner is None:
repo_owner = self.repo_owner
if repo_name is None:
repo_name = self.repo_name
if circleci_api_token is None:
circleci_api_token = self.circleci_api_token
self.run_circleci_api('/envvar/{}'.format(var),
method='delete', repo_type=repo_type, repo_owner=repo_owner, repo_name=repo_name,
circleci_api_token=circleci_api_token)
def create_codeclimate_github_webhook(self, repo_type=None, repo_owner=None, repo_name=None,
github_username=None, github_password=None):
""" Create GitHub webhook for Code Climate
Args:
repo_type (:obj:`str`, optional): repository type
repo_owner (:obj:`str`, optional): repository owner
repo_name (:obj:`str`, optional): repository name
github_username (:obj:`str`, optional): GitHub username
github_password (:obj:`str`, optional): GitHub password
Raises:
:obj:`ValueError`: if webhook wasn't created and didn't already exist
"""
if repo_type is None:
repo_type = self.repo_type
if repo_owner is None:
repo_owner = self.repo_owner
if repo_name is None:
repo_name = self.repo_name
if github_username is None:
github_username = self.github_username
if github_password is None:
github_password = self.github_password
url = '{}/repos/{}/{}/hooks'.format(self.GITHUB_API_ENDPOINT, repo_owner, repo_name)
response = requests.post(url, auth=(github_username, github_password), json={
'name': 'web',
'config': {
'url': 'https://codeclimate.com/webhooks',
'content_type': 'form',
},
'events': [
'push',
'pull_request'
],
'active': True,
})
if response.status_code != 201:
if 'errors' in response.json():
msg = response.json()['errors'][0]['message']
else:
msg = response.json()['message']
raise ValueError('Unable to create webhook for {}/{}: {}'.format(repo_owner, repo_name, msg))
#########################
# Installing dependencies
#########################
def install_requirements(self):
""" Install requirements """
# upgrade pip, setuptools
self.run_method_and_capture_stderr(pip.main, ['install', '-U', 'setuptools'])
self.run_method_and_capture_stderr(pip.main, ['install', '-U', 'pip'])
# requirements for package
self._install_requirements_helper('requirements.txt')
self._install_requirements_helper('requirements.optional.txt', ignore_options=True)
self._install_requirements_helper(os.path.join(self.proj_tests_dir, 'requirements.txt'))
self._install_requirements_helper(os.path.join(self.proj_docs_dir, 'requirements.txt'))
# upgrade CircleCI
if whichcraft.which('docker') and whichcraft.which('circleci'):
subprocess.check_call(['circleci', 'update'])
def _install_requirements_helper(self, filename, ignore_options=False):
""" Install the packages in a requirements.txt file, including all optional dependencies
Args:
filename (:obj:`str`): path to requirements file
ignore_options (:obj:`bool`, optional): if :obj:`True`, ignore option headings
(e.g. for requirements.optional.txt)
"""
if not os.path.isfile(filename):
return
# create a temporary file that has the optional markings removed
if ignore_options:
sanitized_file, sanitized_filename = tempfile.mkstemp(suffix='.txt')
os.close(sanitized_file)
with open(filename, 'r') as file:
with open(sanitized_filename, 'w') as sanitized_file:
for line in file:
line = line.strip()
if line and line[0] == '[':
continue
sanitized_file.write(line + '\n')
filename = sanitized_filename
self.run_method_and_capture_stderr(pip.main, ['install', '-U', '--process-dependency-links', '-r', filename])
# cleanup temporary file
if ignore_options:
os.remove(sanitized_filename)
def upgrade_requirements(self):
""" Upgrade requirements from the Karr Lab's GitHub organization
Returns:
:obj:`list` of :obj:`str`: upgraded requirements from the Karr Lab's GitHub organization
"""
# get PyPI requirements
lines = self.run_method_and_capture_stdout(pip.main, ['freeze'])
pkgs = []
for line in lines.split('\n'):
if not line.startswith('-e') and '==' in line:
pkgs.append(line.partition('==')[0])
infos = self.run_method_and_capture_stdout(pip.main, ['show'] + pkgs)
reqs = []
for info in infos.split('---\n'):
if 'github.com/KarrLab/' in info:
name = info.partition('Name: ')[2].partition('\n')[0].replace('-', '_')
url = info.partition('Home-page: ')[2].partition('\n')[0]
reqs.append('git+{}.git#egg={}[all]'.format(url, name))
# ugrade PyPI requirements
self.run_method_and_capture_stderr(pip.main, ['install', '-U', '--process-dependency-links'] + reqs)
# upgrade CircleCI
if whichcraft.which('docker') and whichcraft.which('circleci'):
subprocess.check_call(['circleci', 'update'])
return reqs
########################
# Running tests
########################
def run_tests(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
coverage_type=CoverageType.statement, environment=Environment.local, exit_on_failure=True,
ssh_key_filename='~/.ssh/id_rsa'):
""" Run unit tests located at `test_path`.
Optionally, generate a coverage report.
Optionally, save the results to a file
To configure coverage, place a .coveragerc configuration file in the root directory
of the repository - the same directory that holds .coverage. Documentation of coverage
configuration is in https://coverage.readthedocs.io/en/coverage-4.2/config.html
Args:
dirname (:obj:`str`, optional): path to package that should be tested
test_path (:obj:`str`, optional): path to tests that should be run
verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
with_xunit (:obj:`bool`, optional): whether or not to save test results
with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
coverage_dirname (:obj:`str`, optional): directory to save coverage data
coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
environment (:obj:`str`, optional): environment to run tests (local, docker, or circleci-local-executor)
exit_on_failure (:obj:`bool`, optional): whether or not to exit on test failure
ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key; needed for Docker environment
Raises:
:obj:`BuildHelperError`: If the environment is not supported or the package directory not set
"""
if environment == Environment.local:
self._run_tests_local(dirname=dirname, test_path=test_path, verbose=verbose, with_xunit=with_xunit,
with_coverage=with_coverage, coverage_dirname=coverage_dirname,
coverage_type=coverage_type, exit_on_failure=exit_on_failure)
elif environment == Environment.docker:
self._run_tests_docker(dirname=dirname, test_path=test_path, verbose=verbose, with_xunit=with_xunit,
with_coverage=with_coverage, coverage_dirname=coverage_dirname,
coverage_type=coverage_type, ssh_key_filename=ssh_key_filename)
elif environment == Environment.circleci:
self._run_tests_circleci(dirname=dirname, test_path=test_path, verbose=verbose, ssh_key_filename=ssh_key_filename)
else:
raise BuildHelperError('Unsupported environment: {}'.format(environment))
def _run_tests_local(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
coverage_type=CoverageType.statement, exit_on_failure=True):
""" Run unit tests located at `test_path` locally
Optionally, generate a coverage report.
Optionally, save the results to a file
To configure coverage, place a .coveragerc configuration file in the root directory
of the repository - the same directory that holds .coverage. Documentation of coverage
configuration is in https://coverage.readthedocs.io/en/coverage-4.2/config.html
Args:
dirname (:obj:`str`, optional): path to package that should be tested
test_path (:obj:`str`, optional): path to tests that should be run
verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
with_xunit (:obj:`bool`, optional): whether or not to save test results
with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
coverage_dirname (:obj:`str`, optional): directory to save coverage data
coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
exit_on_failure (:obj:`bool`, optional): whether or not to exit on test failure
Raises:
:obj:`BuildHelperError`: If the package directory not set
"""
py_v = self.get_python_version()
abs_xml_latest_filename = os.path.join(
self.proj_tests_xml_dir, '{0}.{1}.xml'.format(self.proj_tests_xml_latest_filename, py_v))
if with_coverage:
if coverage_type == CoverageType.statement:
cov = coverage.coverage(data_file=os.path.join(coverage_dirname, '.coverage'),
data_suffix=py_v, config_file=True)
cov.start()
elif coverage_type == CoverageType.branch:
cov = coverage.coverage(data_file=os.path.join(coverage_dirname, '.coverage'),
data_suffix=py_v, config_file=True, branch=True)
cov.start()
# elif coverage_type == CoverageType.multiple_condition:
# # :todo: support instrumental once its dependency astkit is updated for Python 3
# parser = configparser.ConfigParser()
# parser.read(os.path.join(dirname, 'setup.cfg'))
# targets = parser.get('coverage:run', 'source').strip().split('\n')
# targets = [target.strip() for target in targets]
#
# opts = attrdict.AttrDict({
# 'file': os.path.join(coverage_dirname, '.coverage.' + py_v),
# 'report': False,
# 'label': False,
# 'summary': False,
# 'statements': False,
# 'xml': False,
# 'html': False,
# 'all': False,
# 'targets': targets,
# 'ignores': [],
# 'report_conditions_with_literals': False,
# 'instrument_assertions': True,
# 'use_metadata_cache': False,
# 'instrument_comparisons': True,
# })
# cov = instrumental.api.Coverage(opts, os.getcwd())
# cov.start(opts.targets, opts.ignores)
else:
raise BuildHelperError('Unsupported coverage type: {}'.format(coverage_type))
if with_xunit and not os.path.isdir(self.proj_tests_xml_dir):
os.makedirs(self.proj_tests_xml_dir)
if self.test_runner == 'pytest':
test_path = test_path.replace(':', '::')
test_path = re.sub('::(.+?)(\.)', r'::\1::', test_path)
argv = [test_path]
if verbose:
argv.append('--capture=no')
if with_xunit:
argv.append('--junitxml=' + abs_xml_latest_filename)
result = pytest.main(argv)
elif self.test_runner == 'nose':
test_path = test_path.replace('::', ':', 1)
test_path = test_path.replace('::', '.', 1)
argv = ['nosetests', test_path]
if verbose:
argv.append('--nocapture')
if with_xunit:
argv += ['--with-xunit', '--xunit-file', abs_xml_latest_filename]
result = int(not nose.run(argv=argv))
else:
raise BuildHelperError('Unsupported test runner {}'.format(self.test_runner))
if with_coverage:
cov.stop() # pragma: no cover # this line can't be covered
cov.save()
if exit_on_failure and result != 0:
sys.exit(1)
def _run_tests_docker(self, dirname='.', test_path='tests', verbose=False, with_xunit=False, with_coverage=False, coverage_dirname='.',
coverage_type=CoverageType.statement, ssh_key_filename='~/.ssh/id_rsa'):
""" Run unit tests located at `test_path` using a Docker image:
#. Create a container based on the build image (e.g, karrlab/build:latest)
#. Copy your GitHub SSH key to the container
#. Remove Python cache directories (``__pycache__``) from the package
#. Copy the package to the container at ``/root/projects``
#. Install the Karr Lab build utilities into the container
#. Install the requirements for the package in the container
#. Run the tests inside the container using the same version of Python that called this method
#. Delete the container
Args:
dirname (:obj:`str`, optional): path to package that should be tested
test_path (:obj:`str`, optional): path to tests that should be run
verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
with_xunit (:obj:`bool`, optional): whether or not to save test results
with_coverage (:obj:`bool`, optional): whether or not coverage should be assessed
coverage_dirname (:obj:`str`, optional): directory to save coverage data
coverage_type (:obj:`CoverageType`, optional): type of coverage to run when :obj:`with_coverage` is :obj:`True`
ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key
"""
ssh_key_filename = os.path.expanduser(ssh_key_filename)
# pick container name
basename = os.path.basename(os.path.abspath(dirname))
now = datetime.now()
container = 'build-{0}-{1.year}-{1.month}-{1.day}-{1.hour}-{1.minute}-{1.second}'.format(basename, now)
# get Python version
py_v = '{}.{}'.format(sys.version_info[0], sys.version_info[1])
# create container
print('\n\n')
print('=====================================')
print('== Creating container')
print('=====================================')
self._run_docker_command(['run', '-it', '-d', '--name', container, self.build_image, 'bash'])
# copy GitHub SSH key to container
print('\n\n')
print('=====================================')
print('== Copying SSH key to container')
print('=====================================')
self._run_docker_command(['cp', ssh_key_filename, container + ':/root/.ssh/'])
# delete __pycache__ directories
print('\n\n')
print('=====================================')
print('== Deleting __pycache__ directories')
print('=====================================')
for root, rel_dirnames, rel_filenames in os.walk(dirname):
for rel_dirname in fnmatch.filter(rel_dirnames, '__pycache__'):
shutil.rmtree(os.path.join(root, rel_dirname))
# copy package to container
print('\n\n')
print('=====================================')
print('== Copying package to container')
print('=====================================')
self._run_docker_command(['cp', os.path.abspath(dirname), container + ':/root/project'])
# install pkg_utils
print('\n\n')
print('=====================================')
print('== Install pkg_utils')
print('=====================================')
build_utils_uri = 'git+https://github.com/KarrLab/pkg_utils.git#egg=pkg_utils'
self._run_docker_command(['exec', container, 'bash', '-c',
'pip{} install -U --process-dependency-links {}'.format(py_v, build_utils_uri)])
# install Karr Lab build utils
print('\n\n')
print('=====================================')
print('== Install karr_lab_build_utils')
print('=====================================')
build_utils_uri = 'git+https://github.com/KarrLab/karr_lab_build_utils.git#egg=karr_lab_build_utils'
self._run_docker_command(['exec', container, 'bash', '-c',
'pip{} install -U --process-dependency-links {}'.format(py_v, build_utils_uri)])
# install package
print('\n\n')
print('=====================================')
print('== Install package')
print('=====================================')
self._run_docker_command(['exec', container, 'bash', '-c',
'cd /root/project && pip{} install --process-dependency-links -e .'.format(py_v)])
# install dependencies
print('\n\n')
print('=====================================')
print('== Install dependencies')
print('=====================================')
self._run_docker_command(['exec', container, 'bash', '-c',
'cd /root/project && karr_lab_build_utils{} upgrade-requirements'.format(py_v)])
# test package in container
print('\n\n')
print('=====================================')
print('== Running tests')
print('=====================================')
options = []
options += ['--test-path', test_path]
if with_coverage:
options += ['--with-coverage', '--coverage-type', coverage_type.name]
if with_xunit:
options.append('--with-xunit')
if verbose:
options.append('--verbose')
self._run_docker_command(['exec', container, 'bash', '-c',
'cd /root/project && karr_lab_build_utils{} run-tests {}'.format(py_v, ' '.join(options))],
raise_error=False)
if with_coverage:
out = self._run_docker_command(['exec', container, 'bash', '-c', 'ls -la ' +
os.path.join('/root', 'project', '.coverage.{}.*'.format(py_v))])
match = re.search('/root/project/(\.coverage\.\d+\.\d+\.\d+)', out)
self._run_docker_command(['cp', container + ':' + match.group(0), os.path.join(coverage_dirname, match.group(1))])
if with_xunit:
out = self._run_docker_command(['exec', container, 'bash', '-c', 'ls -la ' +
os.path.join('/root', 'project', self.DEFAULT_PROJ_TESTS_XML_DIR,
'{}.{}.*.xml'.format(self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME, py_v))])
match = re.search('/root/project/{}/({}\.\d+\.\d+\.\d+.xml)'.format(self.DEFAULT_PROJ_TESTS_XML_DIR,
self.DEFAULT_PROJ_TESTS_XML_LATEST_FILENAME), out)
self._run_docker_command(['cp', container + ':' + match.group(0), os.path.join(self.proj_tests_xml_dir, match.group(1))])
# stop and remove container
print('\n\n')
print('=====================================')
print('== Removing container')
print('=====================================')
self._run_docker_command(['rm', '-f', container])
def _run_docker_command(self, cmd, cwd=None, raise_error=True):
""" Run a docker command
Args:
cmd (:obj:`list`): docker command to run
cwd (:obj:`str`, optional): directory from which to run :obj:`cmd`
raise_error (:obj:`bool`, optional): if true, raise errors
Returns:
:obj:`str`: standard output
Raises:
:obj:`BuildHelperError`: if the docker command fails
"""
with capturer.CaptureOutput() as captured:
process = subprocess.Popen(['docker'] + cmd, cwd=cwd)
while process.poll() is None:
time.sleep(0.5)
out = captured.get_text()
if process.returncode != 0 and raise_error:
raise BuildHelperError(out)
return out
def _run_tests_circleci(self, dirname='.', test_path='tests', verbose=False, ssh_key_filename='~/.ssh/id_rsa'):
""" Run unit tests located at `test_path` using the CircleCI local executor. This will run the same commands defined in
``.circle/config.yml`` as the cloud version of CircleCI.
Args:
dirname (:obj:`str`, optional): path to package that should be tested
test_path (:obj:`str`, optional): path to tests that should be run
verbose (:obj:`str`, optional): if :obj:`True`, display stdout from tests
ssh_key_filename (:obj:`str`, optional): path to GitHub SSH key
Raises:
:obj:`BuildHelperError`: if the tests fail
"""
ssh_key_filename = os.path.expanduser(ssh_key_filename)
karr_lab_build_utils_dirname = os.path.expanduser('~/Documents/karr_lab_build_utils')
# delete __pycache__ directories
for root, rel_dirnames, rel_filenames in os.walk(dirname):
for rel_dirname in fnmatch.filter(rel_dirnames, '__pycache__'):
shutil.rmtree(os.path.join(root, rel_dirname))
# update CircleCI to use build image with SSH key
circleci_config_filename = os.path.join(dirname, '.circleci', 'config.yml')
backup_circleci_config_filename = os.path.join(dirname, '.circleci', 'config.yml.save')
with open(circleci_config_filename, 'r') as file:
config = yaml.load(file)
image_name = config['jobs']['build']['docker'][0]['image']
if image_name.endswith('.with_ssh_key'):
image_with_ssh_key_name = image_name
image_name = image_name[:-13]
else:
image_with_ssh_key_name = image_name + '.with_ssh_key'
shutil.copyfile(circleci_config_filename, backup_circleci_config_filename)
config['jobs']['build']['docker'][0]['image'] = image_with_ssh_key_name
with open(circleci_config_filename, 'w') as file:
yaml.dump(config, file, default_flow_style=False)
# Build docker image with SSH key
circleci_context_dirname = os.path.join(karr_lab_build_utils_dirname, 'circleci_docker_context')
if not os.path.isdir(circleci_context_dirname):
os.makedirs(circleci_context_dirname)
shutil.copy(ssh_key_filename, os.path.join(circleci_context_dirname, 'GITHUB_SSH_KEY'))
dockerfile_filename = os.path.join(circleci_context_dirname, 'Dockerfile_Circleci')
with open(dockerfile_filename, 'w') as file:
file.write('FROM {}\n'.format(image_name))
file.write('COPY circleci_docker_context/GITHUB_SSH_KEY /root/.ssh/id_rsa\n')
file.write('ENV TEST_SERVER_TOKEN={}\n'.format(self.test_server_token or ''))
file.write('RUN eval `ssh-agent` && ssh-add /root/.ssh/id_rsa\n')
file.write('CMD bash\n')
self._run_docker_command(['build',
'--tag', image_with_ssh_key_name,
'-f', os.path.join('circleci_docker_context', 'Dockerfile_Circleci'),
'.'],
cwd=karr_lab_build_utils_dirname)
# test package
with capturer.CaptureOutput() as captured:
process = subprocess.Popen(['circleci',
'--env', 'test_path={}'.format(test_path),
'--env', 'verbose={:d}'.format(verbose),
'--env', 'dry_run=1',
'build'], cwd=dirname)
while process.poll() is None:
time.sleep(0.5)
out = captured.get_text()
# revert CircleCI config file
os.remove(circleci_config_filename)
shutil.move(backup_circleci_config_filename, circleci_config_filename)
# delete docker image
self._run_docker_command(['rmi', image_with_ssh_key_name], raise_error=False)
# cleanup circleci context
shutil.rmtree(circleci_context_dirname)
# raise error if tests didn't pass
if process.returncode != 0 or 'Task failed' in out:
raise BuildHelperError(out.encode('utf-8'))
def get_test_results(self):
""" Load test results from a set of XML files
Results:
:obj:`TestResults`: test results
"""
test_results = TestResults()
filename_pattern = os.path.join(self.proj_tests_xml_dir,
'{0}.*.xml'.format(self.proj_tests_xml_latest_filename))
for filename in glob.glob(filename_pattern):
match = re.match('^{}\.(.*?)\.xml$'.format(self.proj_tests_xml_latest_filename), os.path.basename(filename))
python_version = match.group(1)
doc = minidom.parse(filename)
suite = doc.getElementsByTagName('testsuite')[0]
for case in suite.getElementsByTagName('testcase'):
case_result = TestCaseResult()
case_result.classname = case.getAttribute('classname')
case_result.name = case.getAttribute('name')
case_result.python_version = python_version
case_result.time = float(case.getAttribute('time'))
if case.hasAttribute('file'):
case_result.file = case.getAttribute('file')
if case.hasAttribute('line'):
case_result.line = int(float(case.getAttribute('line')))
stdout = case.getElementsByTagName('system-out')
if stdout:
case_result.stdout = ''.join([child.nodeValue for child in stdout[0].childNodes])
stderr = case.getElementsByTagName('system-err')
if stderr:
case_result.stderr = ''.join([child.nodeValue for child in stderr[0].childNodes])
skip = case.getElementsByTagName('skipped')
error = case.getElementsByTagName('error')
failure = case.getElementsByTagName('failure')
if skip:
case_result.type = TestCaseResultType.skipped
elif error:
case_result.type = TestCaseResultType.error
elif failure:
case_result.type = TestCaseResultType.failure
else:
case_result.type = TestCaseResultType.passed
not_pass = skip or error or failure
if not_pass:
case_result.subtype = not_pass[0].getAttribute('type')
case_result.message = not_pass[0].getAttribute('message')
case_result.details = ''.join([child.nodeValue for child in not_pass[0].childNodes])
test_results.cases.append(case_result)
return test_results
def get_test_results_status(self, test_results, installation_error, tests_error, other_error, dry_run=False):
""" Get the status of a set of results
* Old err
* New error
* Fixed error
* New downstream error
Args:
test_results (:obj:`TestResults`): test results
installation_error (:obj:`bool`): :obj:`True` if there were other errors during the installation
tests_error (:obj:`bool`): obj:`False` if the tests passes
other_error (:obj:`bool`): :obj:`True` if there were other errors during the build such as in generating and/or
archiving the reports
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
Returns:
:obj:`dict`: status of a set of results
"""
if dry_run:
return {
'is_fixed': False,
'is_old_error': False,
'is_new_error': False,
'is_other_error': False,
'is_new_downstream_error': False,
}
# determine if there is an error
if (installation_error or tests_error or other_error) and test_results.get_num_tests() == 0:
is_other_error = True
is_new_error = False
is_old_error = False
is_fixed = False
else:
is_other_error = False
passed = test_results.get_num_errors() == 0 and test_results.get_num_failures() == 0
# determine if error is new
if self.build_num <= 1:
if passed:
is_old_error = False
is_new_error = False
is_fixed = True
else:
is_old_error = False
is_new_error = True
is_fixed = False
else:
prev_result = self.run_circleci_api('/' + str(self.build_num - 1))
if passed:
is_old_error = False
is_new_error = False
is_fixed = prev_result['status'] not in ['success', 'fixed']
else:
is_old_error = prev_result['status'] not in ['success', 'fixed']
is_new_error = prev_result['status'] in ['success', 'fixed']
is_fixed = False
# determine if build was triggered by an upstream package
upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
upstream_build_num = int(os.getenv('UPSTREAM_BUILD_NUM', '0'))
if upstream_repo_name and is_new_error and self.build_num > 1 and not is_other_error:
is_new_downstream_error = True
else:
is_new_downstream_error = False
return {
'is_fixed': is_fixed,
'is_old_error': is_old_error,
'is_new_error': is_new_error,
'is_other_error': is_other_error,
'is_new_downstream_error': is_new_downstream_error,
}
def do_post_test_tasks(self, installation_error, tests_error, dry_run=False):
""" Do all post-test tasks for CircleCI
* Make test and coverage reports
* Compile documentation
* Archive test and coverage reports to the Karr Lab test history server, Coveralls, and Code Climate
* Trigger tests of downstream dependencies
* Notify authors of new failures in downstream packages
Args:
installation_error (:obj:`bool`): :obj:`True` if there were other errors during the installation
tests_error (:obj:`bool`): obj:`False` if the tests passes
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
Returns:
:obj:`list` of :obj:`str`: names of triggered packages
:obj:`dict`: status of a set of results
"""
try:
self.make_and_archive_reports(dry_run=dry_run)
other_error = False
except Exception as exception:
other_error = True
triggered_packages = self.trigger_tests_of_downstream_dependencies(dry_run=dry_run)
status = self.send_email_notifications(installation_error, tests_error, other_error, dry_run=dry_run)
return (triggered_packages, status)
def send_email_notifications(self, installation_error, tests_error, other_error, dry_run=False):
""" Send email notifications of failures, fixes, and downstream failures
Args:
installation_error (:obj:`bool`): :obj:`True` if there were other errors during the installation
tests_error (:obj:`bool`): obj:`False` if the tests passes
other_error (:obj:`bool`): :obj:`True` if there were other errors during the build such as in generating and/or
archiving the reports
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
Returns:
:obj:`dict`: status of a set of results
"""
test_results = self.get_test_results()
status = self.get_test_results_status(test_results, installation_error, tests_error, other_error, dry_run=dry_run)
# stop if this is a dry run
if dry_run:
return status
# build context for email
result = self.run_circleci_api('/' + str(self.build_num))
context = {
'repo_name': self.repo_name,
'commit': result['all_commit_details'][0]['commit'],
'committer_name': result['all_commit_details'][0]['committer_name'],
'committer_email': result['all_commit_details'][0]['committer_email'],
'commit_subject': result['all_commit_details'][0]['subject'],
'commit_url': result['all_commit_details'][0]['commit_url'],
'build_num': self.build_num,
'build_url': result['build_url'],
'test_results': test_results,
}
if status['is_new_downstream_error']:
upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
upstream_build_num = int(os.getenv('UPSTREAM_BUILD_NUM', '0'))
result = self.run_circleci_api('/' + str(upstream_build_num), repo_name=upstream_repo_name)
context['upstream'] = {
'repo_name': upstream_repo_name,
'commit': result['all_commit_details'][0]['commit'],
'committer_name': result['all_commit_details'][0]['committer_name'],
'committer_email': result['all_commit_details'][0]['committer_email'],
'commit_subject': result['all_commit_details'][0]['subject'],
'commit_url': result['all_commit_details'][0]['commit_url'],
'build_num': upstream_build_num,
'build_url': result['build_url'],
}
recipients = [{'name': 'Whole-Cell Modeling Developers', 'email': 'wholecell-developers@googlegroups.com'}]
# send notifications
if status['is_fixed']:
subject = '[Builds] [{0}] {0} is fixed!'.format(context['repo_name'])
self._send_notification_email(recipients, subject, 'fixed.html', context)
elif status['is_old_error']:
subject = '[Builds] [{0}] {0} is still broken!'.format(context['repo_name'])
self._send_notification_email(recipients, subject, 'old_error.html', context)
elif status['is_new_error']:
subject = '[Builds] [{0}] {0} has been broken!'.format(context['repo_name'])
self._send_notification_email(recipients, subject, 'new_error.html', context)
elif status['is_other_error']:
subject = '[Builds] [{0}] {0} is broken!'.format(context['repo_name'])
self._send_notification_email(recipients, subject, 'other_error.html', context)
if status['is_new_downstream_error']:
recipients.append({'name': context['upstream']['committer_name'], 'email': context['upstream']['committer_email']})
subject = '[Builds] [{1}] commit {0} to {1} may have broken {2}'.format(
context['upstream']['commit'], context['upstream']['repo_name'], context['repo_name'])
self._send_notification_email(recipients, subject, 'new_downstream_error.html', context)
return status
def _send_notification_email(self, recipients, subject, template_filename, context, dry_run=False):
""" Send an email notification of test results
Args:
recipients (:obj:`list` of :obj:`dict`): recipient names and email addresses
subject (:obj:`str`): subject
template_filename (obj:`str`): path to template
context (obj:`dict`): context for template
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
"""
full_template_filename = pkg_resources.resource_filename(
'karr_lab_build_utils', os.path.join('templates', 'email_notifications', template_filename))
with open(full_template_filename, 'r') as file:
template = Template(file.read())
body = template.render(**context)
msg = email.message.Message()
msg['From'] = email.utils.formataddr((str(email.header.Header('Karr Lab Build System', 'utf-8')), 'noreply@karrlab.org'))
tos = []
for recipient in recipients:
tos.append(email.utils.formataddr((str(email.header.Header(recipient['name'], 'utf-8')), recipient['email'])))
msg['To'] = ', '.join(tos)
msg['Subject'] = subject
msg.add_header('Content-Type', 'text/html')
msg.set_payload(body)
if not dry_run:
smtp = smtplib.SMTP('smtp.gmail.com:587')
smtp.ehlo()
smtp.starttls()
smtp.login('karr.lab.daemon', os.getenv('KARR_LAB_DAEMON_GMAIL_PASSWORD'))
smtp.sendmail('noreply@karrlab.org', [recipient['email'] for recipient in recipients], msg.as_string())
smtp.quit()
def make_and_archive_reports(self, coverage_dirname='.', dry_run=False):
""" Make and archive reports:
* Upload test report to history server
* Upload coverage report to Coveralls and Code Climate
Args:
coverage_dirname (:obj:`str`, optional): directory to merge coverage files
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
"""
""" test reports """
# Upload test report to history server
self.archive_test_report()
""" coverage """
# Merge coverage reports
# Generate HTML report
# Upload coverage report to Coveralls and Code Climate
self.combine_coverage_reports(coverage_dirname=coverage_dirname)
self.archive_coverage_report(coverage_dirname=coverage_dirname, dry_run=dry_run)
""" documentation """
self.make_documentation()
########################
# Test reports
########################
def archive_test_report(self):
""" Upload test report to history server
Raises:
:obj:`BuildHelperError`: if there is an error uploading the report to the test history server
"""
if not self.test_server_token or \
self.repo_name is None or \
self.repo_owner is None or \
self.repo_branch is None or \
self.repo_revision is None:
return
abs_xml_latest_filename_pattern = os.path.join(
self.proj_tests_xml_dir, '{0}.*.xml'.format(self.proj_tests_xml_latest_filename))
for abs_xml_latest_filename in glob.glob(abs_xml_latest_filename_pattern):
match = re.match('^.*?\.(\d+\.\d+\.\d+)\.xml$', abs_xml_latest_filename)
pyv = match.group(1)
r = requests.post('http://tests.karrlab.org/rest/submit_report',
data={
'token': self.test_server_token,
'repo_name': self.repo_name,
'repo_owner': self.repo_owner,
'repo_branch': self.repo_branch,
'repo_revision': self.repo_revision,
'build_num': self.build_num,
'report_name': pyv,
},
files={
'report': open(abs_xml_latest_filename, 'rb'),
})
r.raise_for_status()
r_json = r.json()
if 'success' not in r_json or not r_json['success']:
raise BuildHelperError('Error uploading report to test history server: {}'.format(r_json['message']))
########################
# Coverage reports
########################
def combine_coverage_reports(self, coverage_dirname='.'):
"""
Args:
coverage_dirname (:obj:`str`, optional): directory to merge coverage files
"""
data_paths = []
for name in glob.glob(os.path.join(coverage_dirname, '.coverage.*')):
data_path = tempfile.mktemp()
shutil.copyfile(name, data_path)
data_paths.append(data_path)
# stop if there are no files to combine
if not data_paths:
warnings.warn('No coverage files exist to combine', UserWarning)
return
coverage_doc = coverage.coverage(data_file=os.path.join(coverage_dirname, '.coverage'))
coverage_doc.combine(data_paths=data_paths)
coverage_doc.save()
def archive_coverage_report(self, coverage_dirname='.', dry_run=False):
""" Archive coverage report:
* Upload report to Coveralls
* Upload report to Code Climate
Args:
coverage_dirname (:obj:`str`, optional): directory to save coverage data
dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls and Code Climate servers
"""
# upload to Coveralls
if self.COVERALLS_ENABLED:
self.upload_coverage_report_to_coveralls(coverage_dirname=coverage_dirname, dry_run=dry_run)
# upload to Code Climate
if self.CODE_CLIMATE_ENABLED:
self.upload_coverage_report_to_code_climate(coverage_dirname=coverage_dirname, dry_run=dry_run)
def upload_coverage_report_to_coveralls(self, coverage_dirname='.', dry_run=False):
    """ Upload coverage report to Coveralls.

    Silently returns (with a warning) when no ``.coverage`` file exists or
    when no Coveralls token is configured.

    Args:
        coverage_dirname (:obj:`str`, optional): directory which contains the ``.coverage`` file
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls server
    """
    # don't upload if there is no coverage file
    if not os.path.isfile(os.path.join(coverage_dirname, '.coverage')):
        warnings.warn('No coverage file exists to upload to Coveralls', UserWarning)
        return

    if self.coveralls_token:
        runner = coveralls.Coveralls(True, repo_token=self.coveralls_token,
                                     service_name='circle-ci', service_job_id=self.build_num)

        def get_coverage():
            # load the merged coverage data file and convert it to Coveralls'
            # report format via coveralls' own reporter
            workman = coverage.coverage(data_file=os.path.join(coverage_dirname, '.coverage'))
            workman.load()
            workman.get_data()
            return coveralls.reporter.CoverallReporter(workman, workman.config).report()

        # NOTE(review): get_coverage() is called eagerly here; the patch simply
        # makes runner.wear() use its precomputed result instead of coveralls'
        # default report discovery — confirm against the coveralls API version in use
        with patch.object(coveralls.Coveralls, 'get_coverage', return_value=get_coverage()):
            runner.wear(dry_run=dry_run)
def upload_coverage_report_to_code_climate(self, coverage_dirname='.', dry_run=False):
    """ Upload coverage report to Code Climate.

    Does nothing (beyond a warning) when no ``.coverage`` file exists, and
    nothing at all when no Code Climate token is configured.

    Args:
        coverage_dirname (:obj:`str`, optional): directory which contains the ``.coverage`` file
        dry_run (:obj:`bool`, optional): if true, don't upload to the Coveralls server

    Raises:
        :obj:`BuildHelperError`: If error uploading code coverage to Code Climate
    """
    coverage_path = os.path.join(coverage_dirname, '.coverage')

    # nothing to upload without a coverage file
    if not os.path.isfile(coverage_path):
        warnings.warn('No coverage file exists to upload to Code Climate', UserWarning)
        return

    if not self.code_climate_token:
        return

    runner = CodeClimateRunner([
        '--token', self.code_climate_token,
        '--file', coverage_path,
    ])
    if not dry_run:
        self.run_method_and_capture_stderr(runner.run)
########################
# Documentation
########################
def create_documentation_template(self, dirname='.'):
    """ Create a Sphinx documentation template for a package.

    Reads the package name from the ``[sphinx-apidocs]`` section of
    ``setup.cfg`` and renders the bundled documentation templates into the
    project's docs directory.

    Args:
        dirname (:obj:`str`, optional): path to package

    Raises:
        :obj:`ValueError`: if no package or more than one package is specified
    """
    config_parser = configparser.ConfigParser()
    config_parser.read(os.path.join(dirname, 'setup.cfg'))
    packages = config_parser.get('sphinx-apidocs', 'packages').strip().split('\n')
    if len(packages) != 1:
        raise ValueError('Sphinx configuration auto-generation only supports 1 package')
    package = packages[0]

    # make sure the docs directory exists
    docs_dirname = os.path.join(dirname, self.proj_docs_dir)
    if not os.path.isdir(docs_dirname):
        os.mkdir(docs_dirname)

    template_names = (
        'conf.py',
        'requirements.txt',
        'conda.environment.yml',
        'spelling_wordlist.txt',
        'index.rst',
        'overview.rst',
        'installation.rst',
        'about.rst',
        'references.rst',
        'references.bib',
    )
    render_context = {
        "package": package,
        'version': self.INITIAL_PACKAGE_VERSION,
        'year': datetime.now().year,
        'package_underline': '=' * len(package),
    }
    for template_name in template_names:
        template_filename = pkg_resources.resource_filename(
            'karr_lab_build_utils', os.path.join('templates', 'docs', template_name))
        with open(template_filename, 'r') as file:
            template = Template(file.read())
        template.stream(**render_context).dump(os.path.join(dirname, self.proj_docs_dir, template_name))
def make_documentation(self, spell_check=False):
    """ Build the HTML documentation with Sphinx and save it to
    `proj_docs_build_html_dir`; optionally run the Sphinx spelling builder.

    Args:
        spell_check (:obj:`bool`): if :obj:`True`, run spell checking

    Raises:
        :obj:`BuildHelperError`: If project name not set
    """
    # create the static assets directory if it doesn't exist yet
    if not os.path.isdir(self.proj_docs_static_dir):
        os.mkdir(self.proj_docs_static_dir)

    # build the HTML documentation
    html_args = [self.proj_docs_dir, self.proj_docs_build_html_dir]
    self.run_method_and_capture_stderr(sphinx_build, html_args)

    # optionally run the spelling checker builder
    if spell_check:
        spelling_args = [
            '-b', 'spelling',
            '-d', self.proj_docs_build_doctrees_dir,
            self.proj_docs_dir,
            self.proj_docs_build_spelling_dir,
        ]
        self.run_method_and_capture_stderr(sphinx_build, spelling_args)
def compile_downstream_dependencies(self, dirname='.', packages_parent_dir='..', downstream_dependencies_filename=None):
    """ Compile the downstream dependencies of a package and save them to
    :obj:`downstream_dependencies_filename`.

    Args:
        dirname (:obj:`str`, optional): path to package
        packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages
        downstream_dependencies_filename (:obj:`str`, optional): path to save list of downstream dependencies in YAML format

    Returns:
        :obj:`list` of :obj:`str`: downstream dependencies

    Raises:
        :obj:`BuildHelperError`: if a package has more than one module
    """
    import pkg_utils
    # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
    # :todo: figure out how to fix this

    packages_parent_dir = os.path.abspath(packages_parent_dir)

    # get the name of the current package from its coverage configuration
    parser = configparser.ConfigParser()
    parser.read(os.path.join(dirname, 'setup.cfg'))
    tmp = parser.get('coverage:run', 'source').strip().split('\n')
    if len(tmp) != 1:
        raise BuildHelperError('Package should have only one module')
    this_pkg_name = tmp[0]

    # collect the downstream dependencies by analyzing the requirements files of other packages
    # :todo: support branches
    downstream_dependencies = []
    # use a distinct loop variable so the `dirname` parameter isn't clobbered
    for other_dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
        if os.path.isdir(other_dirname) and os.path.isfile(os.path.join(other_dirname, '.circleci/config.yml')):
            other_pkg_name = other_dirname[len(packages_parent_dir) + 1:]
            install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
                other_dirname, include_extras=False, include_specs=False, include_markers=False)
            # `.get` guards against packages that don't define an 'all' extra
            if this_pkg_name in install_requires or this_pkg_name in extras_require.get('all', []):
                downstream_dependencies.append(other_pkg_name)

    # save the downstream dependencies to a file
    if downstream_dependencies_filename:
        with open(downstream_dependencies_filename, 'w') as file:
            yaml.dump(downstream_dependencies, file, default_flow_style=False)

    # return the downstream dependencies
    return downstream_dependencies
def are_package_dependencies_acyclic(self, packages_parent_dir='..'):
    """ Check if the package dependencies are acyclic so they are supported by CircleCI.

    Args:
        packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages

    Returns:
        :obj:`bool`: :obj:`True` if the package dependencies are acyclic
    """
    graph = networkx.DiGraph()

    for dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
        if os.path.isdir(dirname) and os.path.isfile(os.path.join(dirname, '.circleci/config.yml')):
            # get package name
            pkg = dirname[len(packages_parent_dir) + 1:]

            # create node for package
            graph.add_node(pkg)

            # create edges for dependencies
            dep_filename = os.path.join(dirname, '.circleci/downstream_dependencies.yml')
            if os.path.isfile(dep_filename):
                with open(dep_filename, 'r') as file:
                    # safe_load: the dependency file is plain data; full yaml.load
                    # can execute arbitrary constructors
                    deps = yaml.safe_load(file)
                for other_pkg in deps:
                    graph.add_edge(pkg, other_pkg)

    try:
        networkx.algorithms.cycles.find_cycle(graph)
        return False
    except networkx.NetworkXNoCycle:
        return True
def visualize_package_dependencies(self, packages_parent_dir='..', out_filename='../package_dependencies.pdf'):
    """ Visualize downstream package dependencies as a graph.

    Args:
        packages_parent_dir (:obj:`str`, optional): path to the parent directory of the packages
        out_filename (:obj:`str`, optional): path to save visualization
    """
    # avoid shadowing the builtin `format`
    basename, ext = os.path.splitext(out_filename)
    dot = graphviz.Digraph(format=ext[1:])

    for dirname in glob.glob(os.path.join(packages_parent_dir, '*')):
        if os.path.isdir(dirname) and os.path.isfile(os.path.join(dirname, '.circleci/config.yml')):
            # get package name
            pkg = dirname[len(packages_parent_dir) + 1:]

            # create node for package
            dot.node(pkg, pkg)

            # create edges for dependencies
            dep_filename = os.path.join(dirname, '.circleci/downstream_dependencies.yml')
            if os.path.isfile(dep_filename):
                with open(dep_filename, 'r') as file:
                    # safe_load: dependency files are plain data
                    deps = yaml.safe_load(file)
                for other_pkg in deps:
                    dot.edge(pkg, other_pkg)

    dot.render(filename=basename, cleanup=True)
def trigger_tests_of_downstream_dependencies(self, downstream_dependencies_filename='.circleci/downstream_dependencies.yml',
                                             dry_run=False):
    """ Trigger CircleCI to test downstream dependencies listed in :obj:`downstream_dependencies_filename`.

    Builds are only triggered when this build's tests passed, and duplicate
    triggers from the same upstream build/commit are suppressed to avoid
    re-building the same project and infinite trigger loops.

    Args:
        downstream_dependencies_filename (:obj:`str`, optional): path to YAML file which contains a list of downstream dependencies
        dry_run (:obj:`bool`, optional): if true, don't trigger any CircleCI builds

    Returns:
        :obj:`list` of :obj:`str`: names of triggered packages

    :todo: support branches
    """
    # stop if this is a dry run
    if dry_run:
        return []

    # stop if the tests didn't pass
    test_results = self.get_test_results()
    if test_results.get_num_errors() > 0 or test_results.get_num_failures() > 0:
        return []

    # read downstream dependencies; safe_load because the file is plain data
    with open(downstream_dependencies_filename, 'r') as file:
        packages = yaml.safe_load(file)

    # stop if there are no downstream dependencies
    if not packages:
        return []

    # determine the build which originated this trigger cascade
    upstream_repo_name = os.getenv('UPSTREAM_REPONAME', '')
    upstream_build_num = os.getenv('UPSTREAM_BUILD_NUM', '0')
    if not upstream_repo_name:
        upstream_repo_name = self.repo_name
        upstream_build_num = str(self.build_num)

    result = self.run_circleci_api('/' + str(upstream_build_num), repo_name=upstream_repo_name)
    upstream_build_time = dateutil.parser.parse(result['all_commit_details'][0]['committer_date'])

    triggered_packages = []
    for package in packages:
        branch = 'master'

        # get summary of recent builds
        builds = self.run_circleci_api('', repo_name=package)

        # don't trigger build if a build has already been triggered from the same upstream build
        # this prevents building the same project multiple times, including infinite looping
        already_queued = False
        for build in builds:
            # don't trigger a build if this is the same package which triggered the cascade
            if package == upstream_repo_name and \
                    str(build['build_num']) == upstream_build_num and \
                    build['build_num'] != self.build_num:
                already_queued = True
                break

            # don't trigger a build if the package has already been triggered from the same upstream commit
            build_parameters = build['build_parameters']
            if build_parameters and 'UPSTREAM_REPONAME' in build_parameters and \
                    build_parameters['UPSTREAM_REPONAME'] == upstream_repo_name and \
                    build_parameters['UPSTREAM_BUILD_NUM'] == upstream_build_num:
                already_queued = True
                break

            # don't trigger a build if the package has already been more recently tested than the commit time
            build_start_time = build['start_time']
            if build_start_time is None or dateutil.parser.parse(build['start_time']) > upstream_build_time:
                already_queued = True
                break

        if already_queued:
            continue

        # trigger build
        self.run_circleci_api('/tree/{}'.format(branch), method='post', repo_name=package, data={
            'build_parameters': {
                'UPSTREAM_REPONAME': upstream_repo_name,
                'UPSTREAM_BUILD_NUM': upstream_build_num,
            }
        })
        triggered_packages.append(package)

    return triggered_packages
def get_version(self):
    """ Get the version of this package, annotated with the running Python version.

    Returns:
        :obj:`str`: the version
    """
    major, minor, patch = sys.version_info[:3]
    return '{} (Python {}.{}.{})'.format(karr_lab_build_utils.__version__, major, minor, patch)
@staticmethod
def get_python_version():
    """ Get the version of the running Python interpreter.

    Returns:
        :obj:`str`: the Python version as ``major.minor.patch``
    """
    major, minor, patch = sys.version_info[:3]
    return '{}.{}.{}'.format(major, minor, patch)
def run_method_and_capture_stdout(self, func, *args, **kwargs):
    """ Run a method that returns a numerical error value; exit the process if
    the return value is non-zero, otherwise return the captured stdout.

    Args:
        func (:obj:`function`): function to run
        *args (:obj:`list`): arguments to :obj:`func`
        **kwargs (:obj:`dict`): keyword arguments to obj:`func`

    Returns:
        :obj:`str`: stdout
    """
    # use the standard library instead of the third-party `abduct` package
    import contextlib
    import io

    stdout = io.StringIO()
    stderr = io.StringIO()
    with contextlib.redirect_stdout(stdout), contextlib.redirect_stderr(stderr):
        result = func(*args, **kwargs)
    out_msg = stdout.getvalue()
    err_msg = stderr.getvalue()
    if result != 0:
        # surface the captured stderr before aborting
        sys.stderr.write(err_msg)
        sys.stderr.flush()
        sys.exit(1)
    return out_msg
def run_method_and_capture_stderr(self, func, *args, **kwargs):
    """ Run a method that returns a numerical error value; exit the process if
    the return value is non-zero.

    Args:
        func (:obj:`function`): function to run
        *args (:obj:`list`): arguments to :obj:`func`
        **kwargs (:obj:`dict`): keyword arguments to obj:`func`
    """
    # use the standard library instead of the third-party `abduct` package
    import contextlib
    import io

    stderr = io.StringIO()
    with contextlib.redirect_stderr(stderr):
        result = func(*args, **kwargs)
    err_msg = stderr.getvalue()
    if result != 0:
        # surface the captured stderr before aborting
        sys.stderr.write(err_msg)
        sys.stderr.flush()
        sys.exit(1)
def analyze_package(self, package_name, messages=None):
    """ Perform static analyses of a package using Pylint.

    The default checks identify:

    * Unused imported modules, classes, functions, and variables
    * Reimported modules, classes, functions, and variables
    * Wild card imports outside of __init__.py
    * Duplicate arguments and keys
    * Missing requirements

    Args:
        package_name (:obj:`str`): name of the package to analyze
        messages (:obj:`list` of :obj:`str`): list of Pylint checks to perform
    """
    if messages is None:
        messages = [
            # variables
            'W0611',  # unused-import
            'W0614',  # unused-wildcard-import
            'W0613',  # unused-argument
            'W0612',  # unused-variable

            # imports
            'W0404',  # reimported
            'W0401',  # wildcard-import

            # similarities
            'E0108',  # duplicate-argument-name
            'W0109',  # duplicate-key
        ]

    # disable everything, then enable only the selected checks; suppress reports
    lint_options = [
        '--disable=all',
        '--enable=' + ','.join(messages),
        '--reports=n',
        '--score=n',
    ]

    # TODO: debug, does not work:
    epylint.lint(package_name, lint_options)
def find_missing_requirements(self, package_name, dirname='.', ignore_files=None):
    """ Find requirements that the code imports but that are not declared as
    dependencies of the package.

    Args:
        package_name (:obj:`str`): name of the package to analyze
        dirname (:obj:`str`, optional): path to package
        ignore_files (:obj:`list`, optional): files to ignore

    Returns:
        :obj:`list`: list of missing dependencies and their occurences in the code
    """
    import pkg_utils
    # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
    # :todo: figure out how to fix this

    # configure pip_check_reqs: scan the package sources, optionally ignoring files
    options = attrdict.AttrDict()
    options.paths = [package_name]
    options.ignore_files = pip_check_reqs.common.ignorer(ignore_files or [])
    options.ignore_mods = pip_check_reqs.common.ignorer([])
    options.verbose = False
    options.debug = False
    options.version = False
    # silence pip_check_reqs' own logging below ERROR level
    pip_check_reqs.find_missing_reqs.log.setLevel(logging.ERROR)
    missing = pip_check_reqs.find_missing_reqs.find_missing_reqs(options)

    # filter out optional dependencies
    install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
        dirname, include_extras=False, include_specs=False, include_markers=False)
    # NOTE(review): `all_deps` aliases `install_requires`, so the `+=` below
    # mutates the list returned by pkg_utils.get_dependencies — verify that
    # callers don't rely on it being unchanged
    all_deps = install_requires
    for option, opt_deps in extras_require.items():
        if option not in ['all', 'tests', 'docs']:
            all_deps += opt_deps
    # compare using canonical (underscore) names
    missing = list(filter(lambda m: m[0].replace('-', '_') not in all_deps, missing))

    return missing
def find_unused_requirements(self, package_name, dirname='.', ignore_files=None):
    """ Find declared requirements that the package's code never imports.

    Args:
        package_name (:obj:`str`): name of the package to analyze
        dirname (:obj:`str`, optional): path to package
        ignore_files (:obj:`list`, optional): files to ignore

    Returns:
        :obj:`list`: name of the unused dependencies
    """
    import pkg_utils
    # pkg_utils is imported locally so that we can use karr_lab_build_utils to properly calculate its coverage;
    # :todo: figure out how to fix this

    # configure pip_check_reqs: scan the package sources, optionally ignoring files
    options = attrdict.AttrDict()
    options.paths = [package_name]
    options.ignore_files = pip_check_reqs.common.ignorer(ignore_files or [])
    options.ignore_mods = pip_check_reqs.common.ignorer([])
    options.ignore_reqs = pip_check_reqs.common.ignorer([])
    options.verbose = False
    options.debug = False
    options.version = False
    # silence pip_check_reqs' own logging below ERROR level
    pip_check_reqs.find_extra_reqs.log.setLevel(logging.ERROR)

    # get all requirements (direct plus non-standard extras)
    install_requires, extras_require, _, _ = pkg_utils.get_dependencies(
        dirname, include_extras=False, include_specs=False, include_markers=False)
    all_deps = set(install_requires)
    for option, opt_deps in extras_require.items():
        if option not in ['all', 'tests', 'docs']:
            all_deps = all_deps | set(opt_deps)
    # pip_check_reqs compares dashed distribution names
    all_deps = [dep.replace('_', '-') for dep in all_deps]

    # find unused requirements
    # NOTE(review): patching find_required_modules makes pip_check_reqs treat our
    # computed dependency list as the declared requirements — confirm against the
    # installed pip_check_reqs version's internals
    with mock.patch('pip_check_reqs.common.find_required_modules', return_value=all_deps):
        unuseds = pip_check_reqs.find_extra_reqs.find_extra_reqs(options)

    # correct for editablly-installed packages: drop anything actually imported
    useds = pip_check_reqs.common.find_imported_modules(options).keys()
    useds = [used.partition('.')[0].replace('_', '-') for used in useds]
    unuseds = list(set(unuseds).difference(set(useds)))

    # return canonical names
    unuseds = [unused.replace('-', '_') for unused in unuseds]

    return unuseds
def upload_package_to_pypi(self, dirname='.', repository='pypi', pypi_config_filename='~/.pypirc'):
    """ Build source and wheel distributions of a package and upload them to PyPI.

    Args:
        dirname (:obj:`str`, optional): path to package to upload
        repository (:obj:`str`, optional): repository to upload code to (section in .pypirc or a full URL)
        pypi_config_filename (:obj:`str`, optional): path to .pypirc
    """
    build_dirname = os.path.join(dirname, 'build')
    dist_dirname = os.path.join(dirname, 'dist')

    # remove stale build artifacts
    for artifact_dirname in (build_dirname, dist_dirname):
        if os.path.isdir(artifact_dirname):
            shutil.rmtree(artifact_dirname)

    # build source and wheel distributions
    subprocess.check_call([sys.executable, os.path.join(os.path.abspath(dirname), 'setup.py'), 'sdist', 'bdist_wheel'],
                          cwd=dirname)

    # assemble twine options and upload everything in dist/
    options = []
    if repository:
        options += ['--repository', repository]
    if pypi_config_filename:
        options += ['--config-file', os.path.abspath(os.path.expanduser(pypi_config_filename))]
    uploads = glob.glob(os.path.join(dirname, 'dist', '*'))
    twine.commands.upload.main(options + uploads)

    # cleanup
    shutil.rmtree(build_dirname)
    shutil.rmtree(dist_dirname)
def run_circleci_api(self, command, method='get', repo_type=None, repo_owner=None, repo_name=None,
                     data=None, circleci_api_token=None):
    """ Call the CircleCI REST API and return the decoded JSON response.

    Arguments left as :obj:`None` default to the values stored on this instance.

    Args:
        command (:obj:`str`): API command
        method (:obj:`str`): type of HTTP request (get, post, delete)
        repo_type (:obj:`str`, optional): repository type (e.g., github)
        repo_owner (:obj:`str`, optional): repository owner
        repo_name (:obj:`str`, optional): repository name
        data (:obj:`str`, optional): data
        circleci_api_token (:obj:`str`, optional): CircleCI API token

    Returns:
        :obj:`dict`: CircleCI result

    Raises:
        :obj:`requests.exceptions.HTTPError`: if the HTTP request to CircleCI does not succeed
    """
    # fall back to the instance's defaults for any unspecified argument
    repo_type = repo_type or self.repo_type
    repo_owner = repo_owner or self.repo_owner
    repo_name = repo_name or self.repo_name
    circleci_api_token = circleci_api_token or self.circleci_api_token

    url = '{}/project/{}/{}/{}{}?circle-token={}'.format(
        self.CIRCLE_API_ENDPOINT, repo_type, repo_owner, repo_name, command, circleci_api_token)
    response = getattr(requests, method)(url, json=data)
    response.raise_for_status()
    return response.json()
class TestResults(object):
    """ A collection of unit test case results.

    Attributes:
        cases (:obj:`list` of :obj:`TestCaseResult`): test case results
    """

    def __init__(self):
        self.cases = []

    def _count_cases_of_type(self, result_type):
        """ Count the cases whose result is :obj:`result_type` """
        return sum(1 for case in self.cases if case.type == result_type)

    @property
    def num_tests(self):
        return self.get_num_tests()

    @property
    def num_passed(self):
        return self.get_num_passed()

    @property
    def num_skipped(self):
        return self.get_num_skipped()

    @property
    def num_errors(self):
        return self.get_num_errors()

    @property
    def num_failures(self):
        return self.get_num_failures()

    def get_num_tests(self):
        """ Get the number of tests

        Returns:
            :obj:`int`: number of tests
        """
        return len(self.cases)

    def get_num_passed(self):
        """ Get the number of tests that passed

        Returns:
            :obj:`int`: number of tests that passed
        """
        return self._count_cases_of_type(TestCaseResultType.passed)

    def get_num_skipped(self):
        """ Get the number of skipped tests

        Returns:
            :obj:`int`: number of skipped tests
        """
        return self._count_cases_of_type(TestCaseResultType.skipped)

    def get_num_errors(self):
        """ Get the number of tests with errors

        Returns:
            :obj:`int`: number of tests with errors
        """
        return self._count_cases_of_type(TestCaseResultType.error)

    def get_num_failures(self):
        """ Get the number of tests with failures

        Returns:
            :obj:`int`: number of tests with failures
        """
        return self._count_cases_of_type(TestCaseResultType.failure)
class TestCaseResult(object):
    """ The result of a single test case.

    Attributes:
        classname (obj:`str`): name of the class of the test case
        name (obj:`str`): name of the test case
        filename (obj:`str`): file where the test was defined
        line (obj:`int`): line where the test was defined
        python_version (obj:`str`): python version which ran the test
        type (obj:`TestCaseResultType`): type of the result (pass, skip, error, failure)
        subtype (obj:`str`): detailed type of the result
        message (obj:`str`): message from the result
        details (obj:`str`): detailed message from the result
        time (obj:`float`): duration of the time in seconds
        stdout (obj:`str`): standard output
        stderr (obj:`str`): standard error
    """

    def __init__(self):
        # all attributes start unset; they are filled in while parsing reports
        for attr in ('classname', 'name', 'filename', 'line', 'python_version',
                     'time', 'stdout', 'stderr', 'type', 'subtype', 'message', 'details'):
            setattr(self, attr, None)
class TestCaseResultType(enum.Enum):
    """ Type of test case result """
    passed = 0   # test ran and succeeded
    skipped = 1  # test was not run
    error = 2    # test raised an unexpected exception
    failure = 3  # test ran and an assertion failed
class BuildHelperError(Exception):
    """ Base exception for errors raised by :obj:`BuildHelper` operations """
|
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
import logging
import asyncio
from transitions import Machine, MachineError
from hbmqtt.session import Session
from hbmqtt.mqtt.protocol.broker_handler import BrokerProtocolHandler
from hbmqtt.mqtt.connect import ConnectPacket
from hbmqtt.mqtt.connack import ConnackPacket, ReturnCode
from hbmqtt.errors import HBMQTTException
from hbmqtt.utils import format_client_message, gen_client_id
# default broker configuration; entries may be overridden by the `config`
# argument of `Broker.__init__`
_defaults = {
    'bind-address': 'localhost',  # network interface to listen on
    'bind-port': 1883,  # standard (non-TLS) MQTT port
    'timeout-disconnect-delay': 10  # grace period (seconds) added to a client's keep-alive
}
class BrokerException(Exception):
    """ Exception raised for broker errors.

    Inherits from :class:`Exception` rather than :class:`BaseException` so that
    generic ``except Exception`` handlers can catch broker errors;
    ``BaseException`` is reserved for interpreter exits and interrupts.
    """
    pass
class Subscription:
    """ Associates a client session with the QoS level it subscribed at.

    Attributes:
        session: the subscribing client session
        qos: quality-of-service level granted for the subscription
    """

    def __init__(self, session, qos):
        self.session = session
        self.qos = qos
class RetainedApplicationMessage:
    """ A message retained by the broker for delivery to future subscribers.

    Attributes:
        source_session: session that published the message
        topic: topic the message was published on
        data: message payload
        qos: quality-of-service level of the publication (may be ``None``)
    """

    def __init__(self, source_session, topic, data, qos=None):
        self.source_session = source_session
        self.topic = topic
        self.data = data
        self.qos = qos
class Broker:
    """ MQTT 3.1.1 broker: accepts client connections and dispatches messages. """

    # lifecycle states for the broker's state machine; the original list
    # contained 'stopped' twice, which is redundant for transitions.Machine
    states = ['new', 'starting', 'started', 'not_started', 'stopping', 'stopped', 'not_stopped']
def __init__(self, config=None, loop=None):
    """ Initialize the broker.

    Args:
        config (:obj:`dict`, optional): configuration overrides merged over the
            module-level defaults
        loop (optional): asyncio event loop to use; defaults to the current loop
    """
    self.logger = logging.getLogger(__name__)
    # copy the defaults so that per-instance updates never mutate the shared
    # module-level `_defaults` dict (the original aliased and mutated it)
    self.config = dict(_defaults)
    if config is not None:
        self.config.update(config)
    if loop is not None:
        self._loop = loop
    else:
        self._loop = asyncio.get_event_loop()
    self._server = None
    self._init_states()
    self._sessions = dict()
    self._subscriptions = dict()
    self._global_retained_messages = dict()
def _init_states(self):
    """ Build the broker's lifecycle state machine and its transitions """
    self.machine = Machine(states=Broker.states, initial='new')
    # (trigger, source, destination) for every allowed lifecycle transition
    transition_specs = (
        ('start', 'new', 'starting'),
        ('starting_fail', 'starting', 'not_started'),
        ('starting_success', 'starting', 'started'),
        ('shutdown', 'started', 'stopping'),
        ('stopping_success', 'stopping', 'stopped'),
        ('stopping_failure', 'stopping', 'not_stopped'),
        ('start', 'stopped', 'starting'),
    )
    for trigger, source, dest in transition_specs:
        self.machine.add_transition(trigger=trigger, source=source, dest=dest)
@asyncio.coroutine
def start(self):
    """ Start the broker: transition the state machine to 'starting' and open
    the listening server socket.

    Raises:
        BrokerException: if the broker cannot be started from its current state
            or the server socket cannot be opened
    """
    try:
        self.machine.start()
        self.logger.debug("Broker starting")
    except MachineError as err:
        self.logger.debug("Invalid method call at this moment: %s" % err)
        raise BrokerException("Broker instance can't be started: %s" % err)

    bind_address = self.config['bind-address']
    bind_port = self.config['bind-port']
    try:
        self._server = yield from asyncio.start_server(self.client_connected,
                                                       bind_address,
                                                       bind_port,
                                                       loop=self._loop)
        self.logger.info("Broker listening on %s:%d" % (bind_address, bind_port))
        self.machine.starting_success()
    except Exception as err:
        self.logger.error("Broker startup failed: %s" % err)
        self.machine.starting_fail()
        raise BrokerException("Broker instance can't be started: %s" % err)
@asyncio.coroutine
def shutdown(self):
    """ Stop the broker: transition the state machine to 'stopping', close the
    server socket and wait for it to finish closing.

    Raises:
        BrokerException: if the broker cannot be stopped from its current state
    """
    try:
        self.machine.shutdown()
    except MachineError as err:
        self.logger.debug("Invalid method call at this moment: %s" % err)
        raise BrokerException("Broker instance can't be stopped: %s" % err)

    self._server.close()
    self.logger.debug("Broker closing")
    yield from self._server.wait_closed()
    self.logger.info("Broker closed")
    self.machine.stopping_success()
@asyncio.coroutine
def client_connected(self, reader, writer):
    """ Serve one MQTT client for the lifetime of its connection.

    Reads and validates the initial CONNECT packet, builds or restores the
    client session, replies with CONNACK, then loops dispatching subscription,
    unsubscription, message-delivery and disconnection events until the client
    goes away.

    Args:
        reader (:obj:`asyncio.StreamReader`): stream of incoming client packets
        writer (:obj:`asyncio.StreamWriter`): stream of outgoing packets
    """
    extra_info = writer.get_extra_info('peername')
    remote_address = extra_info[0]
    remote_port = extra_info[1]
    self.logger.debug("Connection from %s:%d" % (remote_address, remote_port))

    # Wait for first packet and expect a CONNECT
    connect = None
    try:
        connect = yield from ConnectPacket.from_stream(reader)
        self.logger.debug(" <-in-- " + repr(connect))
        self.check_connect(connect)
    except HBMQTTException as exc:
        self.logger.warn("[MQTT-3.1.0-1] %s: Can't read first packet an CONNECT: %s" %
                         (format_client_message(address=remote_address, port=remote_port), exc))
        writer.close()
        self.logger.debug("Connection closed")
        return
    except BrokerException as be:
        self.logger.error('Invalid connection from %s : %s' %
                          (format_client_message(address=remote_address, port=remote_port), be))
        writer.close()
        self.logger.debug("Connection closed")
        return

    connack = None
    if connect.variable_header.proto_level != 4:
        # only MQTT 3.1.1 supported
        # fixed: this error path read `.protocol_level`, which does not exist
        # on the variable header (the attribute checked above is `proto_level`)
        self.logger.error('Invalid protocol from %s: %d' %
                          (format_client_message(address=remote_address, port=remote_port),
                           connect.variable_header.proto_level))
        connack = ConnackPacket.build(0, ReturnCode.UNACCEPTABLE_PROTOCOL_VERSION)  # [MQTT-3.2.2-4] session_parent=0
    elif connect.variable_header.username_flag and connect.payload.username is None:
        self.logger.error('Invalid username from %s' %
                          (format_client_message(address=remote_address, port=remote_port)))
        connack = ConnackPacket.build(0, ReturnCode.BAD_USERNAME_PASSWORD)  # [MQTT-3.2.2-4] session_parent=0
    elif connect.variable_header.password_flag and connect.payload.password is None:
        self.logger.error('Invalid password %s' % (format_client_message(address=remote_address, port=remote_port)))
        connack = ConnackPacket.build(0, ReturnCode.BAD_USERNAME_PASSWORD)  # [MQTT-3.2.2-4] session_parent=0
    elif connect.variable_header.clean_session_flag == False and connect.payload.client_id is None:
        self.logger.error('[MQTT-3.1.3-8] [MQTT-3.1.3-9] %s: No client Id provided (cleansession=0)' %
                          format_client_message(address=remote_address, port=remote_port))
        connack = ConnackPacket.build(0, ReturnCode.IDENTIFIER_REJECTED)
    if connack is not None:
        # refuse the connection: send the error CONNACK and close the link
        # (fixed: the debug line previously ran before this `if`, logging
        # "-out-> None" for every accepted connection)
        self.logger.debug(" -out-> " + repr(connack))
        yield from connack.to_stream(writer)
        writer.close()
        return

    client_session = None
    self.logger.debug("Clean session={0}".format(connect.variable_header.clean_session_flag))
    self.logger.debug("known sessions={0}".format(self._sessions))
    if connect.variable_header.clean_session_flag:
        client_id = connect.payload.client_id
        if client_id is not None and client_id in self._sessions:
            # Delete existing session
            del self._sessions[client_id]
        client_session = Session()
        client_session.parent = 0
        # NOTE(review): when the client supplied no id this entry is keyed by
        # None, so anonymous sessions overwrite each other — verify intent
        self._sessions[client_id] = client_session
    else:
        # Get session from cache
        client_id = connect.payload.client_id
        if client_id in self._sessions:
            self.logger.debug("Found old session %s" % repr(self._sessions[client_id]))
            client_session = self._sessions[client_id]
            client_session.parent = 1
        else:
            client_session = Session()
            client_session.parent = 0

    # copy the connection parameters from the CONNECT packet onto the session
    client_session.remote_address = remote_address
    client_session.remote_port = remote_port
    client_session.clean_session = connect.variable_header.clean_session_flag
    client_session.will_flag = connect.variable_header.will_flag
    client_session.will_retain = connect.variable_header.will_retain_flag
    client_session.will_qos = connect.variable_header.will_qos
    client_session.will_topic = connect.payload.will_topic
    client_session.will_message = connect.payload.will_message
    client_session.username = connect.payload.username
    client_session.password = connect.payload.password
    client_session.client_id = connect.payload.client_id
    if client_session.client_id is None:
        # Generate a client ID; done *after* copying the payload so the
        # generated id is not clobbered (previously the generated id was
        # overwritten by the payload's absent client_id)
        client_session.client_id = gen_client_id()
    if connect.variable_header.keep_alive > 0:
        # allow a configurable grace period beyond the negotiated keep-alive
        client_session.keep_alive = connect.variable_header.keep_alive + self.config['timeout-disconnect-delay']
    else:
        client_session.keep_alive = 0
    client_session.reader = reader
    client_session.writer = writer

    if self.authenticate(client_session):
        connack = ConnackPacket.build(client_session.parent, ReturnCode.CONNECTION_ACCEPTED)
        self.logger.info('%s : connection accepted' % format_client_message(session=client_session))
        self.logger.debug(" -out-> " + repr(connack))
        yield from connack.to_stream(writer)
    else:
        connack = ConnackPacket.build(client_session.parent, ReturnCode.NOT_AUTHORIZED)
        self.logger.info('%s : connection refused' % format_client_message(session=client_session))
        self.logger.debug(" -out-> " + repr(connack))
        yield from connack.to_stream(writer)
        writer.close()
        return

    client_session.machine.connect()
    handler = BrokerProtocolHandler(self._loop)
    handler.attach_to_session(client_session)
    self.logger.debug("%s Start messages handling" % client_session.client_id)
    yield from handler.start()
    yield from self.publish_session_retained_messages(client_session)
    self.logger.debug("%s Wait for disconnect" % client_session.client_id)

    connected = True
    wait_disconnect = asyncio.Task(handler.wait_disconnect())
    wait_subscription = asyncio.Task(handler.get_next_pending_subscription())
    wait_unsubscription = asyncio.Task(handler.get_next_pending_unsubscription())
    wait_deliver = asyncio.Task(handler.mqtt_deliver_next_message())
    disconnect_event = False
    while connected:
        done, pending = yield from asyncio.wait(
            [wait_disconnect, wait_subscription, wait_unsubscription, wait_deliver],
            return_when=asyncio.FIRST_COMPLETED)
        if wait_disconnect in done:
            if not disconnect_event:
                result = wait_disconnect.result()
                self.logger.debug("%s Result from wait_diconnect: %s" % (client_session.client_id, result))
                if result is None:
                    self.logger.debug("Will flag: %s" % client_session.will_flag)
                    # Connection closed abnormally: send the will message
                    if client_session.will_flag:
                        self.logger.debug("Client %s disconnected abnormally, sending will message" %
                                          format_client_message(session=client_session))
                        yield from self.broadcast_application_message(
                            client_session, client_session.will_topic,
                            client_session.will_message,
                            client_session.will_qos)
                        if client_session.will_retain:
                            self.retain_message(client_session,
                                                client_session.will_topic,
                                                client_session.will_message,
                                                client_session.will_qos)
                disconnect_event = True
            # fixed: `wait_deliver.done` was missing its parentheses, so the
            # bound method object was always truthy and the loop never ended here
            if not (wait_unsubscription.done() or wait_subscription.done() or wait_deliver.done()):
                connected = False
        if wait_unsubscription in done:
            self.logger.debug("%s handling unsubscription" % client_session.client_id)
            unsubscription = wait_unsubscription.result()
            for topic in unsubscription['topics']:
                self.del_subscription(topic, client_session)
            yield from handler.mqtt_acknowledge_unsubscription(unsubscription['packet_id'])
            wait_unsubscription = asyncio.Task(handler.get_next_pending_unsubscription())
        if wait_subscription in done:
            self.logger.debug("%s handling subscription" % client_session.client_id)
            subscriptions = wait_subscription.result()
            return_codes = []
            for subscription in subscriptions['topics']:
                return_codes.append(self.add_subscription(subscription, client_session))
            yield from handler.mqtt_acknowledge_subscription(subscriptions['packet_id'], return_codes)
            # deliver retained messages for each successfully-granted subscription
            for index, subscription in enumerate(subscriptions['topics']):
                if return_codes[index] != 0x80:
                    yield from self.publish_retained_messages_for_subscription(subscription, client_session)
            wait_subscription = asyncio.Task(handler.get_next_pending_subscription())
            self.logger.debug(repr(self._subscriptions))
        if wait_deliver in done:
            self.logger.debug("%s handling message delivery" % client_session.client_id)
            publish_packet = wait_deliver.result().packet
            topic_name = publish_packet.variable_header.topic_name
            data = publish_packet.payload.data
            yield from self.broadcast_application_message(client_session, topic_name, data)
            if publish_packet.retain_flag:
                self.retain_message(client_session, topic_name, data)
            wait_deliver = asyncio.Task(handler.mqtt_deliver_next_message())

    # the client is gone: cancel the outstanding event tasks
    wait_subscription.cancel()
    wait_unsubscription.cancel()
    wait_deliver.cancel()

    self.logger.debug("%s Client disconnecting" % client_session.client_id)
    try:
        yield from handler.stop()
    except Exception as e:
        self.logger.error(e)
    finally:
        handler.detach_from_session()
        handler = None
    client_session.machine.disconnect()
    writer.close()
    self.logger.debug("%s Session disconnected" % client_session.client_id)
    @asyncio.coroutine
    def check_connect(self, connect: ConnectPacket):
        """Validate an incoming CONNECT packet against MQTT 3.1.1 rules.

        :raises BrokerException: when the packet violates the specification.
        NOTE(review): declared as a coroutine, so callers must ``yield from``
        it for the checks to actually run — verify the call sites.
        """
        if connect.payload.client_id is None:
            raise BrokerException('[[MQTT-3.1.3-3]] : Client identifier must be present')
        if connect.variable_header.will_flag:
            # A will flag requires both the will topic and the will message.
            if connect.payload.will_topic is None or connect.payload.will_message is None:
                raise BrokerException('will flag set, but will topic/message not present in payload')
        if connect.variable_header.reserved_flag:
            raise BrokerException('[MQTT-3.1.2-3] CONNECT reserved flag must be set to 0')
def authenticate(self, session: Session):
# TODO : Handle client authentication here
return True
    def retain_message(self, source_session, topic_name, data, qos=None):
        """Store (or clear) the broker-wide retained message for *topic_name*.

        A non-empty payload replaces the topic's retained message; an empty or
        None payload clears it ([MQTT-3.3.1-10]).
        """
        if data is not None and data != b'':
            # If retained flag set, store the message for further subscriptions
            self.logger.debug("%s Retaining message on topic %s" % (source_session.client_id, topic_name))
            retained_message = RetainedApplicationMessage(source_session, topic_name, data, qos)
            self._global_retained_messages[topic_name] = retained_message
        else:
            # [MQTT-3.3.1-10]
            self.logger.debug("%s Clear retained messages for topic '%s'" % (source_session.client_id, topic_name))
            # NOTE(review): raises KeyError when no retained message exists for
            # this topic — consider dict.pop(topic_name, None).
            del self._global_retained_messages[topic_name]
    def add_subscription(self, subscription, session):
        """Register *session* on subscription['filter'] and return the granted QoS.

        Returns 0x80 (SUBACK failure code) for invalid filters or a malformed
        subscription dict.
        """
        import re
        #wildcard_pattern = re.compile('(/.+?\+)|(/\+.+?)|(/.+?\+.+?)')
        # NOTE(review): this lazy, all-optional pattern matches ANY string that
        # contains '+', so the [MQTT-4.7.1-3] check below never rejects
        # anything — it should verify each '/'-separated level is exactly '+'.
        wildcard_pattern = re.compile('.*?/?\+/?.*?')
        try:
            a_filter = subscription['filter']
            if '#' in a_filter and not a_filter.endswith('#'):
                # [MQTT-4.7.1-2] Wildcard character '#' is only allowed as last character in filter
                return 0x80
            if '+' in a_filter and not wildcard_pattern.match(a_filter):
                # [MQTT-4.7.1-3] + wildcard character must occupy entire level
                return 0x80
            qos = subscription['qos']
            # Cap the granted QoS at the broker-configured maximum, if any.
            if 'max-qos' in self.config and qos > self.config['max-qos']:
                qos = self.config['max-qos']
            if a_filter not in self._subscriptions:
                self._subscriptions[a_filter] = []
            already_subscribed = next(
                (s for s in self._subscriptions[a_filter] if s.session.client_id == session.client_id), None)
            if not already_subscribed:
                self._subscriptions[a_filter].append(Subscription(session, qos))
            else:
                self.logger.debug("Client %s has already subscribed to %s" % (format_client_message(session=session), a_filter))
            return qos
        except KeyError:
            # Missing 'filter'/'qos' key in the subscription dict.
            return 0x80
def del_subscription(self, a_filter, session):
try:
subscriptions = self._subscriptions[a_filter]
for index, subscription in enumerate(subscriptions):
if subscription.session.client_id == session.client_id:
self.logger.debug("Removing subscription on topic '%s' for client %s" %
(a_filter, format_client_message(session=session)))
subscriptions.pop(index)
except KeyError:
# Unsubscribe topic not found in current subscribed topics
pass
    def matches(self, topic, filter):
        """Return True when *topic* matches the MQTT topic *filter* ('#'/'+' wildcards).

        NOTE(review): the translated regex is only anchored at the start, so
        filter 'a/b' also matches topic 'a/bc' (prefix match), and regex
        metacharacters in *filter* are not escaped. 'filter' also shadows the
        builtin. Consider a '/'-level based comparison instead.
        """
        import re
        match_pattern = re.compile(filter.replace('#', '.*').replace('+', '[\s\w\d]+'))
        if match_pattern.match(topic):
            return True
        else:
            return False
    @asyncio.coroutine
    def broadcast_application_message(self, source_session, topic, data, force_qos=None):
        """Deliver *data* on *topic* to every matching subscription.

        Connected subscribers get an immediate PUBLISH task; disconnected ones
        get the message queued on their session's retained_messages queue.

        :param force_qos: when not None, overrides each subscription's QoS.
        """
        self.logger.debug("Broadcasting message from %s on topic %s" %
                          (format_client_message(session=source_session), topic)
                          )
        self.logger.debug("Current subscriptions: %s" % repr(self._subscriptions))
        publish_tasks = []
        try:
            for k_filter in self._subscriptions:
                if self.matches(topic, k_filter):
                    subscriptions = self._subscriptions[k_filter]
                    for subscription in subscriptions:
                        target_session = subscription.session
                        qos = subscription.qos
                        if force_qos is not None:
                            qos = force_qos
                        if target_session.machine.state == 'connected':
                            self.logger.debug("broadcasting application message from %s on topic '%s' to %s" %
                                              (format_client_message(session=source_session),
                                               topic, format_client_message(session=target_session)))
                            handler = subscription.session.handler
                            publish_tasks.append(
                                asyncio.Task(handler.mqtt_publish(topic, data, qos, retain=False))
                            )
                        else:
                            self.logger.debug("retaining application message from %s on topic '%s' to client '%s'" %
                                              (format_client_message(session=source_session),
                                               topic, format_client_message(session=target_session)))
                            retained_message = RetainedApplicationMessage(source_session, topic, data, qos)
                            publish_tasks.append(
                                asyncio.Task(target_session.retained_messages.put(retained_message))
                            )
            if len(publish_tasks) > 0:
                # NOTE(review): asyncio.wait() is a coroutine and is not
                # yielded from here, so the publish tasks are never awaited.
                asyncio.wait(publish_tasks)
        except Exception as e:
            self.logger.warn("Message broadcasting failed: %s", e)
        self.logger.debug("End Broadcasting message from %s on topic %s" %
                          (format_client_message(session=source_session), topic)
                          )
    @asyncio.coroutine
    def publish_session_retained_messages(self, session):
        """Flush messages queued for *session* while it was offline."""
        self.logger.debug("Publishing %d messages retained for session %s" %
                          (session.retained_messages.qsize(), format_client_message(session=session))
                          )
        publish_tasks = []
        while not session.retained_messages.empty():
            retained = yield from session.retained_messages.get()
            # NOTE(review): argument order differs from
            # publish_retained_messages_for_subscription() — confirm against
            # mqtt_publish()'s signature.
            publish_tasks.append(asyncio.Task(
                session.handler.mqtt_publish(
                    retained.topic, retained.data, False, retained.qos, True)))
        if len(publish_tasks) > 0:
            # NOTE(review): asyncio.wait() is a coroutine and is not yielded
            # from here, so the publish tasks are never awaited.
            asyncio.wait(publish_tasks)
    @asyncio.coroutine
    def publish_retained_messages_for_subscription(self, subscription, session):
        """Send the globally retained messages matching a new subscription."""
        self.logger.debug("Begin broadcasting messages retained due to subscription on '%s' from %s" %
                          (subscription['filter'], format_client_message(session=session)))
        publish_tasks = []
        for d_topic in self._global_retained_messages:
            self.logger.debug("matching : %s %s" % (d_topic, subscription['filter']))
            if self.matches(d_topic, subscription['filter']):
                self.logger.debug("%s and %s match" % (d_topic, subscription['filter']))
                retained = self._global_retained_messages[d_topic]
                publish_tasks.append(asyncio.Task(
                    session.handler.mqtt_publish(
                        retained.topic, retained.data, subscription['qos'], True)))
        if len(publish_tasks) > 0:
            # NOTE(review): asyncio.wait() is a coroutine and is not yielded
            # from here, so the publish tasks are never awaited.
            asyncio.wait(publish_tasks)
        self.logger.debug("End broadcasting messages retained due to subscription on '%s' from %s" %
                          (subscription['filter'], format_client_message(session=session)))
Fix broker
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
import logging
import asyncio
from transitions import Machine, MachineError
from hbmqtt.session import Session
from hbmqtt.mqtt.protocol.broker_handler import BrokerProtocolHandler
from hbmqtt.mqtt.connect import ConnectPacket
from hbmqtt.mqtt.connack import ConnackPacket, ReturnCode
from hbmqtt.errors import HBMQTTException
from hbmqtt.utils import format_client_message, gen_client_id
# Default broker configuration, overridable via Broker(config=...).
_defaults = {
    'bind-address': 'localhost',
    'bind-port': 1883,  # standard MQTT TCP port
    'timeout-disconnect-delay': 10  # grace added to the client keep-alive; presumably seconds — TODO confirm
}
class BrokerException(Exception):
    """Raised for broker-level protocol or lifecycle errors.

    Derives from Exception (not BaseException) so that generic
    ``except Exception`` handlers and callers can catch it normally;
    BaseException subclasses are reserved for interpreter exits such as
    KeyboardInterrupt/SystemExit.
    """
    pass
class Subscription:
    """Pairing of a client session with the QoS granted for a topic filter."""

    def __init__(self, session, qos):
        # Owning client Session and the QoS level granted on subscribe.
        self.session = session
        self.qos = qos
class RetainedApplicationMessage:
    """An application message kept for later delivery (retained or queued)."""

    def __init__(self, source_session, topic, data, qos=None):
        # Publishing session, target topic, raw payload and the optional QoS
        # to republish with.
        self.source_session = source_session
        self.topic = topic
        self.data = data
        self.qos = qos
class Broker:
states = ['new', 'starting', 'started', 'not_started', 'stopping', 'stopped', 'not_stopped', 'stopped']
def __init__(self, config=None, loop=None):
self.logger = logging.getLogger(__name__)
self.config = _defaults
if config is not None:
self.config.update(config)
if loop is not None:
self._loop = loop
else:
self._loop = asyncio.get_event_loop()
self._server = None
self._init_states()
self._sessions = dict()
self._subscriptions = dict()
self._global_retained_messages = dict()
    def _init_states(self):
        """Create the broker lifecycle state machine (see Broker.states)."""
        self.machine = Machine(states=Broker.states, initial='new')
        self.machine.add_transition(trigger='start', source='new', dest='starting')
        self.machine.add_transition(trigger='starting_fail', source='starting', dest='not_started')
        self.machine.add_transition(trigger='starting_success', source='starting', dest='started')
        self.machine.add_transition(trigger='shutdown', source='started', dest='stopping')
        self.machine.add_transition(trigger='stopping_success', source='stopping', dest='stopped')
        self.machine.add_transition(trigger='stopping_failure', source='stopping', dest='not_stopped')
        # Allow a stopped broker to be restarted.
        self.machine.add_transition(trigger='start', source='stopped', dest='starting')
    @asyncio.coroutine
    def start(self):
        """Open the listening socket and start accepting clients.

        Drives the state machine new/stopped -> starting -> started
        (or -> not_started on failure).

        :raises BrokerException: on an invalid lifecycle state or when the
            server socket cannot be opened.
        """
        try:
            self.machine.start()
            self.logger.debug("Broker starting")
        except MachineError as me:
            # Called from a state with no 'start' transition (e.g. already started).
            self.logger.debug("Invalid method call at this moment: %s" % me)
            raise BrokerException("Broker instance can't be started: %s" % me)
        try:
            self._server = yield from asyncio.start_server(self.client_connected,
                                                           self.config['bind-address'],
                                                           self.config['bind-port'],
                                                           loop=self._loop)
            self.logger.info("Broker listening on %s:%d" % (self.config['bind-address'], self.config['bind-port']))
            self.machine.starting_success()
        except Exception as e:
            self.logger.error("Broker startup failed: %s" % e)
            self.machine.starting_fail()
            raise BrokerException("Broker instance can't be started: %s" % e)
    @asyncio.coroutine
    def shutdown(self):
        """Stop accepting connections and close the server socket.

        :raises BrokerException: when called while the broker is not started.
        """
        try:
            self.machine.shutdown()
        except MachineError as me:
            self.logger.debug("Invalid method call at this moment: %s" % me)
            raise BrokerException("Broker instance can't be stopped: %s" % me)
        self._server.close()
        self.logger.debug("Broker closing")
        yield from self._server.wait_closed()
        self.logger.info("Broker closed")
        self.machine.stopping_success()
@asyncio.coroutine
def client_connected(self, reader, writer):
extra_info = writer.get_extra_info('peername')
remote_address = extra_info[0]
remote_port = extra_info[1]
self.logger.debug("Connection from %s:%d" % (remote_address, remote_port))
# Wait for first packet and expect a CONNECT
connect = None
try:
connect = yield from ConnectPacket.from_stream(reader)
self.logger.debug(" <-in-- " + repr(connect))
self.check_connect(connect)
except HBMQTTException as exc:
self.logger.warn("[MQTT-3.1.0-1] %s: Can't read first packet an CONNECT: %s" %
(format_client_message(address=remote_address, port=remote_port), exc))
writer.close()
self.logger.debug("Connection closed")
return
except BrokerException as be:
self.logger.error('Invalid connection from %s : %s' %
(format_client_message(address=remote_address, port=remote_port), be))
writer.close()
self.logger.debug("Connection closed")
return
connack = None
if connect.variable_header.proto_level != 4:
# only MQTT 3.1.1 supported
self.logger.error('Invalid protocol from %s: %d' %
(format_client_message(address=remote_address, port=remote_port),
connect.variable_header.protocol_level))
connack = ConnackPacket.build(0, ReturnCode.UNACCEPTABLE_PROTOCOL_VERSION) # [MQTT-3.2.2-4] session_parent=0
elif connect.variable_header.username_flag and connect.payload.username is None:
self.logger.error('Invalid username from %s' %
(format_client_message(address=remote_address, port=remote_port)))
connack = ConnackPacket.build(0, ReturnCode.BAD_USERNAME_PASSWORD) # [MQTT-3.2.2-4] session_parent=0
elif connect.variable_header.password_flag and connect.payload.password is None:
self.logger.error('Invalid password %s' % (format_client_message(address=remote_address, port=remote_port)))
connack = ConnackPacket.build(0, ReturnCode.BAD_USERNAME_PASSWORD) # [MQTT-3.2.2-4] session_parent=0
elif connect.variable_header.clean_session_flag == False and connect.payload.client_id is None:
self.logger.error('[MQTT-3.1.3-8] [MQTT-3.1.3-9] %s: No client Id provided (cleansession=0)' %
format_client_message(address=remote_address, port=remote_port))
connack = ConnackPacket.build(0, ReturnCode.IDENTIFIER_REJECTED)
self.logger.debug(" -out-> " + repr(connack))
if connack is not None:
self.logger.debug(" -out-> " + repr(connack))
yield from connack.to_stream(writer)
writer.close()
return
client_session = None
self.logger.debug("Clean session={0}".format(connect.variable_header.clean_session_flag))
self.logger.debug("known sessions={0}".format(self._sessions))
if connect.variable_header.clean_session_flag:
client_id = connect.payload.client_id
if client_id is not None and client_id in self._sessions:
# Delete existing session
del self._sessions[client_id]
client_session = Session()
client_session.parent = 0
self._sessions[client_id] = client_session
else:
# Get session from cache
client_id = connect.payload.client_id
if client_id in self._sessions:
self.logger.debug("Found old session %s" % repr(self._sessions[client_id]))
client_session = self._sessions[client_id]
client_session.parent = 1
else:
client_session = Session()
client_session.parent = 0
if client_session.client_id is None:
# Generate client ID
client_session.client_id = gen_client_id()
client_session.remote_address = remote_address
client_session.remote_port = remote_port
client_session.clean_session = connect.variable_header.clean_session_flag
client_session.will_flag = connect.variable_header.will_flag
client_session.will_retain = connect.variable_header.will_retain_flag
client_session.will_qos = connect.variable_header.will_qos
client_session.will_topic = connect.payload.will_topic
client_session.will_message = connect.payload.will_message
client_session.username = connect.payload.username
client_session.password = connect.payload.password
client_session.client_id = connect.payload.client_id
if connect.variable_header.keep_alive > 0:
client_session.keep_alive = connect.variable_header.keep_alive + self.config['timeout-disconnect-delay']
else:
client_session.keep_alive = 0
client_session.reader = reader
client_session.writer = writer
if self.authenticate(client_session):
connack = ConnackPacket.build(client_session.parent, ReturnCode.CONNECTION_ACCEPTED)
self.logger.info('%s : connection accepted' % format_client_message(session=client_session))
self.logger.debug(" -out-> " + repr(connack))
yield from connack.to_stream(writer)
else:
connack = ConnackPacket.build(client_session.parent, ReturnCode.NOT_AUTHORIZED)
self.logger.info('%s : connection refused' % format_client_message(session=client_session))
self.logger.debug(" -out-> " + repr(connack))
yield from connack.to_stream(writer)
writer.close()
return
client_session.machine.connect()
handler = BrokerProtocolHandler(self._loop)
handler.attach_to_session(client_session)
self.logger.debug("%s Start messages handling" % client_session.client_id)
yield from handler.start()
yield from self.publish_session_retained_messages(client_session)
self.logger.debug("%s Wait for disconnect" % client_session.client_id)
connected = True
wait_disconnect = asyncio.Task(handler.wait_disconnect())
wait_subscription = asyncio.Task(handler.get_next_pending_subscription())
wait_unsubscription = asyncio.Task(handler.get_next_pending_unsubscription())
wait_deliver = asyncio.Task(handler.mqtt_deliver_next_message())
disconnect_event = False
while connected:
done, pending = yield from asyncio.wait(
[wait_disconnect, wait_subscription, wait_unsubscription, wait_deliver],
return_when=asyncio.FIRST_COMPLETED)
if wait_disconnect in done:
if not disconnect_event:
result = wait_disconnect.result()
self.logger.debug("%s Result from wait_diconnect: %s" % (client_session.client_id, result))
if result is None:
self.logger.debug("Will flag: %s" % client_session.will_flag)
#Connection closed anormally, send will message
if client_session.will_flag:
self.logger.debug("Client %s disconnected abnormally, sending will message" %
format_client_message(client_session))
yield from self.broadcast_application_message(
client_session, client_session.will_topic,
client_session.will_message,
client_session.will_qos)
if client_session.will_retain:
self.retain_message(client_session,
client_session.will_topic,
client_session.will_message,
client_session.will_qos)
disconnect_event = True
if not (wait_unsubscription.done() or wait_subscription.done() or wait_deliver.done):
connected = False
if wait_unsubscription in done:
self.logger.debug("%s handling unsubscription" % client_session.client_id)
unsubscription = wait_unsubscription.result()
for topic in unsubscription['topics']:
self.del_subscription(topic, client_session)
yield from handler.mqtt_acknowledge_unsubscription(unsubscription['packet_id'])
wait_unsubscription = asyncio.Task(handler.get_next_pending_unsubscription())
if wait_subscription in done:
self.logger.debug("%s handling subscription" % client_session.client_id)
subscriptions = wait_subscription.result()
return_codes = []
for subscription in subscriptions['topics']:
return_codes.append(self.add_subscription(subscription, client_session))
yield from handler.mqtt_acknowledge_subscription(subscriptions['packet_id'], return_codes)
for index, subscription in enumerate(subscriptions['topics']):
if return_codes[index] != 0x80:
yield from self.publish_retained_messages_for_subscription(subscription, client_session)
wait_subscription = asyncio.Task(handler.get_next_pending_subscription())
self.logger.debug(repr(self._subscriptions))
if wait_deliver in done:
self.logger.debug("%s handling message delivery" % client_session.client_id)
publish_packet = wait_deliver.result().publish_packet
topic_name = publish_packet.variable_header.topic_name
data = publish_packet.payload.data
yield from self.broadcast_application_message(client_session, topic_name, data)
if publish_packet.retain_flag:
self.retain_message(client_session, topic_name, data)
wait_deliver = asyncio.Task(handler.mqtt_deliver_next_message())
wait_subscription.cancel()
wait_unsubscription.cancel()
wait_deliver.cancel()
self.logger.debug("%s Client disconnecting" % client_session.client_id)
try:
yield from handler.stop()
except Exception as e:
self.logger.error(e)
finally:
handler.detach_from_session()
handler = None
client_session.machine.disconnect()
writer.close()
self.logger.debug("%s Session disconnected" % client_session.client_id)
    @asyncio.coroutine
    def check_connect(self, connect: ConnectPacket):
        """Validate an incoming CONNECT packet against MQTT 3.1.1 rules.

        :raises BrokerException: when the packet violates the specification.
        NOTE(review): declared as a coroutine — callers must ``yield from`` it
        for these checks to actually execute.
        """
        if connect.payload.client_id is None:
            raise BrokerException('[[MQTT-3.1.3-3]] : Client identifier must be present')
        if connect.variable_header.will_flag:
            # A will flag requires both the will topic and the will message.
            if connect.payload.will_topic is None or connect.payload.will_message is None:
                raise BrokerException('will flag set, but will topic/message not present in payload')
        if connect.variable_header.reserved_flag:
            raise BrokerException('[MQTT-3.1.2-3] CONNECT reserved flag must be set to 0')
    def authenticate(self, session: Session):
        """Authorize *session*; the current implementation accepts everyone."""
        # TODO : Handle client authentication here
        return True
def retain_message(self, source_session, topic_name, data, qos=None):
if data is not None and data != b'':
# If retained flag set, store the message for further subscriptions
self.logger.debug("%s Retaining message on topic %s" % (source_session.client_id, topic_name))
retained_message = RetainedApplicationMessage(source_session, topic_name, data, qos)
self._global_retained_messages[topic_name] = retained_message
else:
# [MQTT-3.3.1-10]
self.logger.debug("%s Clear retained messages for topic '%s'" % (source_session.client_id, topic_name))
del self._global_retained_messages[topic_name]
def add_subscription(self, subscription, session):
import re
#wildcard_pattern = re.compile('(/.+?\+)|(/\+.+?)|(/.+?\+.+?)')
wildcard_pattern = re.compile('.*?/?\+/?.*?')
try:
a_filter = subscription['filter']
if '#' in a_filter and not a_filter.endswith('#'):
# [MQTT-4.7.1-2] Wildcard character '#' is only allowed as last character in filter
return 0x80
if '+' in a_filter and not wildcard_pattern.match(a_filter):
# [MQTT-4.7.1-3] + wildcard character must occupy entire level
return 0x80
qos = subscription['qos']
if 'max-qos' in self.config and qos > self.config['max-qos']:
qos = self.config['max-qos']
if a_filter not in self._subscriptions:
self._subscriptions[a_filter] = []
already_subscribed = next(
(s for s in self._subscriptions[a_filter] if s.session.client_id == session.client_id), None)
if not already_subscribed:
self._subscriptions[a_filter].append(Subscription(session, qos))
else:
self.logger.debug("Client %s has already subscribed to %s" % (format_client_message(session=session), a_filter))
return qos
except KeyError:
return 0x80
    def del_subscription(self, a_filter, session):
        """Remove *session*'s subscription on *a_filter*; unknown filters are ignored."""
        try:
            subscriptions = self._subscriptions[a_filter]
            # NOTE(review): popping while enumerating skips the next element;
            # benign here only because add_subscription() stores at most one
            # entry per client — confirm.
            for index, subscription in enumerate(subscriptions):
                if subscription.session.client_id == session.client_id:
                    self.logger.debug("Removing subscription on topic '%s' for client %s" %
                                      (a_filter, format_client_message(session=session)))
                    subscriptions.pop(index)
        except KeyError:
            # Unsubscribe topic not found in current subscribed topics
            pass
def matches(self, topic, filter):
import re
match_pattern = re.compile(filter.replace('#', '.*').replace('+', '[\s\w\d]+'))
if match_pattern.match(topic):
return True
else:
return False
@asyncio.coroutine
def broadcast_application_message(self, source_session, topic, data, force_qos=None):
self.logger.debug("Broadcasting message from %s on topic %s" %
(format_client_message(session=source_session), topic)
)
self.logger.debug("Current subscriptions: %s" % repr(self._subscriptions))
publish_tasks = []
try:
for k_filter in self._subscriptions:
if self.matches(topic, k_filter):
subscriptions = self._subscriptions[k_filter]
for subscription in subscriptions:
target_session = subscription.session
qos = subscription.qos
if force_qos is not None:
qos = force_qos
if target_session.machine.state == 'connected':
self.logger.debug("broadcasting application message from %s on topic '%s' to %s" %
(format_client_message(session=source_session),
topic, format_client_message(session=target_session)))
handler = subscription.session.handler
publish_tasks.append(
asyncio.Task(handler.mqtt_publish(topic, data, qos, retain=False))
)
else:
self.logger.debug("retaining application message from %s on topic '%s' to client '%s'" %
(format_client_message(session=source_session),
topic, format_client_message(session=target_session)))
retained_message = RetainedApplicationMessage(source_session, topic, data, qos)
publish_tasks.append(
asyncio.Task(target_session.retained_messages.put(retained_message))
)
if len(publish_tasks) > 0:
asyncio.wait(publish_tasks)
except Exception as e:
self.logger.warn("Message broadcasting failed: %s", e)
self.logger.debug("End Broadcasting message from %s on topic %s" %
(format_client_message(session=source_session), topic)
)
@asyncio.coroutine
def publish_session_retained_messages(self, session):
self.logger.debug("Publishing %d messages retained for session %s" %
(session.retained_messages.qsize(), format_client_message(session=session))
)
publish_tasks = []
while not session.retained_messages.empty():
retained = yield from session.retained_messages.get()
publish_tasks.append(asyncio.Task(
session.handler.mqtt_publish(
retained.topic, retained.data, False, retained.qos, True)))
if len(publish_tasks) > 0:
asyncio.wait(publish_tasks)
@asyncio.coroutine
def publish_retained_messages_for_subscription(self, subscription, session):
self.logger.debug("Begin broadcasting messages retained due to subscription on '%s' from %s" %
(subscription['filter'], format_client_message(session=session)))
publish_tasks = []
for d_topic in self._global_retained_messages:
self.logger.debug("matching : %s %s" % (d_topic, subscription['filter']))
if self.matches(d_topic, subscription['filter']):
self.logger.debug("%s and %s match" % (d_topic, subscription['filter']))
retained = self._global_retained_messages[d_topic]
publish_tasks.append(asyncio.Task(
session.handler.mqtt_publish(
retained.topic, retained.data, subscription['qos'], True)))
if len(publish_tasks) > 0:
asyncio.wait(publish_tasks)
self.logger.debug("End broadcasting messages retained due to subscription on '%s' from %s" %
(subscription['filter'], format_client_message(session=session)))
|
# -*- coding: utf8 -*-
import json
import urllib
import urlparse
from django.http import QueryDict
from django.test import client
from mock import Mock, patch
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.urlresolvers import reverse
from applications.models import AppVersion
from addons.tests.test_views import TestMobile
from search.tests import SphinxTestCase
from search import views
from search.client import SearchError
from addons.models import Addon, Category
from tags.models import AddonTag, Tag
def test_parse_bad_type():
    """
    Given a type that doesn't exist, we should not throw a KeyError.
    Note: This does not require sphinx to be running.
    """
    c = client.Client()
    try:
        # 'dict' is not a known addon type; the API must degrade gracefully
        # instead of crashing while parsing the query.
        c.get("/en-US/firefox/api/1.2/search/firebug%20type:dict")
    except KeyError:  # pragma: no cover
        assert False, ("We should not throw a KeyError just because we had a "
                       "nonexistent addon type.")
class MobileSearchTest(SphinxTestCase, TestMobile):
    """Search results requested from mobile should use the mobile template."""
    def test_search(self):
        r = self.client.get(reverse('search.search'))
        eq_(r.status_code, 200)
        self.assertTemplateUsed(r, 'search/mobile/results.html')
class ViewTest(amo.tests.TestCase):
    """Tests some of the functions used in building the view."""
    fixtures = ('base/category',)
    def setUp(self):
        # Minimal request stand-in; only get_full_path() is consulted.
        self.fake_request = Mock()
        self.fake_request.get_full_path = lambda: 'http://fatgir.ls/'
    def test_get_categories(self):
        cats = Category.objects.all()
        cat = cats[0].id
        # Select a category.
        items = views._get_categories(self.fake_request, cats, category=cat)
        eq_(len(cats), len(items[1].children))
        assert any((i.selected for i in items[1].children))
        # Select an addon type.
        atype = cats[0].type
        items = views._get_categories(self.fake_request, cats,
                                      addon_type=atype)
        assert any((i.selected for i in items))
    def test_get_tags(self):
        # A tag matching the selected text should come back marked selected.
        t = Tag(tag_text='yermom')
        assert views._get_tags(self.fake_request, tags=[t], selected='yermom')
class TestAdminDisabledAddons(SphinxTestCase):
    """Sphinx indexing behavior with an admin-disabled add-on in the fixtures."""
    fixtures = ('base/addon_3615',)
    def setUp(self):
        # Disable the add-on before SphinxTestCase indexes the data.
        Addon.objects.get(pk=3615).update(status=amo.STATUS_DISABLED)
        super(TestAdminDisabledAddons, self).setUp()
class TestSearchboxTarget(amo.tests.TestCase):
    # Check that we search within addons/personas/collections as appropriate.
    def check(self, url, placeholder, cat):
        # The header search form carries page-specific placeholder text and a
        # hidden category value.
        doc = pq(self.client.get(url).content)('.header-search form')
        eq_(doc('input[name=q]').attr('placeholder'), placeholder)
        eq_(doc('input[name=cat]').val(), cat)
    def test_addons_is_default(self):
        self.check(reverse('home'), 'search for add-ons', 'all')
    def test_themes(self):
        self.check(reverse('browse.themes'), 'search for add-ons',
                   '%s,0' % amo.ADDON_THEME)
    def test_collections(self):
        self.check(reverse('collections.list'), 'search for collections',
                   'collections')
    def test_personas(self):
        self.check(reverse('browse.personas'), 'search for personas',
                   'personas')
class TestESSearch(amo.tests.TestCase):
    # NOTE(review): exercises the elasticsearch-backed view but subclasses the
    # plain TestCase — presumably needs an ES-enabled test case; confirm.
    def test_legacy_redirects(self):
        base = reverse('search.es_search')
        r = self.client.get(base + '?sort=averagerating')
        # Legacy sort names permanently redirect to their new equivalents.
        self.assertRedirects(r, base + '?sort=rating', status_code=301)
def test_search_redirects():
    """fix_search_query() rewrites legacy query params and leaves modern ones alone."""
    changes = (
        ('q=yeah&sort=newest', 'q=yeah&sort=updated'),
        ('sort=weeklydownloads', 'sort=users'),
        ('sort=averagerating', 'sort=rating'),
        ('lver=5.*', 'appver=5.*'),
        ('q=woo&sort=averagerating&lver=6.0', 'q=woo&sort=rating&appver=6.0'),
        ('pid=2', 'platform=linux'),
        ('q=woo&lver=6.0&sort=users&pid=5',
         'q=woo&appver=6.0&sort=users&platform=windows'),
    )
    def check(before, after):
        eq_(views.fix_search_query(QueryDict(before)),
            dict(urlparse.parse_qsl(after)))
    # nose generator test: each pair runs as its own test case.
    for before, after in changes:
        yield check, before, after
    queries = (
        'q=yeah',
        'q=yeah&sort=users',
        'sort=users',
        'q=yeah&appver=6.0',
        'q=yeah&appver=6.0&platform=mac',
    )
    def same(qs):
        # Identity (not just equality): untouched queries return the same object.
        q = QueryDict(qs)
        assert views.fix_search_query(q) is q
    for qs in queries:
        yield same, qs
This test requires elastic search
# -*- coding: utf8 -*-
import json
import urllib
import urlparse
from django.http import QueryDict
from django.test import client
from mock import Mock, patch
from nose.tools import eq_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.urlresolvers import reverse
from applications.models import AppVersion
from addons.tests.test_views import TestMobile
from search.tests import SphinxTestCase
from search import views
from search.client import SearchError
from addons.models import Addon, Category
from tags.models import AddonTag, Tag
def test_parse_bad_type():
    """
    Given a type that doesn't exist, we should not throw a KeyError.
    Note: This does not require sphinx to be running.
    """
    try:
        # 'dict' is not a registered addon type; parsing must not blow up.
        client.Client().get("/en-US/firefox/api/1.2/search/firebug%20type:dict")
    except KeyError:  # pragma: no cover
        assert False, ("We should not throw a KeyError just because we had a "
                       "nonexistent addon type.")
class MobileSearchTest(SphinxTestCase, TestMobile):
    """Search results requested from mobile should use the mobile template."""
    def test_search(self):
        r = self.client.get(reverse('search.search'))
        eq_(r.status_code, 200)
        self.assertTemplateUsed(r, 'search/mobile/results.html')
class ViewTest(amo.tests.TestCase):
    """Tests some of the functions used in building the view."""
    fixtures = ('base/category',)
    def setUp(self):
        # Minimal request stand-in; only get_full_path() is consulted.
        self.fake_request = Mock()
        self.fake_request.get_full_path = lambda: 'http://fatgir.ls/'
    def test_get_categories(self):
        cats = Category.objects.all()
        cat = cats[0].id
        # Select a category.
        items = views._get_categories(self.fake_request, cats, category=cat)
        eq_(len(cats), len(items[1].children))
        assert any((i.selected for i in items[1].children))
        # Select an addon type.
        atype = cats[0].type
        items = views._get_categories(self.fake_request, cats,
                                      addon_type=atype)
        assert any((i.selected for i in items))
    def test_get_tags(self):
        # A tag matching the selected text should come back marked selected.
        t = Tag(tag_text='yermom')
        assert views._get_tags(self.fake_request, tags=[t], selected='yermom')
class TestAdminDisabledAddons(SphinxTestCase):
    """Sphinx indexing behavior with an admin-disabled add-on in the fixtures."""
    fixtures = ('base/addon_3615',)
    def setUp(self):
        # Disable the add-on before SphinxTestCase indexes the data.
        Addon.objects.get(pk=3615).update(status=amo.STATUS_DISABLED)
        super(TestAdminDisabledAddons, self).setUp()
class TestSearchboxTarget(amo.tests.TestCase):
    """The header searchbox should scope to add-ons, collections or personas
    depending on the page being viewed."""

    def check(self, url, placeholder, cat):
        # Inspect the rendered header form: placeholder text plus the hidden
        # category input must match the page context.
        form = pq(self.client.get(url).content)('.header-search form')
        eq_(form('input[name=q]').attr('placeholder'), placeholder)
        eq_(form('input[name=cat]').val(), cat)

    def test_addons_is_default(self):
        self.check(reverse('home'), 'search for add-ons', 'all')

    def test_themes(self):
        self.check(reverse('browse.themes'), 'search for add-ons',
                   '%s,0' % amo.ADDON_THEME)

    def test_collections(self):
        self.check(reverse('collections.list'), 'search for collections',
                   'collections')

    def test_personas(self):
        self.check(reverse('browse.personas'), 'search for personas',
                   'personas')
class TestESSearch(amo.tests.ESTestCase):
    """Elasticsearch-backed search view tests (ES-enabled test case base)."""
    def test_legacy_redirects(self):
        base = reverse('search.es_search')
        r = self.client.get(base + '?sort=averagerating')
        # Legacy sort names permanently redirect to their new equivalents.
        self.assertRedirects(r, base + '?sort=rating', status_code=301)
def test_search_redirects():
    """fix_search_query() rewrites legacy query params and leaves modern ones alone."""
    changes = (
        ('q=yeah&sort=newest', 'q=yeah&sort=updated'),
        ('sort=weeklydownloads', 'sort=users'),
        ('sort=averagerating', 'sort=rating'),
        ('lver=5.*', 'appver=5.*'),
        ('q=woo&sort=averagerating&lver=6.0', 'q=woo&sort=rating&appver=6.0'),
        ('pid=2', 'platform=linux'),
        ('q=woo&lver=6.0&sort=users&pid=5',
         'q=woo&appver=6.0&sort=users&platform=windows'),
    )
    def check(before, after):
        eq_(views.fix_search_query(QueryDict(before)),
            dict(urlparse.parse_qsl(after)))
    # nose generator test: each pair runs as its own test case.
    for before, after in changes:
        yield check, before, after
    queries = (
        'q=yeah',
        'q=yeah&sort=users',
        'sort=users',
        'q=yeah&appver=6.0',
        'q=yeah&appver=6.0&platform=mac',
    )
    def same(qs):
        # Identity (not just equality): untouched queries return the same object.
        q = QueryDict(qs)
        assert views.fix_search_query(q) is q
    for qs in queries:
        yield same, qs
|
from typing import List
from easypost.easypost_object import EasyPostObject
from easypost.error import Error
def get_lowest_object_rate(
    easypost_object: EasyPostObject,
    carriers: List[str] = None,
    services: List[str] = None,
    rates_key: str = "rates",
):
    """Gets the lowest rate of an EasyPost object such as a Shipment, Order, or Pickup."""
    wanted_carriers = [carrier.lower() for carrier in (carriers or [])]
    wanted_services = [service.lower() for service in (services or [])]

    def _eligible(rate):
        # A rate qualifies when every provided filter list contains it
        # (case-insensitive); an empty filter list accepts everything.
        carrier_ok = not wanted_carriers or rate.carrier.lower() in wanted_carriers
        service_ok = not wanted_services or rate.service.lower() in wanted_services
        return carrier_ok and service_ok

    candidates = [rate for rate in easypost_object.get(rates_key, []) if _eligible(rate)]
    if not candidates:
        raise Error(message="No rates found.")
    # min() keeps the first of equally-cheap rates, matching a strict '<' scan.
    return min(candidates, key=lambda rate: float(rate.rate))
fix: if statements
from typing import List
from easypost.easypost_object import EasyPostObject
from easypost.error import Error
def get_lowest_object_rate(
    easypost_object: "EasyPostObject",
    carriers: List[str] = None,
    services: List[str] = None,
    rates_key: str = "rates",
):
    """Return the cheapest rate attached to an EasyPost object (Shipment, Order, Pickup).

    ``carriers`` and ``services`` act as case-insensitive filters; an Error is
    raised when no rate survives the filtering.
    """
    carrier_filter = {name.lower() for name in (carriers or [])}
    service_filter = {name.lower() for name in (services or [])}

    cheapest = None
    for candidate in easypost_object.get(rates_key, []):
        if carrier_filter and candidate.carrier.lower() not in carrier_filter:
            continue
        if service_filter and candidate.service.lower() not in service_filter:
            continue
        if cheapest is None or float(candidate.rate) < float(cheapest.rate):
            cheapest = candidate

    if cheapest is None:
        raise Error(message="No rates found.")
    return cheapest
|
# -*- coding: utf-8 -*-
import pybayes as pb
import numpy as np
from qsrrep_pf.probability_density_functions import PredictionPdf, ObservationPdf, UniIntPdf
from qsrrep_pf.particle_filter_base import ParticleFilter
from collections import OrderedDict
from copy import deepcopy
import time
class ParticleFilterPredictor(object):
    """Particle-filter wrapper jointly estimating a discrete state and model.

    Particles are 2-D: component 0 indexes the state lookup table, component 1
    indexes the model dict. NOTE(review): Python 2 code (print statements,
    dict.keys() used as a list).
    """

    def create(self, **kwargs):
        # Build prediction/observation pdfs over the (state, model) space and
        # wrap them in a new particle filter.
        models = OrderedDict(kwargs["models"])
        states = kwargs["state_lookup_table"]
        x_t = pb.RVComp(2, 'x_t')
        p = PredictionPdf(models=models, states=states, rv=x_t, cond_rv=pb.RVComp(2, 'x_tp'))
        o = ObservationPdf(states=states, models=models, rv=pb.RVComp(2, 'y_t'), cond_rv=[x_t])
        # prepare initial particle density:
        # Uniform over all (state index, model index) pairs; 'cheat'
        # presumably forces coverage of every state -- TODO confirm.
        init_pdf = UniIntPdf(
            np.array([0., 0.]),
            np.array([float(len(states)-1), float(len(models.keys())-1)]),
            cheat=kwargs["ensure_particle_per_state"]
        )
        return {
            "filter": ParticleFilter(kwargs["num_particles"], init_pdf, p, o, starvation_factor=1.-kwargs["starvation_factor"]),
            "models": models.keys(),
            "states": kwargs["state_lookup_table"]
        }

    def predict(self, **kwargs):
        # Sample the prediction pdf forward num_steps times without new
        # observations; returns [(state, state_prob, model, model_prob), ...].
        ret = []
        if kwargs["debug"]: start = time.time()
        # Copy so the filter's own particle set stays untouched.
        p = deepcopy(kwargs["filter"].emp.particles)
        for n in range(kwargs["num_steps"]):
            if kwargs["debug"]: start = time.time()
            # NOTE(review): p is never advanced to pn, so every iteration
            # samples from the same initial particles -- confirm intended.
            pn = kwargs["filter"].p_xt_xtp.sample_multiple(p)
            if kwargs["debug"]:
                print "elapsed", time.time() -start
                print "###############################################################"
                print pn
            _,bs,sp,_,bm,mp = self._get_best_state_and_model(pn, **kwargs)
            ret.append((bs,sp,bm,mp))
        if kwargs["debug"]: print "total elapsed", time.time() -start
        return ret

    def update(self, **kwargs):
        # Fuse one observation into the filter and return the current best
        # (state, state_prob, model, model_prob) estimate.
        obs = kwargs["observation"]
        if kwargs["debug"]:
            start = time.time()
            print obs
        # The observation constrains only the state component; the model
        # component is unobserved (np.nan).
        kwargs["filter"].bayes(np.array([kwargs["states"].index(obs), np.nan]))
        p = kwargs["filter"].emp.particles
        if kwargs["debug"]:
            print "###############################################################"
            print "OBS:", obs, kwargs["states"].index(obs)
            try:
                print "MODELS:", kwargs["models"]
                print "MODEL SIZES:", np.bincount(map(int,p[:,1].flatten()), minlength=len(kwargs["models"]))
            except:
                pass
        _,bs,sp,_,bm,mp = self._get_best_state_and_model(p, **kwargs)
        if kwargs["debug"]: print "total elapsed", time.time()-start
        return bs,sp,bm,mp

    def _get_best_state_and_model(self, p, **kwargs):
        # Majority vote over particle indices: histogram each component, take
        # the argmax; the probability is the winning bin's share of particles.
        state_bins = np.bincount(map(int,p[:,0].flatten()))
        model_bins = np.bincount(map(int,p[:,1].flatten()))
        best_state = state_bins.argmax()
        best_model = model_bins.argmax()
        if kwargs["debug"]:
            print state_bins, len(state_bins)
            print model_bins, len(model_bins)
            print best_state, best_model, len(kwargs["states"])
        state_prob = float(state_bins[best_state])/float(np.sum(state_bins))
        model_prob = float(model_bins[best_model])/float(np.sum(model_bins))
        return best_state, kwargs["states"][best_state], state_prob, best_model, kwargs["models"][best_model], model_prob
Add error handling for invalid model and state counts when creating the initial particle distribution.
# -*- coding: utf-8 -*-
import pybayes as pb
import numpy as np
from qsrrep_pf.probability_density_functions import PredictionPdf, ObservationPdf, UniIntPdf
from qsrrep_pf.particle_filter_base import ParticleFilter
from collections import OrderedDict
from copy import deepcopy
import time
class ParticleFilterPredictor(object):
    """Particle-filter wrapper jointly estimating a discrete state and model.

    Particles are 2-D: component 0 indexes the state lookup table, component 1
    indexes the model dict. NOTE(review): Python 2 code (print statements,
    dict.keys() used as a list).
    """

    def create(self, **kwargs):
        # Build prediction/observation pdfs over the (state, model) space and
        # wrap them in a new particle filter. Returns None on failure.
        models = OrderedDict(kwargs["models"])
        states = kwargs["state_lookup_table"]
        x_t = pb.RVComp(2, 'x_t')
        p = PredictionPdf(models=models, states=states, rv=x_t, cond_rv=pb.RVComp(2, 'x_tp'))
        o = ObservationPdf(states=states, models=models, rv=pb.RVComp(2, 'y_t'), cond_rv=[x_t])
        # prepare initial particle density:
        try:
            # Uniform over all (state index, model index) pairs; 'cheat'
            # presumably forces coverage of every state -- TODO confirm.
            init_pdf = UniIntPdf(
                np.array([0., 0.]),
                np.array([float(len(states)-1), float(len(models.keys())-1)]),
                cheat=kwargs["ensure_particle_per_state"]
            )
        except ValueError as e:
            # Degenerate ranges (single state or single model) make the
            # uniform pdf construction fail; report and bail out.
            print "### Encountered a problem while creating intial particle distribution:", e
            print "### This might happen if there is only one model or one state defined."
            print "### Currently defined number of states: %s and number of models: %s" % (str(len(states)),str(len(models.keys()))), models.keys()
            return None
        return {
            "filter": ParticleFilter(kwargs["num_particles"], init_pdf, p, o, starvation_factor=1.-kwargs["starvation_factor"]),
            "models": models.keys(),
            "states": kwargs["state_lookup_table"]
        }

    def predict(self, **kwargs):
        # Sample the prediction pdf forward num_steps times without new
        # observations; returns [(state, state_prob, model, model_prob), ...].
        ret = []
        if kwargs["debug"]: start = time.time()
        # Copy so the filter's own particle set stays untouched.
        p = deepcopy(kwargs["filter"].emp.particles)
        for n in range(kwargs["num_steps"]):
            if kwargs["debug"]: start = time.time()
            # NOTE(review): p is never advanced to pn, so every iteration
            # samples from the same initial particles -- confirm intended.
            pn = kwargs["filter"].p_xt_xtp.sample_multiple(p)
            if kwargs["debug"]:
                print "elapsed", time.time() -start
                print "###############################################################"
                print pn
            _,bs,sp,_,bm,mp = self._get_best_state_and_model(pn, **kwargs)
            ret.append((bs,sp,bm,mp))
        if kwargs["debug"]: print "total elapsed", time.time() -start
        return ret

    def update(self, **kwargs):
        # Fuse one observation into the filter and return the current best
        # (state, state_prob, model, model_prob) estimate.
        obs = kwargs["observation"]
        if kwargs["debug"]:
            start = time.time()
            print obs
        # The observation constrains only the state component; the model
        # component is unobserved (np.nan).
        kwargs["filter"].bayes(np.array([kwargs["states"].index(obs), np.nan]))
        p = kwargs["filter"].emp.particles
        if kwargs["debug"]:
            print "###############################################################"
            print "OBS:", obs, kwargs["states"].index(obs)
            try:
                print "MODELS:", kwargs["models"]
                print "MODEL SIZES:", np.bincount(map(int,p[:,1].flatten()), minlength=len(kwargs["models"]))
            except:
                pass
        _,bs,sp,_,bm,mp = self._get_best_state_and_model(p, **kwargs)
        if kwargs["debug"]: print "total elapsed", time.time()-start
        return bs,sp,bm,mp

    def _get_best_state_and_model(self, p, **kwargs):
        # Majority vote over particle indices: histogram each component, take
        # the argmax; the probability is the winning bin's share of particles.
        state_bins = np.bincount(map(int,p[:,0].flatten()))
        model_bins = np.bincount(map(int,p[:,1].flatten()))
        best_state = state_bins.argmax()
        best_model = model_bins.argmax()
        if kwargs["debug"]:
            print state_bins, len(state_bins)
            print model_bins, len(model_bins)
            print best_state, best_model, len(kwargs["states"])
        state_prob = float(state_bins[best_state])/float(np.sum(state_bins))
        model_prob = float(model_bins[best_model])/float(np.sum(model_bins))
        return best_state, kwargs["states"][best_state], state_prob, best_model, kwargs["models"][best_model], model_prob
|
#!/usr/bin/env python
# Copyright 2017 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This is a modified copy of the script in
# https://chromium.googlesource.com/external/webrtc/+/master/tools-webrtc/autoroller/roll_deps.py
# customized for libyuv.
"""Script to automatically roll dependencies in the libyuv DEPS file."""
import argparse
import base64
import collections
import logging
import os
import re
import subprocess
import sys
import urllib
# Skip these dependencies (list without solution name prefix).
DONT_AUTOROLL_THESE = [
'src/third_party/gflags/src',
]
LIBYUV_URL = 'https://chromium.googlesource.com/libyuv/libyuv'
CHROMIUM_SRC_URL = 'https://chromium.googlesource.com/chromium/src'
CHROMIUM_COMMIT_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s'
CHROMIUM_LOG_TEMPLATE = CHROMIUM_SRC_URL + '/+log/%s'
CHROMIUM_FILE_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s/%s'
COMMIT_POSITION_RE = re.compile('^Cr-Commit-Position: .*#([0-9]+).*$')
CLANG_REVISION_RE = re.compile(r'^CLANG_REVISION = \'(\d+)\'$')
ROLL_BRANCH_NAME = 'roll_chromium_revision'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
CHECKOUT_SRC_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, os.pardir,
os.pardir))
CHECKOUT_ROOT_DIR = os.path.realpath(os.path.join(CHECKOUT_SRC_DIR, os.pardir))
sys.path.append(os.path.join(CHECKOUT_SRC_DIR, 'build'))
import find_depot_tools
find_depot_tools.add_depot_tools_to_path()
CLANG_UPDATE_SCRIPT_URL_PATH = 'tools/clang/scripts/update.py'
CLANG_UPDATE_SCRIPT_LOCAL_PATH = os.path.join(CHECKOUT_SRC_DIR, 'tools',
'clang', 'scripts', 'update.py')
DepsEntry = collections.namedtuple('DepsEntry', 'path url revision')
ChangedDep = collections.namedtuple('ChangedDep',
'path url current_rev new_rev')
class RollError(Exception):
  """Raised when the roll cannot proceed (e.g. unparsable revision data or a
  missing local checkout directory)."""
def VarLookup(local_scope):
  """Return a Var() resolver bound to *local_scope*'s 'vars' dict."""
  def _lookup(var_name):
    return local_scope['vars'][var_name]
  return _lookup
def ParseDepsDict(deps_content):
  """Evaluate DEPS file content and return its local scope as a dict."""
  local_scope = {}
  global_scope = {
      # Var('name') resolves against the 'vars' dict that the DEPS content
      # itself defines (filled into local_scope while exec runs).
      'Var': lambda var_name: local_scope['vars'][var_name],
      'deps_os': {},
  }
  exec(deps_content, global_scope, local_scope)
  return local_scope
def ParseLocalDepsFile(filename):
  """Read a local DEPS file and return its evaluated scope as a dict."""
  with open(filename, 'rb') as f:
    deps_content = f.read()
  return ParseDepsDict(deps_content)
def ParseRemoteCrDepsFile(revision):
  """Fetch Chromium's DEPS at *revision* and return its evaluated scope."""
  deps_content = ReadRemoteCrFile('DEPS', revision)
  return ParseDepsDict(deps_content)
def ParseCommitPosition(commit_message):
  """Extract the Cr-Commit-Position number from a commit message.

  Scans the message bottom-up (footers come last) for a line matching
  COMMIT_POSITION_RE and returns the captured number as a string.
  Exits the process if no such line exists.
  """
  for line in reversed(commit_message.splitlines()):
    m = COMMIT_POSITION_RE.match(line.strip())
    if m:
      return m.group(1)
  logging.error('Failed to parse commit position id from:\n%s\n',
                commit_message)
  sys.exit(-1)
def _RunCommand(command, working_dir=None, ignore_exit_code=False,
                extra_env=None):
  """Runs a command and returns the output from that command.

  If the command fails (exit code != 0), the function will exit the process.

  Returns:
    A tuple containing the stdout and stderr outputs as strings.
  """
  working_dir = working_dir or CHECKOUT_SRC_DIR
  logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir)
  env = os.environ.copy()
  if extra_env:
    assert all(type(value) == str for value in extra_env.values())
    logging.debug('extra env: %s', extra_env)
    env.update(extra_env)
  # NOTE(review): reading stdout to EOF before touching stderr can deadlock
  # if the child fills the stderr pipe buffer first -- confirm commands used
  # here emit little stderr, or switch to Popen.communicate().
  p = subprocess.Popen(command, stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE, env=env,
                       cwd=working_dir, universal_newlines=True)
  std_output = p.stdout.read()
  err_output = p.stderr.read()
  p.wait()
  p.stdout.close()
  p.stderr.close()
  if not ignore_exit_code and p.returncode != 0:
    logging.error('Command failed: %s\n'
                  'stdout:\n%s\n'
                  'stderr:\n%s\n', ' '.join(command), std_output, err_output)
    sys.exit(p.returncode)
  return std_output, err_output
def _GetBranches():
  """Returns a tuple of active,branches.

  The 'active' is the name of the currently active branch and 'branches' is a
  list of all branches.
  """
  # `git branch` marks the checked-out branch with a leading '*'.
  lines = _RunCommand(['git', 'branch'])[0].split('\n')
  branches = []
  active = ''
  for line in lines:
    if '*' in line:
      # The assumption is that the first char will always be the '*'.
      active = line[1:].strip()
      branches.append(active)
    else:
      branch = line.strip()
      if branch:
        branches.append(branch)
  return active, branches
def _ReadGitilesContent(url):
  """Fetch *url* from gitiles and return its base64-decoded content."""
  # Download and decode BASE64 content until
  # https://code.google.com/p/gitiles/issues/detail?id=7 is fixed.
  base64_content = ReadUrlContent(url + '?format=TEXT')
  return base64.b64decode(base64_content[0])
def ReadRemoteCrFile(path_below_src, revision):
  """Reads a remote Chromium file of a specific revision. Returns a string."""
  # Path is relative to Chromium's src/ root.
  return _ReadGitilesContent(CHROMIUM_FILE_TEMPLATE % (revision,
                                                       path_below_src))
def ReadRemoteCrCommit(revision):
  """Reads a remote Chromium commit message. Returns a string."""
  return _ReadGitilesContent(CHROMIUM_COMMIT_TEMPLATE % revision)
def ReadUrlContent(url):
  """Connect to a remote host and read the contents. Returns a list of lines."""
  # NOTE(review): urllib.urlopen is Python 2 only; a Python 3 port would need
  # urllib.request.urlopen.
  conn = urllib.urlopen(url)
  try:
    return conn.readlines()
  except IOError as e:
    logging.exception('Error connecting to %s. Error: %s', url, e)
    raise
  finally:
    conn.close()
def GetMatchingDepsEntries(depsentry_dict, dir_path):
  """Gets all deps entries matching the provided path.

  This list may contain more than one DepsEntry object.
  Example: dir_path='src/testing' would give results containing both
  'src/testing/gtest' and 'src/testing/gmock' deps entries for Chromium's DEPS.
  Example 2: dir_path='src/build' should return 'src/build' but not
  'src/buildtools'.

  Returns:
    A list of DepsEntry objects.
  """
  dir_parts = dir_path.split('/')
  result = []
  # .items() works on both Python 2 and 3 (iteritems() is Python 2 only).
  for path, depsentry in depsentry_dict.items():
    if path == dir_path:
      result.append(depsentry)
    else:
      parts = path.split('/')
      # A path with fewer components cannot lie under dir_path; checking the
      # length first also fixes an IndexError the previous comparison hit for
      # such entries (e.g. path='src' vs dir_path='src/build').
      if len(parts) >= len(dir_parts) and all(
          part == parts[i] for i, part in enumerate(dir_parts)):
        result.append(depsentry)
  return result
def BuildDepsentryDict(deps_dict):
  """Builds a dict of paths to DepsEntry objects from a raw parsed deps dict."""
  result = {}
  def AddDepsEntries(deps_subdict):
    # First writer wins: entries from 'deps' take precedence over 'deps_os'.
    for path, deps_url_spec in deps_subdict.items():
      # The deps url is either an URL and a condition, or just the URL.
      if isinstance(deps_url_spec, dict):
        deps_url = deps_url_spec['url']
      else:
        deps_url = deps_url_spec
      # `in` / .items() replace Python-2-only has_key() / iteritems().
      if path not in result:
        url, revision = deps_url.split('@') if deps_url else (None, None)
        result[path] = DepsEntry(path, url, revision)
  AddDepsEntries(deps_dict['deps'])
  # 'unix' was previously listed twice; the duplicate pass was a no-op.
  for deps_os in ['win', 'mac', 'unix', 'android', 'ios']:
    AddDepsEntries(deps_dict.get('deps_os', {}).get(deps_os, {}))
  return result
def CalculateChangedDeps(libyuv_deps, new_cr_deps):
  """
  Calculate changed deps entries based on entries defined in the libyuv DEPS
  file:
  - If a shared dependency with the Chromium DEPS file: roll it to the same
    revision as Chromium (i.e. entry in the new_cr_deps dict)
  - If it's a Chromium sub-directory, roll it to the HEAD revision (notice
    this means it may be ahead of the chromium_revision, but generally these
    should be close).
  - If it's another DEPS entry (not shared with Chromium), roll it to HEAD
    unless it's configured to be skipped.

  Returns:
    A list of ChangedDep objects representing the changed deps.
  """
  result = []
  libyuv_entries = BuildDepsentryDict(libyuv_deps)
  new_cr_entries = BuildDepsentryDict(new_cr_deps)
  # NOTE(review): iteritems() is Python 2 only.
  for path, libyuv_deps_entry in libyuv_entries.iteritems():
    if path in DONT_AUTOROLL_THESE:
      continue
    cr_deps_entry = new_cr_entries.get(path)
    if cr_deps_entry:
      # Use the revision from Chromium's DEPS file.
      new_rev = cr_deps_entry.revision
      assert libyuv_deps_entry.url == cr_deps_entry.url, (
          'Libyuv DEPS entry %s has a different URL (%s) than Chromium (%s).' %
          (path, libyuv_deps_entry.url, cr_deps_entry.url))
    else:
      # Use the HEAD of the deps repo.
      stdout, _ = _RunCommand(['git', 'ls-remote', libyuv_deps_entry.url,
                               'HEAD'])
      new_rev = stdout.strip().split('\t')[0]
    # Check if an update is necessary.
    if libyuv_deps_entry.revision != new_rev:
      logging.debug('Roll dependency %s to %s', path, new_rev)
      result.append(ChangedDep(path, libyuv_deps_entry.url,
                               libyuv_deps_entry.revision, new_rev))
  return sorted(result)
def CalculateChangedClang(new_cr_rev):
  """Compare local vs remote Clang revision; return the pair as a ChangedDep."""
  def GetClangRev(lines):
    # Find the CLANG_REVISION = '<digits>' assignment in an update script.
    for line in lines:
      match = CLANG_REVISION_RE.match(line)
      if match:
        return match.group(1)
    raise RollError('Could not parse Clang revision!')
  with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'rb') as f:
    current_lines = f.readlines()
  current_rev = GetClangRev(current_lines)
  new_clang_update_py = ReadRemoteCrFile(CLANG_UPDATE_SCRIPT_URL_PATH,
                                         new_cr_rev).splitlines()
  new_rev = GetClangRev(new_clang_update_py)
  return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev, new_rev)
def GenerateCommitMessage(current_cr_rev, new_cr_rev, current_commit_pos,
                          new_commit_pos, changed_deps_list, clang_change):
  """Build the roll commit message (change log, dep diffs, Clang, TBR/BUG)."""
  # Only short (10-char) revision hashes appear in the message.
  current_cr_rev = current_cr_rev[0:10]
  new_cr_rev = new_cr_rev[0:10]
  rev_interval = '%s..%s' % (current_cr_rev, new_cr_rev)
  git_number_interval = '%s:%s' % (current_commit_pos, new_commit_pos)
  commit_msg = ['Roll chromium_revision %s (%s)\n' % (rev_interval,
                                                      git_number_interval)]
  commit_msg.append('Change log: %s' % (CHROMIUM_LOG_TEMPLATE % rev_interval))
  commit_msg.append('Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE %
                                         rev_interval))
  # TBR field will be empty unless in some custom cases, where some engineers
  # are added.
  tbr_authors = ''
  if changed_deps_list:
    commit_msg.append('Changed dependencies:')
    for c in changed_deps_list:
      commit_msg.append('* %s: %s/+log/%s..%s' % (c.path, c.url,
                                                  c.current_rev[0:10],
                                                  c.new_rev[0:10]))
    change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, 'DEPS')
    commit_msg.append('DEPS diff: %s\n' % change_url)
  else:
    commit_msg.append('No dependencies changed.')
  if clang_change.current_rev != clang_change.new_rev:
    commit_msg.append('Clang version changed %s:%s' %
                      (clang_change.current_rev, clang_change.new_rev))
    change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval,
                                           CLANG_UPDATE_SCRIPT_URL_PATH)
    commit_msg.append('Details: %s\n' % change_url)
  else:
    commit_msg.append('No update to Clang.\n')
  commit_msg.append('TBR=%s' % tbr_authors)
  commit_msg.append('BUG=None')
  return '\n'.join(commit_msg)
def UpdateDepsFile(deps_filename, old_cr_revision, new_cr_revision,
                   changed_deps):
  """Update the DEPS file with the new revision."""
  # Update the chromium_revision variable.
  with open(deps_filename, 'rb') as deps_file:
    deps_content = deps_file.read()
  # Plain-text substitution: relies on the old revision string occurring
  # only where it should be replaced.
  deps_content = deps_content.replace(old_cr_revision, new_cr_revision)
  with open(deps_filename, 'wb') as deps_file:
    deps_file.write(deps_content)
  # Update each individual DEPS entry.
  for dep in changed_deps:
    local_dep_dir = os.path.join(CHECKOUT_ROOT_DIR, dep.path)
    if not os.path.isdir(local_dep_dir):
      raise RollError(
          'Cannot find local directory %s. Make sure the .gclient file\n'
          'contains all platforms in the target_os list, i.e.\n'
          'target_os = ["android", "unix", "mac", "ios", "win"];\n'
          'Then run "gclient sync" again.' % local_dep_dir)
    # Let gclient rewrite the pinned revision for this dependency.
    _RunCommand(
        ['gclient', 'setdep', '--revision', '%s@%s' % (dep.path, dep.new_rev)],
        working_dir=CHECKOUT_SRC_DIR)
def _IsTreeClean():
  """Return True when `git status` reports no dirty or unversioned files."""
  stdout, _ = _RunCommand(['git', 'status', '--porcelain'])
  if len(stdout) == 0:
    return True
  logging.error('Dirty/unversioned files:\n%s', stdout)
  return False
def _EnsureUpdatedMasterBranch(dry_run):
  """Verify master is checked out, then pull the latest changes.

  Exits the process on a non-master branch unless dry_run is set.
  """
  current_branch = _RunCommand(
      ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])[0].splitlines()[0]
  if current_branch != 'master':
    logging.error('Please checkout the master branch and re-run this script.')
    if not dry_run:
      sys.exit(-1)
  logging.info('Updating master branch...')
  _RunCommand(['git', 'pull'])
def _CreateRollBranch(dry_run):
  """Create and check out the local roll branch (no-op on dry runs)."""
  logging.info('Creating roll branch: %s', ROLL_BRANCH_NAME)
  if not dry_run:
    _RunCommand(['git', 'checkout', '-b', ROLL_BRANCH_NAME])
def _RemovePreviousRollBranch(dry_run):
  """Delete a leftover roll branch, switching off it first if it is active."""
  active_branch, branches = _GetBranches()
  if active_branch == ROLL_BRANCH_NAME:
    active_branch = 'master'
  if ROLL_BRANCH_NAME in branches:
    logging.info('Removing previous roll branch (%s)', ROLL_BRANCH_NAME)
    if not dry_run:
      _RunCommand(['git', 'checkout', active_branch])
      _RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME])
def _LocalCommit(commit_msg, dry_run):
  """Stage all tracked changes and commit them locally (no-op on dry runs)."""
  logging.info('Committing changes locally.')
  if not dry_run:
    _RunCommand(['git', 'add', '--update', '.'])
    _RunCommand(['git', 'commit', '-m', commit_msg])
def _UploadCL(dry_run, rietveld_email=None):
  """Upload the local commit as a CL.

  EDITOR=true suppresses the interactive description editor.
  """
  logging.info('Uploading CL...')
  if not dry_run:
    cmd = ['git', 'cl', 'upload', '-f']
    if rietveld_email:
      cmd.append('--email=%s' % rietveld_email)
    _RunCommand(cmd, extra_env={'EDITOR': 'true'})
def _SendToCQ(dry_run, skip_cq):
  """Mark the uploaded CL for the commit queue unless skipped or dry-run."""
  logging.info('Sending the CL to the CQ...')
  if not dry_run and not skip_cq:
    _RunCommand(['git', 'cl', 'set_commit'])
    logging.info('Sent the CL to the CQ.')
def main():
  """Run the roll: compute changed deps, update DEPS, commit, upload, CQ.

  Returns 0 on success, 1 when the working tree is dirty.
  """
  p = argparse.ArgumentParser()
  p.add_argument('--clean', action='store_true', default=False,
                 help='Removes any previous local roll branch.')
  p.add_argument('-r', '--revision',
                 help=('Chromium Git revision to roll to. Defaults to the '
                       'Chromium HEAD revision if omitted.'))
  p.add_argument('-u', '--rietveld-email',
                 help=('E-mail address to use for creating the CL at Rietveld'
                       'If omitted a previously cached one will be used or an '
                       'error will be thrown during upload.'))
  p.add_argument('--dry-run', action='store_true', default=False,
                 help=('Calculate changes and modify DEPS, but don\'t create '
                       'any local branch, commit, upload CL or send any '
                       'tryjobs.'))
  p.add_argument('-i', '--ignore-unclean-workdir', action='store_true',
                 default=False,
                 help=('Ignore if the current branch is not master or if there '
                       'are uncommitted changes (default: %(default)s).'))
  p.add_argument('--skip-cq', action='store_true', default=False,
                 help='Skip sending the CL to the CQ (default: %(default)s)')
  p.add_argument('-v', '--verbose', action='store_true', default=False,
                 help='Be extra verbose in printing of log messages.')
  opts = p.parse_args()
  if opts.verbose:
    logging.basicConfig(level=logging.DEBUG)
  else:
    logging.basicConfig(level=logging.INFO)
  if not opts.ignore_unclean_workdir and not _IsTreeClean():
    logging.error('Please clean your local checkout first.')
    return 1
  if opts.clean:
    _RemovePreviousRollBranch(opts.dry_run)
  if not opts.ignore_unclean_workdir:
    _EnsureUpdatedMasterBranch(opts.dry_run)
  # Default to Chromium HEAD when no explicit revision was given.
  new_cr_rev = opts.revision
  if not new_cr_rev:
    stdout, _ = _RunCommand(['git', 'ls-remote', CHROMIUM_SRC_URL, 'HEAD'])
    head_rev = stdout.strip().split('\t')[0]
    logging.info('No revision specified. Using HEAD: %s', head_rev)
    new_cr_rev = head_rev
  deps_filename = os.path.join(CHECKOUT_SRC_DIR, 'DEPS')
  libyuv_deps = ParseLocalDepsFile(deps_filename)
  current_cr_rev = libyuv_deps['vars']['chromium_revision']
  # Commit positions are only used for a human-readable interval in the
  # commit message.
  current_commit_pos = ParseCommitPosition(ReadRemoteCrCommit(current_cr_rev))
  new_commit_pos = ParseCommitPosition(ReadRemoteCrCommit(new_cr_rev))
  new_cr_deps = ParseRemoteCrDepsFile(new_cr_rev)
  changed_deps = CalculateChangedDeps(libyuv_deps, new_cr_deps)
  clang_change = CalculateChangedClang(new_cr_rev)
  commit_msg = GenerateCommitMessage(current_cr_rev, new_cr_rev,
                                     current_commit_pos, new_commit_pos,
                                     changed_deps, clang_change)
  logging.debug('Commit message:\n%s', commit_msg)
  _CreateRollBranch(opts.dry_run)
  UpdateDepsFile(deps_filename, current_cr_rev, new_cr_rev, changed_deps)
  _LocalCommit(commit_msg, opts.dry_run)
  _UploadCL(opts.dry_run, opts.rietveld_email)
  _SendToCQ(opts.dry_run, opts.skip_cq)
  return 0
# Script entry point: propagate main()'s return code to the shell.
if __name__ == '__main__':
  sys.exit(main())
Skip cipd dependencies when autorolling.
This is a stop-gap; it will simply cause cipd deps to not be updated,
which will probably keep things working for now, but it's not what we
want for the long term.
Bug: chromium:659808
Change-Id: I292b96f174c8d910c0b5f0196eefd0e5a5f907c2
Reviewed-on: https://chromium-review.googlesource.com/1016380
Reviewed-by: Frank Barchard <52415189a3f67d375195cbbb6fdb1f1759498e57@chromium.org>
Commit-Queue: Frank Barchard <52415189a3f67d375195cbbb6fdb1f1759498e57@chromium.org>
#!/usr/bin/env python
# Copyright 2017 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This is a modified copy of the script in
# https://chromium.googlesource.com/external/webrtc/+/master/tools-webrtc/autoroller/roll_deps.py
# customized for libyuv.
"""Script to automatically roll dependencies in the libyuv DEPS file."""
import argparse
import base64
import collections
import logging
import os
import re
import subprocess
import sys
import urllib
# Skip these dependencies (list without solution name prefix).
DONT_AUTOROLL_THESE = [
'src/third_party/gflags/src',
]
LIBYUV_URL = 'https://chromium.googlesource.com/libyuv/libyuv'
CHROMIUM_SRC_URL = 'https://chromium.googlesource.com/chromium/src'
CHROMIUM_COMMIT_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s'
CHROMIUM_LOG_TEMPLATE = CHROMIUM_SRC_URL + '/+log/%s'
CHROMIUM_FILE_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s/%s'
COMMIT_POSITION_RE = re.compile('^Cr-Commit-Position: .*#([0-9]+).*$')
CLANG_REVISION_RE = re.compile(r'^CLANG_REVISION = \'(\d+)\'$')
ROLL_BRANCH_NAME = 'roll_chromium_revision'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
CHECKOUT_SRC_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, os.pardir,
os.pardir))
CHECKOUT_ROOT_DIR = os.path.realpath(os.path.join(CHECKOUT_SRC_DIR, os.pardir))
sys.path.append(os.path.join(CHECKOUT_SRC_DIR, 'build'))
import find_depot_tools
find_depot_tools.add_depot_tools_to_path()
CLANG_UPDATE_SCRIPT_URL_PATH = 'tools/clang/scripts/update.py'
CLANG_UPDATE_SCRIPT_LOCAL_PATH = os.path.join(CHECKOUT_SRC_DIR, 'tools',
'clang', 'scripts', 'update.py')
DepsEntry = collections.namedtuple('DepsEntry', 'path url revision')
ChangedDep = collections.namedtuple('ChangedDep',
'path url current_rev new_rev')
class RollError(Exception):
  """Raised when the roll cannot proceed (e.g. unparsable revision data or a
  missing local checkout directory)."""
def VarLookup(local_scope):
  """Return a Var() resolver bound to *local_scope*'s 'vars' dict."""
  def _lookup(var_name):
    return local_scope['vars'][var_name]
  return _lookup
def ParseDepsDict(deps_content):
  """Evaluate DEPS file content and return its local scope as a dict."""
  local_scope = {}
  global_scope = {
      # Var('name') resolves against the 'vars' dict that the DEPS content
      # itself defines (filled into local_scope while exec runs).
      'Var': lambda var_name: local_scope['vars'][var_name],
      'deps_os': {},
  }
  exec(deps_content, global_scope, local_scope)
  return local_scope
def ParseLocalDepsFile(filename):
  """Read a local DEPS file and return its evaluated scope as a dict."""
  with open(filename, 'rb') as f:
    deps_content = f.read()
  return ParseDepsDict(deps_content)
def ParseRemoteCrDepsFile(revision):
  """Fetch Chromium's DEPS at *revision* and return its evaluated scope."""
  deps_content = ReadRemoteCrFile('DEPS', revision)
  return ParseDepsDict(deps_content)
def ParseCommitPosition(commit_message):
  """Extract the Cr-Commit-Position number from a commit message.

  Scans the message bottom-up (footers come last) for a line matching
  COMMIT_POSITION_RE and returns the captured number as a string.
  Exits the process if no such line exists.
  """
  for line in reversed(commit_message.splitlines()):
    m = COMMIT_POSITION_RE.match(line.strip())
    if m:
      return m.group(1)
  logging.error('Failed to parse commit position id from:\n%s\n',
                commit_message)
  sys.exit(-1)
def _RunCommand(command, working_dir=None, ignore_exit_code=False,
                extra_env=None):
  """Runs a command and returns the output from that command.

  If the command fails (exit code != 0), the function will exit the process.

  Returns:
    A tuple containing the stdout and stderr outputs as strings.
  """
  working_dir = working_dir or CHECKOUT_SRC_DIR
  logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir)
  env = os.environ.copy()
  if extra_env:
    assert all(type(value) == str for value in extra_env.values())
    logging.debug('extra env: %s', extra_env)
    env.update(extra_env)
  # NOTE(review): reading stdout to EOF before touching stderr can deadlock
  # if the child fills the stderr pipe buffer first -- confirm commands used
  # here emit little stderr, or switch to Popen.communicate().
  p = subprocess.Popen(command, stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE, env=env,
                       cwd=working_dir, universal_newlines=True)
  std_output = p.stdout.read()
  err_output = p.stderr.read()
  p.wait()
  p.stdout.close()
  p.stderr.close()
  if not ignore_exit_code and p.returncode != 0:
    logging.error('Command failed: %s\n'
                  'stdout:\n%s\n'
                  'stderr:\n%s\n', ' '.join(command), std_output, err_output)
    sys.exit(p.returncode)
  return std_output, err_output
def _GetBranches():
  """Returns a tuple of active,branches.

  The 'active' is the name of the currently active branch and 'branches' is a
  list of all branches.
  """
  # `git branch` marks the checked-out branch with a leading '*'.
  lines = _RunCommand(['git', 'branch'])[0].split('\n')
  branches = []
  active = ''
  for line in lines:
    if '*' in line:
      # The assumption is that the first char will always be the '*'.
      active = line[1:].strip()
      branches.append(active)
    else:
      branch = line.strip()
      if branch:
        branches.append(branch)
  return active, branches
def _ReadGitilesContent(url):
  """Fetch *url* from gitiles and return its base64-decoded content."""
  # Download and decode BASE64 content until
  # https://code.google.com/p/gitiles/issues/detail?id=7 is fixed.
  base64_content = ReadUrlContent(url + '?format=TEXT')
  return base64.b64decode(base64_content[0])
def ReadRemoteCrFile(path_below_src, revision):
  """Reads a remote Chromium file of a specific revision. Returns a string."""
  # Path is relative to Chromium's src/ root.
  return _ReadGitilesContent(CHROMIUM_FILE_TEMPLATE % (revision,
                                                       path_below_src))
def ReadRemoteCrCommit(revision):
  """Reads a remote Chromium commit message. Returns a string."""
  return _ReadGitilesContent(CHROMIUM_COMMIT_TEMPLATE % revision)
def ReadUrlContent(url):
  """Connect to a remote host and read the contents. Returns a list of lines."""
  # NOTE(review): urllib.urlopen is Python 2 only; a Python 3 port would need
  # urllib.request.urlopen.
  conn = urllib.urlopen(url)
  try:
    return conn.readlines()
  except IOError as e:
    logging.exception('Error connecting to %s. Error: %s', url, e)
    raise
  finally:
    conn.close()
def GetMatchingDepsEntries(depsentry_dict, dir_path):
  """Gets all deps entries matching the provided path.

  This list may contain more than one DepsEntry object.
  Example: dir_path='src/testing' would give results containing both
  'src/testing/gtest' and 'src/testing/gmock' deps entries for Chromium's DEPS.
  Example 2: dir_path='src/build' should return 'src/build' but not
  'src/buildtools'.

  Returns:
    A list of DepsEntry objects.
  """
  dir_parts = dir_path.split('/')
  result = []
  # .items() works on both Python 2 and 3 (iteritems() is Python 2 only).
  for path, depsentry in depsentry_dict.items():
    if path == dir_path:
      result.append(depsentry)
    else:
      parts = path.split('/')
      # A path with fewer components cannot lie under dir_path; checking the
      # length first also fixes an IndexError the previous comparison hit for
      # such entries (e.g. path='src' vs dir_path='src/build').
      if len(parts) >= len(dir_parts) and all(
          part == parts[i] for i, part in enumerate(dir_parts)):
        result.append(depsentry)
  return result
def BuildDepsentryDict(deps_dict):
  """Builds a dict of paths to DepsEntry objects from a raw parsed deps dict."""
  result = {}
  def AddDepsEntries(deps_subdict):
    # First writer wins: entries from 'deps' take precedence over 'deps_os'.
    for path, deps_url_spec in deps_subdict.items():
      # The deps url is either an URL and a condition, or just the URL.
      if isinstance(deps_url_spec, dict):
        if deps_url_spec.get('dep_type') == 'cipd':
          # cipd package entries have no git URL to roll; skip them.
          continue
        deps_url = deps_url_spec['url']
      else:
        deps_url = deps_url_spec
      # `in` / .items() replace Python-2-only has_key() / iteritems().
      if path not in result:
        url, revision = deps_url.split('@') if deps_url else (None, None)
        result[path] = DepsEntry(path, url, revision)
  AddDepsEntries(deps_dict['deps'])
  # 'unix' was previously listed twice; the duplicate pass was a no-op.
  for deps_os in ['win', 'mac', 'unix', 'android', 'ios']:
    AddDepsEntries(deps_dict.get('deps_os', {}).get(deps_os, {}))
  return result
def CalculateChangedDeps(libyuv_deps, new_cr_deps):
  """
  Calculate changed deps entries based on entries defined in the libyuv DEPS
  file:
  - If a shared dependency with the Chromium DEPS file: roll it to the same
    revision as Chromium (i.e. entry in the new_cr_deps dict)
  - If it's a Chromium sub-directory, roll it to the HEAD revision (notice
    this means it may be ahead of the chromium_revision, but generally these
    should be close).
  - If it's another DEPS entry (not shared with Chromium), roll it to HEAD
    unless it's configured to be skipped.

  Returns:
    A list of ChangedDep objects representing the changed deps.
  """
  result = []
  libyuv_entries = BuildDepsentryDict(libyuv_deps)
  new_cr_entries = BuildDepsentryDict(new_cr_deps)
  # NOTE(review): iteritems() is Python 2 only.
  for path, libyuv_deps_entry in libyuv_entries.iteritems():
    if path in DONT_AUTOROLL_THESE:
      continue
    cr_deps_entry = new_cr_entries.get(path)
    if cr_deps_entry:
      # Use the revision from Chromium's DEPS file.
      new_rev = cr_deps_entry.revision
      assert libyuv_deps_entry.url == cr_deps_entry.url, (
          'Libyuv DEPS entry %s has a different URL (%s) than Chromium (%s).' %
          (path, libyuv_deps_entry.url, cr_deps_entry.url))
    else:
      # Use the HEAD of the deps repo.
      stdout, _ = _RunCommand(['git', 'ls-remote', libyuv_deps_entry.url,
                               'HEAD'])
      new_rev = stdout.strip().split('\t')[0]
    # Check if an update is necessary.
    if libyuv_deps_entry.revision != new_rev:
      logging.debug('Roll dependency %s to %s', path, new_rev)
      result.append(ChangedDep(path, libyuv_deps_entry.url,
                               libyuv_deps_entry.revision, new_rev))
  return sorted(result)
def CalculateChangedClang(new_cr_rev):
  """Compare local vs remote Clang revision; return the pair as a ChangedDep."""
  def GetClangRev(lines):
    # Find the CLANG_REVISION = '<digits>' assignment in an update script.
    for line in lines:
      match = CLANG_REVISION_RE.match(line)
      if match:
        return match.group(1)
    raise RollError('Could not parse Clang revision!')
  with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'rb') as f:
    current_lines = f.readlines()
  current_rev = GetClangRev(current_lines)
  new_clang_update_py = ReadRemoteCrFile(CLANG_UPDATE_SCRIPT_URL_PATH,
                                         new_cr_rev).splitlines()
  new_rev = GetClangRev(new_clang_update_py)
  return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev, new_rev)
def GenerateCommitMessage(current_cr_rev, new_cr_rev, current_commit_pos,
                          new_commit_pos, changed_deps_list, clang_change):
  """Compose the commit message for the roll CL.

  Args:
    current_cr_rev: Chromium revision currently pinned in DEPS.
    new_cr_rev: Chromium revision being rolled to.
    current_commit_pos: commit position matching current_cr_rev.
    new_commit_pos: commit position matching new_cr_rev.
    changed_deps_list: list of ChangedDep objects for rolled dependencies.
    clang_change: ChangedDep describing the Clang script revision change.

  Returns:
    The complete commit message as a single newline-joined string.
  """
  rev_interval = '%s..%s' % (current_cr_rev[0:10], new_cr_rev[0:10])
  git_number_interval = '%s:%s' % (current_commit_pos, new_commit_pos)

  lines = ['Roll chromium_revision %s (%s)\n' % (rev_interval,
                                                 git_number_interval)]
  lines.append('Change log: %s' % (CHROMIUM_LOG_TEMPLATE % rev_interval))
  lines.append('Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE % rev_interval))

  # TBR field will be empty unless in some custom cases, where some engineers
  # are added.
  tbr_authors = ''

  if changed_deps_list:
    lines.append('Changed dependencies:')
    for dep in changed_deps_list:
      lines.append('* %s: %s/+log/%s..%s' % (dep.path, dep.url,
                                             dep.current_rev[0:10],
                                             dep.new_rev[0:10]))
    lines.append('DEPS diff: %s\n' % (CHROMIUM_FILE_TEMPLATE %
                                      (rev_interval, 'DEPS')))
  else:
    lines.append('No dependencies changed.')

  if clang_change.current_rev != clang_change.new_rev:
    lines.append('Clang version changed %s:%s' %
                 (clang_change.current_rev, clang_change.new_rev))
    lines.append('Details: %s\n' % (CHROMIUM_FILE_TEMPLATE %
                                    (rev_interval,
                                     CLANG_UPDATE_SCRIPT_URL_PATH)))
  else:
    lines.append('No update to Clang.\n')

  lines.append('TBR=%s' % tbr_authors)
  lines.append('BUG=None')
  return '\n'.join(lines)
def UpdateDepsFile(deps_filename, old_cr_revision, new_cr_revision,
                   changed_deps):
  """Update the DEPS file with the new revision.

  Rewrites the pinned chromium_revision string in place, then pins every
  changed dependency to its new revision via `gclient setdep`.

  Raises:
    RollError: if a dependency's local checkout directory is missing.
  """
  # Swap the chromium_revision variable in the DEPS text.
  with open(deps_filename, 'rb') as deps_file:
    deps_content = deps_file.read()
  with open(deps_filename, 'wb') as deps_file:
    deps_file.write(deps_content.replace(old_cr_revision, new_cr_revision))

  # Pin each changed dependency entry individually.
  for dep in changed_deps:
    local_dep_dir = os.path.join(CHECKOUT_ROOT_DIR, dep.path)
    if not os.path.isdir(local_dep_dir):
      raise RollError(
          'Cannot find local directory %s. Make sure the .gclient file\n'
          'contains all platforms in the target_os list, i.e.\n'
          'target_os = ["android", "unix", "mac", "ios", "win"];\n'
          'Then run "gclient sync" again.' % local_dep_dir)
    _RunCommand(
        ['gclient', 'setdep', '--revision', '%s@%s' % (dep.path, dep.new_rev)],
        working_dir=CHECKOUT_SRC_DIR)
def _IsTreeClean():
  """Return True when `git status` reports no modified/untracked files."""
  stdout, _ = _RunCommand(['git', 'status', '--porcelain'])
  if stdout:
    logging.error('Dirty/unversioned files:\n%s', stdout)
    return False
  return True
def _EnsureUpdatedMasterBranch(dry_run):
  """Verify master is checked out, then pull the latest changes.

  Exits the process when on another branch, unless dry_run is set.
  """
  stdout, _ = _RunCommand(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
  current_branch = stdout.splitlines()[0]
  if current_branch != 'master':
    logging.error('Please checkout the master branch and re-run this script.')
    if not dry_run:
      sys.exit(-1)

  logging.info('Updating master branch...')
  _RunCommand(['git', 'pull'])
def _CreateRollBranch(dry_run):
  """Create and switch to the local roll branch (skipped on dry run)."""
  logging.info('Creating roll branch: %s', ROLL_BRANCH_NAME)
  if dry_run:
    return
  _RunCommand(['git', 'checkout', '-b', ROLL_BRANCH_NAME])
def _RemovePreviousRollBranch(dry_run):
  """Delete a leftover roll branch from an earlier run, if one exists."""
  active_branch, branches = _GetBranches()
  # Move off the roll branch before deleting it.
  if active_branch == ROLL_BRANCH_NAME:
    active_branch = 'master'
  if ROLL_BRANCH_NAME not in branches:
    return
  logging.info('Removing previous roll branch (%s)', ROLL_BRANCH_NAME)
  if not dry_run:
    _RunCommand(['git', 'checkout', active_branch])
    _RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME])
def _LocalCommit(commit_msg, dry_run):
  """Stage tracked changes and commit them locally (skipped on dry run)."""
  logging.info('Committing changes locally.')
  if dry_run:
    return
  _RunCommand(['git', 'add', '--update', '.'])
  _RunCommand(['git', 'commit', '-m', commit_msg])
def _UploadCL(dry_run, rietveld_email=None):
  """Upload the local commit as a CL (skipped on dry run).

  Args:
    dry_run: when True, log only.
    rietveld_email: optional e-mail to pass to `git cl upload`.
  """
  logging.info('Uploading CL...')
  if dry_run:
    return
  cmd = ['git', 'cl', 'upload', '-f']
  if rietveld_email:
    cmd.append('--email=%s' % rietveld_email)
  # EDITOR=true keeps `git cl` from opening an interactive editor.
  _RunCommand(cmd, extra_env={'EDITOR': 'true'})
def _SendToCQ(dry_run, skip_cq):
  """Start the commit queue for the uploaded CL.

  Args:
    dry_run: when True, do nothing.
    skip_cq: when True, leave the CL out of the CQ.
  """
  # Fix: previously 'Sending the CL to the CQ...' was logged even when the
  # send was suppressed by --dry-run/--skip-cq, which was misleading.
  if dry_run or skip_cq:
    logging.info('Skipping sending the CL to the CQ.')
    return
  logging.info('Sending the CL to the CQ...')
  _RunCommand(['git', 'cl', 'set_commit'])
  logging.info('Sent the CL to the CQ.')
def main():
  """Roll the pinned Chromium revision and upload the resulting CL.

  Returns:
    0 on success, 1 when the working directory is not clean.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--clean', action='store_true', default=False,
                      help='Removes any previous local roll branch.')
  parser.add_argument('-r', '--revision',
                      help=('Chromium Git revision to roll to. Defaults to the '
                            'Chromium HEAD revision if omitted.'))
  parser.add_argument('-u', '--rietveld-email',
                      help=('E-mail address to use for creating the CL at Rietveld'
                            'If omitted a previously cached one will be used or an '
                            'error will be thrown during upload.'))
  parser.add_argument('--dry-run', action='store_true', default=False,
                      help=('Calculate changes and modify DEPS, but don\'t create '
                            'any local branch, commit, upload CL or send any '
                            'tryjobs.'))
  parser.add_argument('-i', '--ignore-unclean-workdir', action='store_true',
                      default=False,
                      help=('Ignore if the current branch is not master or if there '
                            'are uncommitted changes (default: %(default)s).'))
  parser.add_argument('--skip-cq', action='store_true', default=False,
                      help='Skip sending the CL to the CQ (default: %(default)s)')
  parser.add_argument('-v', '--verbose', action='store_true', default=False,
                      help='Be extra verbose in printing of log messages.')
  options = parser.parse_args()

  logging.basicConfig(
      level=logging.DEBUG if options.verbose else logging.INFO)

  # Sanity-check and prepare the local checkout.
  if not options.ignore_unclean_workdir and not _IsTreeClean():
    logging.error('Please clean your local checkout first.')
    return 1
  if options.clean:
    _RemovePreviousRollBranch(options.dry_run)
  if not options.ignore_unclean_workdir:
    _EnsureUpdatedMasterBranch(options.dry_run)

  # Resolve the target Chromium revision.
  new_cr_rev = options.revision
  if not new_cr_rev:
    stdout, _ = _RunCommand(['git', 'ls-remote', CHROMIUM_SRC_URL, 'HEAD'])
    head_rev = stdout.strip().split('\t')[0]
    logging.info('No revision specified. Using HEAD: %s', head_rev)
    new_cr_rev = head_rev

  # Work out what changes between the current and the new revision.
  deps_filename = os.path.join(CHECKOUT_SRC_DIR, 'DEPS')
  libyuv_deps = ParseLocalDepsFile(deps_filename)
  current_cr_rev = libyuv_deps['vars']['chromium_revision']
  current_commit_pos = ParseCommitPosition(ReadRemoteCrCommit(current_cr_rev))
  new_commit_pos = ParseCommitPosition(ReadRemoteCrCommit(new_cr_rev))
  new_cr_deps = ParseRemoteCrDepsFile(new_cr_rev)
  changed_deps = CalculateChangedDeps(libyuv_deps, new_cr_deps)
  clang_change = CalculateChangedClang(new_cr_rev)
  commit_msg = GenerateCommitMessage(current_cr_rev, new_cr_rev,
                                     current_commit_pos, new_commit_pos,
                                     changed_deps, clang_change)
  logging.debug('Commit message:\n%s', commit_msg)

  # Apply the roll and push it through review.
  _CreateRollBranch(options.dry_run)
  UpdateDepsFile(deps_filename, current_cr_rev, new_cr_rev, changed_deps)
  _LocalCommit(commit_msg, options.dry_run)
  _UploadCL(options.dry_run, options.rietveld_email)
  _SendToCQ(options.dry_run, options.skip_cq)
  return 0
# Script entry point: propagate main()'s status code to the shell.
if __name__ == '__main__':
  sys.exit(main())
|
# - coding: utf-8 -
# Copyright (C) 2008-2010 Toms Bauģis <toms.baugis at gmail.com>
# Dual licensed under the MIT or GPL Version 2 licenses.
# See http://github.com/tbaugis/hamster_experiments/blob/master/README.textile
import math
import datetime as dt
import gtk, gobject
import pango, cairo
import re
try:
import pytweener
except: # we can also live without tweener. Scene.animate will not work
pytweener = None
import colorsys
from collections import deque
class Colors(object):
    """Color parsing/conversion helpers.

    Accepts hex strings ("#fff", "#ffffff", 12-digit), gtk.gdk.Color
    instances, or component sequences (0..1 or 0..255) and normalizes them.
    Used through the module-level singleton assigned below.
    """
    hex_color_normal = re.compile("#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})")
    hex_color_short = re.compile("#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])")
    hex_color_long = re.compile("#([a-fA-F0-9]{4})([a-fA-F0-9]{4})([a-fA-F0-9]{4})")

    def parse(self, color):
        """Normalize color into a list of components in the 0..1 range."""
        assert color is not None

        if isinstance(color, basestring):
            # Try the 12-digit form first, then 6-digit, then shorthand.
            match = self.hex_color_long.match(color)
            if match:
                return [int(group, 16) / 65535.0 for group in match.groups()]
            match = self.hex_color_normal.match(color)
            if match:
                return [int(group, 16) / 255.0 for group in match.groups()]
            match = self.hex_color_short.match(color)
            # Shorthand "#abc" doubles each digit: "aa", "bb", "cc".
            return [int(group + group, 16) / 255.0 for group in match.groups()]

        if isinstance(color, gtk.gdk.Color):
            return [color.red / 65535.0,
                    color.green / 65535.0,
                    color.blue / 65535.0]

        # Otherwise assume a component sequence; rescale if any part > 1.
        if color[0] > 1 or color[1] > 1 or color[2] > 1:
            return [channel / 255.0 for channel in color]
        return color

    def rgb(self, color):
        """Return the color with components scaled to 0..255."""
        return [channel * 255 for channel in self.parse(color)]

    def gdk(self, color):
        """Return the color as a gtk.gdk.Color."""
        c = self.parse(color)
        return gtk.gdk.Color(int(c[0] * 65535.0),
                             int(c[1] * 65535.0),
                             int(c[2] * 65535.0))

    def is_light(self, color):
        # Light vs. dark test on lightness (0..255 scale) — lets callers
        # push the color up or down for better contrast.
        return colorsys.rgb_to_hls(*self.rgb(color))[1] > 150

    def darker(self, color, step):
        """Return the color darkened by step (step in the 0..255 range)."""
        hue, lightness, saturation = colorsys.rgb_to_hls(*self.rgb(color))
        return colorsys.hls_to_rgb(hue, lightness - step, saturation)

    def contrast(self, color, step):
        """if color is dark, will return a lighter one, otherwise darker"""
        hue, lightness, saturation = colorsys.rgb_to_hls(*self.rgb(color))
        if self.is_light(color):
            return colorsys.hls_to_rgb(hue, lightness - step, saturation)
        return colorsys.hls_to_rgb(hue, lightness + step, saturation)

Colors = Colors()  # this is a static class, so an instance will do
class Graphics(object):
    """If context is given upon construction, will perform drawing
       operations on context instantly. Otherwise queues up the drawing
       instructions and performs them in passed-in order when _draw is called
       with context.

       Most of instructions are mapped to cairo functions by the same name.
       Where there are differences, documentation is provided.

       See http://www.cairographics.org/documentation/pycairo/reference/context.html#class-context
       for detailed description of the cairo drawing functions.
    """
    def __init__(self, context = None):
        self.context = context
        self.colors = Colors  # pointer to the color utilities instance
        self.extents = None   # bounds of the object, only if interactive
        self.opacity = 1.0    # opacity get's adjusted by parent - TODO - wrong inheritance?
        self.paths = None     # paths for mouse hit checks
        self._last_matrix = None
        self.__new_instructions = deque()  # instruction set until it is converted into path-based instructions
        self.__instruction_cache = None
        self.cache_surface = None

    def clear(self):
        """clear all instructions"""
        self.__new_instructions = deque()
        self.__instruction_cache = None
        self.paths = []

    # Each drawing operation comes as a pair: a static `_op(context, ...)`
    # that performs the cairo call, and a public `op(...)` that queues it
    # (or runs it immediately when self.context is set) via _add_instruction.

    @staticmethod
    def _stroke(context): context.stroke()
    def stroke(self, color = None, alpha = 1):
        """stroke the line with given color and opacity"""
        if color or alpha < 1:self.set_color(color, alpha)
        self._add_instruction(self._stroke,)

    @staticmethod
    def _fill(context): context.fill()
    def fill(self, color = None, alpha = 1):
        """fill path with given color and opacity"""
        if color or alpha < 1:self.set_color(color, alpha)
        self._add_instruction(self._fill,)

    @staticmethod
    def _stroke_preserve(context): context.stroke_preserve()
    def stroke_preserve(self, color = None, alpha = 1):
        """same as stroke, only after stroking, don't discard the path"""
        if color or alpha < 1:self.set_color(color, alpha)
        self._add_instruction(self._stroke_preserve,)

    @staticmethod
    def _fill_preserve(context): context.fill_preserve()
    def fill_preserve(self, color = None, alpha = 1):
        """same as fill, only after filling, don't discard the path"""
        if color or alpha < 1:self.set_color(color, alpha)
        self._add_instruction(self._fill_preserve,)

    @staticmethod
    def _new_path(context): context.new_path()
    def new_path(self):
        """discard current path"""
        self._add_instruction(self._new_path,)

    @staticmethod
    def _paint(context): context.paint()
    def paint(self):
        """paint the current source everywhere within the clip region"""
        self._add_instruction(self._paint,)

    @staticmethod
    def _set_source(context, image):
        context.set_source(image)
    def set_source(self, image, x = 0, y = 0):
        # NOTE(review): x and y are accepted but not forwarded to cairo —
        # confirm whether callers rely on them being ignored.
        self._add_instruction(self._set_source, image)

    @staticmethod
    def _set_source_surface(context, surface, x, y):
        context.set_source_surface(surface, x, y)
    def set_source_surface(self, surface, x = 0, y = 0):
        self._add_instruction(self._set_source_surface, surface, x, y)

    @staticmethod
    def _set_source_pixbuf(context, pixbuf, x, y):
        context.set_source_pixbuf(pixbuf, x, y)
    def set_source_pixbuf(self, pixbuf, x = 0, y = 0):
        self._add_instruction(self._set_source_pixbuf, pixbuf, x, y)

    @staticmethod
    def _save_context(context): context.save()
    def save_context(self):
        """save the current drawing context state (cairo save)"""
        self._add_instruction(self._save_context)

    @staticmethod
    def _restore_context(context): context.restore()
    def restore_context(self):
        """restore the previously saved drawing context state (cairo restore)"""
        self._add_instruction(self._restore_context)

    @staticmethod
    def _translate(context, x, y): context.translate(x, y)
    def translate(self, x, y):
        """change current position"""
        self._add_instruction(self._translate, x, y)

    @staticmethod
    def _move_to(context, x, y): context.move_to(x, y)
    def move_to(self, x, y):
        """change current position"""
        self._add_instruction(self._move_to, x, y)

    @staticmethod
    def _line_to(context, x, y): context.line_to(x, y)
    def line_to(self, x, y = None):
        """draw line; accepts either (x, y) or a list of (x, y) pairs"""
        if x and y is not None:
            self._add_instruction(self._line_to, x, y)
        elif isinstance(x, list) and y is None:
            for x2, y2 in x:
                self._add_instruction(self._line_to, x2, y2)

    @staticmethod
    def _rel_line_to(context, x, y): context.rel_line_to(x, y)
    def rel_line_to(self, x, y = None):
        """draw line relative to the current position"""
        if x and y:
            self._add_instruction(self._rel_line_to, x, y)
        elif isinstance(x, list) and y is None:
            for x2, y2 in x:
                self._add_instruction(self._rel_line_to, x2, y2)

    @staticmethod
    def _curve_to(context, x, y, x2, y2, x3, y3):
        context.curve_to(x, y, x2, y2, x3, y3)
    def curve_to(self, x, y, x2, y2, x3, y3):
        """draw curve. (x2, y2) is the middle point of the curve"""
        self._add_instruction(self._curve_to, x, y, x2, y2, x3, y3)

    @staticmethod
    def _close_path(context): context.close_path()
    def close_path(self):
        """connect end with beginning of path"""
        self._add_instruction(self._close_path,)

    @staticmethod
    def _set_line_width(context, width):
        context.set_line_width(width)
    @staticmethod
    def _set_dash(context, dash, dash_offset = 0):
        context.set_dash(dash, dash_offset)
    def set_line_style(self, width = None, dash = None, dash_offset = 0):
        """change the width and/or dash pattern of the line"""
        if width is not None:
            self._add_instruction(self._set_line_width, width)
        if dash is not None:
            self._add_instruction(self._set_dash, dash, dash_offset)

    # instance method (not static): needs self.opacity at execution time
    def _set_color(self, context, r, g, b, a):
        if a * self.opacity >= 1:
            context.set_source_rgb(r, g, b)
        else:
            context.set_source_rgba(r, g, b, a * self.opacity)
    def set_color(self, color, alpha = 1):
        """set active color. You can use hex colors like "#aaa", or you can use
        normalized RGB tripplets (where every value is in range 0..1), or
        you can do the same thing in range 0..65535"""
        color = self.colors.parse(color) # parse whatever we have there into a normalized triplet
        if len(color) == 4 and alpha is None:
            alpha = color[3]
        r, g, b = color[:3]
        self._add_instruction(self._set_color, r, g, b, alpha)

    @staticmethod
    def _arc(context, x, y, radius, start_angle, end_angle):
        context.arc(x, y, radius, start_angle, end_angle)
    def arc(self, x, y, radius, start_angle, end_angle):
        """draw arc going counter-clockwise from start_angle to end_angle"""
        self._add_instruction(self._arc, x, y, radius, start_angle, end_angle)

    def circle(self, x, y, radius):
        """draw circle"""
        self._add_instruction(self._arc, x, y, radius, 0, math.pi * 2)

    def ellipse(self, x, y, width, height, edges = None):
        """draw 'perfect' ellipse, opposed to squashed circle. works also for
        equilateral polygons"""
        # the automatic edge case is somewhat arbitrary
        steps = edges or max((32, width, height)) / 2

        angle = 0
        step = math.pi * 2 / steps
        points = []
        while angle < math.pi * 2:
            points.append((width / 2.0 * math.cos(angle),
                           height / 2.0 * math.sin(angle)))
            angle += step

        # move to top-left corner of the bounding box before drawing
        min_x = min((point[0] for point in points))
        min_y = min((point[1] for point in points))

        self.move_to(points[0][0] - min_x + x, points[0][1] - min_y + y)
        for p_x, p_y in points:
            self.line_to(p_x - min_x + x, p_y - min_y + y)
        self.line_to(points[0][0] - min_x + x, points[0][1] - min_y + y)

    @staticmethod
    def _arc_negative(context, x, y, radius, start_angle, end_angle):
        context.arc_negative(x, y, radius, start_angle, end_angle)
    def arc_negative(self, x, y, radius, start_angle, end_angle):
        """draw arc going clockwise from start_angle to end_angle"""
        self._add_instruction(self._arc_negative, x, y, radius, start_angle, end_angle)

    @staticmethod
    def _rounded_rectangle(context, x, y, x2, y2, corner_radius):
        # builds the rounded-corner path out of four lines and four curves
        half_corner = corner_radius / 2

        context.move_to(x + corner_radius, y)
        context.line_to(x2 - corner_radius, y)
        context.curve_to(x2 - half_corner, y, x2, y + half_corner, x2, y + corner_radius)
        context.line_to(x2, y2 - corner_radius)
        context.curve_to(x2, y2 - half_corner, x2 - half_corner, y2, x2 - corner_radius, y2)
        context.line_to(x + corner_radius, y2)
        context.curve_to(x + half_corner, y2, x, y2 - half_corner, x, y2 - corner_radius)
        context.line_to(x, y + corner_radius)
        context.curve_to(x, y + half_corner, x + half_corner, y, x + corner_radius, y)

    @staticmethod
    def _rectangle(context, x, y, w, h): context.rectangle(x, y, w, h)
    def rectangle(self, x, y, width, height, corner_radius = 0):
        "draw a rectangle. if corner_radius is specified, will draw rounded corners"
        if corner_radius <= 0:
            self._add_instruction(self._rectangle, x, y, width, height)
            return

        # make sure that w + h are larger than 2 * corner_radius
        corner_radius = min(corner_radius, min(width, height) / 2)
        x2, y2 = x + width, y + height
        self._add_instruction(self._rounded_rectangle, x, y, x2, y2, corner_radius)

    def fill_area(self, x, y, width, height, color, opacity = 1):
        """fill rectangular area with specified color"""
        self.rectangle(x, y, width, height)
        self.fill(color, opacity)

    def fill_stroke(self, fill = None, stroke = None, line_width = None):
        """fill and/or stroke the current path in one call"""
        if line_width: self.set_line_style(line_width)

        if fill and stroke:
            self.fill_preserve(fill)
        elif fill:
            self.fill(fill)

        if stroke:
            self.stroke(stroke)

    @staticmethod
    def _show_layout(context, text, font_desc, alignment, width, wrap, ellipsize):
        layout = context.create_layout()
        layout.set_font_description(font_desc)
        layout.set_markup(text)
        layout.set_width(width)
        layout.set_alignment(alignment)
        if width > 0:
            # wrap takes precedence over ellipsize when both are given
            if wrap is not None:
                layout.set_wrap(wrap)
            else:
                layout.set_ellipsize(ellipsize or pango.ELLIPSIZE_END)

        context.show_layout(layout)

    def create_layout(self, size = None):
        """utility function to create layout with the default font. Size and
        alignment parameters are shortcuts to according functions of the
        pango.Layout"""
        if not self.context:
            # TODO - this is rather sloppy as far as exception goes
            # should explain better
            raise "Can not create layout without existing context!"

        layout = self.context.create_layout()
        font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
        if size: font_desc.set_size(size * pango.SCALE)
        layout.set_font_description(font_desc)
        return layout

    def show_text(self, text, size = None, color = None):
        """display text with system's default font"""
        font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
        if color: self.set_color(color)
        if size: font_desc.set_size(size * pango.SCALE)
        self.show_layout(text, font_desc)

    def show_layout(self, text, font_desc, alignment = pango.ALIGN_LEFT, width = -1, wrap = None, ellipsize = None):
        """display text. font_desc is string of pango font description
           often handier than calling this function directly, is to create
           a class:Label object
        """
        self._add_instruction(self._show_layout, text, font_desc, alignment, width, wrap, ellipsize)

    def _remember_path(self, context, instruction):
        # Records the current path (in device coordinates) for mouse hit
        # checks and grows self.extents to cover it.
        context.save()
        context.identity_matrix()

        # fill-type instructions measure path extents, the rest stroke extents
        if instruction in (self._fill, self._fill_preserve):
            new_extents = context.path_extents()
        else:
            new_extents = context.stroke_extents()

        self.extents = self.extents or new_extents
        self.extents = (min(self.extents[0], new_extents[0]),
                        min(self.extents[1], new_extents[1]),
                        max(self.extents[2], new_extents[2]),
                        max(self.extents[3], new_extents[3]))

        self.paths.append(context.copy_path())

        context.restore()

    def _add_instruction(self, function, *params):
        # Immediate mode when a context was passed to __init__; otherwise
        # queue for later replay in _draw / _draw_as_bitmap.
        if self.context:
            function(self.context, *params)
        else:
            self.paths = None
            self.__new_instructions.append((function, params))

    def _draw(self, context, with_extents = False):
        """draw accumulated instructions in context"""

        # Fresh instructions are first compiled into __instruction_cache:
        # tuples of (path, color, line_width, instruction, args) so that on
        # subsequent draws the already-computed paths can be replayed.
        if self.__new_instructions: #new stuff!
            self.__instruction_cache = deque()
            current_color = None
            current_line = None
            instruction_cache = []

            while self.__new_instructions:
                instruction, args = self.__new_instructions.popleft()

                if instruction in (self._set_source,
                                   self._set_source_surface,
                                   self._set_source_pixbuf,
                                   self._paint,
                                   self._translate,
                                   self._save_context,
                                   self._restore_context):
                    # pass-through instructions: no path, no styling
                    self.__instruction_cache.append((None, None, None, instruction, args))

                elif instruction == self._show_layout:
                    self.__instruction_cache.append((None, current_color, None, instruction, args))

                elif instruction == self._set_color:
                    # styling is folded into the next path-ending instruction
                    current_color = args

                elif instruction == self._set_line_width:
                    current_line = args

                elif instruction in (self._new_path, self._stroke, self._fill,
                                     self._stroke_preserve,
                                     self._fill_preserve):
                    # path-ending instruction: snapshot the built-up path
                    self.__instruction_cache.append((context.copy_path(),
                                                     current_color,
                                                     current_line,
                                                     instruction, ()))
                    context.new_path() # reset even on preserve as the instruction will preserve it instead
                    instruction_cache = []

                else:
                    # the rest are non-special
                    instruction(context, *args)
                    instruction_cache.append((instruction, args))

            while instruction_cache: # stroke is missing so we just cache
                instruction, args = instruction_cache.pop(0)
                self.__instruction_cache.append((None, None, None, instruction, args))

        # if we have been moved around, we should update bounds
        check_extents = with_extents and (context.get_matrix() != self._last_matrix or not self.paths)
        if check_extents:
            self.paths = deque()
            self.extents = None

        if not self.__instruction_cache:
            return

        for path, color, line, instruction, args in self.__instruction_cache:
            if color: self._set_color(context, *color)
            if line: self._set_line_width(context, *line)

            if path:
                context.append_path(path)
                if check_extents:
                    self._remember_path(context, self._fill)

            if instruction:
                instruction(context, *args)

        # `instruction` holds the last replayed instruction here; if it did
        # not terminate a path, remember the trailing path too
        if check_extents and instruction not in (self._fill, self._stroke, self._fill_preserve, self._stroke_preserve):
            # last one
            self._remember_path(context, self._fill)

        self._last_matrix = context.get_matrix()

    def _draw_as_bitmap(self, context):
        """
            instead of caching paths, this function caches the whole drawn thing
            use cache_as_bitmap on sprite to enable this mode
        """
        matrix = context.get_matrix()

        # re-render only when instructions changed or the sprite moved;
        # otherwise just blit the cached surface
        if self.__new_instructions or matrix != self._last_matrix:
            if self.__new_instructions:
                self.__instruction_cache = list(self.__new_instructions)
                self.__new_instructions = deque()

            self.paths = deque()
            self.extents = None

            if not self.__instruction_cache:
                # no instructions - nothing to do
                return

            # instructions that end path
            path_end_instructions = (self._new_path, self._stroke, self._fill, self._stroke_preserve, self._fill_preserve)

            # measure the path in a dummy context so we know the size of surface
            for instruction, args in self.__instruction_cache:
                if instruction in path_end_instructions:
                    self._remember_path(context, instruction)

                if instruction in (self._set_source_pixbuf, self._set_source_surface):
                    # draw a rectangle around the pathless instructions so that the extents are correct
                    pixbuf = args[0]
                    x = args[1] if len(args) > 1 else 0
                    y = args[2] if len(args) > 2 else 0
                    self._rectangle(context, x, y, pixbuf.get_width(), pixbuf.get_height())

                instruction(context, *args)

            if instruction not in path_end_instructions: # last one
                self._remember_path(context, self._fill)

            # now draw the instructions on the caching surface
            w = int(self.extents[2] - self.extents[0]) + 1
            h = int(self.extents[3] - self.extents[1]) + 1
            self.cache_surface = context.get_target().create_similar(cairo.CONTENT_COLOR_ALPHA, w, h)
            ctx = gtk.gdk.CairoContext(cairo.Context(self.cache_surface))
            ctx.translate(-self.extents[0], -self.extents[1])

            ctx.transform(matrix)
            for instruction, args in self.__instruction_cache:
                instruction(ctx, *args)

            self._last_matrix = matrix
        else:
            # paste the cached bitmap at its remembered device position
            context.save()
            context.identity_matrix()
            context.translate(self.extents[0], self.extents[1])
            context.set_source_surface(self.cache_surface)
            context.paint()
            context.restore()
class Sprite(gtk.Object):
    """The Sprite class is a basic display list building block: a display list
       node that can display graphics and can also contain children.
       Once you have created the sprite, use Scene's add_child to add it to
       scene
    """

    # GObject signals emitted by sprites; connect via sprite.connect(...)
    __gsignals__ = {
        "on-mouse-over": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
        "on-mouse-out": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
        "on-click": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-drag": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-drag-finish": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-render": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
    }

    def __init__(self, x = 0, y = 0,
                 opacity = 1, visible = True,
                 rotation = 0, pivot_x = 0, pivot_y = 0,
                 scale_x = 1, scale_y = 1,
                 interactive = False, draggable = False,
                 z_order = 0, cache_as_bitmap = False):
        gtk.Object.__init__(self)

        #: list of children sprites. Use :func:`add_child` to add sprites
        self.sprites = []

        #: instance of :ref:`graphics` for this sprite
        self.graphics = Graphics()

        #: boolean denoting whether the sprite responds to mouse events
        self.interactive = interactive

        #: boolean marking if sprite can be automatically dragged
        self.draggable = draggable

        #: relative coordinates of the sprites anchor and rotation point
        self.pivot_x, self.pivot_y = pivot_x, pivot_y # rotation point in sprite's coordinates

        #: sprite opacity
        self.opacity = opacity

        #: boolean visibility flag
        self.visible = visible

        #: pointer to parent :class:`Sprite` or :class:`Scene`
        self.parent = None

        #: sprite coordinates
        self.x, self.y = x, y

        #: rotation of the sprite in radians (use :func:`math.degrees` to convert to degrees if necessary)
        self.rotation = rotation

        #: scale X
        self.scale_x = scale_x

        #: scale Y
        self.scale_y = scale_y

        #: drawing order between siblings. The one with the highest z_order will be on top.
        self.z_order = z_order

        #: Whether the sprite should be cached as a bitmap. Default: False.
        #: Generally good when you have many static sprites
        self.cache_as_bitmap = cache_as_bitmap

        # written via __dict__ to dodge the dirty-tracking in __setattr__
        self.__dict__["_sprite_dirty"] = True # flag that indicates that the graphics object of the sprite should be rendered

    def __setattr__(self, name, val):
        # Marks the sprite dirty on any attribute change, except pure
        # positioning/visibility ones which don't require re-rendering.
        # The sentinel string distinguishes "attribute missing" from None.
        if self.__dict__.get(name, "hamster_graphics_no_value_really") != val:
            self.__dict__[name] = val
            if name not in ('x', 'y', 'rotation', 'scale_x', 'scale_y', 'visible'):
                self.__dict__["_sprite_dirty"] = True

    def add_child(self, *sprites):
        """Add child sprite. Child will be nested within parent"""
        for sprite in sprites:
            # re-parent: a sprite can only have one parent at a time
            if sprite.parent:
                sprite.parent.remove_child(sprite)
            self.sprites.append(sprite)
            sprite.parent = self
        # keep children ordered by z_order so drawing honors stacking
        self.sprites = sorted(self.sprites, key=lambda sprite:sprite.z_order)

    def remove_child(self, *sprites):
        """Detach the given child sprites from this sprite."""
        for sprite in sprites:
            self.sprites.remove(sprite)
            sprite.parent = None

    def check_hit(self, x, y):
        """check if the given coordinates are inside the sprite's fill or stroke
           path"""
        if not self.graphics.extents:
            return False

        sprite_x, sprite_y, sprite_x2, sprite_y2 = self.graphics.extents

        # cheap bounding-box rejection before the precise path test
        if sprite_x <= x <= sprite_x2 and sprite_y <= y <= sprite_y2:
            paths = self.graphics.paths
            if not paths:
                return True

            # replay remembered paths on a throwaway surface for the hit test
            context = cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0))
            for path in paths:
                context.append_path(path)
            return context.in_fill(x, y)
        else:
            return False

    def _draw(self, context, opacity = 1):
        # Renders this sprite and its children into context. `opacity` is
        # the accumulated opacity of the ancestor chain.
        if self.visible is False:
            return

        context.new_path()

        if (self._sprite_dirty): # send signal to redo the drawing when sprite is dirty
            self.emit("on-render")
            self.__dict__["_sprite_dirty"] = False

        # apply translate / rotate-around-pivot / scale transforms
        if any((self.x, self.y, self.rotation, self.scale_x, self.scale_y)):
            context.save()

            if any((self.x, self.y, self.pivot_x, self.pivot_y)):
                context.translate(self.x + self.pivot_x, self.y + self.pivot_y)

            if self.rotation:
                context.rotate(self.rotation)

            if self.pivot_x or self.pivot_y:
                context.translate(-self.pivot_x, -self.pivot_y)

            if self.scale_x != 1 or self.scale_y != 1:
                context.scale(self.scale_x, self.scale_y)

        self.graphics.opacity = self.opacity * opacity

        if self.cache_as_bitmap:
            self.graphics._draw_as_bitmap(context)
        else:
            # paths/extents are only tracked when the sprite needs hit tests
            self.graphics._draw(context, self.interactive or self.draggable)

        # children inherit the combined opacity and current transform
        for sprite in self.sprites:
            sprite._draw(context, self.opacity * opacity)

        if any((self.x, self.y, self.rotation, self.scale_x, self.scale_y)):
            context.restore()

    # The _on_* handlers below are called by the Scene on mouse activity.
    def _on_click(self, button_state):
        self.emit("on-click", button_state)
        # clicks bubble up the parent chain
        if self.parent and isinstance(self.parent, Sprite):
            self.parent._on_click(button_state)

    def _on_mouse_over(self):
        # scene will call us when there is mouse
        self.emit("on-mouse-over")

    def _on_mouse_out(self):
        # scene will call us when there is mouse
        self.emit("on-mouse-out")

    def _on_drag(self, event):
        # scene will call us when there is mouse
        self.emit("on-drag", event)

    def _on_drag_finish(self, event):
        # scene will call us when there is mouse
        self.emit("on-drag-finish", event)
class Image(Sprite):
    """Displays image from file (PNG, loaded via cairo)."""
    def __init__(self, path, **kwargs):
        Sprite.__init__(self, **kwargs)

        #: path to the image
        self.path = path

        self.connect("on-render", self.on_render)
        # cached rendering of the image; see _draw below
        self.cache_surface = None
        # image dimensions, filled in on first render
        self.width, self.height = None, None

    def __setattr__(self, name, val):
        Sprite.__setattr__(self, name, val)
        if name == 'path': # no other reason to discard cache than just on path change
            self.cache_surface = None

    def _draw(self, context, opacity = 1):
        # Toggles cache_as_bitmap so the first draw renders the PNG into
        # the Graphics bitmap cache, and later draws reuse that surface.
        # NOTE(review): the toggling below flips the flag on alternating
        # draws once a cache exists — verify this ping-pong is intended.
        if self.cache_surface == None and self.__dict__['cache_as_bitmap'] == False:
            self.__dict__['cache_as_bitmap'] = True
        elif self.cache_surface and self.__dict__['cache_as_bitmap']:
            self.__dict__['cache_as_bitmap'] = False

        Sprite._draw(self, context, opacity)

        # grab the bitmap that Graphics produced on the caching pass
        if self.cache_surface == None:
            self.cache_surface = self.graphics.cache_surface

    def on_render(self, sprite):
        # Either replay the cached surface or load the PNG from disk.
        if self.cache_surface:
            self.graphics.set_source_surface(self.cache_surface)
            # TODO - drawing rectangle just to get the extents right - there might be a better way
            self.graphics.rectangle(0, 0, self.width, self.height)
            self.graphics.paint()
        else:
            image = cairo.ImageSurface.create_from_png(self.path)
            self.width, self.height = image.get_width(), image.get_height()
            self.graphics.set_source_surface(image)
            self.graphics.paint()
class Icon(Sprite):
    """Displays icon by name and size in the theme."""
    def __init__(self, name, size=24, cache_as_bitmap = True, **kwargs):
        Sprite.__init__(self, cache_as_bitmap = cache_as_bitmap, **kwargs)
        self.theme = gtk.icon_theme_get_default()

        #: icon name from theme
        self.name = name

        #: icon size in pixels
        self.size = size

        self.connect("on-render", self.on_render)
        # cached rendering of the icon; see _draw below
        self.cache_surface = None
        # icon dimensions, filled in on first render
        self.width, self.height = None, None

    def __setattr__(self, name, val):
        Sprite.__setattr__(self, name, val)
        # Fix: this previously checked `name == 'path'`, copy-pasted from
        # Image. Icon has no `path` attribute, so the cached surface was
        # never discarded when the icon name or size changed. The cache key
        # for an icon is (name, size), so invalidate on either.
        if name in ('name', 'size'):
            self.cache_surface = None

    def _draw(self, context, opacity = 1):
        # Same caching dance as Image: first draw renders the icon into the
        # Graphics bitmap cache, later draws reuse the surface.
        if self.cache_surface == None and self.__dict__['cache_as_bitmap'] == False:
            self.__dict__['cache_as_bitmap'] = True
        elif self.cache_surface and self.__dict__['cache_as_bitmap']:
            self.__dict__['cache_as_bitmap'] = False

        Sprite._draw(self, context, opacity)

        # grab the bitmap that Graphics produced on the caching pass
        if self.cache_surface == None:
            self.cache_surface = self.graphics.cache_surface

    def on_render(self, sprite):
        # Either replay the cached surface or load the icon from the theme.
        if self.cache_surface:
            self.graphics.set_source_surface(self.cache_surface)
            # TODO - drawing rectangle just to get the extents right - there might be a better way
            self.graphics.rectangle(0, 0, self.width, self.height)
            self.graphics.paint()
        else:
            icon = self.theme.load_icon(self.name, self.size, 0)
            self.width, self.height = icon.get_width(), icon.get_height()
            self.graphics.set_source_pixbuf(icon)
            self.graphics.paint()
class Label(Sprite):
    """Text sprite rendered through pango with the system's default font."""
    def __init__(self, text = "", size = 10, color = None,
                 alignment = pango.ALIGN_LEFT, **kwargs):
        Sprite.__init__(self, **kwargs)
        self.width, self.height = None, None
        #: pango.FontDescription, default is the system's font
        self.font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
        self.font_desc.set_size(size * pango.SCALE)
        #: color of label either as hex string or an (r,g,b) tuple
        self.color = color
        # requested layout width in pango units; -1 means unconstrained
        self._bounds_width = -1
        #: wrapping method. Can be set to pango. [WRAP_WORD, WRAP_CHAR,
        #: WRAP_WORD_CHAR]
        self.wrap = None
        #: Ellipsize mode. Can be set to pango. [ELLIPSIZE_NONE,
        #: ELLIPSIZE_START, ELLIPSIZE_MIDDLE, ELLIPSIZE_END]
        self.ellipsize = None
        #: alignment. one of pango.[ALIGN_LEFT, ALIGN_RIGHT, ALIGN_CENTER]
        self.alignment = alignment
        #: label text
        self.text = text
        #: font size
        self.size = size
        self.__surface = None
        self.connect("on-render", self.on_render)
    def __setattr__(self, name, val):
        """Re-measure the label whenever a property affecting its rendered
        size changes."""
        Sprite.__setattr__(self, name, val)
        if name == "width":
            # setting width means consumer wants to contrain the label
            if val is None or val == -1:
                self.__dict__['_bounds_width'] = -1
            else:
                self.__dict__['_bounds_width'] = val * pango.SCALE
        if name in ("width", "text", "size", "font_desc", "wrap", "ellipsize"):
            # avoid chicken and egg
            if "text" in self.__dict__ and "size" in self.__dict__ and "width" in self.__dict__:
                self._set_dimensions()
    def on_render(self, sprite):
        """Draw the text; an unfilled rectangle is added afterwards so the
        sprite gets correct interaction extents."""
        self.graphics.clear()
        if not self.text:
            return
        self.graphics.set_color(self.color)
        self.graphics.show_layout(self.text, self.font_desc,
                                  self.alignment,
                                  self._bounds_width,
                                  self.wrap,
                                  self.ellipsize)
        self.graphics.rectangle(0, 0, self.width, self.height)
    def _set_dimensions(self):
        """Measure the text on a throwaway off-screen context and store the
        resulting pixel size in width/height (bypassing __setattr__)."""
        context = gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0)))
        layout = context.create_layout()
        layout.set_font_description(self.font_desc)
        layout.set_markup(self.text)
        layout.set_width(self._bounds_width)
        layout.set_ellipsize(pango.ELLIPSIZE_NONE)
        if self.wrap is not None:
            layout.set_wrap(self.wrap)
        else:
            layout.set_ellipsize(self.ellipsize or pango.ELLIPSIZE_END)
        # TODO - the __dict__ part look rather lame but allows to circumvent the setattr
        self.__dict__['width'], self.height = layout.get_pixel_size()
class Rectangle(Sprite):
    """A rectangle sprite with optional fill, stroke and rounded corners."""
    def __init__(self, w, h, corner_radius = 0, fill = None, stroke = None, **kwargs):
        Sprite.__init__(self, **kwargs)
        #: width
        self.width = w
        #: height
        self.height = h
        #: fill color
        self.fill = fill
        #: stroke color
        self.stroke = stroke
        #: stroke line width
        self.line_width = 1
        #: corner radius. Set bigger than 0 for rounded corners
        self.corner_radius = corner_radius
        self.connect("on-render", self.on_render)
    def on_render(self, sprite):
        """Trace the (possibly rounded) rectangle path and paint it."""
        g = self.graphics
        g.rectangle(0, 0, self.width, self.height, self.corner_radius)
        g.fill_stroke(self.fill, self.stroke, self.line_width)
class Polygon(Sprite):
    """A closed polygon sprite drawn through the given points."""
    def __init__(self, points, fill = None, stroke = None, line_width = 1, **kwargs):
        Sprite.__init__(self, **kwargs)
        #: list of (x,y) tuples that the line should go through. Polygon
        #: will automatically close path.
        self.points = points
        #: fill color
        self.fill = fill
        #: stroke color
        self.stroke = stroke
        #: stroke line width
        self.line_width = line_width
        self.connect("on-render", self.on_render)
    def on_render(self, sprite):
        """Trace a path through all points, close it, then fill/stroke."""
        if not self.points: return
        self.graphics.move_to(*self.points[0])
        # BUGFIX: was line_to(*self.points), which unpacked the point list
        # into separate positional arguments; Graphics.line_to expects either
        # a single (x, y) pair or one list of (x, y) tuples, so any polygon
        # with two or more points raised/misbehaved. Pass the list instead.
        self.graphics.line_to(self.points[1:])
        self.graphics.close_path()
        self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
class Circle(Sprite):
    """An ellipse sprite; renders a true circle when width == height."""
    def __init__(self, width, height, fill = None, stroke = None, line_width = 1, **kwargs):
        Sprite.__init__(self, **kwargs)
        #: circle width
        self.width = width
        #: circle height
        self.height = height
        #: fill color
        self.fill = fill
        #: stroke color
        self.stroke = stroke
        #: stroke line width
        self.line_width = line_width
        self.connect("on-render", self.on_render)
    def on_render(self, sprite):
        """Draw a circle (equal dimensions) or a 'perfect' ellipse."""
        if self.width == self.height:
            radius = self.width / 2.0
            # BUGFIX: the center x was passed as self.width instead of
            # self.width / 2.0, which shifted the circle right so only its
            # left half fell inside the sprite's (0, 0, width, height) box.
            self.graphics.circle(radius, radius, radius)
        else:
            self.graphics.ellipse(0, 0, self.width, self.height)
        self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
class Scene(gtk.DrawingArea):
    """ Drawing area for displaying sprites.
    Add sprites to the Scene by calling :func:`add_child`.
    Scene is descendant of `gtk.DrawingArea <http://www.pygtk.org/docs/pygtk/class-gtkdrawingarea.html>`_
    and thus inherits all it's methods and everything.
    """
    # signal table: frame lifecycle, mouse interaction and drag & drop
    __gsignals__ = {
        "expose-event": "override",
        "configure_event": "override",
        "on-enter-frame": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, )),
        "on-finish-frame": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, )),
        "on-click": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
        "on-drag": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
        "on-drag-finish": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
        "on-mouse-move": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-down": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-up": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-over": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-out": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-scroll": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
    }
    def __init__(self, interactive = True, framerate = 80):
        """Set up the drawing area; when `interactive`, subscribe to mouse
        events so sprites receive over/out/click/drag notifications."""
        gtk.DrawingArea.__init__(self)
        if interactive:
            self.set_events(gtk.gdk.POINTER_MOTION_MASK
                            | gtk.gdk.LEAVE_NOTIFY_MASK | gtk.gdk.ENTER_NOTIFY_MASK
                            | gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK
                            | gtk.gdk.SCROLL_MASK)
            self.connect("motion_notify_event", self.__on_mouse_move)
            self.connect("enter_notify_event", self.__on_mouse_enter)
            self.connect("leave_notify_event", self.__on_mouse_leave)
            self.connect("button_press_event", self.__on_button_press)
            self.connect("button_release_event", self.__on_button_release)
            self.connect("scroll-event", self.__on_scroll)
        #: list of sprites in scene. use :func:`add_child` to add sprites
        self.sprites = []
        #: framerate of animation. This will limit how often call for
        #: redraw will be performed (that is - not more often than the framerate). It will
        #: also influence the smoothness of tweeners.
        self.framerate = framerate
        #: Scene width. Will be `None` until first expose (that is until first
        #: on-enter-frame signal below).
        self.width = None
        #: Scene height. Will be `None` until first expose (that is until first
        #: on-enter-frame signal below).
        self.height = None
        #: instance of :class:`pytweener.Tweener` that is used by
        #: :func:`animate` function, but can be also accessed directly for advanced control.
        self.tweener = None
        if pytweener:
            self.tweener = pytweener.Tweener(0.4, pytweener.Easing.Cubic.ease_in_out)
        #: instance of :class:`Colors` class for color parsing
        self.colors = Colors
        #: read only info about current framerate (frames per second)
        self.fps = 0 # inner frames per second counter
        #: Last known x position of the mouse (set on expose event)
        self.mouse_x = None
        #: Last known y position of the mouse (set on expose event)
        self.mouse_y = None
        #: Mouse cursor appearance.
        #: Replace with your own cursor or set to False to have no cursor.
        #: None will revert back the default behavior
        self.mouse_cursor = None
        # 1x1 invisible cursor used when mouse_cursor is set to False
        blank_pixmap = gtk.gdk.Pixmap(None, 1, 1, 1)
        self._blank_cursor = gtk.gdk.Cursor(blank_pixmap, blank_pixmap, gtk.gdk.Color(), gtk.gdk.Color(), 0, 0)
        #: Miminum distance in pixels for a drag to occur
        self.drag_distance = 1
        # internal drag / redraw bookkeeping
        self._last_frame_time = None
        self._mouse_sprite = None
        self._drag_sprite = None
        self.__drag_started = False
        self.__drag_start_position = None
        self._button_press_time = None # to distinguish between click and drag
        self._mouse_in = False
        self.__drawing_queued = False
        self.__drag_x, self.__drag_y = None, None
        self.__last_expose_time = dt.datetime.now()
        self.__last_cursor = None
    def add_child(self, *sprites):
        """Add one or several :class:`graphics.Sprite` sprites to scene """
        for sprite in sprites:
            # re-parent if the sprite already lives somewhere else
            if sprite.parent:
                sprite.parent.remove_child(sprite)
            self.sprites.append(sprite)
            sprite.parent = self
        # keep draw order consistent with z_order
        self.sprites = sorted(self.sprites, key=lambda sprite:sprite.z_order)
    def remove_child(self, *sprites):
        """Remove one or several :class:`graphics.Sprite` sprites from scene """
        for sprite in sprites:
            self.sprites.remove(sprite)
            sprite.parent = None
    def clear(self):
        """Remove all sprites from scene"""
        self.remove_child(*self.sprites)
    def redraw(self):
        """Queue redraw. The redraw will be performed not more often than
        the `framerate` allows"""
        if self.__drawing_queued == False: #if we are moving, then there is a timeout somewhere already
            self.__drawing_queued = True
            self._last_frame_time = dt.datetime.now()
            gobject.timeout_add(1000 / self.framerate, self.__interpolate)
    # animation bits
    def __interpolate(self):
        """Timer callback: advance tweens by the elapsed time and queue a
        widget redraw. Returns True (keeping the timeout alive) only while
        there are active tweens."""
        if self.tweener:
            self.tweener.update((dt.datetime.now() - self._last_frame_time).microseconds / 1000000.0)
        self.__drawing_queued = self.tweener.has_tweens()
        self._last_frame_time = dt.datetime.now()
        self.queue_draw() # this will trigger do_expose_event when the current events have been flushed
        return self.__drawing_queued
    def animate(self, sprite, duration = None, easing = None, on_complete = None, on_update = None, delay = None, **kwargs):
        """Interpolate attributes of the given object using the internal tweener
        and redrawing scene after every tweener update.
        Specify the sprite and sprite's attributes that need changing.
        `duration` defaults to 0.4 seconds and `easing` to cubic in-out
        (for others see pytweener.Easing class).
        Redraw is requested right after creating the animation.
        Example::
            # tween some_sprite to coordinates (50,100) using default duration and easing
            scene.animate(some_sprite, x = 50, y = 100)
        """
        if not self.tweener: # here we complain
            raise Exception("pytweener was not found. Include it to enable animations")
        tween = self.tweener.add_tween(sprite,
                                       duration=duration,
                                       easing=easing,
                                       on_complete=on_complete,
                                       on_update=on_update,
                                       delay=delay, **kwargs)
        self.redraw()
        return tween
    # exposure events
    def do_configure_event(self, event):
        """Remember the widget's new size on resize."""
        self.width, self.height = event.width, event.height
    def do_expose_event(self, event):
        """Main frame: clip to the exposed area, emit on-enter-frame, draw
        every sprite, re-check mouse-over state, emit on-finish-frame."""
        context = self.window.cairo_create()
        # clip to the visible part
        context.rectangle(event.area.x, event.area.y,
                          event.area.width, event.area.height)
        context.clip()
        now = dt.datetime.now()
        # NOTE(review): two exposes within the same microsecond would divide
        # by zero here - confirm whether that can happen in practice
        self.fps = 1 / ((now - self.__last_expose_time).microseconds / 1000000.0)
        self.__last_expose_time = now
        self.mouse_x, self.mouse_y, mods = self.get_window().get_pointer()
        self.emit("on-enter-frame", context)
        for sprite in self.sprites:
            sprite._draw(context)
        self.__check_mouse(self.mouse_x, self.mouse_y)
        self.emit("on-finish-frame", context)
    def all_sprites(self, sprites = None):
        """Returns flat list of the sprite tree for simplified iteration"""
        if sprites is None:
            sprites = self.sprites
        for sprite in sprites:
            yield sprite
            if sprite.sprites:
                for child in self.all_sprites(sprite.sprites):
                    yield child
    def all_visible_sprites(self, sprites = None):
        """Returns flat list of just the visible sprites - avoid children whos
        parents are not displayed"""
        if sprites is None:
            sprites = self.sprites
        for sprite in sprites:
            if sprite.visible:
                yield sprite
                if sprite.sprites:
                    for child in self.all_visible_sprites(sprite.sprites):
                        yield child
    def get_sprite_at_position(self, x, y):
        """Returns the topmost visible sprite for given coordinates"""
        over = None
        # later sprites draw on top, so the last hit wins
        for sprite in self.all_visible_sprites():
            if sprite.interactive and self.__check_hit(sprite, x, y):
                over = sprite
        return over
    def __check_hit(self, sprite, x, y):
        """Hit test; the sprite currently being dragged always counts as hit
        so it keeps receiving events while the pointer outruns it."""
        if sprite == self._drag_sprite:
            return True
        return sprite.check_hit(x, y)
    def __check_mouse(self, x, y):
        """Determine the sprite under the pointer, emit on-mouse-over /
        on-mouse-out on change and update the window cursor."""
        if x is None or self._mouse_in == False:
            return
        custom_mouse = self.mouse_cursor is not None
        cursor = gtk.gdk.ARROW
        if custom_mouse:
            if self.mouse_cursor == False:
                cursor = self._blank_cursor
            else:
                cursor = self.mouse_cursor
        #check if we have a mouse over
        over = self.get_sprite_at_position(x, y)
        if over:
            if custom_mouse == False:
                if over.draggable:
                    cursor = gtk.gdk.FLEUR
                else:
                    cursor = gtk.gdk.HAND2
            if over != self._mouse_sprite:
                over._on_mouse_over()
                self.emit("on-mouse-over", over)
                self.redraw()
        if self._mouse_sprite and self._mouse_sprite != over:
            self._mouse_sprite._on_mouse_out()
            self.emit("on-mouse-out", self._mouse_sprite)
            self.redraw()
        self._mouse_sprite = over
        # set the cursor only when it actually changed
        if not self.__last_cursor or cursor != self.__last_cursor:
            if isinstance(cursor, gtk.gdk.Cursor):
                self.window.set_cursor(cursor)
            else:
                self.window.set_cursor(gtk.gdk.Cursor(cursor))
            self.__last_cursor = cursor
    """ mouse events """
    def __on_mouse_move(self, area, event):
        """Either advance an active drag (moving the dragged sprite) or do a
        plain mouse-over check plus on-mouse-move emission."""
        state = event.state
        if self._drag_sprite and self._drag_sprite.draggable \
           and gtk.gdk.BUTTON1_MASK & event.state:
            # dragging around
            # the drag starts only once the pointer travels drag_distance px
            self.__drag_started = self.__drag_started or \
                                  (self.__drag_start_position and \
                                   (self.__drag_start_position[0] - event.x) ** 2 + \
                                   (self.__drag_start_position[1] - event.y) ** 2 > self.drag_distance ** 2)
            if self.__drag_started:
                # undo the parent's rotation so the sprite follows the pointer
                matrix = cairo.Matrix()
                if self._drag_sprite.parent and isinstance(self._drag_sprite.parent, Sprite):
                    # TODO - this currently works only until second level
                    # should take all parents into account
                    matrix.rotate(self._drag_sprite.parent.rotation)
                matrix.invert()
                if not self.__drag_x:
                    # remember the offset between sprite origin and grab point
                    x1,y1 = matrix.transform_point(self.__drag_start_position[0],
                                                   self.__drag_start_position[1])
                    self.__drag_x = self._drag_sprite.x - x1
                    self.__drag_y = self._drag_sprite.y - y1
                mouse_x, mouse_y = matrix.transform_point(event.x, event.y)
                new_x = mouse_x + self.__drag_x
                new_y = mouse_y + self.__drag_y
                self._drag_sprite.x, self._drag_sprite.y = new_x, new_y
                self._drag_sprite._on_drag(event)
                self.emit("on-drag", self._drag_sprite, event)
                self.redraw()
                return
        else:
            # avoid double mouse checks - the redraw will also check for mouse!
            if not self.__drawing_queued:
                self.__check_mouse(event.x, event.y)
        self.emit("on-mouse-move", event)
    def __on_mouse_enter(self, area, event):
        """Track that the pointer entered the widget."""
        self._mouse_in = True
    def __on_mouse_leave(self, area, event):
        """Pointer left the widget: emit on-mouse-out for the sprite that was
        under it, if any."""
        self._mouse_in = False
        if self._mouse_sprite:
            self.emit("on-mouse-out", self._mouse_sprite)
            self._mouse_sprite = None
            self.redraw()
    def __on_button_press(self, area, event):
        """Remember press position/time and pick the drag candidate sprite."""
        x = event.x
        y = event.y
        state = event.state
        self.__drag_start_position = (x, y)
        self._drag_sprite = self.get_sprite_at_position(event.x, event.y)
        if self._drag_sprite and self._drag_sprite.draggable == False:
            self._drag_sprite = None
        self._button_press_time = dt.datetime.now()
        self.emit("on-mouse-down", event)
    def __on_button_release(self, area, event):
        """Finish any drag, then decide whether the press/release pair was a
        click (short time, little movement) and emit the signals."""
        # we have a click if the drag is less than 5 pixels
        click = self._button_press_time \
                and (dt.datetime.now() - self._button_press_time) < dt.timedelta(milliseconds = 200) \
                and (event.x - self.__drag_start_position[0]) ** 2 + (event.y - self.__drag_start_position[1]) ** 2 < 60
        self._button_press_time = None
        self.__drag_start_position = None
        self.__drag_started = False
        if self._drag_sprite:
            self._drag_sprite._on_drag_finish(event)
            self.emit("on-drag-finish", self._drag_sprite, event)
            # discard sprite's last drag offset on finish
            self.__drag_x, self.__drag_y = None, None
            self._drag_sprite = None
        if click:
            target = self.get_sprite_at_position(event.x, event.y)
            if target:
                target._on_click(event.state)
                self.emit("on-click", event, target)
        self.emit("on-mouse-up", event)
    def __on_scroll(self, area, event):
        """Forward scroll wheel events as on-scroll."""
        self.emit("on-scroll", event)
# NOTE: stray VCS commit message leaked into the file between two
# concatenated copies: "discard sprite's last position on finish".
# Kept here as a comment so it no longer breaks parsing.
# - coding: utf-8 -
# Copyright (C) 2008-2010 Toms Bauģis <toms.baugis at gmail.com>
# Dual licensed under the MIT or GPL Version 2 licenses.
# See http://github.com/tbaugis/hamster_experiments/blob/master/README.textile
import math
import datetime as dt
import gtk, gobject
import pango, cairo
import re
try:
import pytweener
except: # we can also live without tweener. Scene.animate will not work
pytweener = None
import colorsys
from collections import deque
class Colors(object):
    """Color parsing and manipulation helpers. Accepts hex strings
    ("#abc", "#aabbcc", 12-digit hex), gtk.gdk.Color objects and
    (r, g, b) sequences in either the 0..1 or 0..255 range."""
    hex_color_normal = re.compile("#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})")
    hex_color_short = re.compile("#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])")
    hex_color_long = re.compile("#([a-fA-F0-9]{4})([a-fA-F0-9]{4})([a-fA-F0-9]{4})")
    def parse(self, color):
        """Normalize the given color to a list of floats in the 0..1 range."""
        assert color is not None
        #parse color into rgb values
        if isinstance(color, basestring):
            # try 12-digit hex first, then 6-digit, then 3-digit shorthand
            match = self.hex_color_long.match(color)
            if match:
                color = [int(color, 16) / 65535.0 for color in match.groups()]
            else:
                match = self.hex_color_normal.match(color)
                if match:
                    color = [int(color, 16) / 255.0 for color in match.groups()]
                else:
                    # NOTE(review): a string matching none of the patterns
                    # raises AttributeError on the next line - confirm callers
                    # only ever pass valid hex strings
                    match = self.hex_color_short.match(color)
                    color = [int(color + color, 16) / 255.0 for color in match.groups()]
        elif isinstance(color, gtk.gdk.Color):
            color = [color.red / 65535.0,
                     color.green / 65535.0,
                     color.blue / 65535.0]
        else:
            # otherwise we assume we have color components in 0..255 range
            if color[0] > 1 or color[1] > 1 or color[2] > 1:
                color = [c / 255.0 for c in color]
        return color
    def rgb(self, color):
        """Return the color components scaled to the 0..255 range."""
        return [c * 255 for c in self.parse(color)]
    def gdk(self, color):
        """Return the color as a gtk.gdk.Color (16-bit per channel)."""
        c = self.parse(color)
        return gtk.gdk.Color(int(c[0] * 65535.0), int(c[1] * 65535.0), int(c[2] * 65535.0))
    def is_light(self, color):
        # tells you if color is dark or light, so you can up or down the
        # scale for improved contrast
        # (lightness is on a 0..255 scale here because rgb() returns
        # 0..255 components)
        return colorsys.rgb_to_hls(*self.rgb(color))[1] > 150
    def darker(self, color, step):
        # returns color darker by step (where step is in range 0..255)
        hls = colorsys.rgb_to_hls(*self.rgb(color))
        return colorsys.hls_to_rgb(hls[0], hls[1] - step, hls[2])
    def contrast(self, color, step):
        """if color is dark, will return a lighter one, otherwise darker"""
        hls = colorsys.rgb_to_hls(*self.rgb(color))
        if self.is_light(color):
            return colorsys.hls_to_rgb(hls[0], hls[1] - step, hls[2])
        else:
            return colorsys.hls_to_rgb(hls[0], hls[1] + step, hls[2])
    # returns color darker by step (where step is in range 0..255)
Colors = Colors() # this is a static class, so an instance will do
class Graphics(object):
    """If context is given upon contruction, will perform drawing
    operations on context instantly. Otherwise queues up the drawing
    instructions and performs them in passed-in order when _draw is called
    with context.
    Most of instructions are mapped to cairo functions by the same name.
    Where there are differences, documenation is provided.
    See http://www.cairographics.org/documentation/pycairo/reference/context.html#class-context
    for detailed description of the cairo drawing functions.
    """
    def __init__(self, context = None):
        self.context = context
        self.colors = Colors # pointer to the color utilities instance
        self.extents = None # bounds of the object, only if interactive
        self.opacity = 1.0 # opacity get's adjusted by parent - TODO - wrong inheritance?
        self.paths = None # paths for mouse hit checks
        self._last_matrix = None
        self.__new_instructions = deque() # instruction set until it is converted into path-based instructions
        self.__instruction_cache = None
        self.cache_surface = None
    def clear(self):
        """clear all instructions"""
        self.__new_instructions = deque()
        self.__instruction_cache = None
        self.paths = []
    @staticmethod
    def _stroke(context): context.stroke()
    def stroke(self, color = None, alpha = 1):
        """stroke the line with given color and opacity"""
        if color or alpha < 1:self.set_color(color, alpha)
        self._add_instruction(self._stroke,)
    @staticmethod
    def _fill(context): context.fill()
    def fill(self, color = None, alpha = 1):
        """fill path with given color and opacity"""
        if color or alpha < 1:self.set_color(color, alpha)
        self._add_instruction(self._fill,)
    @staticmethod
    def _stroke_preserve(context): context.stroke_preserve()
    def stroke_preserve(self, color = None, alpha = 1):
        """same as stroke, only after stroking, don't discard the path"""
        if color or alpha < 1:self.set_color(color, alpha)
        self._add_instruction(self._stroke_preserve,)
    @staticmethod
    def _fill_preserve(context): context.fill_preserve()
    def fill_preserve(self, color = None, alpha = 1):
        """same as fill, only after filling, don't discard the path"""
        if color or alpha < 1:self.set_color(color, alpha)
        self._add_instruction(self._fill_preserve,)
    @staticmethod
    def _new_path(context): context.new_path()
    def new_path(self):
        """discard current path"""
        self._add_instruction(self._new_path,)
    @staticmethod
    def _paint(context): context.paint()
    def paint(self):
        """paint the current source everywhere within the clip region"""
        self._add_instruction(self._paint,)
    @staticmethod
    def _set_source(context, image):
        context.set_source(image)
    def set_source(self, image, x = 0, y = 0):
        """set the drawing source to the given pattern.
        NOTE(review): x and y are accepted but not forwarded to cairo -
        confirm whether they were ever meant to be used here"""
        self._add_instruction(self._set_source, image)
    @staticmethod
    def _set_source_surface(context, surface, x, y):
        context.set_source_surface(surface, x, y)
    def set_source_surface(self, surface, x = 0, y = 0):
        """use the given surface as the drawing source, offset by x, y"""
        self._add_instruction(self._set_source_surface, surface, x, y)
    @staticmethod
    def _set_source_pixbuf(context, pixbuf, x, y):
        context.set_source_pixbuf(pixbuf, x, y)
    def set_source_pixbuf(self, pixbuf, x = 0, y = 0):
        """use the given gtk.gdk.Pixbuf as the drawing source, offset by x, y"""
        self._add_instruction(self._set_source_pixbuf, pixbuf, x, y)
    @staticmethod
    def _save_context(context): context.save()
    def save_context(self):
        """save the current context state (transformations, clip, source)
        on cairo's internal stack"""
        self._add_instruction(self._save_context)
    @staticmethod
    def _restore_context(context): context.restore()
    def restore_context(self):
        """restore the context state last saved by :func:`save_context`"""
        self._add_instruction(self._restore_context)
    @staticmethod
    def _translate(context, x, y): context.translate(x, y)
    def translate(self, x, y):
        """move the coordinate origin by x, y"""
        self._add_instruction(self._translate, x, y)
    @staticmethod
    def _move_to(context, x, y): context.move_to(x, y)
    def move_to(self, x, y):
        """change current position"""
        self._add_instruction(self._move_to, x, y)
    @staticmethod
    def _line_to(context, x, y): context.line_to(x, y)
    def line_to(self, x, y = None):
        """draw line. Accepts either a single (x, y) pair or a list of
        (x, y) tuples as the sole argument"""
        if x and y is not None:
            self._add_instruction(self._line_to, x, y)
        elif isinstance(x, list) and y is None:
            for x2, y2 in x:
                self._add_instruction(self._line_to, x2, y2)
    @staticmethod
    def _rel_line_to(context, x, y): context.rel_line_to(x, y)
    def rel_line_to(self, x, y = None):
        """draw line relative to the current position. Accepts either a
        single (x, y) pair or a list of (x, y) tuples"""
        if x and y:
            self._add_instruction(self._rel_line_to, x, y)
        elif isinstance(x, list) and y is None:
            for x2, y2 in x:
                self._add_instruction(self._rel_line_to, x2, y2)
    @staticmethod
    def _curve_to(context, x, y, x2, y2, x3, y3):
        context.curve_to(x, y, x2, y2, x3, y3)
    def curve_to(self, x, y, x2, y2, x3, y3):
        """draw curve. (x2, y2) is the middle point of the curve"""
        self._add_instruction(self._curve_to, x, y, x2, y2, x3, y3)
    @staticmethod
    def _close_path(context): context.close_path()
    def close_path(self):
        """connect end with beginning of path"""
        self._add_instruction(self._close_path,)
    @staticmethod
    def _set_line_width(context, width):
        context.set_line_width(width)
    @staticmethod
    def _set_dash(context, dash, dash_offset = 0):
        context.set_dash(dash, dash_offset)
    def set_line_style(self, width = None, dash = None, dash_offset = 0):
        """set line width and, optionally, a dash pattern"""
        if width is not None:
            self._add_instruction(self._set_line_width, width)
        if dash is not None:
            self._add_instruction(self._set_dash, dash, dash_offset)
    def _set_color(self, context, r, g, b, a):
        # pick rgb vs rgba so fully-opaque colors take the cheaper call
        if a * self.opacity >= 1:
            context.set_source_rgb(r, g, b)
        else:
            context.set_source_rgba(r, g, b, a * self.opacity)
    def set_color(self, color, alpha = 1):
        """set active color. You can use hex colors like "#aaa", or you can use
        normalized RGB tripplets (where every value is in range 0..1), or
        you can do the same thing in range 0..65535"""
        color = self.colors.parse(color) # parse whatever we have there into a normalized triplet
        # NOTE(review): alpha defaults to 1, never None, so this branch is
        # unreachable through this signature - confirm intent
        if len(color) == 4 and alpha is None:
            alpha = color[3]
        r, g, b = color[:3]
        self._add_instruction(self._set_color, r, g, b, alpha)
    @staticmethod
    def _arc(context, x, y, radius, start_angle, end_angle):
        context.arc(x, y, radius, start_angle, end_angle)
    def arc(self, x, y, radius, start_angle, end_angle):
        """draw arc going counter-clockwise from start_angle to end_angle"""
        self._add_instruction(self._arc, x, y, radius, start_angle, end_angle)
    def circle(self, x, y, radius):
        """draw circle"""
        self._add_instruction(self._arc, x, y, radius, 0, math.pi * 2)
    def ellipse(self, x, y, width, height, edges = None):
        """draw 'perfect' ellipse, opposed to squashed circle. works also for
        equilateral polygons"""
        # the automatic edge case is somewhat arbitrary
        # NOTE(review): due to precedence this is max(...) / 2, not
        # max(32, width / 2, height / 2) - confirm which was intended
        steps = edges or max((32, width, height)) / 2
        angle = 0
        step = math.pi * 2 / steps
        points = []
        while angle < math.pi * 2:
            points.append((width / 2.0 * math.cos(angle),
                           height / 2.0 * math.sin(angle)))
            angle += step
        # shift so the ellipse's bounding box starts at (x, y)
        min_x = min((point[0] for point in points))
        min_y = min((point[1] for point in points))
        self.move_to(points[0][0] - min_x + x, points[0][1] - min_y + y)
        for p_x, p_y in points:
            self.line_to(p_x - min_x + x, p_y - min_y + y)
        self.line_to(points[0][0] - min_x + x, points[0][1] - min_y + y)
    @staticmethod
    def _arc_negative(context, x, y, radius, start_angle, end_angle):
        context.arc_negative(x, y, radius, start_angle, end_angle)
    def arc_negative(self, x, y, radius, start_angle, end_angle):
        """draw arc going clockwise from start_angle to end_angle"""
        self._add_instruction(self._arc_negative, x, y, radius, start_angle, end_angle)
    @staticmethod
    def _rounded_rectangle(context, x, y, x2, y2, corner_radius):
        # trace the rectangle clockwise, rounding each corner with a curve
        half_corner = corner_radius / 2
        context.move_to(x + corner_radius, y)
        context.line_to(x2 - corner_radius, y)
        context.curve_to(x2 - half_corner, y, x2, y + half_corner, x2, y + corner_radius)
        context.line_to(x2, y2 - corner_radius)
        context.curve_to(x2, y2 - half_corner, x2 - half_corner, y2, x2 - corner_radius, y2)
        context.line_to(x + corner_radius, y2)
        context.curve_to(x + half_corner, y2, x, y2 - half_corner, x, y2 - corner_radius)
        context.line_to(x, y + corner_radius)
        context.curve_to(x, y + half_corner, x + half_corner, y, x + corner_radius, y)
    @staticmethod
    def _rectangle(context, x, y, w, h): context.rectangle(x, y, w, h)
    def rectangle(self, x, y, width, height, corner_radius = 0):
        "draw a rectangle. if corner_radius is specified, will draw rounded corners"
        if corner_radius <= 0:
            self._add_instruction(self._rectangle, x, y, width, height)
            return
        # make sure that w + h are larger than 2 * corner_radius
        corner_radius = min(corner_radius, min(width, height) / 2)
        x2, y2 = x + width, y + height
        self._add_instruction(self._rounded_rectangle, x, y, x2, y2, corner_radius)
    def fill_area(self, x, y, width, height, color, opacity = 1):
        """fill rectangular area with specified color"""
        self.rectangle(x, y, width, height)
        self.fill(color, opacity)
    def fill_stroke(self, fill = None, stroke = None, line_width = None):
        """fill and/or stroke the current path in one call"""
        if line_width: self.set_line_style(line_width)
        if fill and stroke:
            self.fill_preserve(fill)
        elif fill:
            self.fill(fill)
        if stroke:
            self.stroke(stroke)
    @staticmethod
    def _show_layout(context, text, font_desc, alignment, width, wrap, ellipsize):
        layout = context.create_layout()
        layout.set_font_description(font_desc)
        layout.set_markup(text)
        layout.set_width(width)
        layout.set_alignment(alignment)
        # wrapping and ellipsizing only make sense with a constrained width
        if width > 0:
            if wrap is not None:
                layout.set_wrap(wrap)
            else:
                layout.set_ellipsize(ellipsize or pango.ELLIPSIZE_END)
        context.show_layout(layout)
    def create_layout(self, size = None):
        """utility function to create layout with the default font. Size and
        alignment parameters are shortcuts to according functions of the
        pango.Layout"""
        if not self.context:
            # TODO - this is rather sloppy as far as exception goes
            # should explain better
            # (also a Python-2 string exception - raise a real Exception
            # class when this gets touched)
            raise "Can not create layout without existing context!"
        layout = self.context.create_layout()
        font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
        if size: font_desc.set_size(size * pango.SCALE)
        layout.set_font_description(font_desc)
        return layout
    def show_text(self, text, size = None, color = None):
        """display text with system's default font"""
        font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
        if color: self.set_color(color)
        if size: font_desc.set_size(size * pango.SCALE)
        self.show_layout(text, font_desc)
    def show_layout(self, text, font_desc, alignment = pango.ALIGN_LEFT, width = -1, wrap = None, ellipsize = None):
        """display text. font_desc is string of pango font description
        often handier than calling this function directly, is to create
        a class:Label object
        """
        self._add_instruction(self._show_layout, text, font_desc, alignment, width, wrap, ellipsize)
    def _remember_path(self, context, instruction):
        """Record the current path (in device coordinates) for mouse hit
        checks and grow self.extents to cover it."""
        context.save()
        context.identity_matrix()
        # fills are bounded by the path itself; strokes also by line width
        if instruction in (self._fill, self._fill_preserve):
            new_extents = context.path_extents()
        else:
            new_extents = context.stroke_extents()
        self.extents = self.extents or new_extents
        self.extents = (min(self.extents[0], new_extents[0]),
                        min(self.extents[1], new_extents[1]),
                        max(self.extents[2], new_extents[2]),
                        max(self.extents[3], new_extents[3]))
        self.paths.append(context.copy_path())
        context.restore()
    def _add_instruction(self, function, *params):
        """Execute immediately when a context was given at construction,
        otherwise queue the instruction for the next _draw."""
        if self.context:
            function(self.context, *params)
        else:
            self.paths = None
            self.__new_instructions.append((function, params))
    def _draw(self, context, with_extents = False):
        """draw accumulated instructions in context"""
        # Fresh instructions are converted once into a path-based cache:
        # path construction runs here and only the resulting paths plus the
        # stroke/fill/color/line-width state are replayed on later draws.
        if self.__new_instructions: #new stuff!
            self.__instruction_cache = deque()
            current_color = None
            current_line = None
            instruction_cache = []
            while self.__new_instructions:
                instruction, args = self.__new_instructions.popleft()
                if instruction in (self._set_source,
                                   self._set_source_surface,
                                   self._set_source_pixbuf,
                                   self._paint,
                                   self._translate,
                                   self._save_context,
                                   self._restore_context):
                    self.__instruction_cache.append((None, None, None, instruction, args))
                elif instruction == self._show_layout:
                    self.__instruction_cache.append((None, current_color, None, instruction, args))
                elif instruction == self._set_color:
                    current_color = args
                elif instruction == self._set_line_width:
                    current_line = args
                elif instruction in (self._new_path, self._stroke, self._fill,
                                     self._stroke_preserve,
                                     self._fill_preserve):
                    # path-terminating instruction: snapshot the path built so far
                    self.__instruction_cache.append((context.copy_path(),
                                                     current_color,
                                                     current_line,
                                                     instruction, ()))
                    context.new_path() # reset even on preserve as the instruction will preserve it instead
                    instruction_cache = []
                else:
                    # the rest are non-special
                    instruction(context, *args)
                    instruction_cache.append((instruction, args))
            while instruction_cache: # stroke is missing so we just cache
                instruction, args = instruction_cache.pop(0)
                self.__instruction_cache.append((None, None, None, instruction, args))
        # if we have been moved around, we should update bounds
        check_extents = with_extents and (context.get_matrix() != self._last_matrix or not self.paths)
        if check_extents:
            self.paths = deque()
            self.extents = None
        if not self.__instruction_cache:
            return
        for path, color, line, instruction, args in self.__instruction_cache:
            if color: self._set_color(context, *color)
            if line: self._set_line_width(context, *line)
            if path:
                context.append_path(path)
                if check_extents:
                    self._remember_path(context, self._fill)
            if instruction:
                instruction(context, *args)
        if check_extents and instruction not in (self._fill, self._stroke, self._fill_preserve, self._stroke_preserve):
            # last one
            self._remember_path(context, self._fill)
        self._last_matrix = context.get_matrix()
    def _draw_as_bitmap(self, context):
        """
        instead of caching paths, this function caches the whole drawn thing
        use cache_as_bitmap on sprite to enable this mode
        """
        matrix = context.get_matrix()
        # re-render the bitmap when instructions changed or the sprite moved
        if self.__new_instructions or matrix != self._last_matrix:
            if self.__new_instructions:
                self.__instruction_cache = list(self.__new_instructions)
                self.__new_instructions = deque()
            self.paths = deque()
            self.extents = None
            if not self.__instruction_cache:
                # no instructions - nothing to do
                return
            # instructions that end path
            path_end_instructions = (self._new_path, self._stroke, self._fill, self._stroke_preserve, self._fill_preserve)
            # measure the path extents so we know the size of surface
            for instruction, args in self.__instruction_cache:
                if instruction in path_end_instructions:
                    self._remember_path(context, instruction)
                if instruction in (self._set_source_pixbuf, self._set_source_surface):
                    # draw a rectangle around the pathless instructions so that the extents are correct
                    pixbuf = args[0]
                    x = args[1] if len(args) > 1 else 0
                    y = args[2] if len(args) > 2 else 0
                    self._rectangle(context, x, y, pixbuf.get_width(), pixbuf.get_height())
                instruction(context, *args)
            if instruction not in path_end_instructions: # last one
                self._remember_path(context, self._fill)
            # now draw the instructions on the caching surface
            w = int(self.extents[2] - self.extents[0]) + 1
            h = int(self.extents[3] - self.extents[1]) + 1
            self.cache_surface = context.get_target().create_similar(cairo.CONTENT_COLOR_ALPHA, w, h)
            ctx = gtk.gdk.CairoContext(cairo.Context(self.cache_surface))
            # render relative to the surface's top-left corner
            ctx.translate(-self.extents[0], -self.extents[1])
            ctx.transform(matrix)
            for instruction, args in self.__instruction_cache:
                instruction(ctx, *args)
            self._last_matrix = matrix
        else:
            # nothing changed - just blit the cached bitmap in place
            context.save()
            context.identity_matrix()
            context.translate(self.extents[0], self.extents[1])
            context.set_source_surface(self.cache_surface)
            context.paint()
            context.restore()
class Sprite(gtk.Object):
    """The Sprite class is a basic display list building block: a display list
    node that can display graphics and can also contain children.
    Once you have created the sprite, use Scene's add_child to add it to
    scene
    """

    __gsignals__ = {
        "on-mouse-over": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
        "on-mouse-out": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
        "on-click": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-drag": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-drag-finish": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-render": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
    }

    def __init__(self, x = 0, y = 0,
                 opacity = 1, visible = True,
                 rotation = 0, pivot_x = 0, pivot_y = 0,
                 scale_x = 1, scale_y = 1,
                 interactive = False, draggable = False,
                 z_order = 0, cache_as_bitmap = False):
        gtk.Object.__init__(self)

        #: list of children sprites. Use :func:`add_child` to add sprites
        self.sprites = []

        #: instance of :ref:`graphics` for this sprite
        self.graphics = Graphics()

        #: boolean denoting whether the sprite responds to mouse events
        self.interactive = interactive

        #: boolean marking if sprite can be automatically dragged
        self.draggable = draggable

        #: relative coordinates of the sprites anchor and rotation point
        self.pivot_x, self.pivot_y = pivot_x, pivot_y # rotation point in sprite's coordinates

        #: sprite opacity
        self.opacity = opacity

        #: boolean visibility flag
        self.visible = visible

        #: pointer to parent :class:`Sprite` or :class:`Scene`
        self.parent = None

        #: sprite coordinates
        self.x, self.y = x, y

        #: rotation of the sprite in radians (use :func:`math.degrees` to convert to degrees if necessary)
        self.rotation = rotation

        #: scale X
        self.scale_x = scale_x

        #: scale Y
        self.scale_y = scale_y

        #: drawing order between siblings. The one with the highest z_order will be on top.
        self.z_order = z_order

        #: Whether the sprite should be cached as a bitmap. Default: False.
        #: Generally good when you have many static sprites
        self.cache_as_bitmap = cache_as_bitmap

        # flag that indicates that the graphics object of the sprite should
        # be rendered; written via __dict__ to dodge __setattr__
        self.__dict__["_sprite_dirty"] = True

    def __setattr__(self, name, val):
        """Mark the sprite dirty when a drawing-relevant attribute changes."""
        # sentinel string stands in for "attribute not present yet" so that
        # an initial None assignment still counts as a change
        if self.__dict__.get(name, "hamster_graphics_no_value_really") != val:
            self.__dict__[name] = val
            # pure-transform attributes don't alter the rendered graphics,
            # so they don't force a re-render
            if name not in ('x', 'y', 'rotation', 'scale_x', 'scale_y', 'visible'):
                self.__dict__["_sprite_dirty"] = True

    def add_child(self, *sprites):
        """Add child sprite. Child will be nested within parent"""
        for sprite in sprites:
            # reparent if the sprite already belongs to someone else
            if sprite.parent:
                sprite.parent.remove_child(sprite)
            self.sprites.append(sprite)
            sprite.parent = self
        # keep children sorted so higher z_order draws later (on top)
        self.sprites = sorted(self.sprites, key=lambda sprite:sprite.z_order)

    def remove_child(self, *sprites):
        """Detach one or several child sprites from this sprite."""
        for sprite in sprites:
            self.sprites.remove(sprite)
            sprite.parent = None

    def check_hit(self, x, y):
        """check if the given coordinates are inside the sprite's fill or stroke
        path"""
        if not self.graphics.extents:
            return False

        sprite_x, sprite_y, sprite_x2, sprite_y2 = self.graphics.extents

        # cheap bounding-box test first
        if sprite_x <= x <= sprite_x2 and sprite_y <= y <= sprite_y2:
            paths = self.graphics.paths
            if not paths:
                # no cached paths - the bounding box is the best we can do
                return True

            # replay the cached paths on a throwaway surface and do a
            # precise point-in-fill test
            context = cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0))
            for path in paths:
                context.append_path(path)
            return context.in_fill(x, y)
        else:
            return False

    def _draw(self, context, opacity = 1):
        """Render this sprite and its children onto the cairo context.

        `opacity` is the accumulated opacity of all ancestors; it is
        multiplied with this sprite's own opacity.
        """
        if self.visible is False:
            return

        context.new_path()

        if (self._sprite_dirty): # send signal to redo the drawing when sprite is dirty
            self.emit("on-render")
            self.__dict__["_sprite_dirty"] = False

        # only touch the context matrix when a transform is actually needed
        if any((self.x, self.y, self.rotation, self.scale_x, self.scale_y)):
            context.save()

            if any((self.x, self.y, self.pivot_x, self.pivot_y)):
                context.translate(self.x + self.pivot_x, self.y + self.pivot_y)

            if self.rotation:
                context.rotate(self.rotation)

            # rotation happens around the pivot; shift back afterwards
            if self.pivot_x or self.pivot_y:
                context.translate(-self.pivot_x, -self.pivot_y)

            if self.scale_x != 1 or self.scale_y != 1:
                context.scale(self.scale_x, self.scale_y)

        self.graphics.opacity = self.opacity * opacity

        if self.cache_as_bitmap:
            self.graphics._draw_as_bitmap(context)
        else:
            # extents are only needed for hit-testing of interactive sprites
            self.graphics._draw(context, self.interactive or self.draggable)

        for sprite in self.sprites:
            sprite._draw(context, self.opacity * opacity)

        if any((self.x, self.y, self.rotation, self.scale_x, self.scale_y)):
            context.restore()

        context.new_path() #forget about us

    def _on_click(self, button_state):
        # emit locally, then bubble the click up to parent sprites
        self.emit("on-click", button_state)
        if self.parent and isinstance(self.parent, Sprite):
            self.parent._on_click(button_state)

    def _on_mouse_over(self):
        # scene will call us when there is mouse
        self.emit("on-mouse-over")

    def _on_mouse_out(self):
        # scene will call us when there is mouse
        self.emit("on-mouse-out")

    def _on_drag(self, event):
        # scene will call us when there is mouse
        self.emit("on-drag", event)

    def _on_drag_finish(self, event):
        # scene will call us when there is mouse
        self.emit("on-drag-finish", event)
class Image(Sprite):
    """Sprite that displays a PNG image loaded from a file path.

    On the first render the file is painted via cairo and the resulting
    cache surface (built by the bitmap-caching machinery) is kept; later
    renders paint that cached surface directly.
    """
    def __init__(self, path, **kwargs):
        Sprite.__init__(self, **kwargs)

        #: path to the image
        self.path = path

        self.connect("on-render", self.on_render)
        # cached rendering of the file; None until the first draw completes
        self.cache_surface = None
        # pixel dimensions, unknown until the file has been loaded once
        self.width, self.height = None, None

    def __setattr__(self, name, val):
        Sprite.__setattr__(self, name, val)
        if name == 'path': # no other reason to discard cache than just on path change
            self.cache_surface = None

    def _draw(self, context, opacity = 1):
        # Toggle bitmap caching around the parent draw: enable it for the
        # first pass so Graphics builds a cache surface we can grab, then
        # disable it once we hold that surface ourselves.
        # (__dict__ access bypasses Sprite.__setattr__ and its dirty flag.)
        # Idiom fix: was `== None` / `== False`; identity/truth tests are
        # the correct Python forms and behave identically here.
        if self.cache_surface is None and not self.__dict__['cache_as_bitmap']:
            self.__dict__['cache_as_bitmap'] = True
        elif self.cache_surface and self.__dict__['cache_as_bitmap']:
            self.__dict__['cache_as_bitmap'] = False

        Sprite._draw(self, context, opacity)

        if self.cache_surface is None:
            self.cache_surface = self.graphics.cache_surface

    def on_render(self, sprite):
        if self.cache_surface:
            # fast path - paint the cached surface
            self.graphics.set_source_surface(self.cache_surface)
            # TODO - drawing rectangle just to get the extents right - there might be a better way
            self.graphics.rectangle(0, 0, self.width, self.height)
            self.graphics.paint()
        else:
            # first render - load the PNG and note its dimensions
            image = cairo.ImageSurface.create_from_png(self.path)
            self.width, self.height = image.get_width(), image.get_height()
            self.graphics.set_source_surface(image)
            self.graphics.paint()
class Icon(Sprite):
    """Sprite that displays a themed icon by name and pixel size.

    Works like :class:`Image` but loads from the default GTK icon theme,
    and caches the first rendering as a surface for later draws.
    """
    def __init__(self, name, size=24, cache_as_bitmap = True, **kwargs):
        Sprite.__init__(self, cache_as_bitmap = cache_as_bitmap, **kwargs)
        self.theme = gtk.icon_theme_get_default()

        #: icon name from theme
        self.name = name

        #: icon size in pixels
        self.size = size

        self.connect("on-render", self.on_render)
        # cached rendering of the icon; None until the first draw completes
        self.cache_surface = None
        # pixel dimensions, unknown until the icon has been loaded once
        self.width, self.height = None, None

    def __setattr__(self, name, val):
        Sprite.__setattr__(self, name, val)
        # BUG FIX: this previously checked `name == 'path'` (copied from
        # Image), but Icon never sets a `path` attribute, so changing the
        # icon name or size kept showing the stale cached surface.
        # Invalidate the cache on the attributes Icon actually renders from.
        if name in ('name', 'size'):
            self.cache_surface = None

    def _draw(self, context, opacity = 1):
        # Toggle bitmap caching around the parent draw (see Image._draw):
        # first pass builds the cache surface, later passes reuse it.
        if self.cache_surface is None and not self.__dict__['cache_as_bitmap']:
            self.__dict__['cache_as_bitmap'] = True
        elif self.cache_surface and self.__dict__['cache_as_bitmap']:
            self.__dict__['cache_as_bitmap'] = False

        Sprite._draw(self, context, opacity)

        if self.cache_surface is None:
            self.cache_surface = self.graphics.cache_surface

    def on_render(self, sprite):
        if self.cache_surface:
            # fast path - paint the cached surface
            self.graphics.set_source_surface(self.cache_surface)
            # TODO - drawing rectangle just to get the extents right - there might be a better way
            self.graphics.rectangle(0, 0, self.width, self.height)
            self.graphics.paint()
        else:
            # first render - load the pixbuf from the theme
            icon = self.theme.load_icon(self.name, self.size, 0)
            self.width, self.height = icon.get_width(), icon.get_height()
            self.graphics.set_source_pixbuf(icon)
            self.graphics.paint()
class Label(Sprite):
    """Sprite that renders a pango text layout.

    Width/height are computed from the text; assigning ``width`` constrains
    the layout (enabling wrapping/ellipsizing) instead of scaling it.
    """
    def __init__(self, text = "", size = 10, color = None,
                 alignment = pango.ALIGN_LEFT, **kwargs):
        Sprite.__init__(self, **kwargs)
        self.width, self.height = None, None

        #: pango.FontDescription, default is the system's font
        self.font_desc = pango.FontDescription(gtk.Style().font_desc.to_string())
        self.font_desc.set_size(size * pango.SCALE)

        #: color of label either as hex string or an (r,g,b) tuple
        self.color = color

        # layout width constraint in pango units; -1 means unconstrained
        self._bounds_width = -1

        #: wrapping method. Can be set to pango. [WRAP_WORD, WRAP_CHAR,
        #: WRAP_WORD_CHAR]
        self.wrap = None

        #: Ellipsize mode. Can be set to pango. [ELLIPSIZE_NONE,
        #: ELLIPSIZE_START, ELLIPSIZE_MIDDLE, ELLIPSIZE_END]
        self.ellipsize = None

        #: alignment. one of pango.[ALIGN_LEFT, ALIGN_RIGHT, ALIGN_CENTER]
        self.alignment = alignment

        #: label text
        self.text = text

        #: font size
        self.size = size

        self.__surface = None

        self.connect("on-render", self.on_render)

    def __setattr__(self, name, val):
        """Recompute dimensions whenever a layout-affecting attribute changes."""
        Sprite.__setattr__(self, name, val)

        if name == "width":
            # setting width means consumer wants to constrain the label
            if val is None or val == -1:
                self.__dict__['_bounds_width'] = -1
            else:
                self.__dict__['_bounds_width'] = val * pango.SCALE

        if name in ("width", "text", "size", "font_desc", "wrap", "ellipsize"):
            # avoid chicken and egg: during __init__ these attributes are
            # assigned one by one, so only measure once all are present
            if "text" in self.__dict__ and "size" in self.__dict__ and "width" in self.__dict__:
                self._set_dimensions()

    def on_render(self, sprite):
        self.graphics.clear()
        if not self.text:
            return

        self.graphics.set_color(self.color)
        self.graphics.show_layout(self.text, self.font_desc,
                                  self.alignment,
                                  self._bounds_width,
                                  self.wrap,
                                  self.ellipsize)

        # rectangle fixes the reported extents (text alone reports none)
        self.graphics.rectangle(0, 0, self.width, self.height)

    def _set_dimensions(self):
        """Measure the text with a throwaway pango layout and store the
        resulting pixel size in width/height."""
        context = gtk.gdk.CairoContext(cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0)))
        layout = context.create_layout()
        layout.set_font_description(self.font_desc)
        layout.set_markup(self.text)
        layout.set_width(self._bounds_width)
        layout.set_ellipsize(pango.ELLIPSIZE_NONE)
        if self.wrap is not None:
            layout.set_wrap(self.wrap)
        else:
            # no wrapping requested - fall back to ellipsizing
            layout.set_ellipsize(self.ellipsize or pango.ELLIPSIZE_END)

        # TODO - the __dict__ part look rather lame but allows to circumvent the setattr
        self.__dict__['width'], self.height = layout.get_pixel_size()
class Rectangle(Sprite):
    """Sprite rendering a filled and/or stroked rectangle, optionally with
    rounded corners."""

    def __init__(self, w, h, corner_radius = 0, fill = None, stroke = None, **kwargs):
        Sprite.__init__(self, **kwargs)

        #: width
        self.width = w
        #: height
        self.height = h
        #: fill color
        self.fill = fill
        #: stroke color
        self.stroke = stroke
        #: width of the stroke line
        self.line_width = 1
        #: corner radius; values above zero produce rounded corners
        self.corner_radius = corner_radius

        self.connect("on-render", self.on_render)

    def on_render(self, sprite):
        graphics = self.graphics
        graphics.rectangle(0, 0, self.width, self.height, self.corner_radius)
        graphics.fill_stroke(self.fill, self.stroke, self.line_width)
class Polygon(Sprite):
    """Sprite rendering a closed polygon through a list of points."""

    def __init__(self, points, fill = None, stroke = None, line_width = 1, **kwargs):
        Sprite.__init__(self, **kwargs)

        #: (x, y) tuples the outline runs through; the path between the
        #: last and the first point is closed automatically
        self.points = points
        #: fill color
        self.fill = fill
        #: stroke color
        self.stroke = stroke
        #: width of the stroke line
        self.line_width = line_width

        self.connect("on-render", self.on_render)

    def on_render(self, sprite):
        points = self.points
        if not points:
            return

        graphics = self.graphics
        graphics.move_to(*points[0])
        graphics.line_to(*points)
        graphics.close_path()
        graphics.fill_stroke(self.fill, self.stroke, self.line_width)
class Circle(Sprite):
    """Sprite drawing a circle (when width equals height) or an ellipse."""
    def __init__(self, width, height, fill = None, stroke = None, line_width = 1, **kwargs):
        Sprite.__init__(self, **kwargs)

        #: circle width
        self.width = width

        #: circle height
        self.height = height

        #: fill color
        self.fill = fill

        #: stroke color
        self.stroke = stroke

        #: stroke line width
        self.line_width = line_width

        self.connect("on-render", self.on_render)

    def on_render(self, sprite):
        if self.width == self.height:
            # NOTE(review): the first argument looks suspicious — the
            # ellipse branch draws from origin (0, 0), which suggests the
            # centre x here should be self.width / 2.0, not self.width.
            # Confirm against graphics.circle()'s signature before changing.
            self.graphics.circle(self.width, self.width / 2.0, self.width / 2.0)
        else:
            self.graphics.ellipse(0, 0, self.width, self.height)

        self.graphics.fill_stroke(self.fill, self.stroke, self.line_width)
class Scene(gtk.DrawingArea):
    """ Drawing area for displaying sprites.
        Add sprites to the Scene by calling :func:`add_child`.
        Scene is descendant of `gtk.DrawingArea <http://www.pygtk.org/docs/pygtk/class-gtkdrawingarea.html>`_
        and thus inherits all it's methods and everything.
    """

    __gsignals__ = {
        "expose-event": "override",
        "configure_event": "override",
        "on-enter-frame": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, )),
        "on-finish-frame": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, )),

        "on-click": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
        "on-drag": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
        "on-drag-finish": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),

        "on-mouse-move": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-down": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-up": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-over": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-mouse-out": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
        "on-scroll": (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
    }

    def __init__(self, interactive = True, framerate = 80):
        gtk.DrawingArea.__init__(self)
        if interactive:
            # subscribe to the pointer/button events we translate into
            # the scene's own sprite-level signals
            self.set_events(gtk.gdk.POINTER_MOTION_MASK
                            | gtk.gdk.LEAVE_NOTIFY_MASK | gtk.gdk.ENTER_NOTIFY_MASK
                            | gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK
                            | gtk.gdk.SCROLL_MASK)
            self.connect("motion_notify_event", self.__on_mouse_move)
            self.connect("enter_notify_event", self.__on_mouse_enter)
            self.connect("leave_notify_event", self.__on_mouse_leave)
            self.connect("button_press_event", self.__on_button_press)
            self.connect("button_release_event", self.__on_button_release)
            self.connect("scroll-event", self.__on_scroll)

        #: list of sprites in scene. use :func:`add_child` to add sprites
        self.sprites = []

        #: framerate of animation. This will limit how often call for
        #: redraw will be performed (that is - not more often than the framerate). It will
        #: also influence the smoothness of tweeners.
        self.framerate = framerate

        #: Scene width. Will be `None` until first expose (that is until first
        #: on-enter-frame signal below).
        self.width = None

        #: Scene height. Will be `None` until first expose (that is until first
        #: on-enter-frame signal below).
        self.height = None

        #: instance of :class:`pytweener.Tweener` that is used by
        #: :func:`animate` function, but can be also accessed directly for advanced control.
        self.tweener = None
        if pytweener:
            self.tweener = pytweener.Tweener(0.4, pytweener.Easing.Cubic.ease_in_out)

        #: instance of :class:`Colors` class for color parsing
        self.colors = Colors

        #: read only info about current framerate (frames per second)
        self.fps = 0 # inner frames per second counter

        #: Last known x position of the mouse (set on expose event)
        self.mouse_x = None

        #: Last known y position of the mouse (set on expose event)
        self.mouse_y = None

        #: Mouse cursor appearance.
        #: Replace with your own cursor or set to False to have no cursor.
        #: None will revert back the default behavior
        self.mouse_cursor = None

        # fully transparent 1x1 cursor used when mouse_cursor is False
        blank_pixmap = gtk.gdk.Pixmap(None, 1, 1, 1)
        self._blank_cursor = gtk.gdk.Cursor(blank_pixmap, blank_pixmap, gtk.gdk.Color(), gtk.gdk.Color(), 0, 0)

        #: Miminum distance in pixels for a drag to occur
        self.drag_distance = 1

        # internal interaction / animation state
        self._last_frame_time = None
        self._mouse_sprite = None          # sprite currently under the pointer
        self._drag_sprite = None           # sprite being dragged (if any)
        self.__drag_started = False
        self.__drag_start_position = None

        self._button_press_time = None # to distinguish between click and drag
        self._mouse_in = False
        self.__drawing_queued = False

        self.__drag_x, self.__drag_y = None, None  # drag offset in sprite coords

        self.__last_expose_time = dt.datetime.now()
        self.__last_cursor = None

    def add_child(self, *sprites):
        """Add one or several :class:`graphics.Sprite` sprites to scene """
        for sprite in sprites:
            # reparent if the sprite already belongs to someone else
            if sprite.parent:
                sprite.parent.remove_child(sprite)
            self.sprites.append(sprite)
            sprite.parent = self
        # keep sprites sorted so higher z_order draws later (on top)
        self.sprites = sorted(self.sprites, key=lambda sprite:sprite.z_order)

    def remove_child(self, *sprites):
        """Remove one or several :class:`graphics.Sprite` sprites from scene """
        for sprite in sprites:
            self.sprites.remove(sprite)
            sprite.parent = None

    def clear(self):
        """Remove all sprites from scene"""
        self.remove_child(*self.sprites)

    def redraw(self):
        """Queue redraw. The redraw will be performed not more often than
           the `framerate` allows"""
        if self.__drawing_queued == False: #if we are moving, then there is a timeout somewhere already
            self.__drawing_queued = True
            self._last_frame_time = dt.datetime.now()
            gobject.timeout_add(1000 / self.framerate, self.__interpolate)

    # animation bits
    def __interpolate(self):
        """Timeout callback: advance tweens and request a repaint.

        Returning True keeps the timeout alive while tweens remain."""
        if self.tweener:
            self.tweener.update((dt.datetime.now() - self._last_frame_time).microseconds / 1000000.0)
            # keep redrawing only while there are active tweens
            self.__drawing_queued = self.tweener.has_tweens()

        self._last_frame_time = dt.datetime.now()

        self.queue_draw() # this will trigger do_expose_event when the current events have been flushed

        return self.__drawing_queued

    def animate(self, sprite, duration = None, easing = None, on_complete = None, on_update = None, delay = None, **kwargs):
        """Interpolate attributes of the given object using the internal tweener
           and redrawing scene after every tweener update.
           Specify the sprite and sprite's attributes that need changing.
           `duration` defaults to 0.4 seconds and `easing` to cubic in-out
           (for others see pytweener.Easing class).

           Redraw is requested right after creating the animation.

           Example::

             # tween some_sprite to coordinates (50,100) using default duration and easing
             scene.animate(some_sprite, x = 50, y = 100)
        """
        if not self.tweener: # here we complain
            raise Exception("pytweener was not found. Include it to enable animations")

        tween = self.tweener.add_tween(sprite,
                                       duration=duration,
                                       easing=easing,
                                       on_complete=on_complete,
                                       on_update=on_update,
                                       delay=delay, **kwargs)
        self.redraw()
        return tween

    # exposure events
    def do_configure_event(self, event):
        """Track widget resizes so width/height stay current."""
        self.width, self.height = event.width, event.height

    def do_expose_event(self, event):
        """Paint one frame: clip, update fps, draw sprites, emit frame signals."""
        context = self.window.cairo_create()

        # clip to the visible part
        context.rectangle(event.area.x, event.area.y,
                          event.area.width, event.area.height)
        context.clip()

        now = dt.datetime.now()
        # NOTE(review): if two exposes land in the same microsecond this
        # divides by zero - confirm whether that can happen in practice
        self.fps = 1 / ((now - self.__last_expose_time).microseconds / 1000000.0)
        self.__last_expose_time = now

        self.mouse_x, self.mouse_y, mods = self.get_window().get_pointer()

        self.emit("on-enter-frame", context)
        for sprite in self.sprites:
            sprite._draw(context)

        self.__check_mouse(self.mouse_x, self.mouse_y)
        self.emit("on-finish-frame", context)

    def all_sprites(self, sprites = None):
        """Returns flat list of the sprite tree for simplified iteration"""
        if sprites is None:
            sprites = self.sprites

        for sprite in sprites:
            yield sprite
            if sprite.sprites:
                for child in self.all_sprites(sprite.sprites):
                    yield child

    def all_visible_sprites(self, sprites = None):
        """Returns flat list of just the visible sprites - avoid children whos
           parents are not displayed"""
        if sprites is None:
            sprites = self.sprites

        for sprite in sprites:
            if sprite.visible:
                yield sprite
                if sprite.sprites:
                    for child in self.all_visible_sprites(sprite.sprites):
                        yield child

    def get_sprite_at_position(self, x, y):
        """Returns the topmost visible sprite for given coordinates"""
        over = None
        # later sprites draw on top, so the last hit wins
        for sprite in self.all_visible_sprites():
            if sprite.interactive and self.__check_hit(sprite, x, y):
                over = sprite
        return over

    def __check_hit(self, sprite, x, y):
        # the sprite being dragged always counts as hit so the drag
        # doesn't break when the pointer outruns it
        if sprite == self._drag_sprite:
            return True
        return sprite.check_hit(x, y)

    def __check_mouse(self, x, y):
        """Update hover state and the cursor for the pointer position."""
        if x is None or self._mouse_in == False:
            return

        custom_mouse = self.mouse_cursor is not None
        cursor = gtk.gdk.ARROW

        if custom_mouse:
            if self.mouse_cursor == False:
                cursor = self._blank_cursor
            else:
                cursor = self.mouse_cursor

        #check if we have a mouse over
        over = self.get_sprite_at_position(x, y)
        if over:
            if custom_mouse == False:
                if over.draggable:
                    cursor = gtk.gdk.FLEUR
                else:
                    cursor = gtk.gdk.HAND2

            if over != self._mouse_sprite:
                over._on_mouse_over()
                self.emit("on-mouse-over", over)
                self.redraw()

        # notify the sprite the pointer just left
        if self._mouse_sprite and self._mouse_sprite != over:
            self._mouse_sprite._on_mouse_out()
            self.emit("on-mouse-out", self._mouse_sprite)
            self.redraw()

        self._mouse_sprite = over

        # only touch the X cursor when it actually changes
        if not self.__last_cursor or cursor != self.__last_cursor:
            if isinstance(cursor, gtk.gdk.Cursor):
                self.window.set_cursor(cursor)
            else:
                self.window.set_cursor(gtk.gdk.Cursor(cursor))

            self.__last_cursor = cursor

    """ mouse events """
    def __on_mouse_move(self, area, event):
        state = event.state

        if self._drag_sprite and self._drag_sprite.draggable \
           and gtk.gdk.BUTTON1_MASK & event.state:
            # dragging around; a drag only starts once the pointer has
            # moved more than drag_distance from the press position
            self.__drag_started = self.__drag_started or \
                                  (self.__drag_start_position and \
                                   (self.__drag_start_position[0] - event.x) ** 2 + \
                                   (self.__drag_start_position[1] - event.y) ** 2 > self.drag_distance ** 2)

            if self.__drag_started:
                # undo the parent's rotation so drag deltas are applied in
                # the sprite's own coordinate space
                matrix = cairo.Matrix()
                if self._drag_sprite.parent and isinstance(self._drag_sprite.parent, Sprite):
                    # TODO - this currently works only until second level
                    # should take all parents into account
                    matrix.rotate(self._drag_sprite.parent.rotation)
                matrix.invert()

                if not self.__drag_x:
                    x1, y1 = matrix.transform_point(self.__drag_start_position[0],
                                                    self.__drag_start_position[1])
                    self.__drag_x = self._drag_sprite.x - x1
                    self.__drag_y = self._drag_sprite.y - y1

                mouse_x, mouse_y = matrix.transform_point(event.x, event.y)
                new_x = mouse_x + self.__drag_x
                new_y = mouse_y + self.__drag_y

                self._drag_sprite.x, self._drag_sprite.y = new_x, new_y
                self._drag_sprite._on_drag(event)
                self.emit("on-drag", self._drag_sprite, event)
                self.redraw()
                return
        else:
            # avoid double mouse checks - the redraw will also check for mouse!
            if not self.__drawing_queued:
                self.__check_mouse(event.x, event.y)

        self.emit("on-mouse-move", event)

    def __on_mouse_enter(self, area, event):
        self._mouse_in = True

    def __on_mouse_leave(self, area, event):
        self._mouse_in = False
        if self._mouse_sprite:
            self.emit("on-mouse-out", self._mouse_sprite)
            self._mouse_sprite = None
            self.redraw()

    def __on_button_press(self, area, event):
        """Record press position/time and pick a potential drag target."""
        x = event.x
        y = event.y
        state = event.state
        self.__drag_start_position = (x, y)

        self._drag_sprite = self.get_sprite_at_position(event.x, event.y)
        if self._drag_sprite and self._drag_sprite.draggable == False:
            self._drag_sprite = None

        self._button_press_time = dt.datetime.now()
        self.emit("on-mouse-down", event)

    def __on_button_release(self, area, event):
        """Finish a drag (if any) and emit click when press was short and
        the pointer barely moved."""
        # we have a click if the drag is less than 5 pixels
        click = self._button_press_time \
                and (dt.datetime.now() - self._button_press_time) < dt.timedelta(milliseconds = 200) \
                and (event.x - self.__drag_start_position[0]) ** 2 + (event.y - self.__drag_start_position[1]) ** 2 < 60

        self._button_press_time = None
        self.__drag_start_position = None
        self.__drag_started = False

        if self._drag_sprite:
            self._drag_sprite._on_drag_finish(event)
            self.emit("on-drag-finish", self._drag_sprite, event)
            self.__drag_x, self.__drag_y = None, None
            self._drag_sprite = None

        if click:
            target = self.get_sprite_at_position(event.x, event.y)
            if target:
                target._on_click(event.state)

            self.emit("on-click", event, target)

        self.emit("on-mouse-up", event)

    def __on_scroll(self, area, event):
        self.emit("on-scroll", event)
|
"""Framework for a script to be run from a shell"""
from __future__ import print_function
import argparse
import os
import sys
from dotsite import __version__
from dotsite.debuggers import DebugExit
# stack of known version strings; main() may append the caller's own
# version and latest_version() reports the top of the stack
_versions = [__version__]
# process exit statuses: os.EX_OK is 0, so _exit_fail is the boolean True
# (a truthy, hence non-zero, failure status)
_exit_ok = os.EX_OK
_exit_fail = not _exit_ok
def latest_version():
    """Return the most recently registered version string."""
    most_recent = _versions[-1]
    return most_recent
def version(args):
    """Print `args` followed by the latest known version, then bail out."""
    message = '%s %s' % (args, latest_version())
    print(message)
    raise SystemExit
def parse_args(add_args, docstring):
    """Parse out command line arguments"""
    def run_args(args):
        """Run any global methods eponymous with args"""
        # names of options the user actually supplied (truthy values)
        valuable_args = {k for k, v in args.__dict__.items() if v}
        # module-level functions sharing a name with one of those options,
        # e.g. the global `version` function handles --version
        arg_methods = {globals()[a] for a in valuable_args if a in globals()}
        if not arg_methods:
            return
        for method in arg_methods:
            method(args)
        # a matching method fully handled the invocation - stop here
        raise SystemExit(_exit_ok)

    parser = argparse.ArgumentParser(
        description=docstring
        and docstring.splitlines()[0]
        or "No docstring provided")
    # add_args may build and return its own parser, or mutate ours in place
    result = add_args(parser)
    parser = result if result else parser
    parser.add_argument('-v', '--version', action='store_true',
                        help='Show version [default: %s]' % __version__)
    # parser.add_argument('-q', '--quiet', action='store_true',
    #                     help='Do not show stdout')
    # parser.add_argument('-Q', '--quiet_errors', action='store_true',
    #                     help='Do not show stderr')
    args = parser.parse_args()
    run_args(args)
    return args
# pylint: disable=redefined-outer-name
def main(method, add_args, version=None,
         docstring=None, error_stream=sys.stderr):
    """Run the method as a script

    Add a '--version' argument
    Call add_args(parser) to add any more
    Parse sys.argv
    call method(args)

    Catch any exceptions
        exit on SystemExit or KeyboardError
        Others:
            If version is less than 1, reraise them
            Else print them to the error stream
    """
    if version:
        _versions.append(version)
    try:
        args = parse_args(add_args, docstring)
        return _exit_ok if method(args) else _exit_fail
    except KeyboardInterrupt:
        # this framework's chosen exit status for Ctrl-C
        ctrl_c = 3
        return ctrl_c
    except SystemExit as e:
        return e.code
    except DebugExit:
        return _exit_fail
    # fixed: was the Python-2-only `except Exception, e` form, which is a
    # syntax error under Python 3 and inconsistent with the `as e` handlers
    # above
    except Exception as e:  # pylint: disable=broad-except
        # pre-1.0 versions are treated as developer builds: show traceback
        if int(latest_version().split('.')[0]) < 1:
            raise
        if error_stream:
            print(e, file=error_stream)
        return _exit_fail
    return _exit_ok  # unreachable: every path above returns or raises
Depend less directly on args
"""Framework for a script to be run from a shell"""
from __future__ import print_function
import argparse
import os
import sys
from dotsite import __version__
from dotsite.debuggers import DebugExit
# stack of known version strings; main() may append the caller's own
# version and latest_version() reports the top of the stack
_versions = [__version__]
# process exit statuses: os.EX_OK is 0, so _exit_fail is the boolean True
# (a truthy, hence non-zero, failure status)
_exit_ok = os.EX_OK
_exit_fail = not _exit_ok

# module-level parsed command-line namespace; populated by parse_args()
args = None
def latest_version():
    """Return the most recently registered version string."""
    most_recent = _versions[-1]
    return most_recent
def version():
    """Print the module-level args with the latest version, then bail out."""
    message = '%s %s' % (args, latest_version())
    print(message)
    raise SystemExit
def parse_args(add_args, docstring):
    """Parse out command line arguments"""
    def run_args():
        """Run any global methods eponymous with args"""
        # names of options the user actually supplied (truthy values)
        valuable_args = {k for k, v in args.__dict__.items() if v}
        # module-level functions sharing a name with one of those options,
        # e.g. the global `version` function handles --version
        arg_methods = {globals()[a] for a in valuable_args if a in globals()}
        if not arg_methods:
            return
        for method in arg_methods:
            method()
        # a matching method fully handled the invocation - stop here
        raise SystemExit(_exit_ok)

    parser = argparse.ArgumentParser(
        description=docstring and docstring.splitlines()[0] or "No docstring")
    # add_args may build and return its own parser, or mutate ours in place
    result = add_args(parser)
    parser = result if result else parser
    parser.add_argument('-v', '--version', action='store_true',
                        help='Show version [default: %s]' % __version__)
    # parser.add_argument('-q', '--quiet', action='store_true',
    #                     help='Do not show stdout')
    # parser.add_argument('-Q', '--quiet_errors', action='store_true',
    #                     help='Do not show stderr')
    # publish the namespace module-wide so handlers like version() can
    # read it without receiving it as a parameter
    global args  # pylint: disable=global-statement
    args = parser.parse_args()
    run_args()
    return args
# pylint: disable=redefined-outer-name
def main(method, add_args, version=None,
         docstring=None, error_stream=sys.stderr):
    """Run the method as a script

    Add a '--version' argument
    Call add_args(parser) to add any more
    Parse sys.argv
    call method(args)

    Catch any exceptions
        exit on SystemExit or KeyboardError
        Others:
            If version is less than 1, reraise them
            Else print them to the error stream
    """
    if version:
        _versions.append(version)
    try:
        # fixed: parse_args was called twice back to back (merge artifact),
        # which parsed sys.argv and ran the eponymous arg handlers twice
        # while discarding the first result; keep only the call with the
        # method-name fallback for a missing docstring
        # (also simplified `docstring and docstring or X` to `docstring or X`
        # - they are equivalent)
        args = parse_args(add_args, docstring or '%s()' % method.__name__)
        return _exit_ok if method(args) else _exit_fail
    except KeyboardInterrupt:
        # this framework's chosen exit status for Ctrl-C
        ctrl_c = 3
        return ctrl_c
    except SystemExit as e:
        return e.code
    except DebugExit:
        return _exit_fail
    except Exception as e:  # pylint: disable=broad-except
        # pre-1.0 versions are treated as developer builds: show traceback
        if int(latest_version().split('.')[0]) < 1:
            raise
        if error_stream:
            print(e, file=error_stream)
        return _exit_fail
    return _exit_ok  # unreachable: every path above returns or raises
|
conf_neutron_conf = """[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = True
# =========Start Global Config Option for Distributed L3 Router===============
# Setting the "router_distributed" flag to "True" will default to the creation
# of distributed tenant routers. The admin can override this flag by specifying
# the type of the router on the create request (admin-only attribute). Default
# value is "False" to support legacy mode (centralized) routers.
#
# router_distributed = False
#
# ===========End Global Config Option for Distributed L3 Router===============
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/neutron
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not user_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server to
# bind_host = 0.0.0.0
# Port to bind the API server to
# bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
core_plugin = ml2
# Example: core_plugin = ml2
# (StrOpt) Neutron IPAM (IP address management) driver to be loaded from the
# neutron.ipam_drivers namespace. See setup.cfg for the entry point names.
# If ipam_driver is not set (default behavior), no ipam driver is used.
# Example: ipam_driver =
# In order to use the reference implementation of neutron ipam driver, use
# 'internal'.
# Example: ipam_driver = internal
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering,qos
# Paste configuration file
# api_paste_config = api-paste.ini
# (StrOpt) Hostname to be used by the neutron server, agents and services
# running on this machine. All the agents and services running on this machine
# must use the same host value.
# The default value is hostname of the machine.
#
# host =
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# DVR Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be randomly
# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
# avoid mixing them up with MAC's allocated for tenant ports.
# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
# The default is 3 octet
# dvr_base_mac = fa:16:3f:00:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds). Use -1 to
# tell dnsmasq to use infinite lease times.
# dhcp_lease_duration = 86400
# Domain to use for building the hostnames
# dns_domain = openstacklocal
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet. For IPv6, validate only if
# gateway is not a link local address. Deprecated, to be removed during the
# K release, at which point the check will be mandatory.
# force_gateway_on_subnet = True
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# Maximum number of routes per router
# max_routes = 30
# Default Subnet Pool to be used for IPv4 subnet-allocation.
# Specifies by UUID the pool to be used in case of subnet-create being called
# without a subnet-pool ID. The default of None means that no pool will be
# used unless passed explicitly to subnet create. If no pool is used, then a
# CIDR must be passed to create a subnet and that subnet will not be allocated
# from any pool; it will be considered part of the tenant's private address
# space.
# default_ipv4_subnet_pool =
# Default Subnet Pool to be used for IPv6 subnet-allocation.
# Specifies by UUID the pool to be used in case of subnet-create being
# called without a subnet-pool ID. Set to "prefix_delegation"
# to enable IPv6 Prefix Delegation in a PD-capable environment.
# See the description for default_ipv4_subnet_pool for more information.
# default_ipv6_subnet_pool =
# =========== items for MTU selection and advertisement =============
# Advertise MTU. If True, effort is made to advertise MTU
# settings to VMs via network methods (ie. DHCP and RA MTU options)
# when the network's preferred MTU is known.
# advertise_mtu = False
# ======== end of items for MTU selection and advertisement =========
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# Agent starts with admin_state_up=False when enable_new_agents=False.
# In the case, user's resources will not be scheduled automatically to the
# agent until admin changes admin_state_up to True.
# enable_new_agents = True
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# (StrOpt) Representing the resource type whose load is being reported by
# the agent.
# This can be 'networks','subnets' or 'ports'. When specified (Default is networks),
# the server will extract particular load sent as part of its agent configuration object
# from the agent report state, which is the number of resources being consumed, at
# every report_interval.
# dhcp_load_type can be used in combination with network_scheduler_driver =
# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
# When the network_scheduler_driver is WeightScheduler, dhcp_load_type can
# be configured to represent the choice for the resource being balanced.
# Example: dhcp_load_type = networks
# Values:
# networks - number of networks hosted on the agent
# subnets - number of subnets associated with the networks hosted on the agent
# ports - number of ports associated with the networks hosted on the agent
# dhcp_load_type = networks
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Allow automatic rescheduling of routers from dead L3 agents with
# admin_state_up set to True to alive agents.
# allow_automatic_l3agent_failover = False
# Allow automatic removal of networks from dead DHCP agents with
# admin_state_up set to True.
# Networks could then be rescheduled if network_auto_schedule is True
# allow_automatic_dhcp_failover = True
# Number of DHCP agents scheduled to host a tenant network.
# If this number is greater than 1, the scheduler automatically
# assigns multiple DHCP agents for a given tenant network,
# providing high availability for DHCP service.
# dhcp_agents_per_network = 1
# Enable services on agents with admin_state_up False.
# If this option is False, when admin_state_up of an agent is turned to
# False, services on it will be disabled. If this option is True, services
# on agents with admin_state_up False keep available and manual scheduling
# to such agents is available. Agents with admin_state_up False are not
# selected for automatic scheduling regardless of this option.
# enable_services_on_agents_with_admin_state_down = False
# =========== end of items for agent scheduler extension =====
# =========== items for l3 extension ==============
# Enable high availability for virtual routers.
# l3_ha = False
#
# Maximum number of l3 agents which a HA router will be scheduled on. If it
# is set to 0 the router will be scheduled on every agent.
# max_l3_agents_per_router = 3
#
# Minimum number of l3 agents which a HA router will be scheduled on. The
# default value is 2.
# min_l3_agents_per_router = 2
#
# CIDR of the administrative network if HA mode is enabled
# l3_ha_net_cidr = 169.254.192.0/18
#
# Enable snat by default on external gateway when available
# enable_snat_by_default = True
#
# The network type to use when creating the HA network for an HA router.
# By default or if empty, the first 'tenant_network_types'
# is used. This is helpful when the VRRP traffic should use a specific
# network which not the default one.
# ha_network_type =
# Example: ha_network_type = flat
#
# The physical network name with which the HA network can be created.
# ha_network_physical_name =
# Example: ha_network_physical_name = physnet1
# =========== end of items for l3 extension =======
# =========== items for metadata proxy configuration ==============
# User (uid or name) running metadata proxy after its initialization
# (if empty: agent effective user)
# metadata_proxy_user =
# Group (gid or name) running metadata proxy after its initialization
# (if empty: agent effective group)
# metadata_proxy_group =
# Enable/Disable log watch by metadata proxy, it should be disabled when
# metadata_proxy_user/group is not allowed to read/write its log file and
# 'copytruncate' logrotate option must be used if logrotate is enabled on
# metadata proxy log files. Option default value is deduced from
# metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent
# effective user id/name.
# metadata_proxy_watch_log =
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# =========== end of items for metadata proxy configuration ==============
# ========== items for VLAN trunking networks ==========
# Setting this flag to True will allow plugins that support it to
# create VLAN transparent networks. This flag has no effect for
# plugins that do not support VLAN transparent networks.
# vlan_transparent = False
# ========== end of items for VLAN trunking networks ==========
# =========== WSGI parameters related to the API server ==============
# Number of separate API worker processes to spawn. If not specified or < 1,
# the default value is equal to the number of CPUs available.
# api_workers = <number of CPUs>
# Number of separate RPC worker processes to spawn. If not specified or < 1,
# a single RPC worker process is spawned by the parent process.
# rpc_workers = 1
# Timeout for client connections socket operations. If an
# incoming connection is idle for this number of seconds it
# will be closed. A value of '0' means wait forever. (integer
# value)
# client_socket_timeout = 900
# wsgi keepalive option. Determines if connections are allowed to be held open
# by clients after a request is fulfilled. A value of False will ensure that
# the socket connection will be explicitly closed once a response has been
# sent to the client.
# wsgi_keep_alive = True
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update it's cache.
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
# Username for connection to nova in admin context
# nova_admin_username =
# The uuid of the admin nova tenant
# nova_admin_tenant_id =
# The name of the admin nova tenant. If the uuid of the admin nova tenant
# is set, this is optional. Useful for cases where the uuid of the admin
# nova tenant is not available when configuration is being done.
# nova_admin_tenant_name =
# Password for connection to nova in admin context.
# nova_admin_password =
# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =
# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =
# Boolean to control ignoring SSL errors on the nova url
# nova_api_insecure = False
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of neutron nova interactions ==========
#
# Options defined in oslo.messaging
#
# Use durable queues in amqp. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
# amqp_durable_queues=false
# Auto-delete queues in amqp. (boolean value)
# amqp_auto_delete=false
# Size of RPC connection pool. (integer value)
# rpc_conn_pool_size=30
# Qpid broker hostname. (string value)
# qpid_hostname=localhost
# Qpid broker port. (integer value)
# qpid_port=5672
# Qpid HA cluster host:port pairs. (list value)
# qpid_hosts=$qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
# qpid_username=
# Password for Qpid connection. (string value)
# qpid_password=
# Space separated list of SASL mechanisms to use for auth.
# (string value)
# qpid_sasl_mechanisms=
# Seconds between connection keepalive heartbeats. (integer
# value)
# qpid_heartbeat=60
# Transport to use, either 'tcp' or 'ssl'. (string value)
# qpid_protocol=tcp
# Whether to disable the Nagle algorithm. (boolean value)
# qpid_tcp_nodelay=true
# The qpid topology version to use. Version 1 is what was
# originally used by impl_qpid. Version 2 includes some
# backwards-incompatible changes that allow broker federation
# to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break.
# (integer value)
# qpid_topology_version=1
# SSL version to use (valid only if SSL enabled). valid values
# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
# distributions. (string value)
# kombu_ssl_version=
# SSL key file (valid only if SSL enabled). (string value)
# kombu_ssl_keyfile=
# SSL cert file (valid only if SSL enabled). (string value)
# kombu_ssl_certfile=
# SSL certification authority file (valid only if SSL
# enabled). (string value)
# kombu_ssl_ca_certs=
# How long to wait before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
# kombu_reconnect_delay=1.0
# The RabbitMQ broker address where a single node is used.
# (string value)
# rabbit_host=localhost
# The RabbitMQ broker port where a single node is used.
# (integer value)
# rabbit_port=5672
# RabbitMQ HA cluster host:port pairs. (list value)
# rabbit_hosts=$rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
# rabbit_use_ssl=false
# The RabbitMQ userid. (string value)
# rabbit_userid=guest
# The RabbitMQ password. (string value)
# rabbit_password=guest
# the RabbitMQ login method (string value)
# rabbit_login_method=AMQPLAIN
# The RabbitMQ virtual host. (string value)
# rabbit_virtual_host=/
# How frequently to retry connecting with RabbitMQ. (integer
# value)
# rabbit_retry_interval=1
# How long to backoff for between retries when connecting to
# RabbitMQ. (integer value)
# rabbit_retry_backoff=2
# Maximum number of RabbitMQ connection retries. Default is 0
# (infinite retry count). (integer value)
# rabbit_max_retries=0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
# this option, you must wipe the RabbitMQ database. (boolean
# value)
# rabbit_ha_queues=false
# If passed, use a fake RabbitMQ provider. (boolean value)
# fake_rabbit=false
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
# interface, or IP. The "host" option should point or resolve
# to this address. (string value)
# rpc_zmq_bind_address=*
# MatchMaker driver. (string value)
# rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
# ZeroMQ receiver listening port. (integer value)
# rpc_zmq_port=9501
# Number of ZeroMQ contexts, defaults to 1. (integer value)
# rpc_zmq_contexts=1
# Maximum number of ingress messages to locally buffer per
# topic. Default is unlimited. (integer value)
# rpc_zmq_topic_backlog=
# Directory for holding IPC sockets. (string value)
# rpc_zmq_ipc_dir=/var/run/openstack
# Name of this node. Must be a valid hostname, FQDN, or IP
# address. Must match "host" option, if running Nova. (string
# value)
# rpc_zmq_host=oslo
# Seconds to wait before a cast expires (TTL). Only supported
# by impl_zmq. (integer value)
# rpc_cast_timeout=30
# Heartbeat frequency. (integer value)
# matchmaker_heartbeat_freq=300
# Heartbeat time-to-live. (integer value)
# matchmaker_heartbeat_ttl=600
# Size of RPC greenthread pool. (integer value)
# rpc_thread_pool_size=64
# Driver or drivers to handle sending notifications. (multi
# valued)
# notification_driver=
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# notification_topics=notifications
# Seconds to wait for a response from a call. (integer value)
# rpc_response_timeout=60
# A URL representing the messaging driver to use and its full
# configuration. If not set, we fall back to the rpc_backend
# option and driver specific configuration. (string value)
# transport_url=
# The messaging driver to use, defaults to rabbit. Other
# drivers include qpid and zmq. (string value)
rpc_backend=rabbit
# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the
# transport_url option. (string value)
# control_exchange=openstack
[matchmaker_redis]
#
# Options defined in oslo.messaging
#
# Host to locate redis. (string value)
# host=127.0.0.1
# Use this port to connect to redis host. (integer value)
# port=6379
# Password for Redis server (optional). (string value)
# password=
[matchmaker_ring]
#
# Options defined in oslo.messaging
#
# Matchmaker ring file (JSON). (string value)
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
# ringfile=/etc/oslo/matchmaker_ring.json
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota.driver.DbQuotaDriver
# Resource name(s) that are supported in quota features
# This option is deprecated for removal in the M release, please refrain from using it
# quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
# default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
# quota_network = 10
# Number of subnets allowed per tenant. A negative value means unlimited.
# quota_subnet = 10
# Number of ports allowed per tenant. A negative value means unlimited.
# quota_port = 50
# Number of security groups allowed per tenant. A negative value means
# unlimited.
# quota_security_group = 10
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
# quota_security_group_rule = 100
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on Openstack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on Openstack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_health_monitor = -1
# Number of loadbalancers allowed per tenant. A negative value means unlimited.
# quota_loadbalancer = 10
# Number of listeners allowed per tenant. A negative value means unlimited.
# quota_listener = -1
# Number of v2 health monitors allowed per tenant. A negative value means
# unlimited. These health monitors exist under the lbaas v2 API
# quota_healthmonitor = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
# Number of firewalls allowed per tenant. A negative value means unlimited.
# quota_firewall = 1
# Number of firewall policies allowed per tenant. A negative value means
# unlimited.
# quota_firewall_policy = 1
# Number of firewall rules allowed per tenant. A negative value means
# unlimited.
# quota_firewall_rule = 100
# Default number of RBAC entries allowed per tenant. A negative value means
# unlimited.
# quota_rbac_policy = 10
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
# Set to true to add comments to generated iptables rules that describe
# each rule's purpose. (System must support the iptables comments module.)
# comment_iptables_rules = True
# Root helper daemon application to use when possible.
# root_helper_daemon =
# Use the root helper when listing the namespaces on a system. This may not
# be required depending on the security configuration. If the root helper is
# not required, set this to False for a performance improvement.
# use_helper_for_ns_read = True
# The interval to check external processes for failure in seconds (0=disabled)
# check_child_processes_interval = 60
# Action to take when an external process spawned by an agent dies
# Values:
# respawn - Respawns the external process
# exit - Exits the agent
# check_child_processes_action = respawn
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30
# =========== end of items for agent management extension =====
[cors]
#
# From oslo.middleware.cors
#
# Indicate whether this resource may be shared with the domain received in the
# requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual request.
# (list value)
#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
[cors.subdomain]
#
# From oslo.middleware.cors
#
# Indicate whether this resource may be shared with the domain received in the
# requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual request.
# (list value)
#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
[keystone_authtoken]
auth_uri = {{ auth_uri }}
auth_url = {{ auth_url }}
memcached_servers = {{ memcached_servers }}
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = {{ neutron_pass }}
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql+pymysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite://
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
#connection = sqlite:////var/lib/neutron/neutron.sqlite
# Database engine for which script will be generated when using offline
# migration
# engine =
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[nova]
# Name of the plugin to load
# auth_plugin =
# Config Section from which to load plugin specific options
# auth_section =
# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# cafile =
# PEM encoded client certificate cert file
# certfile =
# Verify HTTPS connections.
# insecure = False
# PEM encoded client certificate key file
# keyfile =
# Name of nova region to use. Useful if keystone manages more than one region.
# region_name =
# Timeout value for http requests
# timeout =
[oslo_concurrency]
# Directory to use for lock files. For security, the specified directory should
# only be writable by the user running the processes that need locking.
# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
# a lock path must be set.
lock_path = $state_path/lock
# Enables or disables inter-process locks.
# disable_process_locking = False
[oslo_policy]
# The JSON file that defines policies.
# policy_file = policy.json
# Default rule. Enforced when a requested rule is not found.
# policy_default_rule = default
# Directories where policy configuration files are stored.
# They can be relative to any directory in the search path defined by the
# config_dir option, or absolute paths. The file defined by policy_file
# must exist for these directories to be searched. Missing or empty
# directories are ignored.
# policy_dirs = policy.d
[oslo_messaging_amqp]
#
# From oslo.messaging
#
# Address prefix used when sending to a specific server (string value)
# Deprecated group/name - [amqp1]/server_request_prefix
# server_request_prefix = exclusive
# Address prefix used when broadcasting to all servers (string value)
# Deprecated group/name - [amqp1]/broadcast_prefix
# broadcast_prefix = broadcast
# Address prefix when sending to any server in group (string value)
# Deprecated group/name - [amqp1]/group_request_prefix
# group_request_prefix = unicast
# Name for the AMQP container (string value)
# Deprecated group/name - [amqp1]/container_name
# container_name =
# Timeout for inactive connections (in seconds) (integer value)
# Deprecated group/name - [amqp1]/idle_timeout
# idle_timeout = 0
# Debug: dump AMQP frames to stdout (boolean value)
# Deprecated group/name - [amqp1]/trace
# trace = false
# CA certificate PEM file for verifying server certificate (string value)
# Deprecated group/name - [amqp1]/ssl_ca_file
# ssl_ca_file =
# Identifying certificate PEM file to present to clients (string value)
# Deprecated group/name - [amqp1]/ssl_cert_file
# ssl_cert_file =
# Private key PEM file used to sign cert_file certificate (string value)
# Deprecated group/name - [amqp1]/ssl_key_file
# ssl_key_file =
# Password for decrypting ssl_key_file (if encrypted) (string value)
# Deprecated group/name - [amqp1]/ssl_key_password
# ssl_key_password =
# Accept clients using either SSL or plain TCP (boolean value)
# Deprecated group/name - [amqp1]/allow_insecure_clients
# allow_insecure_clients = false
[oslo_messaging_notifications]
#
# From oslo.messaging
#
# The Drivers(s) to handle sending notifications. Possible values are
# messaging, messagingv2, routing, log, test, noop (multi valued)
# Deprecated group/name - [DEFAULT]/notification_driver
#driver =
# A URL representing the messaging driver to use for notifications. If not set,
# we fall back to the same configuration used for RPC. (string value)
# Deprecated group/name - [DEFAULT]/notification_transport_url
#transport_url = <None>
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# Deprecated group/name - [DEFAULT]/notification_topics
#topics = notifications
[oslo_messaging_qpid]
#
# From oslo.messaging
#
# Use durable queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
# amqp_durable_queues = false
# Auto-delete queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
# amqp_auto_delete = false
# Size of RPC connection pool. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
# rpc_conn_pool_size = 30
# Qpid broker hostname. (string value)
# Deprecated group/name - [DEFAULT]/qpid_hostname
# qpid_hostname = localhost
# Qpid broker port. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_port
# qpid_port = 5672
# Qpid HA cluster host:port pairs. (list value)
# Deprecated group/name - [DEFAULT]/qpid_hosts
# qpid_hosts = $qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
# Deprecated group/name - [DEFAULT]/qpid_username
# qpid_username =
# Password for Qpid connection. (string value)
# Deprecated group/name - [DEFAULT]/qpid_password
# qpid_password =
# Space separated list of SASL mechanisms to use for auth. (string value)
# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
# qpid_sasl_mechanisms =
# Seconds between connection keepalive heartbeats. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_heartbeat
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'. (string value)
# Deprecated group/name - [DEFAULT]/qpid_protocol
# qpid_protocol = tcp
# Whether to disable the Nagle algorithm. (boolean value)
# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
# qpid_tcp_nodelay = true
# The number of prefetched messages held by receiver. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
# qpid_receiver_capacity = 1
# The qpid topology version to use. Version 1 is what was originally used by
# impl_qpid. Version 2 includes some backwards-incompatible changes that allow
# broker federation to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_topology_version
# qpid_topology_version = 1
[oslo_messaging_rabbit]
rabbit_hosts = {{ rabbit_hosts }}
rabbit_userid = {{ rabbit_user }}
rabbit_password = {{ rabbit_password }}
#
# From oslo.messaging
#
# Use durable queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
# amqp_durable_queues = false
# Auto-delete queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
# amqp_auto_delete = false
# Size of RPC connection pool. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
# rpc_conn_pool_size = 30
# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
# distributions. (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_version
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
# kombu_ssl_ca_certs =
# How long to wait before reconnecting in response to an AMQP consumer cancel
# notification. (floating point value)
# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
# kombu_reconnect_delay = 1.0
# The RabbitMQ broker address where a single node is used. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_host
# rabbit_host = localhost
# The RabbitMQ broker port where a single node is used. (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_port
# rabbit_port = 5672
# RabbitMQ HA cluster host:port pairs. (list value)
# Deprecated group/name - [DEFAULT]/rabbit_hosts
# rabbit_hosts = $rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
# rabbit_use_ssl = false
# The RabbitMQ userid. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_userid
# rabbit_userid = guest
# The RabbitMQ password. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_password
# rabbit_password = guest
# The RabbitMQ login method. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_login_method
# rabbit_login_method = AMQPLAIN
# The RabbitMQ virtual host. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
# rabbit_virtual_host = /
# How frequently to retry connecting with RabbitMQ. (integer value)
# rabbit_retry_interval = 1
# How long to backoff for between retries when connecting to RabbitMQ. (integer
# value)
# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
# rabbit_retry_backoff = 2
# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
# count). (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_max_retries
# rabbit_max_retries = 0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
# must wipe the RabbitMQ database. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
# rabbit_ha_queues = false
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
# Deprecated group/name - [DEFAULT]/fake_rabbit
# fake_rabbit = false
[qos]
# Drivers list to use to send the update notification
# notification_drivers = message_queue
"""
Add ssl section
conf_neutron_conf = """[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = True
# =========Start Global Config Option for Distributed L3 Router===============
# Setting the "router_distributed" flag to "True" will default to the creation
# of distributed tenant routers. The admin can override this flag by specifying
# the type of the router on the create request (admin-only attribute). Default
# value is "False" to support legacy mode (centralized) routers.
#
# router_distributed = False
#
# ===========End Global Config Option for Distributed L3 Router===============
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/neutron
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server to
# bind_host = 0.0.0.0
# Port to bind the API server to
# bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
core_plugin = ml2
# Example: core_plugin = ml2
# (StrOpt) Neutron IPAM (IP address management) driver to be loaded from the
# neutron.ipam_drivers namespace. See setup.cfg for the entry point names.
# If ipam_driver is not set (default behavior), no ipam driver is used.
# Example: ipam_driver =
# In order to use the reference implementation of neutron ipam driver, use
# 'internal'.
# Example: ipam_driver = internal
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering,qos
# Paste configuration file
# api_paste_config = api-paste.ini
# (StrOpt) Hostname to be used by the neutron server, agents and services
# running on this machine. All the agents and services running on this machine
# must use the same host value.
# The default value is hostname of the machine.
#
# host =
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# DVR Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be randomly
# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
# avoid mixing them up with MAC's allocated for tenant ports.
# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
# The default is 3 octet
# dvr_base_mac = fa:16:3f:00:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds). Use -1 to
# tell dnsmasq to use infinite lease times.
# dhcp_lease_duration = 86400
# Domain to use for building the hostnames
# dns_domain = openstacklocal
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet. For IPv6, validate only if
# gateway is not a link local address. Deprecated, to be removed during the
# K release, at which point the check will be mandatory.
# force_gateway_on_subnet = True
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# Maximum number of routes per router
# max_routes = 30
# Default Subnet Pool to be used for IPv4 subnet-allocation.
# Specifies by UUID the pool to be used in case of subnet-create being called
# without a subnet-pool ID. The default of None means that no pool will be
# used unless passed explicitly to subnet create. If no pool is used, then a
# CIDR must be passed to create a subnet and that subnet will not be allocated
# from any pool; it will be considered part of the tenant's private address
# space.
# default_ipv4_subnet_pool =
# Default Subnet Pool to be used for IPv6 subnet-allocation.
# Specifies by UUID the pool to be used in case of subnet-create being
# called without a subnet-pool ID. Set to "prefix_delegation"
# to enable IPv6 Prefix Delegation in a PD-capable environment.
# See the description for default_ipv4_subnet_pool for more information.
# default_ipv6_subnet_pool =
# =========== items for MTU selection and advertisement =============
# Advertise MTU. If True, effort is made to advertise MTU
# settings to VMs via network methods (ie. DHCP and RA MTU options)
# when the network's preferred MTU is known.
# advertise_mtu = False
# ======== end of items for MTU selection and advertisement =========
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# Agent starts with admin_state_up=False when enable_new_agents=False.
# In the case, user's resources will not be scheduled automatically to the
# agent until admin changes admin_state_up to True.
# enable_new_agents = True
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# (StrOpt) Representing the resource type whose load is being reported by
# the agent.
# This can be 'networks','subnets' or 'ports'. When specified (Default is networks),
# the server will extract particular load sent as part of its agent configuration object
# from the agent report state, which is the number of resources being consumed, at
# every report_interval.
# dhcp_load_type can be used in combination with network_scheduler_driver =
# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
# When the network_scheduler_driver is WeightScheduler, dhcp_load_type can
# be configured to represent the choice for the resource being balanced.
# Example: dhcp_load_type = networks
# Values:
# networks - number of networks hosted on the agent
# subnets - number of subnets associated with the networks hosted on the agent
# ports - number of ports associated with the networks hosted on the agent
# dhcp_load_type = networks
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Allow automatic rescheduling of routers from dead L3 agents with
# admin_state_up set to True to alive agents.
# allow_automatic_l3agent_failover = False
# Allow automatic removal of networks from dead DHCP agents with
# admin_state_up set to True.
# Networks could then be rescheduled if network_auto_schedule is True
# allow_automatic_dhcp_failover = True
# Number of DHCP agents scheduled to host a tenant network.
# If this number is greater than 1, the scheduler automatically
# assigns multiple DHCP agents for a given tenant network,
# providing high availability for DHCP service.
# dhcp_agents_per_network = 1
# Enable services on agents with admin_state_up False.
# If this option is False, when admin_state_up of an agent is turned to
# False, services on it will be disabled. If this option is True, services
# on agents with admin_state_up False keep available and manual scheduling
# to such agents is available. Agents with admin_state_up False are not
# selected for automatic scheduling regardless of this option.
# enable_services_on_agents_with_admin_state_down = False
# =========== end of items for agent scheduler extension =====
# =========== items for l3 extension ==============
# Enable high availability for virtual routers.
# l3_ha = False
#
# Maximum number of l3 agents which a HA router will be scheduled on. If it
# is set to 0 the router will be scheduled on every agent.
# max_l3_agents_per_router = 3
#
# Minimum number of l3 agents which a HA router will be scheduled on. The
# default value is 2.
# min_l3_agents_per_router = 2
#
# CIDR of the administrative network if HA mode is enabled
# l3_ha_net_cidr = 169.254.192.0/18
#
# Enable snat by default on external gateway when available
# enable_snat_by_default = True
#
# The network type to use when creating the HA network for an HA router.
# By default or if empty, the first 'tenant_network_types'
# is used. This is helpful when the VRRP traffic should use a specific
# network which not the default one.
# ha_network_type =
# Example: ha_network_type = flat
#
# The physical network name with which the HA network can be created.
# ha_network_physical_name =
# Example: ha_network_physical_name = physnet1
# =========== end of items for l3 extension =======
# =========== items for metadata proxy configuration ==============
# User (uid or name) running metadata proxy after its initialization
# (if empty: agent effective user)
# metadata_proxy_user =
# Group (gid or name) running metadata proxy after its initialization
# (if empty: agent effective group)
# metadata_proxy_group =
# Enable/Disable log watch by metadata proxy, it should be disabled when
# metadata_proxy_user/group is not allowed to read/write its log file and
# 'copytruncate' logrotate option must be used if logrotate is enabled on
# metadata proxy log files. Option default value is deduced from
# metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent
# effective user id/name.
# metadata_proxy_watch_log =
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# =========== end of items for metadata proxy configuration ==============
# ========== items for VLAN trunking networks ==========
# Setting this flag to True will allow plugins that support it to
# create VLAN transparent networks. This flag has no effect for
# plugins that do not support VLAN transparent networks.
# vlan_transparent = False
# ========== end of items for VLAN trunking networks ==========
# =========== WSGI parameters related to the API server ==============
# Number of separate API worker processes to spawn. If not specified or < 1,
# the default value is equal to the number of CPUs available.
# api_workers = <number of CPUs>
# Number of separate RPC worker processes to spawn. If not specified or < 1,
# a single RPC worker process is spawned by the parent process.
# rpc_workers = 1
# Timeout for client connections socket operations. If an
# incoming connection is idle for this number of seconds it
# will be closed. A value of '0' means wait forever. (integer
# value)
# client_socket_timeout = 900
# wsgi keepalive option. Determines if connections are allowed to be held open
# by clients after a request is fulfilled. A value of False will ensure that
# the socket connection will be explicitly closed once a response has been
# sent to the client.
# wsgi_keep_alive = True
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
# Username for connection to nova in admin context
# nova_admin_username =
# The uuid of the admin nova tenant
# nova_admin_tenant_id =
# The name of the admin nova tenant. If the uuid of the admin nova tenant
# is set, this is optional. Useful for cases where the uuid of the admin
# nova tenant is not available when configuration is being done.
# nova_admin_tenant_name =
# Password for connection to nova in admin context.
# nova_admin_password =
# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =
# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =
# Boolean to control ignoring SSL errors on the nova url
# nova_api_insecure = False
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of neutron nova interactions ==========
#
# Options defined in oslo.messaging
#
# Use durable queues in amqp. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
# amqp_durable_queues=false
# Auto-delete queues in amqp. (boolean value)
# amqp_auto_delete=false
# Size of RPC connection pool. (integer value)
# rpc_conn_pool_size=30
# Qpid broker hostname. (string value)
# qpid_hostname=localhost
# Qpid broker port. (integer value)
# qpid_port=5672
# Qpid HA cluster host:port pairs. (list value)
# qpid_hosts=$qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
# qpid_username=
# Password for Qpid connection. (string value)
# qpid_password=
# Space separated list of SASL mechanisms to use for auth.
# (string value)
# qpid_sasl_mechanisms=
# Seconds between connection keepalive heartbeats. (integer
# value)
# qpid_heartbeat=60
# Transport to use, either 'tcp' or 'ssl'. (string value)
# qpid_protocol=tcp
# Whether to disable the Nagle algorithm. (boolean value)
# qpid_tcp_nodelay=true
# The qpid topology version to use. Version 1 is what was
# originally used by impl_qpid. Version 2 includes some
# backwards-incompatible changes that allow broker federation
# to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break.
# (integer value)
# qpid_topology_version=1
# SSL version to use (valid only if SSL enabled). valid values
# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
# distributions. (string value)
# kombu_ssl_version=
# SSL key file (valid only if SSL enabled). (string value)
# kombu_ssl_keyfile=
# SSL cert file (valid only if SSL enabled). (string value)
# kombu_ssl_certfile=
# SSL certification authority file (valid only if SSL
# enabled). (string value)
# kombu_ssl_ca_certs=
# How long to wait before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
# kombu_reconnect_delay=1.0
# The RabbitMQ broker address where a single node is used.
# (string value)
# rabbit_host=localhost
# The RabbitMQ broker port where a single node is used.
# (integer value)
# rabbit_port=5672
# RabbitMQ HA cluster host:port pairs. (list value)
# rabbit_hosts=$rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
# rabbit_use_ssl=false
# The RabbitMQ userid. (string value)
# rabbit_userid=guest
# The RabbitMQ password. (string value)
# rabbit_password=guest
# the RabbitMQ login method (string value)
# rabbit_login_method=AMQPLAIN
# The RabbitMQ virtual host. (string value)
# rabbit_virtual_host=/
# How frequently to retry connecting with RabbitMQ. (integer
# value)
# rabbit_retry_interval=1
# How long to backoff for between retries when connecting to
# RabbitMQ. (integer value)
# rabbit_retry_backoff=2
# Maximum number of RabbitMQ connection retries. Default is 0
# (infinite retry count). (integer value)
# rabbit_max_retries=0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
# this option, you must wipe the RabbitMQ database. (boolean
# value)
# rabbit_ha_queues=false
# If passed, use a fake RabbitMQ provider. (boolean value)
# fake_rabbit=false
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
# interface, or IP. The "host" option should point or resolve
# to this address. (string value)
# rpc_zmq_bind_address=*
# MatchMaker driver. (string value)
# rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
# ZeroMQ receiver listening port. (integer value)
# rpc_zmq_port=9501
# Number of ZeroMQ contexts, defaults to 1. (integer value)
# rpc_zmq_contexts=1
# Maximum number of ingress messages to locally buffer per
# topic. Default is unlimited. (integer value)
# rpc_zmq_topic_backlog=
# Directory for holding IPC sockets. (string value)
# rpc_zmq_ipc_dir=/var/run/openstack
# Name of this node. Must be a valid hostname, FQDN, or IP
# address. Must match "host" option, if running Nova. (string
# value)
# rpc_zmq_host=oslo
# Seconds to wait before a cast expires (TTL). Only supported
# by impl_zmq. (integer value)
# rpc_cast_timeout=30
# Heartbeat frequency. (integer value)
# matchmaker_heartbeat_freq=300
# Heartbeat time-to-live. (integer value)
# matchmaker_heartbeat_ttl=600
# Size of RPC greenthread pool. (integer value)
# rpc_thread_pool_size=64
# Driver or drivers to handle sending notifications. (multi
# valued)
# notification_driver=
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# notification_topics=notifications
# Seconds to wait for a response from a call. (integer value)
# rpc_response_timeout=60
# A URL representing the messaging driver to use and its full
# configuration. If not set, we fall back to the rpc_backend
# option and driver specific configuration. (string value)
# transport_url=
# The messaging driver to use, defaults to rabbit. Other
# drivers include qpid and zmq. (string value)
rpc_backend=rabbit
# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the
# transport_url option. (string value)
# control_exchange=openstack
[matchmaker_redis]
#
# Options defined in oslo.messaging
#
# Host to locate redis. (string value)
# host=127.0.0.1
# Use this port to connect to redis host. (integer value)
# port=6379
# Password for Redis server (optional). (string value)
# password=
[matchmaker_ring]
#
# Options defined in oslo.messaging
#
# Matchmaker ring file (JSON). (string value)
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
# ringfile=/etc/oslo/matchmaker_ring.json
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota.driver.DbQuotaDriver
# Resource name(s) that are supported in quota features
# This option is deprecated for removal in the M release, please refrain from using it
# quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
# default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
# quota_network = 10
# Number of subnets allowed per tenant. A negative value means unlimited.
# quota_subnet = 10
# Number of ports allowed per tenant. A negative value means unlimited.
# quota_port = 50
# Number of security groups allowed per tenant. A negative value means
# unlimited.
# quota_security_group = 10
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
# quota_security_group_rule = 100
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on back-end, a health monitor is a resource
# consumer and that is the reason why quota is possible.
# quota_health_monitor = -1
# Number of loadbalancers allowed per tenant. A negative value means unlimited.
# quota_loadbalancer = 10
# Number of listeners allowed per tenant. A negative value means unlimited.
# quota_listener = -1
# Number of v2 health monitors allowed per tenant. A negative value means
# unlimited. These health monitors exist under the lbaas v2 API
# quota_healthmonitor = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
# Number of firewalls allowed per tenant. A negative value means unlimited.
# quota_firewall = 1
# Number of firewall policies allowed per tenant. A negative value means
# unlimited.
# quota_firewall_policy = 1
# Number of firewall rules allowed per tenant. A negative value means
# unlimited.
# quota_firewall_rule = 100
# Default number of RBAC entries allowed per tenant. A negative value means
# unlimited.
# quota_rbac_policy = 10
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
# Set to true to add comments to generated iptables rules that describe
# each rule's purpose. (System must support the iptables comments module.)
# comment_iptables_rules = True
# Root helper daemon application to use when possible.
# root_helper_daemon =
# Use the root helper when listing the namespaces on a system. This may not
# be required depending on the security configuration. If the root helper is
# not required, set this to False for a performance improvement.
# use_helper_for_ns_read = True
# The interval to check external processes for failure in seconds (0=disabled)
# check_child_processes_interval = 60
# Action to take when an external process spawned by an agent dies
# Values:
# respawn - Respawns the external process
# exit - Exits the agent
# check_child_processes_action = respawn
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30
# =========== end of items for agent management extension =====
[cors]
#
# From oslo.middleware.cors
#
# Indicate whether this resource may be shared with the domain received in the
# requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual request.
# (list value)
#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
[cors.subdomain]
#
# From oslo.middleware.cors
#
# Indicate whether this resource may be shared with the domain received in the
# requests "origin" header. (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual request.
# (list value)
#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID
[keystone_authtoken]
auth_uri = {{ auth_uri }}
auth_url = {{ auth_url }}
memcached_servers = {{ memcached_servers }}
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = {{ neutron_pass }}
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql+pymysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite://
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
#connection = sqlite:////var/lib/neutron/neutron.sqlite
# Database engine for which script will be generated when using offline
# migration
# engine =
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[nova]
# Name of the plugin to load
# auth_plugin =
# Config Section from which to load plugin specific options
# auth_section =
# PEM encoded Certificate Authority to use when verifying HTTPs connections.
# cafile =
# PEM encoded client certificate cert file
# certfile =
# Verify HTTPS connections.
# insecure = False
# PEM encoded client certificate key file
# keyfile =
# Name of nova region to use. Useful if keystone manages more than one region.
# region_name =
# Timeout value for http requests
# timeout =
[oslo_concurrency]
# Directory to use for lock files. For security, the specified directory should
# only be writable by the user running the processes that need locking.
# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used,
# a lock path must be set.
lock_path = $state_path/lock
# Enables or disables inter-process locks.
# disable_process_locking = False
[oslo_policy]
# The JSON file that defines policies.
# policy_file = policy.json
# Default rule. Enforced when a requested rule is not found.
# policy_default_rule = default
# Directories where policy configuration files are stored.
# They can be relative to any directory in the search path defined by the
# config_dir option, or absolute paths. The file defined by policy_file
# must exist for these directories to be searched. Missing or empty
# directories are ignored.
# policy_dirs = policy.d
[oslo_messaging_amqp]
#
# From oslo.messaging
#
# Address prefix used when sending to a specific server (string value)
# Deprecated group/name - [amqp1]/server_request_prefix
# server_request_prefix = exclusive
# Address prefix used when broadcasting to all servers (string value)
# Deprecated group/name - [amqp1]/broadcast_prefix
# broadcast_prefix = broadcast
# Address prefix when sending to any server in group (string value)
# Deprecated group/name - [amqp1]/group_request_prefix
# group_request_prefix = unicast
# Name for the AMQP container (string value)
# Deprecated group/name - [amqp1]/container_name
# container_name =
# Timeout for inactive connections (in seconds) (integer value)
# Deprecated group/name - [amqp1]/idle_timeout
# idle_timeout = 0
# Debug: dump AMQP frames to stdout (boolean value)
# Deprecated group/name - [amqp1]/trace
# trace = false
# CA certificate PEM file for verifying server certificate (string value)
# Deprecated group/name - [amqp1]/ssl_ca_file
# ssl_ca_file =
# Identifying certificate PEM file to present to clients (string value)
# Deprecated group/name - [amqp1]/ssl_cert_file
# ssl_cert_file =
# Private key PEM file used to sign cert_file certificate (string value)
# Deprecated group/name - [amqp1]/ssl_key_file
# ssl_key_file =
# Password for decrypting ssl_key_file (if encrypted) (string value)
# Deprecated group/name - [amqp1]/ssl_key_password
# ssl_key_password =
# Accept clients using either SSL or plain TCP (boolean value)
# Deprecated group/name - [amqp1]/allow_insecure_clients
# allow_insecure_clients = false
[oslo_messaging_notifications]
#
# From oslo.messaging
#
# The Drivers(s) to handle sending notifications. Possible values are
# messaging, messagingv2, routing, log, test, noop (multi valued)
# Deprecated group/name - [DEFAULT]/notification_driver
#driver =
# A URL representing the messaging driver to use for notifications. If not set,
# we fall back to the same configuration used for RPC. (string value)
# Deprecated group/name - [DEFAULT]/notification_transport_url
#transport_url = <None>
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# Deprecated group/name - [DEFAULT]/notification_topics
#topics = notifications
[oslo_messaging_qpid]
#
# From oslo.messaging
#
# Use durable queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
# amqp_durable_queues = false
# Auto-delete queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
# amqp_auto_delete = false
# Size of RPC connection pool. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
# rpc_conn_pool_size = 30
# Qpid broker hostname. (string value)
# Deprecated group/name - [DEFAULT]/qpid_hostname
# qpid_hostname = localhost
# Qpid broker port. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_port
# qpid_port = 5672
# Qpid HA cluster host:port pairs. (list value)
# Deprecated group/name - [DEFAULT]/qpid_hosts
# qpid_hosts = $qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
# Deprecated group/name - [DEFAULT]/qpid_username
# qpid_username =
# Password for Qpid connection. (string value)
# Deprecated group/name - [DEFAULT]/qpid_password
# qpid_password =
# Space separated list of SASL mechanisms to use for auth. (string value)
# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms
# qpid_sasl_mechanisms =
# Seconds between connection keepalive heartbeats. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_heartbeat
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'. (string value)
# Deprecated group/name - [DEFAULT]/qpid_protocol
# qpid_protocol = tcp
# Whether to disable the Nagle algorithm. (boolean value)
# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay
# qpid_tcp_nodelay = true
# The number of prefetched messages held by receiver. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity
# qpid_receiver_capacity = 1
# The qpid topology version to use. Version 1 is what was originally used by
# impl_qpid. Version 2 includes some backwards-incompatible changes that allow
# broker federation to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break. (integer value)
# Deprecated group/name - [DEFAULT]/qpid_topology_version
# qpid_topology_version = 1
[oslo_messaging_rabbit]
rabbit_hosts = {{ rabbit_hosts }}
rabbit_userid = {{ rabbit_user }}
rabbit_password = {{ rabbit_password }}
#
# From oslo.messaging
#
# Use durable queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
# amqp_durable_queues = false
# Auto-delete queues in AMQP. (boolean value)
# Deprecated group/name - [DEFAULT]/amqp_auto_delete
# amqp_auto_delete = false
# Size of RPC connection pool. (integer value)
# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size
# rpc_conn_pool_size = 30
# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
# distributions. (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_version
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled). (string value)
# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs
# kombu_ssl_ca_certs =
# How long to wait before reconnecting in response to an AMQP consumer cancel
# notification. (floating point value)
# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay
# kombu_reconnect_delay = 1.0
# The RabbitMQ broker address where a single node is used. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_host
# rabbit_host = localhost
# The RabbitMQ broker port where a single node is used. (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_port
# rabbit_port = 5672
# RabbitMQ HA cluster host:port pairs. (list value)
# Deprecated group/name - [DEFAULT]/rabbit_hosts
# rabbit_hosts = $rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_use_ssl
# rabbit_use_ssl = false
# The RabbitMQ userid. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_userid
# rabbit_userid = guest
# The RabbitMQ password. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_password
# rabbit_password = guest
# The RabbitMQ login method. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_login_method
# rabbit_login_method = AMQPLAIN
# The RabbitMQ virtual host. (string value)
# Deprecated group/name - [DEFAULT]/rabbit_virtual_host
# rabbit_virtual_host = /
# How frequently to retry connecting with RabbitMQ. (integer value)
# rabbit_retry_interval = 1
# How long to backoff for between retries when connecting to RabbitMQ. (integer
# value)
# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff
# rabbit_retry_backoff = 2
# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry
# count). (integer value)
# Deprecated group/name - [DEFAULT]/rabbit_max_retries
# rabbit_max_retries = 0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you
# must wipe the RabbitMQ database. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_ha_queues
# rabbit_ha_queues = false
# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value)
# Deprecated group/name - [DEFAULT]/fake_rabbit
# fake_rabbit = false
[qos]
# Drivers list to use to send the update notification
# notification_drivers = message_queue
[ssl]
#
# From oslo.service.sslutils
#
# CA certificate file to use to verify connecting clients. (string value)
# Deprecated group/name - [DEFAULT]/ssl_ca_file
#ca_file = <None>
# Certificate file to use when starting the server securely. (string value)
# Deprecated group/name - [DEFAULT]/ssl_cert_file
#cert_file = <None>
# Private key file to use when starting the server securely. (string value)
# Deprecated group/name - [DEFAULT]/ssl_key_file
#key_file = <None>
# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
# distributions. (string value)
#version = <None>
# Sets the list of available ciphers. value should be a string in the OpenSSL
# cipher list format. (string value)
#ciphers = <None>
"""
|
"""Define a connection to the SimpliSafe websocket."""
import asyncio
from dataclasses import InitVar, dataclass, field
from datetime import datetime
import logging
from typing import Awaitable, Callable, Optional
from urllib.parse import urlencode
from socketio import AsyncClient
from socketio.exceptions import ConnectionError as ConnError, SocketIOError
from simplipy.entity import EntityTypes
from simplipy.errors import WebsocketError
from simplipy.util.dt import utc_from_timestamp
_LOGGER = logging.getLogger(__name__)

# Secure-websocket endpoint of SimpliSafe's socket.io service:
API_URL_BASE: str = "wss://api.simplisafe.com/socket.io"
# Default number of quiet seconds before the websocket watchdog fires:
DEFAULT_WATCHDOG_TIMEOUT = 300

# String event types surfaced to consumers of this module:
EVENT_ALARM_CANCELED = "alarm_canceled"
EVENT_ALARM_TRIGGERED = "alarm_triggered"
EVENT_ARMED_AWAY = "armed_away"
EVENT_ARMED_AWAY_BY_KEYPAD = "armed_away_by_keypad"
EVENT_ARMED_AWAY_BY_REMOTE = "armed_away_by_remote"
EVENT_ARMED_HOME = "armed_home"
EVENT_AUTOMATIC_TEST = "automatic_test"
EVENT_AWAY_EXIT_DELAY_BY_KEYPAD = "away_exit_delay_by_keypad"
EVENT_AWAY_EXIT_DELAY_BY_REMOTE = "away_exit_delay_by_remote"
EVENT_CAMERA_MOTION_DETECTED = "camera_motion_detected"
EVENT_CONNECTION_LOST = "connection_lost"
EVENT_CONNECTION_RESTORED = "connection_restored"
EVENT_DISARMED_BY_MASTER_PIN = "disarmed_by_master_pin"
EVENT_DISARMED_BY_REMOTE = "disarmed_by_remote"
EVENT_DOORBELL_DETECTED = "doorbell_detected"
EVENT_ENTRY_DETECTED = "entry_detected"
EVENT_HOME_EXIT_DELAY = "home_exit_delay"
EVENT_LOCK_ERROR = "lock_error"
EVENT_LOCK_LOCKED = "lock_locked"
EVENT_LOCK_UNLOCKED = "lock_unlocked"
EVENT_MOTION_DETECTED = "motion_detected"
EVENT_POWER_OUTAGE = "power_outage"
EVENT_POWER_RESTORED = "power_restored"
EVENT_SENSOR_NOT_RESPONDING = "sensor_not_responding"
EVENT_SENSOR_RESTORED = "sensor_restored"

# Map SimpliSafe's numeric event codes (CIDs) to the event types above.
# Codes missing from this table are logged and surfaced with an event_type of
# None (see WebsocketEvent.__post_init__).
EVENT_MAPPING = {
    1110: EVENT_ALARM_TRIGGERED,
    1120: EVENT_ALARM_TRIGGERED,
    1132: EVENT_ALARM_TRIGGERED,
    1134: EVENT_ALARM_TRIGGERED,
    1154: EVENT_ALARM_TRIGGERED,
    1159: EVENT_ALARM_TRIGGERED,
    1162: EVENT_ALARM_TRIGGERED,
    1170: EVENT_CAMERA_MOTION_DETECTED,
    1301: EVENT_POWER_OUTAGE,
    1350: EVENT_CONNECTION_LOST,
    1381: EVENT_SENSOR_NOT_RESPONDING,
    1400: EVENT_DISARMED_BY_MASTER_PIN,
    1406: EVENT_ALARM_CANCELED,
    1407: EVENT_DISARMED_BY_REMOTE,
    1409: EVENT_MOTION_DETECTED,
    1429: EVENT_ENTRY_DETECTED,
    1458: EVENT_DOORBELL_DETECTED,
    1602: EVENT_AUTOMATIC_TEST,
    3301: EVENT_POWER_RESTORED,
    3350: EVENT_CONNECTION_RESTORED,
    3381: EVENT_SENSOR_RESTORED,
    3401: EVENT_ARMED_AWAY_BY_KEYPAD,
    3407: EVENT_ARMED_AWAY_BY_REMOTE,
    3441: EVENT_ARMED_HOME,
    3481: EVENT_ARMED_AWAY,
    3487: EVENT_ARMED_AWAY,
    3491: EVENT_ARMED_HOME,
    9401: EVENT_AWAY_EXIT_DELAY_BY_KEYPAD,
    9407: EVENT_AWAY_EXIT_DELAY_BY_REMOTE,
    9441: EVENT_HOME_EXIT_DELAY,
    9700: EVENT_LOCK_UNLOCKED,
    9701: EVENT_LOCK_LOCKED,
    9703: EVENT_LOCK_ERROR,
}
@dataclass(frozen=True)  # pylint: disable=too-many-instance-attributes
class WebsocketEvent:
    """Define a representation of a websocket event message.

    ``event_cid`` is the raw SimpliSafe numeric event code; it is consumed
    during initialization and translated into ``event_type`` via
    ``EVENT_MAPPING`` (``None`` when the code is unknown).
    """

    event_cid: InitVar[int]  # raw numeric code from the payload (init-only)
    info: str  # human-readable event description from the payload
    system_id: int
    timestamp: datetime  # provided as an epoch value; normalized in __post_init__
    event_type: Optional[str] = field(init=False)  # derived from event_cid
    changed_by: Optional[str] = None  # PIN name associated with the event, if any
    sensor_name: Optional[str] = None
    sensor_serial: Optional[str] = None
    sensor_type: Optional[EntityTypes] = None  # raw value coerced to EntityTypes below

    def __post_init__(self, event_cid):
        """Run post-init initialization.

        The dataclass is frozen, so derived fields must be written with
        ``object.__setattr__``.
        """
        if event_cid in EVENT_MAPPING:
            object.__setattr__(self, "event_type", EVENT_MAPPING[event_cid])
        else:
            _LOGGER.warning(
                'Encountered unknown websocket event type: %s ("%s"). Please report it '
                "at https://github.com/bachya/simplisafe-python/issues.",
                event_cid,
                self.info,
            )
            object.__setattr__(self, "event_type", None)

        # The payload carries an epoch timestamp; normalize it to a UTC datetime:
        object.__setattr__(self, "timestamp", utc_from_timestamp(self.timestamp))

        if self.sensor_type is not None:
            try:
                object.__setattr__(self, "sensor_type", EntityTypes(self.sensor_type))
            except ValueError:
                # BUGFIX: the original message glued "at" to the URL ("athttps://...")
                # and pointed at the home-assistant repo instead of this library's
                # issue tracker (compare the sibling warning above).
                _LOGGER.warning(
                    'Encountered unknown entity type: %s ("%s"). Please report it at '
                    "https://github.com/bachya/simplisafe-python/issues.",
                    self.sensor_type,
                    self.info,
                )
                object.__setattr__(self, "sensor_type", None)
def websocket_event_from_raw_data(event: dict):
    """Translate a raw websocket payload dictionary into a WebsocketEvent."""
    optional_fields = {
        "changed_by": event["pinName"],
        "sensor_name": event["sensorName"],
        "sensor_serial": event["sensorSerial"],
        "sensor_type": event["sensorType"],
    }
    return WebsocketEvent(
        event["eventCid"],
        event["info"],
        event["sid"],
        event["eventTimestamp"],
        **optional_fields,
    )
class WebsocketWatchdog:  # pylint: disable=too-few-public-methods
    """Watch over the websocket connection and recover it when it goes quiet.

    Each call to :meth:`trigger` pushes the expiration out by
    ``timeout_seconds``; if no trigger arrives in time, ``action`` is awaited.
    """

    def __init__(
        self,
        action: Callable[..., Awaitable],
        *,
        timeout_seconds: int = DEFAULT_WATCHDOG_TIMEOUT,
    ):
        """Initialize.

        :param action: coroutine function to invoke when the watchdog expires
        :param timeout_seconds: seconds of silence tolerated before firing
        """
        self._action: Callable[..., Awaitable] = action
        self._loop = asyncio.get_event_loop()
        self._timeout: int = timeout_seconds
        self._timer_task: Optional[asyncio.TimerHandle] = None

    def cancel(self):
        """Cancel the watchdog."""
        if self._timer_task:
            self._timer_task.cancel()
            self._timer_task = None

    async def on_expire(self):
        """Log and act when the watchdog expires."""
        _LOGGER.info("Watchdog expired – calling %s", self._action.__name__)
        await self._action()

    async def trigger(self):
        """Trigger the watchdog."""
        _LOGGER.info("Watchdog triggered – sleeping for %s seconds", self._timeout)
        # Re-arm: drop any pending timer, then schedule a fresh expiration.
        self.cancel()
        self._timer_task = self._loop.call_later(
            self._timeout, lambda: asyncio.create_task(self.on_expire())
        )
class Websocket:
    """A websocket connection to the SimpliSafe cloud.

    Note that this class shouldn't be instantiated directly; it will be instantiated as
    appropriate via :meth:`simplipy.API.login_via_credentials` or
    :meth:`simplipy.API.login_via_token`.

    :param access_token: A SimpliSafe access token
    :type access_token: ``str``
    :param user_id: A SimpliSafe user ID
    :type user_id: ``int``
    """

    def __init__(self) -> None:
        """Initialize."""
        self._async_disconnect_handler: Optional[Callable[..., Awaitable]] = None
        self._sio: AsyncClient = AsyncClient()
        # BUGFIX: the watchdog was previously created with timeout_seconds=15,
        # which tore down and reconnected the socket after every 15 quiet
        # seconds. Fall back to the module-level DEFAULT_WATCHDOG_TIMEOUT so
        # only a genuinely silent connection is recycled.
        self._watchdog: WebsocketWatchdog = WebsocketWatchdog(self.async_reconnect)

        # Set by async_init():
        self._access_token: Optional[str] = None
        self._namespace: Optional[str] = None

    async def async_init(
        self, access_token: str, user_id: Optional[int] = None
    ) -> None:
        """Set the user ID and generate the namespace.

        :param access_token: a (possibly refreshed) SimpliSafe access token
        :param user_id: a SimpliSafe user ID (only used on the first call)
        """
        # The namespace is derived once from the user ID; later calls merely
        # swap in a fresh access token.
        if not self._namespace:
            self._namespace = f"/v1/user/{user_id}"
        self._access_token = access_token

        # If the websocket is connected, reconnect it so the new token is used:
        if self._sio.connected:
            await self.async_reconnect()

    async def async_connect(self) -> None:
        """Connect to the socket.

        :raises WebsocketError: when the underlying socket.io client fails
        """
        # Credentials travel in the query string of the connection URL:
        params = {"ns": self._namespace, "accessToken": self._access_token}
        try:
            await self._sio.connect(
                f"{API_URL_BASE}?{urlencode(params)}",
                namespaces=[self._namespace],
                transports=["websocket"],
            )
        except (ConnError, SocketIOError) as err:
            # Normalize library errors into this package's exception type:
            raise WebsocketError(err) from None

    async def async_disconnect(self) -> None:
        """Disconnect from the socket."""
        await self._sio.disconnect()
        if self._async_disconnect_handler:
            await self._async_disconnect_handler()
            self._async_disconnect_handler = None

    def async_on_connect(self, target: Callable[..., Awaitable]) -> None:
        """Define a coroutine to be called when connecting.

        :param target: A coroutine
        :type target: ``Callable[..., Awaitable]``
        """

        async def _async_on_connect():
            """Act when connection occurs."""
            # A successful connection counts as websocket activity:
            await self._watchdog.trigger()
            await target()

        self._sio.on("connect", _async_on_connect)

    def on_connect(self, target: Callable) -> None:
        """Define a synchronous method to be called when connecting.

        :param target: A synchronous function
        :type target: ``Callable``
        """

        async def _on_connect():
            """Act when connection occurs."""
            await self._watchdog.trigger()
            target()

        self._sio.on("connect", _on_connect)

    def async_on_disconnect(self, target: Callable[..., Awaitable]) -> None:
        """Define a coroutine to be called when disconnecting.

        :param target: A coroutine
        :type target: ``Callable[..., Awaitable]``
        """

        async def _async_on_disconnect():
            """Act when disconnection occurs."""
            self._watchdog.cancel()
            await target()

        self._async_disconnect_handler = _async_on_disconnect

    def on_disconnect(self, target: Callable) -> None:
        """Define a synchronous method to be called when disconnecting.

        :param target: A synchronous function
        :type target: ``Callable``
        """

        async def _async_on_disconnect():
            """Act when disconnection occurs."""
            self._watchdog.cancel()
            target()

        self._async_disconnect_handler = _async_on_disconnect

    def async_on_event(self, target: Callable[..., Awaitable]) -> None:  # noqa: D202
        """Define a coroutine to be called when an event is received.

        The coroutine will have a ``data`` parameter that contains the raw data
        from the event.

        :param target: A coroutine
        :type target: ``Callable[..., Awaitable]``
        """

        async def _async_on_event(event_data: dict):
            """Act on the Message object."""
            await self._watchdog.trigger()
            message = websocket_event_from_raw_data(event_data)
            await target(message)

        self._sio.on("event", _async_on_event, namespace=self._namespace)

    def on_event(self, target: Callable) -> None:  # noqa: D202
        """Define a synchronous method to be called when an event is received.

        The method will have a ``data`` parameter that contains the raw data
        from the event.

        :param target: A synchronous function
        :type target: ``Callable``
        """

        async def _async_on_event(event_data: dict):
            """Act on the Message object."""
            await self._watchdog.trigger()
            message = websocket_event_from_raw_data(event_data)
            target(message)

        self._sio.on("event", _async_on_event, namespace=self._namespace)

    async def async_reconnect(self) -> None:
        """Reconnect the websocket connection."""
        await self._sio.disconnect()
        # Give the server a moment to fully drop the old session:
        await asyncio.sleep(1)
        await self.async_connect()
Fix watchdog timeout bug (#137)
"""Define a connection to the SimpliSafe websocket."""
import asyncio
from dataclasses import InitVar, dataclass, field
from datetime import datetime
import logging
from typing import Awaitable, Callable, Optional
from urllib.parse import urlencode
from socketio import AsyncClient
from socketio.exceptions import ConnectionError as ConnError, SocketIOError
from simplipy.entity import EntityTypes
from simplipy.errors import WebsocketError
from simplipy.util.dt import utc_from_timestamp
_LOGGER = logging.getLogger(__name__)

# Secure-websocket endpoint of SimpliSafe's socket.io service:
API_URL_BASE: str = "wss://api.simplisafe.com/socket.io"
# Default number of quiet seconds before the websocket watchdog fires:
DEFAULT_WATCHDOG_TIMEOUT = 900

# String event types surfaced to consumers of this module:
EVENT_ALARM_CANCELED = "alarm_canceled"
EVENT_ALARM_TRIGGERED = "alarm_triggered"
EVENT_ARMED_AWAY = "armed_away"
EVENT_ARMED_AWAY_BY_KEYPAD = "armed_away_by_keypad"
EVENT_ARMED_AWAY_BY_REMOTE = "armed_away_by_remote"
EVENT_ARMED_HOME = "armed_home"
EVENT_AUTOMATIC_TEST = "automatic_test"
EVENT_AWAY_EXIT_DELAY_BY_KEYPAD = "away_exit_delay_by_keypad"
EVENT_AWAY_EXIT_DELAY_BY_REMOTE = "away_exit_delay_by_remote"
EVENT_CAMERA_MOTION_DETECTED = "camera_motion_detected"
EVENT_CONNECTION_LOST = "connection_lost"
EVENT_CONNECTION_RESTORED = "connection_restored"
EVENT_DISARMED_BY_MASTER_PIN = "disarmed_by_master_pin"
EVENT_DISARMED_BY_REMOTE = "disarmed_by_remote"
EVENT_DOORBELL_DETECTED = "doorbell_detected"
EVENT_ENTRY_DETECTED = "entry_detected"
EVENT_HOME_EXIT_DELAY = "home_exit_delay"
EVENT_LOCK_ERROR = "lock_error"
EVENT_LOCK_LOCKED = "lock_locked"
EVENT_LOCK_UNLOCKED = "lock_unlocked"
EVENT_MOTION_DETECTED = "motion_detected"
EVENT_POWER_OUTAGE = "power_outage"
EVENT_POWER_RESTORED = "power_restored"
EVENT_SENSOR_NOT_RESPONDING = "sensor_not_responding"
EVENT_SENSOR_RESTORED = "sensor_restored"

# Map SimpliSafe's numeric event codes (CIDs) to the event types above.
# Codes missing from this table are logged and surfaced with an event_type of
# None (see WebsocketEvent.__post_init__).
EVENT_MAPPING = {
    1110: EVENT_ALARM_TRIGGERED,
    1120: EVENT_ALARM_TRIGGERED,
    1132: EVENT_ALARM_TRIGGERED,
    1134: EVENT_ALARM_TRIGGERED,
    1154: EVENT_ALARM_TRIGGERED,
    1159: EVENT_ALARM_TRIGGERED,
    1162: EVENT_ALARM_TRIGGERED,
    1170: EVENT_CAMERA_MOTION_DETECTED,
    1301: EVENT_POWER_OUTAGE,
    1350: EVENT_CONNECTION_LOST,
    1381: EVENT_SENSOR_NOT_RESPONDING,
    1400: EVENT_DISARMED_BY_MASTER_PIN,
    1406: EVENT_ALARM_CANCELED,
    1407: EVENT_DISARMED_BY_REMOTE,
    1409: EVENT_MOTION_DETECTED,
    1429: EVENT_ENTRY_DETECTED,
    1458: EVENT_DOORBELL_DETECTED,
    1602: EVENT_AUTOMATIC_TEST,
    3301: EVENT_POWER_RESTORED,
    3350: EVENT_CONNECTION_RESTORED,
    3381: EVENT_SENSOR_RESTORED,
    3401: EVENT_ARMED_AWAY_BY_KEYPAD,
    3407: EVENT_ARMED_AWAY_BY_REMOTE,
    3441: EVENT_ARMED_HOME,
    3481: EVENT_ARMED_AWAY,
    3487: EVENT_ARMED_AWAY,
    3491: EVENT_ARMED_HOME,
    9401: EVENT_AWAY_EXIT_DELAY_BY_KEYPAD,
    9407: EVENT_AWAY_EXIT_DELAY_BY_REMOTE,
    9441: EVENT_HOME_EXIT_DELAY,
    9700: EVENT_LOCK_UNLOCKED,
    9701: EVENT_LOCK_LOCKED,
    9703: EVENT_LOCK_ERROR,
}
@dataclass(frozen=True)  # pylint: disable=too-many-instance-attributes
class WebsocketEvent:
    """Define a representation of a websocket event message.

    ``event_cid`` is the raw SimpliSafe numeric event code; it is consumed
    during initialization and translated into ``event_type`` via
    ``EVENT_MAPPING`` (``None`` when the code is unknown).
    """

    event_cid: InitVar[int]  # raw numeric code from the payload (init-only)
    info: str  # human-readable event description from the payload
    system_id: int
    timestamp: datetime  # provided as an epoch value; normalized in __post_init__
    event_type: Optional[str] = field(init=False)  # derived from event_cid
    changed_by: Optional[str] = None  # PIN name associated with the event, if any
    sensor_name: Optional[str] = None
    sensor_serial: Optional[str] = None
    sensor_type: Optional[EntityTypes] = None  # raw value coerced to EntityTypes below

    def __post_init__(self, event_cid):
        """Run post-init initialization.

        The dataclass is frozen, so derived fields must be written with
        ``object.__setattr__``.
        """
        if event_cid in EVENT_MAPPING:
            object.__setattr__(self, "event_type", EVENT_MAPPING[event_cid])
        else:
            _LOGGER.warning(
                'Encountered unknown websocket event type: %s ("%s"). Please report it '
                "at https://github.com/bachya/simplisafe-python/issues.",
                event_cid,
                self.info,
            )
            object.__setattr__(self, "event_type", None)

        # The payload carries an epoch timestamp; normalize it to a UTC datetime:
        object.__setattr__(self, "timestamp", utc_from_timestamp(self.timestamp))

        if self.sensor_type is not None:
            try:
                object.__setattr__(self, "sensor_type", EntityTypes(self.sensor_type))
            except ValueError:
                # BUGFIX: the original message glued "at" to the URL ("athttps://...")
                # and pointed at the home-assistant repo instead of this library's
                # issue tracker (compare the sibling warning above).
                _LOGGER.warning(
                    'Encountered unknown entity type: %s ("%s"). Please report it at '
                    "https://github.com/bachya/simplisafe-python/issues.",
                    self.sensor_type,
                    self.info,
                )
                object.__setattr__(self, "sensor_type", None)
def websocket_event_from_raw_data(event: dict):
    """Translate a raw websocket payload dictionary into a WebsocketEvent."""
    optional_fields = {
        "changed_by": event["pinName"],
        "sensor_name": event["sensorName"],
        "sensor_serial": event["sensorSerial"],
        "sensor_type": event["sensorType"],
    }
    return WebsocketEvent(
        event["eventCid"],
        event["info"],
        event["sid"],
        event["eventTimestamp"],
        **optional_fields,
    )
class WebsocketWatchdog:  # pylint: disable=too-few-public-methods
    """Watch over the websocket connection and recover it when it goes quiet.

    Each call to :meth:`trigger` pushes the expiration out by
    ``timeout_seconds``; if no trigger arrives in time, ``action`` is awaited.
    """

    def __init__(
        self,
        action: Callable[..., Awaitable],
        *,
        timeout_seconds: int = DEFAULT_WATCHDOG_TIMEOUT,
    ):
        """Initialize.

        :param action: coroutine function to invoke when the watchdog expires
        :param timeout_seconds: seconds of silence tolerated before firing
        """
        self._action: Callable[..., Awaitable] = action
        self._loop = asyncio.get_event_loop()
        self._timeout: int = timeout_seconds
        self._timer_task: Optional[asyncio.TimerHandle] = None

    def cancel(self):
        """Cancel the watchdog."""
        if self._timer_task:
            self._timer_task.cancel()
            self._timer_task = None

    async def on_expire(self):
        """Log and act when the watchdog expires."""
        _LOGGER.info("Watchdog expired – calling %s", self._action.__name__)
        await self._action()

    async def trigger(self):
        """Trigger the watchdog."""
        _LOGGER.info("Watchdog triggered – sleeping for %s seconds", self._timeout)
        # Re-arm: drop any pending timer, then schedule a fresh expiration.
        self.cancel()
        self._timer_task = self._loop.call_later(
            self._timeout, lambda: asyncio.create_task(self.on_expire())
        )
class Websocket:
"""A websocket connection to the SimpliSafe cloud.
Note that this class shouldn't be instantiated directly; it will be instantiated as
appropriate via :meth:`simplipy.API.login_via_credentials` or
:meth:`simplipy.API.login_via_token`.
:param access_token: A SimpliSafe access token
:type access_token: ``str``
:param user_id: A SimpliSafe user ID
:type user_id: ``int``
"""
def __init__(self) -> None:
"""Initialize."""
self._async_disconnect_handler: Optional[Callable[..., Awaitable]] = None
self._sio: AsyncClient = AsyncClient()
self._watchdog: WebsocketWatchdog = WebsocketWatchdog(self.async_reconnect)
# Set by async_init():
self._access_token: Optional[str] = None
self._namespace: Optional[str] = None
async def async_init(
self, access_token: str, user_id: Optional[int] = None
) -> None:
"""Set the user ID and generate the namespace."""
if not self._namespace:
self._namespace = f"/v1/user/{user_id}"
self._access_token = access_token
# If the websocket is connected, reconnect it:
if self._sio.connected:
await self.async_reconnect()
async def async_connect(self) -> None:
"""Connect to the socket."""
params = {"ns": self._namespace, "accessToken": self._access_token}
try:
await self._sio.connect(
f"{API_URL_BASE}?{urlencode(params)}",
namespaces=[self._namespace],
transports=["websocket"],
)
except (ConnError, SocketIOError) as err:
raise WebsocketError(err) from None
async def async_disconnect(self) -> None:
"""Disconnect from the socket."""
await self._sio.disconnect()
if self._async_disconnect_handler:
await self._async_disconnect_handler()
self._async_disconnect_handler = None
def async_on_connect(self, target: Callable[..., Awaitable]) -> None:
"""Define a coroutine to be called when connecting.
:param target: A coroutine
:type target: ``Callable[..., Awaitable]``
"""
async def _async_on_connect():
"""Act when connection occurs."""
await self._watchdog.trigger()
await target()
self._sio.on("connect", _async_on_connect)
def on_connect(self, target: Callable) -> None:
"""Define a synchronous method to be called when connecting.
:param target: A synchronous function
:type target: ``Callable``
"""
async def _on_connect():
"""Act when connection occurs."""
await self._watchdog.trigger()
target()
self._sio.on("connect", _on_connect)
def async_on_disconnect(self, target: Callable[..., Awaitable]) -> None:
"""Define a coroutine to be called when disconnecting.
:param target: A coroutine
:type target: ``Callable[..., Awaitable]``
"""
async def _async_on_disconnect():
"""Act when disconnection occurs."""
self._watchdog.cancel()
await target()
self._async_disconnect_handler = _async_on_disconnect
def on_disconnect(self, target: Callable) -> None:
"""Define a synchronous method to be called when disconnecting.
:param target: A synchronous function
:type target: ``Callable``
"""
async def _async_on_disconnect():
"""Act when disconnection occurs."""
self._watchdog.cancel()
target()
self._async_disconnect_handler = _async_on_disconnect
def async_on_event(self, target: Callable[..., Awaitable]) -> None: # noqa: D202
"""Define a coroutine to be called an event is received.
The couroutine will have a ``data`` parameter that contains the raw data from
the event.
:param target: A coroutine
:type target: ``Callable[..., Awaitable]``
"""
async def _async_on_event(event_data: dict):
"""Act on the Message object."""
await self._watchdog.trigger()
message = websocket_event_from_raw_data(event_data)
await target(message)
self._sio.on("event", _async_on_event, namespace=self._namespace)
def on_event(self, target: Callable) -> None: # noqa: D202
"""Define a synchronous method to be called when an event is received.
The method will have a ``data`` parameter that contains the raw data from the
event.
:param target: A synchronous function
:type target: ``Callable``
"""
async def _async_on_event(event_data: dict):
"""Act on the Message object."""
await self._watchdog.trigger()
message = websocket_event_from_raw_data(event_data)
target(message)
self._sio.on("event", _async_on_event, namespace=self._namespace)
    async def async_reconnect(self) -> None:
        """Reconnect the websocket connection.

        Disconnects, pauses briefly, then establishes a fresh connection.
        """
        await self._sio.disconnect()
        # Brief pause between disconnect and reconnect -- presumably to let
        # the old session fully tear down before connecting again (TODO confirm).
        await asyncio.sleep(1)
        await self.async_connect()
|
import sys
import getopt
import nltk
import math
import xml.etree.ElementTree as ET
import cPickle as pickle
from nltk.corpus import wordnet as wn
from nltk.tag import pos_tag
# Boilerplate lead-in stripped from query descriptions before tokenization.
QUERY_DESCRIPTION_PREFIX = "Relevant documents will describe"
# Weight for same-zone matches (query title vs doc title, description vs abstract).
ZONE_WEIGHT_SAME = 0.7
# Weight for cross-zone matches (query title vs doc abstract and vice versa).
ZONE_WEIGHT_CROSS = 0.3
# Number of top-ranked documents whose IPC groups receive a score multiplier.
TOP_N_GROUP = 4
# Per-rank increment used to build the IPC-group score multiplier.
INCREMENT_MULTIPLIER = 0.8
# Number of top results fed back as new queries (pseudo-relevance feedback).
TOP_N_RESULT = 2
# Score threshold for the (disabled) result-pruning experiment below.
PRUNE_THRESHOLD = 14
"""
Loads the postings file by byte pointer linked with the given term in dictionary.
The returned objects either are regular postings lists with a list of doc_id, weighted tf pairs,
or special entries that contains different objects which are:
{
"TITLE DOC LENGTH TABLE" : dict<int:float>, a dictionary mapping document id and document length for title
"ABSTRACT DOC LENGTH TABLE" : dict<int:float>, a dictionary mapping document id and document length for abstract
"DOC ID MAP" : dict<int, str>, a dictionary that maps enumerated doc id to the actual doc id
"IPC GROUP DICTIONARY" : dict<int:str> a dictionary that maps enumerated doc id to IPC Group ID
"DIRECTORY_PATH" : str, directory path of corpus
}
Pre-condition: term in dictionary == True
get_postings_list_by_term(str, dict<str:int>, file) -> [(int, float), ...]
"""
def load_postings_by_term(term, dictionary, postings_reader):
    """Seek to *term*'s byte offset in the postings file and unpickle the entry.

    Pre-condition: ``term in dictionary``.  ``dictionary[term][1]`` is the
    byte offset of the pickled object inside *postings_reader*.
    """
    offset = dictionary[term][1]
    postings_reader.seek(offset)
    return pickle.load(postings_reader)
"""
Given raw query is tokenized and each term's frequency is calculated.
Returns a dictionary that maps each term with its term frequency.
The tokenization involves case-folding and stemming with PorterStemmer object.
Any words that contains non-ascii chars are ignored.
tokenize_query -> dict<term:term frequency, ...>
"""
def tokenize_query(raw_query):
    """Tokenize *raw_query* into a dict mapping stemmed term -> frequency.

    Tokens are case-folded and stemmed with a PorterStemmer; any token
    containing non-ASCII characters is skipped entirely.

    tokenize_query(str) -> dict<term: term frequency, ...>
    """
    temp = []
    tokenized_query = {}
    stemmer = nltk.stem.porter.PorterStemmer()
    # FIX: the string below used to be prefixed with a stray unary `+`
    # (`+ '''...'''`), which raises TypeError the first time this function
    # runs; the `+` is removed so the block is a harmless bare string.
    ''' # for nouns only synonyms
    The approach with this commented code yields a lower score however we thought
    it is still interesting enough to keep the algorithm commented within the code.
    This is making use of synonym to do a query expansion provided in NLTK Synset.
    We specifically pick the nouns in synsets because we believe that nouns will
    help us guess the most relevant meanings for a patent information verbs or
    adjectives do.
    #tag what type of word it is and check for nouns later
    tagged_query = pos_tag(nltk.word_tokenize(raw_query))
    for word, pos in tagged_query:
        tempList = []
        temp.append(str(stemmer.stem(word.lower())))
        #check if word is a type of noun, if yes, find syn as query expansion
        #for information on tags -> nltk.help.upenn_tagset()
        if (pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS'):
            for synset in wn.synsets(word):
                for lemma in synset.lemmas():
                    tempList.append(lemma)
            tempList = list(set(tempList))
            for syn in tempList:
                temp.append(str(stemmer.stem(syn.name().lower())))
    '''
    for word in nltk.word_tokenize(raw_query):
        # Ignoring any word that contains non-ascii characters.
        # In Python 2, str.decode('ascii') raises UnicodeDecodeError for
        # non-ascii byte strings while unicode.decode raises
        # UnicodeEncodeError, so both must be caught for the skip to work.
        try:
            word.decode('ascii')
        except (UnicodeDecodeError, UnicodeEncodeError):
            continue
        temp.append(str(stemmer.stem(word.lower())))
    temp.sort()
    for term in temp:
        if term in tokenized_query:
            tokenized_query[term] += 1
        else:
            tokenized_query[term] = 1
    return tokenized_query
def vector_length(vector):
    """Return the Euclidean (L2) norm of an iterable of (term, weight) pairs.

    FIX: the exponent was written as ``1 / 2``, which is integer division
    (== 0) under Python 2, so ``pow(temp, 0)`` made every vector length 1
    and broke cosine normalisation.  Use the float literal 0.5 instead.
    """
    temp = 0
    for term, tf_idf_w in vector:
        temp += pow(tf_idf_w, 2)
    return pow(temp, 0.5)
def perform_search(query_title, query_description, title_dictionary, abstract_dictionary, postings_reader):
    """Score all documents against the query's title and description zones.

    Computes lnc.ltc cosine similarity four times -- each query zone (title,
    description) against each document zone (title, abstract) -- and sums the
    contributions into one per-document score.  Same-zone pairings are
    weighted by ZONE_WEIGHT_SAME, cross-zone ones by ZONE_WEIGHT_CROSS, and a
    document already matched in the same-zone pass is skipped in the matching
    cross-zone pass so it is not counted twice.

    Returns dict<doc_id: score>, or '' when the query title is empty.
    """
    # If title is missing, return empty string
    if query_title.strip() == '':
        return ''
    # If description is missing, still query but description is None
    # NOTE(review): tokenize_query(None) would hand None to
    # nltk.word_tokenize -- presumably descriptions are never empty in this
    # corpus; confirm before relying on this branch.
    if query_description.strip() == '':
        query_description = None
    score = {}
    # tf-idf weight of each query zone, computed separately against each
    # document zone's document-frequency statistics (title and abstract keep
    # independent idf values).
    query_title_weighted_tf_idf_table_for_title = {}
    query_title_weighted_tf_idf_table_for_abstract = {}
    query_description_weighted_tf_idf_table_for_title = {}
    query_description_weighted_tf_idf_table_for_abstract = {}
    title_doc_length_table = load_postings_by_term("TITLE DOC LENGTH TABLE", title_dictionary, postings_reader)
    abstract_doc_length_table = load_postings_by_term("ABSTRACT DOC LENGTH TABLE", abstract_dictionary, postings_reader)
    query_title_tokens = tokenize_query(query_title)
    query_description_tokens = tokenize_query(query_description)
    # calculating each term's weighted tf-idf in query
    for title_term, qt_frequency in query_title_tokens.iteritems():
        # only calculating score if term is indexed
        tf_w = 1 + math.log(qt_frequency, 10)
        if title_term in title_dictionary:
            idf_in_title = math.log(len(title_doc_length_table) / (title_dictionary[title_term][0] * 1.0), 10)
            query_title_weighted_tf_idf_table_for_title[title_term] = tf_w * idf_in_title
        if title_term in abstract_dictionary:
            idf_in_abstract = math.log(len(abstract_doc_length_table) / (abstract_dictionary[title_term][0] * 1.0), 10)
            query_title_weighted_tf_idf_table_for_abstract[title_term] = tf_w * idf_in_abstract
    for description_term, qd_frequency in query_description_tokens.iteritems():
        # only calculating score if term is indexed
        tf_w = 1 + math.log(qd_frequency, 10)
        if description_term in title_dictionary:
            idf_in_title = math.log(len(title_doc_length_table) / (title_dictionary[description_term][0] * 1.0), 10)
            query_description_weighted_tf_idf_table_for_title[description_term] = tf_w * idf_in_title
        if description_term in abstract_dictionary:
            idf_in_abstract = math.log(
                len(abstract_doc_length_table) / (abstract_dictionary[description_term][0] * 1.0), 10)
            query_description_weighted_tf_idf_table_for_abstract[description_term] = tf_w * idf_in_abstract
    # calculating query length (L2 norm of each query-zone vector, used for
    # cosine normalisation below)
    query_title_length_for_title = vector_length(query_title_weighted_tf_idf_table_for_title.iteritems())
    query_title_length_for_abstract = vector_length(query_title_weighted_tf_idf_table_for_abstract.iteritems())
    query_description_length_for_title = vector_length(query_description_weighted_tf_idf_table_for_title.iteritems())
    query_description_length_for_abstract = vector_length(
        query_description_weighted_tf_idf_table_for_abstract.iteritems())
    # calculating cosine angle between two vectors
    # between title query and docs' titles (same-zone weight)
    title_to_title_matched_ids = set()
    for term, tf_idf_w in query_title_weighted_tf_idf_table_for_title.iteritems():
        title_postings = load_postings_by_term(term, title_dictionary, postings_reader)
        for doc_id, d_tf_w in title_postings:
            if doc_id not in score:
                score[doc_id] = 0
            score[doc_id] += d_tf_w * tf_idf_w / (
                query_title_length_for_title * title_doc_length_table[doc_id]) * ZONE_WEIGHT_SAME
            title_to_title_matched_ids.add(doc_id)
    # between title query and docs' abstracts (cross-zone weight); documents
    # already matched title-to-title are skipped to avoid double counting
    for term, tf_idf_w in query_title_weighted_tf_idf_table_for_abstract.iteritems():
        abstract_postings = load_postings_by_term(term, abstract_dictionary, postings_reader)
        for doc_id, d_tf_w in abstract_postings:
            if doc_id in title_to_title_matched_ids:
                continue
            if doc_id not in score:
                score[doc_id] = 0
            score[doc_id] += d_tf_w * tf_idf_w / (
                query_title_length_for_abstract * abstract_doc_length_table[doc_id]) * ZONE_WEIGHT_CROSS
    # between query description and docs' abstracts (same-zone weight)
    description_to_abstracts_matched_ids = set()
    for term, tf_idf_w in query_description_weighted_tf_idf_table_for_abstract.iteritems():
        abstract_postings = load_postings_by_term(term, abstract_dictionary, postings_reader)
        for doc_id, d_tf_w in abstract_postings:
            if doc_id not in score:
                score[doc_id] = 0
            score[doc_id] += d_tf_w * tf_idf_w / (
                query_description_length_for_abstract * abstract_doc_length_table[doc_id]) * ZONE_WEIGHT_SAME
            description_to_abstracts_matched_ids.add(doc_id)
    # between query description and docs' titles (cross-zone weight);
    # documents already matched description-to-abstract are skipped
    for term, tf_idf_w in query_description_weighted_tf_idf_table_for_title.iteritems():
        title_postings = load_postings_by_term(term, title_dictionary, postings_reader)
        for doc_id, d_tf_w in title_postings:
            if doc_id in description_to_abstracts_matched_ids:
                continue
            if doc_id not in score:
                score[doc_id] = 0
            score[doc_id] += d_tf_w * tf_idf_w / (
                query_description_length_for_title * title_doc_length_table[doc_id]) * ZONE_WEIGHT_CROSS
    return score
"""
Processes the raw string query and retrieves at most 10 documents by its ID for the query
that are the most relevant to the query. The returned string is a space-delimitered doc IDs
in the order of relevance from highest to the lowest.
The relevance is determined by the accumulated score of each document's cosine similarity
between its document vector and the query vector. The ranking scheme for the algorithm is
lnc.ltc in SMART notation.
search(dict<str:int>, file, str) -> str
"""
def search_query(title_dictionary, abstract_dictionary, postings_reader, query_file):
    """Run the XML query in *query_file* and return ranked document IDs.

    Pipeline:
      1. Score all documents against the query title/description
         (lnc.ltc cosine similarity via perform_search).
      2. Pseudo-relevance feedback: re-query with the title/abstract of the
         TOP_N_RESULT best documents and add those scores in.
      3. Boost every document whose IPC group matches one of the groups of
         the TOP_N_GROUP highest-ranked documents.

    Returns a space-delimited string of document IDs, best match first.
    """
    query = ET.parse(query_file).getroot()
    query_title = query.find('title').text
    query_description = query.find('description').text.strip()
    # Strip the boilerplate lead-in so it does not pollute the term vector.
    if query_description[:len(QUERY_DESCRIPTION_PREFIX)] == QUERY_DESCRIPTION_PREFIX:
        query_description = query_description[len(QUERY_DESCRIPTION_PREFIX):]
    score = perform_search(query_title, query_description, title_dictionary, abstract_dictionary, postings_reader)
    # sorting by score from most to the least
    result = score.items()
    result.sort(key=lambda docId_score_pair: docId_score_pair[1], reverse=True)
    doc_id_map = load_postings_by_term("DOC ID MAP", title_dictionary, postings_reader)
    directory = title_dictionary["DIRECTORY_PATH"]
    # Pseudo-relevance feedback: treat each of the current top N documents as
    # a fresh query and accumulate its scores into the main score table.
    for num in range(0, TOP_N_RESULT):
        if num >= len(result):
            break
        else:
            content = ET.parse(directory + doc_id_map[result[num][0]] + ".xml").getroot()
            title = abstract = None
            for child in content:
                name = child.get("name")
                if name == "Title":
                    title = child.text
                elif name == "Abstract":
                    abstract = child.text
            score_for_new_query = perform_search(title, abstract, title_dictionary, abstract_dictionary, postings_reader)
            for doc_id in score_for_new_query:
                if doc_id in score:
                    score[doc_id] += score_for_new_query[doc_id]
                else:
                    score[doc_id] = score_for_new_query[doc_id]
    # sorting by score from most to the least
    result = score.items()
    result.sort(key=lambda docId_score_pair: docId_score_pair[1], reverse=True)
    # Magnifying top N groups with a multiplier.
    # Assumption: a patent in the same IPC group as a current top-N result is
    # more likely to be relevant than one outside those groups.
    IPC_group_dictionary = load_postings_by_term("IPC GROUP DICTIONARY", title_dictionary, postings_reader)
    target_id_multiplier = {}  # contains {group: multiplier to be applied}
    multiplied_results = []
    counter = 0
    for num in range(TOP_N_GROUP, 0, -1):
        # check if there are any more items to fit N groups
        if counter >= len(result):
            break
        else:
            # resolve group of this top N ranked item
            target_doc_id = result[counter][0]
            counter += 1
            target_group = IPC_group_dictionary[target_doc_id]
            # check if group was already recorded before
            if target_group not in target_id_multiplier:
                target_id_multiplier[target_group] = 1 + (num * INCREMENT_MULTIPLIER)
            else:
                # repeated group, skip
                # NOTE(review): this increment has no effect -- `num` is
                # rebound by the for loop on the next iteration, so a
                # repeated group silently consumes a multiplier slot.
                # Confirm whether that is intended.
                num += 1
    # apply corresponding multipliers to scores of matching group
    # (note: this loop rebinds `score` from the score dict to each document's
    # float score; the dict is no longer needed at this point)
    for doc_id, score in result:
        temp_list = []
        if IPC_group_dictionary[doc_id] in target_id_multiplier:
            temp_list.append(doc_id)
            temp_list.append(score * target_id_multiplier[IPC_group_dictionary[doc_id]])
            multiplied_results.append(temp_list)
        else:
            temp_list.append(doc_id)
            temp_list.append(score)
            multiplied_results.append(temp_list)
    # sort again after adjusting scores
    multiplied_results.sort(key=lambda docId_score_pair: docId_score_pair[1], reverse=True)
    resultString = ""
    # generate result string (maps enumerated ids back to real document ids)
    for doc_id, score in multiplied_results:
        ''' # pruning of results based on threshold score
        # this also had a negative impact on the results as it greatly reduces recall
        # and is therefore commented out
        # since scores are sorted, if reaches below threshold, stop appending
        if score < PRUNE_THRESHOLD:
            break
        '''
        resultString += doc_id_map[doc_id] + " "
    return resultString[:-1]
def main(dictionary_file, postings_file, query_file, output_file):
    """Load the dictionaries, run the query file, and write the ranked IDs.

    FIX: the three file handles were opened and never closed; `with`
    blocks now guarantee they are closed (and the output flushed) even if
    search_query raises.
    """
    with open(dictionary_file, "rb") as dictionary_reader:
        (title_dictionary, abstract_dictionary) = pickle.load(dictionary_reader)
    with open(postings_file, "rb") as postings_reader:
        result = search_query(title_dictionary, abstract_dictionary, postings_reader, query_file)
    with open(output_file, "w") as output:
        output.write(result)
        output.write('\n')
def usage():
    """Print the command-line usage string for this script."""
    print "usage: python search.py -d dictionary-file -p postings-file -q query-file -o output-file-of-results"
# --- command-line entry point ----------------------------------------------
# Parse -d/-p/-q/-o options; all four are required or the script exits with
# usage information.
dictionary_file = postings_file = query_file = output_file = None
try:
    opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:')
except getopt.GetoptError, err:
    usage()
    sys.exit(2)
for o, a in opts:
    if o == '-d':
        dictionary_file = a
    elif o == '-p':
        postings_file = a
    elif o == '-q':
        query_file = a
    elif o == '-o':
        output_file = a
    else:
        assert False, "unhandled option"
if query_file == None or output_file == None or dictionary_file == None or postings_file == None:
    usage()
    sys.exit(2)
main(dictionary_file, postings_file, query_file, output_file)
Fix spacing issues.
import sys
import getopt
import nltk
import math
import xml.etree.ElementTree as ET
import cPickle as pickle
from nltk.corpus import wordnet as wn
from nltk.tag import pos_tag
# Boilerplate lead-in stripped from query descriptions before tokenization.
QUERY_DESCRIPTION_PREFIX = "Relevant documents will describe"
# Weight for same-zone matches (query title vs doc title, description vs abstract).
ZONE_WEIGHT_SAME = 0.7
# Weight for cross-zone matches (query title vs doc abstract and vice versa).
ZONE_WEIGHT_CROSS = 0.3
# Number of top-ranked documents whose IPC groups receive a score multiplier.
TOP_N_GROUP = 4
# Per-rank increment used to build the IPC-group score multiplier.
INCREMENT_MULTIPLIER = 0.8
# Number of top results fed back as new queries (pseudo-relevance feedback).
TOP_N_RESULT = 2
# Score threshold for the (disabled) result-pruning experiment below.
PRUNE_THRESHOLD = 14
"""
Loads the postings file by byte pointer linked with the given term in dictionary.
The returned objects either are regular postings lists with a list of doc_id, weighted tf pairs,
or special entries that contains different objects which are:
{
"TITLE DOC LENGTH TABLE" : dict<int:float>, a dictionary mapping document id and document length for title
"ABSTRACT DOC LENGTH TABLE" : dict<int:float>, a dictionary mapping document id and document length for abstract
"DOC ID MAP" : dict<int, str>, a dictionary that maps enumerated doc id to the actual doc id
"IPC GROUP DICTIONARY" : dict<int:str> a dictionary that maps enumerated doc id to IPC Group ID
"DIRECTORY_PATH" : str, directory path of corpus
}
Pre-condition: term in dictionary == True
get_postings_list_by_term(str, dict<str:int>, file) -> [(int, float), ...]
"""
def load_postings_by_term(term, dictionary, postings_reader):
    """Seek to *term*'s byte offset in the postings file and unpickle the entry.

    Pre-condition: ``term in dictionary``.  ``dictionary[term][1]`` is the
    byte offset of the pickled object inside *postings_reader*.
    """
    offset = dictionary[term][1]
    postings_reader.seek(offset)
    return pickle.load(postings_reader)
"""
Given raw query is tokenized and each term's frequency is calculated.
Returns a dictionary that maps each term with its term frequency.
The tokenization involves case-folding and stemming with PorterStemmer object.
Any words that contains non-ascii chars are ignored.
tokenize_query -> dict<term:term frequency, ...>
"""
def tokenize_query(raw_query):
    """Tokenize *raw_query* into a dict mapping stemmed term -> frequency.

    Tokens are case-folded and stemmed with a PorterStemmer; any token
    containing non-ASCII characters is skipped entirely.

    tokenize_query(str) -> dict<term: term frequency, ...>
    """
    temp = []
    tokenized_query = {}
    stemmer = nltk.stem.porter.PorterStemmer()
    '''
    # for nouns only synonyms
    The approach with this commented code yields a lower score however we thought
    it is still interesting enough to keep the algorithm commented within the code.
    This is making use of synonym to do a query expansion provided in NLTK Synset.
    We specifically pick the nouns in synsets because we believe that nouns will
    help us guess the most relevant meanings for a patent information verbs or
    adjectives do.
    #tag what type of word it is and check for nouns later
    tagged_query = pos_tag(nltk.word_tokenize(raw_query))
    for word, pos in tagged_query:
        tempList = []
        temp.append(str(stemmer.stem(word.lower())))
        #check if word is a type of noun, if yes, find syn as query expansion
        #for information on tags -> nltk.help.upenn_tagset()
        if (pos == 'NN' or pos == 'NNP' or pos == 'NNS' or pos == 'NNPS'):
            for synset in wn.synsets(word):
                for lemma in synset.lemmas():
                    tempList.append(lemma)
            tempList = list(set(tempList))
            for syn in tempList:
                temp.append(str(stemmer.stem(syn.name().lower())))
    '''
    for word in nltk.word_tokenize(raw_query):
        # Ignoring any word that contains non-ascii characters.
        # FIX: in Python 2, str.decode('ascii') raises UnicodeDecodeError for
        # non-ascii byte strings (unicode input raises UnicodeEncodeError), so
        # both must be caught or a non-ascii byte token crashes the search.
        try:
            word.decode('ascii')
        except (UnicodeDecodeError, UnicodeEncodeError):
            continue
        temp.append(str(stemmer.stem(word.lower())))
    temp.sort()
    for term in temp:
        if term in tokenized_query:
            tokenized_query[term] += 1
        else:
            tokenized_query[term] = 1
    return tokenized_query
def vector_length(vector):
    """Return the Euclidean (L2) norm of an iterable of (term, weight) pairs.

    FIX: the exponent was written as ``1 / 2``, which is integer division
    (== 0) under Python 2, so ``pow(temp, 0)`` made every vector length 1
    and broke cosine normalisation.  Use the float literal 0.5 instead.
    """
    temp = 0
    for term, tf_idf_w in vector:
        temp += pow(tf_idf_w, 2)
    return pow(temp, 0.5)
def perform_search(query_title, query_description, title_dictionary, abstract_dictionary, postings_reader):
    """Score all documents against the query's title and description zones.

    Computes lnc.ltc cosine similarity four times -- each query zone (title,
    description) against each document zone (title, abstract) -- and sums the
    contributions into one per-document score.  Same-zone pairings are
    weighted by ZONE_WEIGHT_SAME, cross-zone ones by ZONE_WEIGHT_CROSS, and a
    document already matched in the same-zone pass is skipped in the matching
    cross-zone pass so it is not counted twice.

    Returns dict<doc_id: score>, or '' when the query title is empty.
    """
    # If title is missing, return empty string
    if query_title.strip() == '':
        return ''
    # If description is missing, still query but description is None
    # NOTE(review): tokenize_query(None) would hand None to
    # nltk.word_tokenize -- presumably descriptions are never empty in this
    # corpus; confirm before relying on this branch.
    if query_description.strip() == '':
        query_description = None
    score = {}
    # tf-idf weight of each query zone, computed separately against each
    # document zone's document-frequency statistics (title and abstract keep
    # independent idf values).
    query_title_weighted_tf_idf_table_for_title = {}
    query_title_weighted_tf_idf_table_for_abstract = {}
    query_description_weighted_tf_idf_table_for_title = {}
    query_description_weighted_tf_idf_table_for_abstract = {}
    title_doc_length_table = load_postings_by_term("TITLE DOC LENGTH TABLE", title_dictionary, postings_reader)
    abstract_doc_length_table = load_postings_by_term("ABSTRACT DOC LENGTH TABLE", abstract_dictionary, postings_reader)
    query_title_tokens = tokenize_query(query_title)
    query_description_tokens = tokenize_query(query_description)
    # calculating each term's weighted tf-idf in query
    for title_term, qt_frequency in query_title_tokens.iteritems():
        # only calculating score if term is indexed
        tf_w = 1 + math.log(qt_frequency, 10)
        if title_term in title_dictionary:
            idf_in_title = math.log(len(title_doc_length_table) / (title_dictionary[title_term][0] * 1.0), 10)
            query_title_weighted_tf_idf_table_for_title[title_term] = tf_w * idf_in_title
        if title_term in abstract_dictionary:
            idf_in_abstract = math.log(len(abstract_doc_length_table) / (abstract_dictionary[title_term][0] * 1.0), 10)
            query_title_weighted_tf_idf_table_for_abstract[title_term] = tf_w * idf_in_abstract
    for description_term, qd_frequency in query_description_tokens.iteritems():
        # only calculating score if term is indexed
        tf_w = 1 + math.log(qd_frequency, 10)
        if description_term in title_dictionary:
            idf_in_title = math.log(len(title_doc_length_table) / (title_dictionary[description_term][0] * 1.0), 10)
            query_description_weighted_tf_idf_table_for_title[description_term] = tf_w * idf_in_title
        if description_term in abstract_dictionary:
            idf_in_abstract = math.log(
                len(abstract_doc_length_table) / (abstract_dictionary[description_term][0] * 1.0), 10)
            query_description_weighted_tf_idf_table_for_abstract[description_term] = tf_w * idf_in_abstract
    # calculating query length (L2 norm of each query-zone vector, used for
    # cosine normalisation below)
    query_title_length_for_title = vector_length(query_title_weighted_tf_idf_table_for_title.iteritems())
    query_title_length_for_abstract = vector_length(query_title_weighted_tf_idf_table_for_abstract.iteritems())
    query_description_length_for_title = vector_length(query_description_weighted_tf_idf_table_for_title.iteritems())
    query_description_length_for_abstract = vector_length(
        query_description_weighted_tf_idf_table_for_abstract.iteritems())
    # calculating cosine angle between two vectors
    # between title query and docs' titles (same-zone weight)
    title_to_title_matched_ids = set()
    for term, tf_idf_w in query_title_weighted_tf_idf_table_for_title.iteritems():
        title_postings = load_postings_by_term(term, title_dictionary, postings_reader)
        for doc_id, d_tf_w in title_postings:
            if doc_id not in score:
                score[doc_id] = 0
            score[doc_id] += d_tf_w * tf_idf_w / (
                query_title_length_for_title * title_doc_length_table[doc_id]) * ZONE_WEIGHT_SAME
            title_to_title_matched_ids.add(doc_id)
    # between title query and docs' abstracts (cross-zone weight); documents
    # already matched title-to-title are skipped to avoid double counting
    for term, tf_idf_w in query_title_weighted_tf_idf_table_for_abstract.iteritems():
        abstract_postings = load_postings_by_term(term, abstract_dictionary, postings_reader)
        for doc_id, d_tf_w in abstract_postings:
            if doc_id in title_to_title_matched_ids:
                continue
            if doc_id not in score:
                score[doc_id] = 0
            score[doc_id] += d_tf_w * tf_idf_w / (
                query_title_length_for_abstract * abstract_doc_length_table[doc_id]) * ZONE_WEIGHT_CROSS
    # between query description and docs' abstracts (same-zone weight)
    description_to_abstracts_matched_ids = set()
    for term, tf_idf_w in query_description_weighted_tf_idf_table_for_abstract.iteritems():
        abstract_postings = load_postings_by_term(term, abstract_dictionary, postings_reader)
        for doc_id, d_tf_w in abstract_postings:
            if doc_id not in score:
                score[doc_id] = 0
            score[doc_id] += d_tf_w * tf_idf_w / (
                query_description_length_for_abstract * abstract_doc_length_table[doc_id]) * ZONE_WEIGHT_SAME
            description_to_abstracts_matched_ids.add(doc_id)
    # between query description and docs' titles (cross-zone weight);
    # documents already matched description-to-abstract are skipped
    for term, tf_idf_w in query_description_weighted_tf_idf_table_for_title.iteritems():
        title_postings = load_postings_by_term(term, title_dictionary, postings_reader)
        for doc_id, d_tf_w in title_postings:
            if doc_id in description_to_abstracts_matched_ids:
                continue
            if doc_id not in score:
                score[doc_id] = 0
            score[doc_id] += d_tf_w * tf_idf_w / (
                query_description_length_for_title * title_doc_length_table[doc_id]) * ZONE_WEIGHT_CROSS
    return score
"""
Processes the raw string query and retrieves at most 10 documents by its ID for the query
that are the most relevant to the query. The returned string is a space-delimitered doc IDs
in the order of relevance from highest to the lowest.
The relevance is determined by the accumulated score of each document's cosine similarity
between its document vector and the query vector. The ranking scheme for the algorithm is
lnc.ltc in SMART notation.
search(dict<str:int>, file, str) -> str
"""
def search_query(title_dictionary, abstract_dictionary, postings_reader, query_file):
    """Run the XML query in *query_file* and return ranked document IDs.

    Pipeline:
      1. Score all documents against the query title/description
         (lnc.ltc cosine similarity via perform_search).
      2. Pseudo-relevance feedback: re-query with the title/abstract of the
         TOP_N_RESULT best documents and add those scores in.
      3. Boost every document whose IPC group matches one of the groups of
         the TOP_N_GROUP highest-ranked documents.

    Returns a space-delimited string of document IDs, best match first.
    """
    query = ET.parse(query_file).getroot()
    query_title = query.find('title').text
    query_description = query.find('description').text.strip()
    # Strip the boilerplate lead-in so it does not pollute the term vector.
    if query_description[:len(QUERY_DESCRIPTION_PREFIX)] == QUERY_DESCRIPTION_PREFIX:
        query_description = query_description[len(QUERY_DESCRIPTION_PREFIX):]
    score = perform_search(query_title, query_description, title_dictionary, abstract_dictionary, postings_reader)
    # sorting by score from most to the least
    result = score.items()
    result.sort(key=lambda docId_score_pair: docId_score_pair[1], reverse=True)
    doc_id_map = load_postings_by_term("DOC ID MAP", title_dictionary, postings_reader)
    directory = title_dictionary["DIRECTORY_PATH"]
    # Pseudo-relevance feedback: treat each of the current top N documents as
    # a fresh query and accumulate its scores into the main score table.
    for num in range(0, TOP_N_RESULT):
        if num >= len(result):
            break
        else:
            content = ET.parse(directory + doc_id_map[result[num][0]] + ".xml").getroot()
            title = abstract = None
            for child in content:
                name = child.get("name")
                if name == "Title":
                    title = child.text
                elif name == "Abstract":
                    abstract = child.text
            score_for_new_query = perform_search(title, abstract, title_dictionary, abstract_dictionary, postings_reader)
            for doc_id in score_for_new_query:
                if doc_id in score:
                    score[doc_id] += score_for_new_query[doc_id]
                else:
                    score[doc_id] = score_for_new_query[doc_id]
    # sorting by score from most to the least
    result = score.items()
    result.sort(key=lambda docId_score_pair: docId_score_pair[1], reverse=True)
    # Magnifying top N groups with a multiplier.
    # Assumption: a patent in the same IPC group as a current top-N result is
    # more likely to be relevant than one outside those groups.
    IPC_group_dictionary = load_postings_by_term("IPC GROUP DICTIONARY", title_dictionary, postings_reader)
    target_id_multiplier = {}  # contains {group: multiplier to be applied}
    multiplied_results = []
    counter = 0
    for num in range(TOP_N_GROUP, 0, -1):
        # check if there are any more items to fit N groups
        if counter >= len(result):
            break
        else:
            # resolve group of this top N ranked item
            target_doc_id = result[counter][0]
            counter += 1
            target_group = IPC_group_dictionary[target_doc_id]
            # check if group was already recorded before
            if target_group not in target_id_multiplier:
                target_id_multiplier[target_group] = 1 + (num * INCREMENT_MULTIPLIER)
            else:
                # repeated group, skip
                # NOTE(review): this increment has no effect -- `num` is
                # rebound by the for loop on the next iteration, so a
                # repeated group silently consumes a multiplier slot.
                # Confirm whether that is intended.
                num += 1
    # apply corresponding multipliers to scores of matching group
    # (note: this loop rebinds `score` from the score dict to each document's
    # float score; the dict is no longer needed at this point)
    for doc_id, score in result:
        temp_list = []
        if IPC_group_dictionary[doc_id] in target_id_multiplier:
            temp_list.append(doc_id)
            temp_list.append(score * target_id_multiplier[IPC_group_dictionary[doc_id]])
            multiplied_results.append(temp_list)
        else:
            temp_list.append(doc_id)
            temp_list.append(score)
            multiplied_results.append(temp_list)
    # sort again after adjusting scores
    multiplied_results.sort(key=lambda docId_score_pair: docId_score_pair[1], reverse=True)
    resultString = ""
    # generate result string (maps enumerated ids back to real document ids)
    for doc_id, score in multiplied_results:
        ''' # pruning of results based on threshold score
        # this also had a negative impact on the results as it greatly reduces recall
        # and is therefore commented out
        # since scores are sorted, if reaches below threshold, stop appending
        if score < PRUNE_THRESHOLD:
            break
        '''
        resultString += doc_id_map[doc_id] + " "
    return resultString[:-1]
def main(dictionary_file, postings_file, query_file, output_file):
    """Load the dictionaries, run the query file, and write the ranked IDs.

    FIX: the three file handles were opened and never closed; `with`
    blocks now guarantee they are closed (and the output flushed) even if
    search_query raises.
    """
    with open(dictionary_file, "rb") as dictionary_reader:
        (title_dictionary, abstract_dictionary) = pickle.load(dictionary_reader)
    with open(postings_file, "rb") as postings_reader:
        result = search_query(title_dictionary, abstract_dictionary, postings_reader, query_file)
    with open(output_file, "w") as output:
        output.write(result)
        output.write('\n')
def usage():
    """Print the command-line usage string for this script."""
    print "usage: python search.py -d dictionary-file -p postings-file -q query-file -o output-file-of-results"
# --- command-line entry point ----------------------------------------------
# Parse -d/-p/-q/-o options; all four are required or the script exits with
# usage information.
dictionary_file = postings_file = query_file = output_file = None
try:
    opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:')
except getopt.GetoptError, err:
    usage()
    sys.exit(2)
for o, a in opts:
    if o == '-d':
        dictionary_file = a
    elif o == '-p':
        postings_file = a
    elif o == '-q':
        query_file = a
    elif o == '-o':
        output_file = a
    else:
        assert False, "unhandled option"
if query_file == None or output_file == None or dictionary_file == None or postings_file == None:
    usage()
    sys.exit(2)
main(dictionary_file, postings_file, query_file, output_file)
|
import asyncio
from .compat import PY_350, PY_352
# On Python < 3.5 the StopAsyncIteration builtin does not exist; bind a
# placeholder so the name resolves (all async paths below are gated on PY_350).
if not PY_350:
    StopAsyncIteration = None  # noqa
def create_future(*, loop=None):
    """Return a new future bound to *loop* (default: the current event loop).

    Prefers ``loop.create_future()`` and falls back to instantiating
    ``asyncio.Future`` directly on loops that predate that method.
    """
    target = loop if loop is not None else asyncio.get_event_loop()
    try:
        factory = target.create_future
    except AttributeError:
        return asyncio.Future(loop=target)
    return factory()
class Scan:
    """Iterator over all hits of an Elasticsearch scrolled search.

    Usable as a (a)sync context manager plus iterator: each iteration step
    yields the next batch of hits, issuing scroll requests until the server
    reports no more results.
    """

    def __init__(
        self,
        es,
        query=None,
        scroll='5m',
        size=1000,
        preserve_order=False,
        clear_scroll=True,
        *, loop=None,
        **kwargs
    ):
        self._loop = loop
        self._es = es
        if not preserve_order:
            # Sort on _doc unless the caller needs a specific order; the
            # original query dict is copied so it is never mutated in place.
            query = query.copy() if query else {}
            query['sort'] = '_doc'
        self._query = query
        self._scroll = scroll
        self._size = size
        self._clear_scroll = clear_scroll
        self._kwargs = kwargs
        self._scroll_id = None
        self._total = 0
        # True until the first scroll() request has been issued.
        self.__initial = True
        self.__has_more = None
        self.__found = 0
        # First batch of hits, buffered by scroll() so the first iteration
        # step can return it without another round-trip.
        self.__scroll_hits = None

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        pass

    def __iter__(self):  # noqa
        return self

    def __next__(self):  # noqa
        # Each step returns a future/coroutine resolving to the next batch of
        # hits; scroll() must have been called first.
        assert not self.__initial
        if self.__scroll_hits is not None:
            # Hand out the batch buffered by the initial scroll() call.
            fut = create_future(loop=self._loop)
            fut.set_result(self.__scroll_hits)
            self.__scroll_hits = None
            return fut
        if not self.has_more:
            raise StopIteration
        return self.search()

    if PY_350:
        @asyncio.coroutine
        def __aenter__(self):  # noqa
            # Async context manager: issue the initial search on entry ...
            yield from self.scroll()
            return self

        @asyncio.coroutine
        def __aexit__(self, *exc_info):  # noqa
            # ... and release the server-side scroll context on exit.
            yield from self.clear_scroll()

        __aiter__ = __iter__
        if not PY_352:
            # Before Python 3.5.2, __aiter__ itself had to be awaitable.
            __aiter__ = asyncio.coroutine(__aiter__)

        @asyncio.coroutine
        def __anext__(self):  # noqa
            assert not self.__initial
            if self.__scroll_hits is not None:
                hits = self.__scroll_hits
                self.__scroll_hits = None
            else:
                hits = yield from self.search()
            if hits:
                return hits
            else:
                raise StopAsyncIteration

    @property
    def scroll_id(self):
        # Only meaningful after the initial scroll() request.
        assert not self.__initial
        return self._scroll_id

    @property
    def total(self):
        # Total hit count as reported by the server's initial response.
        assert not self.__initial
        return self._total

    @property
    def has_more(self):
        # True while the server may still hold unreturned hits.
        assert not self.__initial
        if self._scroll_id is None:
            return False
        if self.__has_more is False:
            return False
        return True

    @asyncio.coroutine
    def scroll(self):
        """Issue the initial search request and buffer its first hits."""
        assert self.__initial
        resp = yield from self._es.search(
            body=self._query,
            scroll=self._scroll,
            size=self._size,
            **self._kwargs
        )
        self.__initial = False
        hits = resp['hits']['hits']
        self._scroll_id = resp.get('_scroll_id')
        self._total = resp['hits']['total']
        self.__found += len(hits)
        self.__has_more = self.__found < self._total
        self.__scroll_hits = hits
        return hits

    @asyncio.coroutine
    def search(self):
        """Fetch the next batch of hits for the current scroll id."""
        assert not self.__initial
        resp = yield from self._es.scroll(
            self._scroll_id, scroll=self._scroll,
        )
        hits = resp['hits']['hits']
        self.__found += len(hits)
        self.__has_more = self.__found < self._total
        return hits

    @asyncio.coroutine
    def clear_scroll(self):
        """Release the server-side scroll context, if one exists and clearing is enabled."""
        if self._scroll_id is not None and self._clear_scroll:
            yield from self._es.clear_scroll(
                body={
                    'scroll_id': [self._scroll_id],
                },
            )
Code style cleanup.
import asyncio
from .compat import PY_350, PY_352
# On Python < 3.5 the StopAsyncIteration builtin does not exist; bind a
# placeholder so the name resolves (all async paths below are gated on PY_350).
if not PY_350:
    StopAsyncIteration = None  # noqa
def create_future(*, loop=None):
    """Return a new future bound to *loop* (default: the current event loop).

    Prefers ``loop.create_future()`` and falls back to instantiating
    ``asyncio.Future`` directly on loops that predate that method.
    """
    target = loop if loop is not None else asyncio.get_event_loop()
    try:
        factory = target.create_future
    except AttributeError:
        return asyncio.Future(loop=target)
    return factory()
class Scan:
    """Iterator over all hits of an Elasticsearch scrolled search.

    Usable as a (a)sync context manager plus iterator: each iteration step
    yields the next batch of hits, issuing scroll requests until the server
    reports no more results.
    """

    def __init__(
        self,
        es,
        query=None,
        scroll='5m',
        size=1000,
        preserve_order=False,
        clear_scroll=True,
        *,
        loop=None,
        **kwargs
    ):
        self._loop = loop
        self._es = es
        if not preserve_order:
            # Sort on _doc unless the caller needs a specific order; the
            # original query dict is copied so it is never mutated in place.
            query = query.copy() if query else {}
            query['sort'] = '_doc'
        self._query = query
        self._scroll = scroll
        self._size = size
        self._clear_scroll = clear_scroll
        self._kwargs = kwargs
        self._scroll_id = None
        self._total = 0
        # True until the first scroll() request has been issued.
        self.__initial = True
        self.__has_more = None
        self.__found = 0
        # First batch of hits, buffered by scroll() so the first iteration
        # step can return it without another round-trip.
        self.__scroll_hits = None

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        pass

    def __iter__(self):  # noqa
        return self

    def __next__(self):  # noqa
        # Each step returns a future/coroutine resolving to the next batch of
        # hits; scroll() must have been called first.
        assert not self.__initial
        if self.__scroll_hits is not None:
            # Hand out the batch buffered by the initial scroll() call.
            fut = create_future(loop=self._loop)
            fut.set_result(self.__scroll_hits)
            self.__scroll_hits = None
            return fut
        if not self.has_more:
            raise StopIteration
        return self.search()

    if PY_350:
        @asyncio.coroutine
        def __aenter__(self):  # noqa
            # Async context manager: issue the initial search on entry ...
            yield from self.scroll()
            return self

        @asyncio.coroutine
        def __aexit__(self, *exc_info):  # noqa
            # ... and release the server-side scroll context on exit.
            yield from self.clear_scroll()

        __aiter__ = __iter__
        if not PY_352:
            # Before Python 3.5.2, __aiter__ itself had to be awaitable.
            __aiter__ = asyncio.coroutine(__aiter__)

        @asyncio.coroutine
        def __anext__(self):  # noqa
            assert not self.__initial
            if self.__scroll_hits is not None:
                hits = self.__scroll_hits
                self.__scroll_hits = None
            else:
                hits = yield from self.search()
            if hits:
                return hits
            else:
                raise StopAsyncIteration

    @property
    def scroll_id(self):
        # Only meaningful after the initial scroll() request.
        assert not self.__initial
        return self._scroll_id

    @property
    def total(self):
        # Total hit count as reported by the server's initial response.
        assert not self.__initial
        return self._total

    @property
    def has_more(self):
        # True while the server may still hold unreturned hits.
        assert not self.__initial
        if self._scroll_id is None:
            return False
        if self.__has_more is False:
            return False
        return True

    @asyncio.coroutine
    def scroll(self):
        """Issue the initial search request and buffer its first hits."""
        assert self.__initial
        resp = yield from self._es.search(
            body=self._query,
            scroll=self._scroll,
            size=self._size,
            **self._kwargs
        )
        self.__initial = False
        hits = resp['hits']['hits']
        self._scroll_id = resp.get('_scroll_id')
        self._total = resp['hits']['total']
        self.__found += len(hits)
        self.__has_more = self.__found < self._total
        self.__scroll_hits = hits
        return hits

    @asyncio.coroutine
    def search(self):
        """Fetch the next batch of hits for the current scroll id."""
        assert not self.__initial
        resp = yield from self._es.scroll(
            self._scroll_id, scroll=self._scroll,
        )
        hits = resp['hits']['hits']
        self.__found += len(hits)
        self.__has_more = self.__found < self._total
        return hits

    @asyncio.coroutine
    def clear_scroll(self):
        """Release the server-side scroll context, if one exists and clearing is enabled."""
        if self._scroll_id is not None and self._clear_scroll:
            yield from self._es.clear_scroll(
                body={'scroll_id': [self._scroll_id]},
            )
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import copy
import cv2
import sys
import time
import traceback
from ikalog.utils import *
from . import scenes
# The IkaLog core engine.
#
class IkaEngine:
    """
    The IkaLog core engine: reads frames from a capture source, runs every
    registered scene matcher over each frame, and dispatches the resulting
    events to output plugins.
    """

    # -- Profiling ----------------------------------------------------------

    def _profile_dump_scenes(self):
        # Per-scene cumulative processing time (accumulated by the scene base class).
        for scene in self.scenes:
            print('%4.3fs %s' % (scene._prof_time_took, scene))

    def _profile_dump(self):
        self._profile_dump_scenes()

    def enable_profile(self):
        self._enable_profile = True

    def disble_profile(self):
        # NOTE(review): method name is misspelled ("disble"), but it is part of
        # the public interface — renaming would break external callers.
        self._enable_profile = False

    # -- Event handlers (engine participates as an output plugin itself) ----

    def on_game_individual_result(self, context):
        # Arm a 20-second watchdog to close the session after the result screen.
        self.session_close_wdt = context['engine']['msec'] + (20 * 1000)

    def on_result_gears(self, context):
        # Gear results follow the individual result; shorten the watchdog to 1s.
        if self.session_close_wdt is not None:
            self.session_close_wdt = context['engine']['msec'] + (1 * 1000)

    def on_game_lost_sync(self, context):
        self.session_abort()

    def dprint(self, text):
        # Debug output goes to stderr so stdout stays clean for plugins.
        print(text, file=sys.stderr)

    def call_plugins(self, event_name, params=None, debug=False):
        """
        Invoke ``event_name`` on every output plugin that implements it.
        Plugins lacking the hook but providing onUncatchedEvent receive that
        instead.  Plugin exceptions are logged and swallowed so a single
        misbehaving plugin cannot stop the engine.
        """
        if debug:
            self.dprint('call plug-in hook (%s):' % event_name)
        for op in self.output_plugins:
            if hasattr(op, event_name):
                if debug:
                    self.dprint('Call %s' % op.__class__.__name__)
                try:
                    if params is None:
                        getattr(op, event_name)(self.context)
                    else:
                        getattr(op, event_name)(self.context, params)
                except:
                    # Deliberate broad catch: keep dispatching to other plugins.
                    self.dprint('%s.%s() raised a exception >>>>' %
                                (op.__class__.__name__, event_name))
                    self.dprint(traceback.format_exc())
                    self.dprint('<<<<<')
            elif hasattr(op, 'onUncatchedEvent'):
                if debug:
                    self.dprint(
                        'call plug-in hook (UncatchedEvent, %s):' % event_name)
                try:
                    getattr(op, 'onUncatchedEvent')(event_name, self.context)
                except:
                    self.dprint('%s.%s() raised a exception >>>>' %
                                (op.__class__.__name__, event_name))
                    self.dprint(traceback.format_exc())
                    self.dprint('<<<<<')

    def call_plugins_later(self, event_name, params=None, debug=False):
        # Queue the event; process_frame() drains the queue after each frame.
        self._event_queue.append((event_name, params))

    def read_next_frame(self, skip_frames=0):
        """
        Read the next frame (optionally discarding ``skip_frames`` first).
        Blocks, retrying once per second, while the capture returns None;
        returns (None, None) only if the engine is stopped meanwhile.
        """
        for i in range(skip_frames):
            frame = self.capture.read_frame()
        frame = self.capture.read_frame()
        while frame is None:
            self.call_plugins('on_frame_read_failed')
            if self._stop:
                return None, None
            cv2.waitKey(1000)
            frame = self.capture.read_frame()
        t = self.capture.get_current_timestamp()
        self.context['engine']['msec'] = t
        self.context['engine']['frame'] = frame
        # Deep-copied so plugins can draw on the preview without touching the
        # frame the scene matchers analyze.
        self.context['engine']['preview'] = copy.deepcopy(frame)
        self.call_plugins('on_debug_read_next_frame')
        return frame, t

    def stop(self):
        self.call_plugins('on_stop')
        self._stop = True

    def reset(self):
        """Reset per-game state and notify plugins."""
        # Initalize the context
        self.context['game'] = {
            'map': None,
            'rule': None,
            'won': None,
            'players': None,

            'kills': 0,
            'dead': False,
            'death_reasons': {},

            'inkling_state': [None, None],

            # Dirty hack
            'livesTrack': [],
            'towerTrack': [],
        }
        self.call_plugins('on_game_reset')

    def create_context(self):
        """Build the shared context dict passed to every scene and plugin."""
        self.context = {
            'engine': {
                'epoch_time': None,
                'frame': None,
                'service': {
                    'call_plugins': self.call_plugins,
                    # NOTE(review): this maps to call_plugins, not
                    # call_plugins_later — looks like a bug; confirm intent.
                    'call_plugins_later': self.call_plugins,
                    # For backward compatibility
                    'callPlugins': self.call_plugins,
                }
            },
            'scenes': {
            },
            'config': {
            },
            'lobby': {
            }
        }
        self.reset()
        self.session_close_wdt = None

    def session_close(self):
        # Normal end of a game session (watchdog expired or EOF).
        self.session_close_wdt = None
        self.call_plugins('on_game_session_end')
        self.reset()

    def session_abort(self):
        # Abnormal end (lost sync / EOF without a pending result).
        self.session_close_wdt = None
        self.call_plugins('on_game_session_abort')
        self.reset()

    def process_scene(self, scene):
        """Run one scene matcher; scene exceptions are logged unless configured to abort."""
        context = self.context

        try:
            scene.new_frame(context)
            scene.match(context)
        except:
            if self._abort_at_scene_exception:
                raise
            self.dprint('%s raised a exception >>>>' %
                        (scene.__class__.__name__))
            self.dprint(traceback.format_exc())
            self.dprint('<<<<<')

    def find_scene_object(self, scene_class_name):
        # Linear lookup of a scene instance by its class name.
        for scene in self.scenes:
            if scene.__class__.__name__ == scene_class_name:
                return scene
        return None

    def process_frame(self):
        """
        Read and fully process a single frame: scene matching, session
        watchdog, preview rendering, and the deferred-event queue.
        Returns False when no frame could be read; otherwise returns None.
        """
        context = self.context

        frame, t = self.read_next_frame()

        if frame is None:
            return False

        context['engine']['inGame'] = self.find_scene_object(
            'GameTimerIcon').match(context)
        self.call_plugins('on_frame_read')

        for scene in self.scenes:
            self.process_scene(scene)

        if self.session_close_wdt is not None:
            if self.session_close_wdt < context['engine']['msec']:
                self.dprint('Watchdog fired. Closing current session')
                self.session_close()

        key = None

        self.call_plugins('on_draw_preview')
        self.call_plugins('on_show_preview')

        # FixMe: Since on_frame_next and on_key_press has non-standard arguments,
        # self.call_plugins() doesn't work for those.
        for op in self.output_plugins:
            if hasattr(op, "on_frame_next"):
                try:
                    key = op.on_frame_next(context)
                except:
                    pass

        for op in self.output_plugins:
            if hasattr(op, "on_key_press"):
                try:
                    op.on_key_press(context, key)
                except:
                    pass

        # Drain events queued via call_plugins_later() during this frame.
        while len(self._event_queue) > 0:
            event = self._event_queue.pop(0)
            self.call_plugins(event_name=event[0], params=event[1])

    def _main_loop(self):
        # Core loop: sleep while paused, otherwise process frames until
        # stopped or the capture source reports EOF.
        while not self._stop:
            if self._pause:
                time.sleep(0.5)
                continue
            try:
                self.process_frame()
            except EOFError:
                # EOF. Close session if close_session_at_eof is set.
                if self.close_session_at_eof:
                    if self.session_close_wdt is not None:
                        self.dprint('Closing current session at EOF')
                        self.session_close()
                    else:
                        self.session_abort()
                self._stop = True
        cv2.destroyAllWindows()

    def run(self):
        """Run the engine until stopped; always dump profiling data if enabled."""
        try:
            self._main_loop()
        finally:
            if self._enable_profile:
                self._profile_dump()

    def set_capture(self, capture):
        self.capture = capture
        self.context['engine']['input_class'] = self.capture.__class__.__name__

    def set_epoch_time(self, epoch_time):
        self.context['engine']['epoch_time'] = epoch_time

    def set_plugins(self, plugins):
        # The engine itself and every scene also receive plugin events.
        self.output_plugins = [self]
        self.output_plugins.extend(self.scenes)
        self.output_plugins.extend(plugins)

    def pause(self, pause):
        self._pause = pause

    def _initialize_scenes(self):
        # Order matters: earlier scenes run first each frame.
        self.scenes = [
            scenes.GameTimerIcon(self),
            scenes.GameStart(self),
            scenes.GameGoSign(self),
            scenes.GameKill(self),
            scenes.GameDead(self),
            scenes.GameOutOfBound(self),
            scenes.GameFinish(self),

            scenes.GameSpecialGauge(self),
            scenes.GameSpecialWeapon(self),

            scenes.GameRankedBattleEvents(self),
            scenes.PaintScoreTracker(self),
            scenes.ObjectiveTracker(self),
            scenes.SplatzoneTracker(self),
            scenes.InklingsTracker(self),

            scenes.ResultJudge(self),
            scenes.ResultDetail(self),
            scenes.ResultUdemae(self),
            scenes.ResultGears(self),
            scenes.ResultFesta(self),

            scenes.Lobby(self),
            # scenes.Downie(self),
            scenes.Blank(self),
        ]

    def __init__(self, enable_profile=False, abort_at_scene_exception=False):
        self._initialize_scenes()
        self.output_plugins = [self]
        self.last_capture = time.time() - 100

        self._stop = False
        self._pause = True
        self._event_queue = []

        self.close_session_at_eof = False

        self._enable_profile = enable_profile
        self._abort_at_scene_exception = abort_at_scene_exception

        self.create_context()
ikalog/engine: Add exception logging
Signed-off-by: Takeshi HASEGAWA <80595b5c49522665976d35e515d02ac963124d00@gmail.com>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import copy
import cv2
import pprint
import sys
import time
import traceback
from ikalog.utils import *
from . import scenes
# The IkaLog core engine.
#
class IkaEngine:
    """
    The IkaLog core engine: reads frames from a capture source, runs every
    registered scene matcher over each frame, and dispatches the resulting
    events to output plugins.  This revision also keeps a per-scene
    exception log in the context and dumps it when the engine exits.
    """

    # Profiling
    def _profile_dump_scenes(self):
        # Per-scene cumulative processing time (accumulated by the scene base class).
        for scene in self.scenes:
            print('%4.3fs %s' % (scene._prof_time_took, scene))

    def _profile_dump(self):
        self._profile_dump_scenes()

    def enable_profile(self):
        self._enable_profile = True

    def disble_profile(self):
        # NOTE(review): method name is misspelled ("disble"), but it is part of
        # the public interface — renaming would break external callers.
        self._enable_profile = False

    # Exception Logging
    def _exception_log_init(self, context):
        # (Re)create the exception log container in the shared context.
        context['engine']['exceptions_log'] = {}

    def _exception_log_dump(self, context):
        """Pretty-print the collected exception log, if any entries exist."""
        if not 'exceptions_log' in context['engine']:
            self._exception_log_init(context)

        if len(context['engine']['exceptions_log']) > 0:
            pprint.pprint(context['engine']['exceptions_log'])

    def _exception_log_append(self, context, name, text):
        """Record one exception occurrence: per-name count plus the latest traceback text."""
        if not 'exceptions_log' in context['engine']:
            self._exception_log_init(context)

        d = context['engine']['exceptions_log']
        count = d.get(name, {'count': 0})['count']
        d[name] = {
            'count': count + 1,
            # Only the most recent traceback is kept for each name.
            'text': text,
        }

    #
    def on_game_individual_result(self, context):
        # Arm a 20-second watchdog to close the session after the result screen.
        self.session_close_wdt = context['engine']['msec'] + (20 * 1000)

    def on_result_gears(self, context):
        # Gear results follow the individual result; shorten the watchdog to 1s.
        if self.session_close_wdt is not None:
            self.session_close_wdt = context['engine']['msec'] + (1 * 1000)

    def on_game_lost_sync(self, context):
        self.session_abort()

    def dprint(self, text):
        # Debug output goes to stderr so stdout stays clean for plugins.
        print(text, file=sys.stderr)

    def call_plugins(self, event_name, params=None, debug=False):
        """
        Invoke ``event_name`` on every output plugin that implements it.
        Plugins lacking the hook but providing onUncatchedEvent receive that
        instead.  Plugin exceptions are logged and swallowed so a single
        misbehaving plugin cannot stop the engine.
        """
        if debug:
            self.dprint('call plug-in hook (%s):' % event_name)
        for op in self.output_plugins:
            if hasattr(op, event_name):
                if debug:
                    self.dprint('Call %s' % op.__class__.__name__)
                try:
                    if params is None:
                        getattr(op, event_name)(self.context)
                    else:
                        getattr(op, event_name)(self.context, params)
                except:
                    # Deliberate broad catch: keep dispatching to other plugins.
                    self.dprint('%s.%s() raised a exception >>>>' %
                                (op.__class__.__name__, event_name))
                    self.dprint(traceback.format_exc())
                    self.dprint('<<<<<')
            elif hasattr(op, 'onUncatchedEvent'):
                if debug:
                    self.dprint(
                        'call plug-in hook (UncatchedEvent, %s):' % event_name)
                try:
                    getattr(op, 'onUncatchedEvent')(event_name, self.context)
                except:
                    self.dprint('%s.%s() raised a exception >>>>' %
                                (op.__class__.__name__, event_name))
                    self.dprint(traceback.format_exc())
                    self.dprint('<<<<<')

    def call_plugins_later(self, event_name, params=None, debug=False):
        # Queue the event; process_frame() drains the queue after each frame.
        self._event_queue.append((event_name, params))

    def read_next_frame(self, skip_frames=0):
        """
        Read the next frame (optionally discarding ``skip_frames`` first).
        Blocks, retrying once per second, while the capture returns None;
        returns (None, None) only if the engine is stopped meanwhile.
        """
        for i in range(skip_frames):
            frame = self.capture.read_frame()
        frame = self.capture.read_frame()
        while frame is None:
            self.call_plugins('on_frame_read_failed')
            if self._stop:
                return None, None
            cv2.waitKey(1000)
            frame = self.capture.read_frame()
        t = self.capture.get_current_timestamp()
        self.context['engine']['msec'] = t
        self.context['engine']['frame'] = frame
        # Deep-copied so plugins can draw on the preview without touching the
        # frame the scene matchers analyze.
        self.context['engine']['preview'] = copy.deepcopy(frame)
        self.call_plugins('on_debug_read_next_frame')
        return frame, t

    def stop(self):
        self.call_plugins('on_stop')
        self._stop = True

    def reset(self):
        """Reset per-game state, notify plugins, and clear the exception log."""
        # Initalize the context
        self.context['game'] = {
            'map': None,
            'rule': None,
            'won': None,
            'players': None,

            'kills': 0,
            'dead': False,
            'death_reasons': {},

            'inkling_state': [None, None],

            # Dirty hack
            'livesTrack': [],
            'towerTrack': [],
        }
        self.call_plugins('on_game_reset')
        self._exception_log_init(self.context)

    def create_context(self):
        """Build the shared context dict passed to every scene and plugin."""
        self.context = {
            'engine': {
                'epoch_time': None,
                'frame': None,
                'service': {
                    'call_plugins': self.call_plugins,
                    # NOTE(review): this maps to call_plugins, not
                    # call_plugins_later — looks like a bug; confirm intent.
                    'call_plugins_later': self.call_plugins,
                    # For backward compatibility
                    'callPlugins': self.call_plugins,
                },
                'exceptions_log': {
                },
            },
            'scenes': {
            },
            'config': {
            },
            'lobby': {
            }
        }
        self.reset()
        self.session_close_wdt = None

    def session_close(self):
        # Normal end of a game session (watchdog expired or EOF).
        self.session_close_wdt = None
        self.call_plugins('on_game_session_end')
        self.reset()

    def session_abort(self):
        # Abnormal end (lost sync / EOF without a pending result).
        self.session_close_wdt = None
        self.call_plugins('on_game_session_abort')
        self.reset()

    def process_scene(self, scene):
        """Run one scene matcher; exceptions are logged (stderr + context log) unless configured to abort."""
        context = self.context

        try:
            scene.new_frame(context)
            scene.match(context)
        except:
            if self._abort_at_scene_exception:
                raise

            scene_name = scene.__class__.__name__
            desc = traceback.format_exc()

            self.dprint('%s raised a exception >>>>' % scene_name)
            self.dprint(desc)
            self.dprint('<<<<<')

            self._exception_log_append(context, scene_name, desc)

    def find_scene_object(self, scene_class_name):
        # Linear lookup of a scene instance by its class name.
        for scene in self.scenes:
            if scene.__class__.__name__ == scene_class_name:
                return scene
        return None

    def process_frame(self):
        """
        Read and fully process a single frame: scene matching, session
        watchdog, preview rendering, and the deferred-event queue.
        Returns False when no frame could be read; otherwise returns None.
        """
        context = self.context

        frame, t = self.read_next_frame()

        if frame is None:
            return False

        context['engine']['inGame'] = self.find_scene_object(
            'GameTimerIcon').match(context)
        self.call_plugins('on_frame_read')

        for scene in self.scenes:
            self.process_scene(scene)

        if self.session_close_wdt is not None:
            if self.session_close_wdt < context['engine']['msec']:
                self.dprint('Watchdog fired. Closing current session')
                self.session_close()

        key = None

        self.call_plugins('on_draw_preview')
        self.call_plugins('on_show_preview')

        # FixMe: Since on_frame_next and on_key_press has non-standard arguments,
        # self.call_plugins() doesn't work for those.
        for op in self.output_plugins:
            if hasattr(op, "on_frame_next"):
                try:
                    key = op.on_frame_next(context)
                except:
                    pass

        for op in self.output_plugins:
            if hasattr(op, "on_key_press"):
                try:
                    op.on_key_press(context, key)
                except:
                    pass

        # Drain events queued via call_plugins_later() during this frame.
        while len(self._event_queue) > 0:
            event = self._event_queue.pop(0)
            self.call_plugins(event_name=event[0], params=event[1])

    def _main_loop(self):
        # Core loop: sleep while paused, otherwise process frames until
        # stopped or the capture source reports EOF.
        while not self._stop:
            if self._pause:
                time.sleep(0.5)
                continue
            try:
                self.process_frame()
            except EOFError:
                # EOF. Close session if close_session_at_eof is set.
                if self.close_session_at_eof:
                    if self.session_close_wdt is not None:
                        self.dprint('Closing current session at EOF')
                        self.session_close()
                    else:
                        self.session_abort()
                self._stop = True
        cv2.destroyAllWindows()

    def run(self):
        """Run the engine until stopped; always dump profiling and exception logs on exit."""
        try:
            self._main_loop()
        finally:
            if self._enable_profile:
                self._profile_dump()

            # NOTE(review): 'if 1:' is always true — looks like a debugging
            # toggle left in place; confirm whether this should be conditional.
            if 1:
                self._exception_log_dump(self.context)

    def set_capture(self, capture):
        self.capture = capture
        self.context['engine']['input_class'] = self.capture.__class__.__name__

    def set_epoch_time(self, epoch_time):
        self.context['engine']['epoch_time'] = epoch_time

    def set_plugins(self, plugins):
        # The engine itself and every scene also receive plugin events.
        self.output_plugins = [self]
        self.output_plugins.extend(self.scenes)
        self.output_plugins.extend(plugins)

    def pause(self, pause):
        self._pause = pause

    def _initialize_scenes(self):
        # Order matters: earlier scenes run first each frame.
        self.scenes = [
            scenes.GameTimerIcon(self),
            scenes.GameStart(self),
            scenes.GameGoSign(self),
            scenes.GameKill(self),
            scenes.GameDead(self),
            scenes.GameOutOfBound(self),
            scenes.GameFinish(self),

            scenes.GameSpecialGauge(self),
            scenes.GameSpecialWeapon(self),

            scenes.GameRankedBattleEvents(self),
            scenes.PaintScoreTracker(self),
            scenes.ObjectiveTracker(self),
            scenes.SplatzoneTracker(self),
            scenes.InklingsTracker(self),

            scenes.ResultJudge(self),
            scenes.ResultDetail(self),
            scenes.ResultUdemae(self),
            scenes.ResultGears(self),
            scenes.ResultFesta(self),

            scenes.Lobby(self),
            # scenes.Downie(self),
            scenes.Blank(self),
        ]

    def __init__(self, enable_profile=False, abort_at_scene_exception=False):
        self._initialize_scenes()
        self.output_plugins = [self]
        self.last_capture = time.time() - 100

        self._stop = False
        self._pause = True
        self._event_queue = []

        self.close_session_at_eof = False

        self._enable_profile = enable_profile
        self._abort_at_scene_exception = abort_at_scene_exception

        self.create_context()
|
import csv
import json
import re
from io import StringIO
import django_filters
from django import forms
from django.conf import settings
from django.forms.fields import JSONField as _JSONField, InvalidJSONInput
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db.models import Count
from django.forms import BoundField
from django.urls import reverse
from utilities.choices import unpack_grouped_choices
from utilities.utils import content_type_name
from utilities.validators import EnhancedURLValidator
from . import widgets
from .constants import *
from .utils import expand_alphanumeric_pattern, expand_ipaddress_pattern
# Public form-field API of this module.
__all__ = (
    'CommentField',
    'ContentTypeChoiceField',
    'ContentTypeMultipleChoiceField',
    'CSVChoiceField',
    'CSVContentTypeField',
    'CSVDataField',
    'CSVFileField',
    'CSVModelChoiceField',
    'CSVTypedChoiceField',
    'DynamicModelChoiceField',
    'DynamicModelMultipleChoiceField',
    'ExpandableIPAddressField',
    'ExpandableNameField',
    'JSONField',
    'LaxURLField',
    'SlugField',
    'TagFilterField',
)
class CommentField(forms.CharField):
    """
    A textarea with support for Markdown rendering. Exists mostly just to add a standard help_text.
    """
    widget = forms.Textarea
    default_label = ''
    # TODO: Port Markdown cheat sheet to internal documentation
    default_helptext = (
        '<i class="mdi mdi-information-outline"></i> '
        '<a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" target="_blank" tabindex="-1">'
        'Markdown</a> syntax is supported'
    )

    def __init__(self, *args, **kwargs):
        # Comments are optional by default and carry the standard label/help text
        # unless the caller overrides them.
        kwargs.setdefault('required', False)
        kwargs.setdefault('label', self.default_label)
        kwargs.setdefault('help_text', self.default_helptext)
        super().__init__(*args, **kwargs)
class SlugField(forms.SlugField):
    """
    Extend the built-in SlugField to automatically populate from a field called `name` unless otherwise specified.
    """

    def __init__(self, slug_source='name', *args, **kwargs):
        kwargs.setdefault('label', "Slug")
        kwargs.setdefault('help_text', "URL-friendly unique shorthand")
        kwargs.setdefault('widget', widgets.SlugWidget)
        super().__init__(*args, **kwargs)
        # Tell the widget which sibling form field to derive the slug from.
        self.widget.attrs['slug-source'] = slug_source
class TagFilterField(forms.MultipleChoiceField):
    """
    A filter field for the tags of a model. Only the tags used by a model are displayed.

    :param model: The model of the filter
    """
    widget = widgets.StaticSelect2Multiple

    def __init__(self, model, *args, **kwargs):
        # Passing a callable defers the query, so choices reflect the database
        # each time the form is instantiated.
        def get_choices():
            tag_qs = model.tags.annotate(
                count=Count('extras_taggeditem_items')
            ).order_by('name')
            return [(str(tag.slug), f'{tag.name} ({tag.count})') for tag in tag_qs]

        super().__init__(label='Tags', choices=get_choices, required=False, *args, **kwargs)
class LaxURLField(forms.URLField):
    """
    A URLField whose validation does not require a fully-qualified domain name
    (e.g. http://myserver/ is valid).
    """
    default_validators = [EnhancedURLValidator()]
class JSONField(_JSONField):
    """
    Custom wrapper around Django's built-in JSONField to avoid presenting "null" as the default text.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.help_text:
            self.help_text = 'Enter context data in <a href="https://json.org/">JSON</a> format.'
            self.widget.attrs['placeholder'] = ''

    def prepare_value(self, value):
        # Invalid submissions are echoed back verbatim for correction.
        if isinstance(value, InvalidJSONInput):
            return value
        # Show an empty box instead of the literal string "null".
        if value is None:
            return ''
        return json.dumps(value, indent=4, sort_keys=True)
class ContentTypeChoiceMixin:
    """Order ContentType choices by app and render human-friendly labels."""

    def __init__(self, queryset, *args, **kwargs):
        # Order ContentTypes by app_label for a predictable, grouped listing.
        super().__init__(queryset.order_by('app_label', 'model'), *args, **kwargs)

    def label_from_instance(self, obj):
        try:
            return content_type_name(obj)
        except AttributeError:
            # Fall back to the default label when the helper cannot handle obj.
            return super().label_from_instance(obj)
class ContentTypeChoiceField(ContentTypeChoiceMixin, forms.ModelChoiceField):
    """Single-select ContentType field with ordered, friendly choices."""
class ContentTypeMultipleChoiceField(ContentTypeChoiceMixin, forms.ModelMultipleChoiceField):
    """Multi-select ContentType field with ordered, friendly choices."""
#
# CSV fields
#
class CSVDataField(forms.CharField):
    """
    A CharField (rendered as a Textarea) which accepts CSV-formatted data. It returns data as a two-tuple: The first
    item is a dictionary of column headers, mapping field names to the attribute by which they match a related object
    (where applicable). The second item is a list of dictionaries, each representing a discrete row of CSV data.

    :param from_form: The form from which the field derives its validation rules.
    """
    widget = forms.Textarea

    def __init__(self, from_form, *args, **kwargs):
        form = from_form()
        self.model = form.Meta.model
        self.fields = form.fields
        self.required_fields = [
            name for name, field in form.fields.items() if field.required
        ]

        super().__init__(*args, **kwargs)

        # Whitespace can be significant inside quoted CSV values.
        self.strip = False
        if not self.label:
            self.label = ''
        if not self.initial:
            # Pre-populate the textarea with the required column headers.
            self.initial = ','.join(self.required_fields) + '\n'
        if not self.help_text:
            self.help_text = 'Enter the list of column headers followed by one line per record to be imported, using ' \
                             'commas to separate values. Multi-line data and values containing commas may be wrapped ' \
                             'in double quotes.'

    def to_python(self, value):
        """Parse the submitted CSV text into ({header: to_field_or_None}, [row_dict, ...])."""
        records = []
        reader = csv.reader(StringIO(value.strip()))

        # Consume the first line of CSV data as column headers. Create a dictionary mapping each header to an optional
        # "to" field specifying how the related object is being referenced. For example, importing a Device might use a
        # `site.slug` header, to indicate the related site is being referenced by its slug.
        headers = {}
        try:
            header_row = next(reader)
        except StopIteration:
            # Fix: an empty submission previously raised an uncaught StopIteration.
            raise forms.ValidationError('No CSV data found; the first line must contain column headers.')
        for header in header_row:
            if '.' in header:
                field, to_field = header.split('.', 1)
                headers[field] = to_field
            else:
                headers[header] = None

        # Parse CSV rows into a list of dictionaries mapped from the column headers.
        for i, row in enumerate(reader, start=1):
            if len(row) != len(headers):
                raise forms.ValidationError(
                    f"Row {i}: Expected {len(headers)} columns but found {len(row)}"
                )
            row = [col.strip() for col in row]
            record = dict(zip(headers.keys(), row))
            records.append(record)

        return headers, records

    def validate(self, value):
        """Check the parsed headers against the import form's fields."""
        headers, records = value

        # Validate provided column headers
        for field, to_field in headers.items():
            if field not in self.fields:
                raise forms.ValidationError(f'Unexpected column header "{field}" found.')
            if to_field and not hasattr(self.fields[field], 'to_field_name'):
                raise forms.ValidationError(f'Column "{field}" is not a related object; cannot use dots')
            if to_field and not hasattr(self.fields[field].queryset.model, to_field):
                raise forms.ValidationError(f'Invalid related object attribute for column "{field}": {to_field}')

        # Validate required fields
        for f in self.required_fields:
            if f not in headers:
                raise forms.ValidationError(f'Required column header "{f}" not found.')

        return value
class CSVFileField(forms.FileField):
    """
    A FileField (rendered as a file input button) which accepts a file containing CSV-formatted data. It returns
    data as a two-tuple: The first item is a dictionary of column headers, mapping field names to the attribute
    by which they match a related object (where applicable). The second item is a list of dictionaries, each
    representing a discrete row of CSV data.

    :param from_form: The form from which the field derives its validation rules.
    """

    def __init__(self, from_form, *args, **kwargs):
        form = from_form()
        self.model = form.Meta.model
        self.fields = form.fields
        self.required_fields = [
            name for name, field in form.fields.items() if field.required
        ]

        super().__init__(*args, **kwargs)

    def to_python(self, file):
        """Parse the uploaded CSV file into ({header: to_field_or_None}, [row_dict, ...])."""
        records = []
        file.seek(0)
        try:
            csv_str = file.read().decode('utf-8')
        except UnicodeDecodeError:
            # Fix: a non-UTF-8 upload previously crashed with an uncaught UnicodeDecodeError.
            raise forms.ValidationError('Uploaded file must be UTF-8 encoded.')
        reader = csv.reader(csv_str.splitlines())

        # Consume the first line of CSV data as column headers. Create a dictionary mapping each header to an optional
        # "to" field specifying how the related object is being referenced. For example, importing a Device might use a
        # `site.slug` header, to indicate the related site is being referenced by its slug.
        headers = {}
        try:
            header_row = next(reader)
        except StopIteration:
            # Fix: an empty file previously raised an uncaught StopIteration.
            raise forms.ValidationError('No CSV data found; the first line must contain column headers.')
        for header in header_row:
            if '.' in header:
                field, to_field = header.split('.', 1)
                headers[field] = to_field
            else:
                headers[header] = None

        # Parse CSV rows into a list of dictionaries mapped from the column headers.
        for i, row in enumerate(reader, start=1):
            if len(row) != len(headers):
                raise forms.ValidationError(
                    f"Row {i}: Expected {len(headers)} columns but found {len(row)}"
                )
            row = [col.strip() for col in row]
            record = dict(zip(headers.keys(), row))
            records.append(record)

        return headers, records

    def validate(self, value):
        """Check the parsed headers against the import form's fields."""
        headers, records = value

        # Validate provided column headers
        for field, to_field in headers.items():
            if field not in self.fields:
                raise forms.ValidationError(f'Unexpected column header "{field}" found.')
            if to_field and not hasattr(self.fields[field], 'to_field_name'):
                raise forms.ValidationError(f'Column "{field}" is not a related object; cannot use dots')
            if to_field and not hasattr(self.fields[field].queryset.model, to_field):
                raise forms.ValidationError(f'Invalid related object attribute for column "{field}": {to_field}')

        # Validate required fields
        for f in self.required_fields:
            if f not in headers:
                raise forms.ValidationError(f'Required column header "{f}" not found.')

        return value
class CSVChoiceField(forms.ChoiceField):
    """
    Invert the provided set of choices to take the human-friendly label as input, and return the database value.
    """
    STATIC_CHOICES = True

    def __init__(self, *, choices=(), **kwargs):
        super().__init__(choices=choices, **kwargs)
        # Flatten any grouped choices so labels map directly to values.
        self.choices = unpack_grouped_choices(choices)
class CSVTypedChoiceField(forms.TypedChoiceField):
    """TypedChoiceField variant flagged as having static choices for CSV import."""
    STATIC_CHOICES = True
class CSVModelChoiceField(forms.ModelChoiceField):
    """
    Provides additional validation for model choices entered as CSV data.
    """
    default_error_messages = {
        'invalid_choice': 'Object not found.',
    }

    def to_python(self, value):
        # Surface an ambiguous reference as a validation error rather than a
        # server error.
        try:
            return super().to_python(value)
        except MultipleObjectsReturned:
            raise forms.ValidationError(
                f'"{value}" is not a unique value for this field; multiple objects were found'
            )
class CSVContentTypeField(CSVModelChoiceField):
    """
    Reference a ContentType in the form <app>.<model>
    """
    STATIC_CHOICES = True

    def prepare_value(self, value):
        # NOTE(review): assumes `value` is a ContentType instance; a plain string
        # initial value would raise AttributeError here — confirm against callers.
        return f'{value.app_label}.{value.model}'

    def to_python(self, value):
        """Resolve an "<app>.<model>" string to a ContentType from the queryset."""
        try:
            app_label, model = value.split('.')
        except ValueError:
            # Fix: removed pointless f-string prefixes (no placeholders; flake8 F541).
            raise forms.ValidationError('Object type must be specified as "<app>.<model>"')
        try:
            return self.queryset.get(app_label=app_label, model=model)
        except ObjectDoesNotExist:
            raise forms.ValidationError('Invalid object type')
#
# Expansion fields
#
class ExpandableNameField(forms.CharField):
    """
    A field which allows for numeric range expansion
      Example: 'Gi0/[1-3]' => ['Gi0/1', 'Gi0/2', 'Gi0/3']
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.help_text:
            self.help_text = """
        Alphanumeric ranges are supported for bulk creation. Mixed cases and types within a single range
        are not supported. Examples:
        <ul>
            <li><code>[ge,xe]-0/0/[0-9]</code></li>
            <li><code>e[0-3][a-d,f]</code></li>
        </ul>
        """

    def to_python(self, value):
        # Preserve the historical contract: falsy input yields an empty string,
        # not an empty list.
        if not value:
            return ''
        if re.search(ALPHANUMERIC_EXPANSION_PATTERN, value):
            # Expand e.g. 'Gi0/[1-3]' into the full list of names.
            return list(expand_alphanumeric_pattern(value))
        return [value]
class ExpandableIPAddressField(forms.CharField):
    """
    A field which allows for expansion of IP address ranges
      Example: '192.0.2.[1-254]/24' => ['192.0.2.1/24', '192.0.2.2/24', '192.0.2.3/24' ... '192.0.2.254/24']
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.help_text:
            self.help_text = 'Specify a numeric range to create multiple IPs.<br />'\
                             'Example: <code>192.0.2.[1,5,100-254]/24</code>'

    def to_python(self, value):
        # Hackish address family detection but it's all we have to work with
        if '.' in value and re.search(IP4_EXPANSION_PATTERN, value):
            return list(expand_ipaddress_pattern(value, 4))
        if ':' in value and re.search(IP6_EXPANSION_PATTERN, value):
            return list(expand_ipaddress_pattern(value, 6))
        # No expansion pattern; treat the input as a single address.
        return [value]
#
# Dynamic fields
#
class DynamicModelChoiceMixin:
    """
    Mixin providing on-demand (API-backed) choice population for model choice fields.

    :param display_field: The name of the attribute of an API response object to display in the selection list
    :param query_params: A dictionary of additional key/value pairs to attach to the API request
    :param initial_params: A dictionary of child field references to use for selecting a parent field's initial value
    :param null_option: The string used to represent a null selection (if any)
    :param disabled_indicator: The name of the field which, if populated, will disable selection of the
        choice (optional)
    """
    # Class attribute intentionally named `filter` (shadows the builtin within
    # this class body only); subclasses override it with a different filter type.
    filter = django_filters.ModelChoiceFilter
    widget = widgets.APISelect

    # TODO: Remove display_field in v3.0
    def __init__(self, display_field='display', query_params=None, initial_params=None, null_option=None,
                 disabled_indicator=None, *args, **kwargs):
        self.display_field = display_field
        self.query_params = query_params or {}
        self.initial_params = initial_params or {}
        self.null_option = null_option
        self.disabled_indicator = disabled_indicator

        # to_field_name is set by ModelChoiceField.__init__(), but we need to set it early for reference
        # by widget_attrs()
        self.to_field_name = kwargs.get('to_field_name')

        super().__init__(*args, **kwargs)

    def widget_attrs(self, widget):
        """Build the HTML attributes the APISelect widget needs for its requests."""
        attrs = {
            'display-field': self.display_field,
        }

        # Set value-field attribute if the field specifies to_field_name
        if self.to_field_name:
            attrs['value-field'] = self.to_field_name

        # Set the string used to represent a null option
        if self.null_option is not None:
            attrs['data-null-option'] = self.null_option

        # Set the disabled indicator, if any
        if self.disabled_indicator is not None:
            attrs['disabled-indicator'] = self.disabled_indicator

        # Attach any static query parameters
        for key, value in self.query_params.items():
            widget.add_query_param(key, value)

        return attrs

    def get_bound_field(self, form, field_name):
        """
        Bind the field while restricting the queryset to already-bound data only;
        the full option list is fetched on demand by the APISelect widget.
        """
        bound_field = BoundField(form, self, field_name)

        # Set initial value based on prescribed child fields (if not already set)
        if not self.initial and self.initial_params:
            filter_kwargs = {}
            for kwarg, child_field in self.initial_params.items():
                value = form.initial.get(child_field.lstrip('$'))
                if value:
                    filter_kwargs[kwarg] = value
            if filter_kwargs:
                self.initial = self.queryset.filter(**filter_kwargs).first()

        # Modify the QuerySet of the field before we return it. Limit choices to any data already bound: Options
        # will be populated on-demand via the APISelect widget.
        data = bound_field.value()
        if data:
            field_name = getattr(self, 'to_field_name') or 'pk'
            filter = self.filter(field_name=field_name)
            try:
                self.queryset = filter.filter(self.queryset, data)
            except TypeError:
                # Catch any error caused by invalid initial data passed from the user
                self.queryset = self.queryset.none()
        else:
            self.queryset = self.queryset.none()

        # Set the data URL on the APISelect widget (if not already set)
        widget = bound_field.field.widget
        if not widget.attrs.get('data-url'):
            # URL name follows the "<app>-api:<model>-list" convention.
            app_label = self.queryset.model._meta.app_label
            model_name = self.queryset.model._meta.model_name
            data_url = reverse('{}-api:{}-list'.format(app_label, model_name))
            widget.attrs['data-url'] = data_url

        return bound_field
class DynamicModelChoiceField(DynamicModelChoiceMixin, forms.ModelChoiceField):
    """
    Override get_bound_field() to avoid pre-populating field choices with a SQL query. The field will be
    rendered only with choices set via bound data. Choices are populated on-demand via the APISelect widget.
    """

    def clean(self, value):
        """
        When null option is enabled and "None" is sent as part of a form to be submitted, it is sent as the
        string 'null'. This will check for that condition and gracefully handle the conversion to a NoneType.
        """
        is_null = value == settings.FILTERS_NULL_CHOICE_VALUE
        if is_null and self.null_option is not None:
            return None
        return super().clean(value)
class DynamicModelMultipleChoiceField(DynamicModelChoiceMixin, forms.ModelMultipleChoiceField):
    """
    A multiple-choice version of DynamicModelChoiceField.
    """
    # Multi-select variants of the filter and widget used by the mixin.
    filter = django_filters.ModelMultipleChoiceFilter
    widget = widgets.APISelectMultiple
removed unnecessary use of seek()
import csv
import json
import re
from io import StringIO
import django_filters
from django import forms
from django.conf import settings
from django.forms.fields import JSONField as _JSONField, InvalidJSONInput
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db.models import Count
from django.forms import BoundField
from django.urls import reverse
from utilities.choices import unpack_grouped_choices
from utilities.utils import content_type_name
from utilities.validators import EnhancedURLValidator
from . import widgets
from .constants import *
from .utils import expand_alphanumeric_pattern, expand_ipaddress_pattern
# Public form-field API of this module.
__all__ = (
    'CommentField',
    'ContentTypeChoiceField',
    'ContentTypeMultipleChoiceField',
    'CSVChoiceField',
    'CSVContentTypeField',
    'CSVDataField',
    'CSVFileField',
    'CSVModelChoiceField',
    'CSVTypedChoiceField',
    'DynamicModelChoiceField',
    'DynamicModelMultipleChoiceField',
    'ExpandableIPAddressField',
    'ExpandableNameField',
    'JSONField',
    'LaxURLField',
    'SlugField',
    'TagFilterField',
)
class CommentField(forms.CharField):
    """
    A textarea with support for Markdown rendering. Exists mostly just to add a standard help_text.
    """
    widget = forms.Textarea
    default_label = ''
    # TODO: Port Markdown cheat sheet to internal documentation
    default_helptext = (
        '<i class="mdi mdi-information-outline"></i> '
        '<a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" target="_blank" tabindex="-1">'
        'Markdown</a> syntax is supported'
    )

    def __init__(self, *args, **kwargs):
        # Comments are optional by default and carry the standard label/help text
        # unless the caller overrides them.
        kwargs.setdefault('required', False)
        kwargs.setdefault('label', self.default_label)
        kwargs.setdefault('help_text', self.default_helptext)
        super().__init__(*args, **kwargs)
class SlugField(forms.SlugField):
    """
    Extend the built-in SlugField to automatically populate from a field called `name` unless otherwise specified.
    """
    def __init__(self, slug_source='name', *args, **kwargs):
        # Defaults may be overridden by the caller via keyword arguments
        kwargs.setdefault('label', "Slug")
        kwargs.setdefault('help_text', "URL-friendly unique shorthand")
        kwargs.setdefault('widget', widgets.SlugWidget)
        super().__init__(*args, **kwargs)
        # Tell the widget which sibling form field to mirror when generating the slug
        self.widget.attrs['slug-source'] = slug_source
class TagFilterField(forms.MultipleChoiceField):
    """
    A filter field for the tags of a model. Only the tags used by a model are displayed.

    :param model: The model of the filter
    """
    widget = widgets.StaticSelect2Multiple

    def __init__(self, model, *args, **kwargs):

        def get_choices():
            # Annotate each tag with its usage count so the label can show it
            tag_queryset = model.tags.annotate(
                count=Count('extras_taggeditem_items')
            ).order_by('name')
            return [
                (str(tag.slug), '{} ({})'.format(tag.name, tag.count))
                for tag in tag_queryset
            ]

        # Choices are fetched each time the form is initialized
        super().__init__(label='Tags', choices=get_choices, required=False, *args, **kwargs)
class LaxURLField(forms.URLField):
    """
    Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names
    (e.g. http://myserver/ is valid)
    """
    # EnhancedURLValidator (utilities.validators) replaces Django's stricter default URL validation
    default_validators = [EnhancedURLValidator()]
class JSONField(_JSONField):
    """
    Custom wrapper around Django's built-in JSONField to avoid presenting "null" as the default text.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.help_text:
            self.help_text = 'Enter context data in <a href="https://json.org/">JSON</a> format.'
            self.widget.attrs['placeholder'] = ''

    def prepare_value(self, value):
        # Leave invalid input untouched so the user can see and correct it
        if isinstance(value, InvalidJSONInput):
            return value
        # Show an empty textarea instead of the literal "null"
        return '' if value is None else json.dumps(value, sort_keys=True, indent=4)
class ContentTypeChoiceMixin:
    """Shared behavior for ContentType choice fields: stable ordering and friendly labels."""

    def __init__(self, queryset, *args, **kwargs):
        # Present ContentTypes predictably: grouped by app_label, then by model
        ordered_queryset = queryset.order_by('app_label', 'model')
        super().__init__(ordered_queryset, *args, **kwargs)

    def label_from_instance(self, obj):
        # Prefer the human-friendly name; fall back to the default label when unavailable
        try:
            return content_type_name(obj)
        except AttributeError:
            return super().label_from_instance(obj)
class ContentTypeChoiceField(ContentTypeChoiceMixin, forms.ModelChoiceField):
    # Single-select ContentType field; ordering and labeling are provided by the mixin
    pass
class ContentTypeMultipleChoiceField(ContentTypeChoiceMixin, forms.ModelMultipleChoiceField):
    # Multi-select ContentType field; ordering and labeling are provided by the mixin
    pass
#
# CSV fields
#
class CSVDataField(forms.CharField):
    """
    A CharField (rendered as a Textarea) which accepts CSV-formatted data. It returns data as a two-tuple: The first
    item is a dictionary of column headers, mapping field names to the attribute by which they match a related object
    (where applicable). The second item is a list of dictionaries, each representing a discrete row of CSV data.

    :param from_form: The form from which the field derives its validation rules.
    """
    widget = forms.Textarea

    def __init__(self, from_form, *args, **kwargs):
        form = from_form()
        self.model = form.Meta.model
        self.fields = form.fields
        self.required_fields = [
            name for name, field in form.fields.items() if field.required
        ]

        super().__init__(*args, **kwargs)

        self.strip = False
        if not self.label:
            self.label = ''
        if not self.initial:
            self.initial = ','.join(self.required_fields) + '\n'
        if not self.help_text:
            self.help_text = 'Enter the list of column headers followed by one line per record to be imported, using ' \
                             'commas to separate values. Multi-line data and values containing commas may be wrapped ' \
                             'in double quotes.'

    def to_python(self, value):
        reader = csv.reader(StringIO(value.strip()))

        # The first line holds the column headers. Map each header to an optional "to" field
        # specifying how a related object is referenced (e.g. a `site.slug` header means the
        # related site is referenced by its slug).
        headers = dict(
            tuple(header.split('.', 1)) if '.' in header else (header, None)
            for header in next(reader)
        )

        # Parse the remaining rows into dictionaries keyed by the column headers.
        records = []
        for i, row in enumerate(reader, start=1):
            if len(row) != len(headers):
                raise forms.ValidationError(
                    f"Row {i}: Expected {len(headers)} columns but found {len(row)}"
                )
            records.append(dict(zip(headers, (col.strip() for col in row))))

        return headers, records

    def validate(self, value):
        headers, records = value

        # Validate provided column headers
        for field, to_field in headers.items():
            if field not in self.fields:
                raise forms.ValidationError(f'Unexpected column header "{field}" found.')
            if to_field and not hasattr(self.fields[field], 'to_field_name'):
                raise forms.ValidationError(f'Column "{field}" is not a related object; cannot use dots')
            if to_field and not hasattr(self.fields[field].queryset.model, to_field):
                raise forms.ValidationError(f'Invalid related object attribute for column "{field}": {to_field}')

        # Validate required fields
        for f in self.required_fields:
            if f not in headers:
                raise forms.ValidationError(f'Required column header "{f}" not found.')

        return value
class CSVFileField(forms.FileField):
    """
    A FileField (rendered as a file input button) which accepts a file containing CSV-formatted data. It returns
    data as a two-tuple: The first item is a dictionary of column headers, mapping field names to the attribute
    by which they match a related object (where applicable). The second item is a list of dictionaries, each
    representing a discrete row of CSV data.

    :param from_form: The form from which the field derives its validation rules.
    """
    def __init__(self, from_form, *args, **kwargs):

        form = from_form()
        self.model = form.Meta.model
        self.fields = form.fields
        # Required form fields must appear as column headers in the uploaded file
        self.required_fields = [
            name for name, field in form.fields.items() if field.required
        ]

        super().__init__(*args, **kwargs)

    def to_python(self, file):
        """Parse the uploaded file into a (headers, records) two-tuple."""
        records = []
        # Decode with utf-8-sig so a leading byte order mark (written by e.g. Excel when
        # exporting CSV) is stripped rather than being glued onto the first header name.
        # utf-8-sig decodes BOM-less UTF-8 identically, so this is backward-compatible.
        csv_str = file.read().decode('utf-8-sig')
        reader = csv.reader(csv_str.splitlines())

        # Consume the first line of CSV data as column headers. Create a dictionary mapping each header to an
        # optional "to" field specifying how the related object is being referenced. For example, importing a
        # Device might use a `site.slug` header, to indicate the related site is being referenced by its slug.
        headers = {}
        for header in next(reader):
            if '.' in header:
                field, to_field = header.split('.', 1)
                headers[field] = to_field
            else:
                headers[header] = None

        # Parse CSV rows into a list of dictionaries mapped from the column headers.
        for i, row in enumerate(reader, start=1):
            if len(row) != len(headers):
                raise forms.ValidationError(
                    f"Row {i}: Expected {len(headers)} columns but found {len(row)}"
                )
            row = [col.strip() for col in row]
            record = dict(zip(headers.keys(), row))
            records.append(record)

        return headers, records

    def validate(self, value):
        """Check the parsed headers against the form's fields and required columns."""
        headers, records = value

        # Validate provided column headers
        for field, to_field in headers.items():
            if field not in self.fields:
                raise forms.ValidationError(f'Unexpected column header "{field}" found.')
            if to_field and not hasattr(self.fields[field], 'to_field_name'):
                raise forms.ValidationError(f'Column "{field}" is not a related object; cannot use dots')
            if to_field and not hasattr(self.fields[field].queryset.model, to_field):
                raise forms.ValidationError(f'Invalid related object attribute for column "{field}": {to_field}')

        # Validate required fields
        for f in self.required_fields:
            if f not in headers:
                raise forms.ValidationError(f'Required column header "{f}" not found.')

        return value
class CSVChoiceField(forms.ChoiceField):
    """
    Invert the provided set of choices to take the human-friendly label as input, and return the database value.
    """
    # Marker consumed elsewhere to indicate the choice set is fixed
    STATIC_CHOICES = True

    def __init__(self, *, choices=(), **kwargs):
        super().__init__(choices=choices, **kwargs)
        # Flatten grouped choices so values can be matched directly
        self.choices = unpack_grouped_choices(choices)
class CSVTypedChoiceField(forms.TypedChoiceField):
    # Typed variant for CSV import; choice set is fixed (see CSVChoiceField)
    STATIC_CHOICES = True
class CSVModelChoiceField(forms.ModelChoiceField):
    """
    Provides additional validation for model choices entered as CSV data.
    """
    default_error_messages = {
        'invalid_choice': 'Object not found.',
    }

    def to_python(self, value):
        # Surface ambiguous references (e.g. two objects sharing a name) as a
        # validation error instead of an unhandled exception
        try:
            return super().to_python(value)
        except MultipleObjectsReturned:
            raise forms.ValidationError(
                f'"{value}" is not a unique value for this field; multiple objects were found'
            )
class CSVContentTypeField(CSVModelChoiceField):
    """
    Reference a ContentType in the form <app>.<model>
    """
    STATIC_CHOICES = True

    def prepare_value(self, value):
        # Serialize a ContentType instance as "<app_label>.<model>"
        return '{}.{}'.format(value.app_label, value.model)

    def to_python(self, value):
        try:
            app_label, model = value.split('.')
        except ValueError:
            raise forms.ValidationError('Object type must be specified as "<app>.<model>"')
        try:
            return self.queryset.get(app_label=app_label, model=model)
        except ObjectDoesNotExist:
            raise forms.ValidationError('Invalid object type')
#
# Expansion fields
#
class ExpandableNameField(forms.CharField):
    """
    A field which allows for numeric range expansion
      Example: 'Gi0/[1-3]' => ['Gi0/1', 'Gi0/2', 'Gi0/3']
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.help_text:
            self.help_text = """
        Alphanumeric ranges are supported for bulk creation. Mixed cases and types within a single range
        are not supported. Examples:
        <ul>
            <li><code>[ge,xe]-0/0/[0-9]</code></li>
            <li><code>e[0-3][a-d,f]</code></li>
        </ul>
        """

    def to_python(self, value):
        if not value:
            return ''
        # Expand any alphanumeric range pattern into the full list of names
        if re.search(ALPHANUMERIC_EXPANSION_PATTERN, value):
            return list(expand_alphanumeric_pattern(value))
        return [value]
class ExpandableIPAddressField(forms.CharField):
    """
    A field which allows for expansion of IP address ranges
      Example: '192.0.2.[1-254]/24' => ['192.0.2.1/24', '192.0.2.2/24', '192.0.2.3/24' ... '192.0.2.254/24']
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.help_text:
            self.help_text = 'Specify a numeric range to create multiple IPs.<br />'\
                             'Example: <code>192.0.2.[1,5,100-254]/24</code>'

    def to_python(self, value):
        # Hackish address family detection but it's all we have to work with
        if '.' in value and re.search(IP4_EXPANSION_PATTERN, value):
            return list(expand_ipaddress_pattern(value, 4))
        if ':' in value and re.search(IP6_EXPANSION_PATTERN, value):
            return list(expand_ipaddress_pattern(value, 6))
        return [value]
#
# Dynamic fields
#
class DynamicModelChoiceMixin:
    """
    Mixin powering API-backed select fields: choices are loaded on demand by the
    APISelect widget rather than rendered up front.

    :param display_field: The name of the attribute of an API response object to display in the selection list
    :param query_params: A dictionary of additional key/value pairs to attach to the API request
    :param initial_params: A dictionary of child field references to use for selecting a parent field's initial value
    :param null_option: The string used to represent a null selection (if any)
    :param disabled_indicator: The name of the field which, if populated, will disable selection of the
        choice (optional)
    """
    filter = django_filters.ModelChoiceFilter
    widget = widgets.APISelect

    # TODO: Remove display_field in v3.0
    def __init__(self, display_field='display', query_params=None, initial_params=None, null_option=None,
                 disabled_indicator=None, *args, **kwargs):
        self.display_field = display_field
        self.query_params = query_params or {}
        self.initial_params = initial_params or {}
        self.null_option = null_option
        self.disabled_indicator = disabled_indicator

        # to_field_name is set by ModelChoiceField.__init__(), but we need to set it early for reference
        # by widget_attrs()
        self.to_field_name = kwargs.get('to_field_name')

        super().__init__(*args, **kwargs)

    def widget_attrs(self, widget):
        # Build the HTML attributes that configure the APISelect widget
        attrs = {
            'display-field': self.display_field,
        }

        # Set value-field attribute if the field specifies to_field_name
        if self.to_field_name:
            attrs['value-field'] = self.to_field_name

        # Set the string used to represent a null option
        if self.null_option is not None:
            attrs['data-null-option'] = self.null_option

        # Set the disabled indicator, if any
        if self.disabled_indicator is not None:
            attrs['disabled-indicator'] = self.disabled_indicator

        # Attach any static query parameters
        for key, value in self.query_params.items():
            widget.add_query_param(key, value)

        return attrs

    def get_bound_field(self, form, field_name):
        # NOTE: this mutates self.queryset so that rendering never runs a broad SQL
        # query; only the value(s) already bound to the field are fetched.
        bound_field = BoundField(form, self, field_name)

        # Set initial value based on prescribed child fields (if not already set)
        if not self.initial and self.initial_params:
            filter_kwargs = {}
            for kwarg, child_field in self.initial_params.items():
                value = form.initial.get(child_field.lstrip('$'))
                if value:
                    filter_kwargs[kwarg] = value
            if filter_kwargs:
                self.initial = self.queryset.filter(**filter_kwargs).first()

        # Modify the QuerySet of the field before we return it. Limit choices to any data already bound: Options
        # will be populated on-demand via the APISelect widget.
        data = bound_field.value()
        if data:
            field_name = getattr(self, 'to_field_name') or 'pk'
            filter = self.filter(field_name=field_name)
            try:
                self.queryset = filter.filter(self.queryset, data)
            except TypeError:
                # Catch any error caused by invalid initial data passed from the user
                self.queryset = self.queryset.none()
        else:
            self.queryset = self.queryset.none()

        # Set the data URL on the APISelect widget (if not already set)
        widget = bound_field.field.widget
        if not widget.attrs.get('data-url'):
            app_label = self.queryset.model._meta.app_label
            model_name = self.queryset.model._meta.model_name
            data_url = reverse('{}-api:{}-list'.format(app_label, model_name))
            widget.attrs['data-url'] = data_url

        return bound_field
class DynamicModelChoiceField(DynamicModelChoiceMixin, forms.ModelChoiceField):
    """
    Override get_bound_field() to avoid pre-populating field choices with a SQL query. The field will be
    rendered only with choices set via bound data. Choices are populated on-demand via the APISelect widget.
    """
    def clean(self, value):
        """
        When null option is enabled and "None" is sent as part of a form to be submitted, it is sent as the
        string 'null'. This will check for that condition and gracefully handle the conversion to a NoneType.
        """
        null_selected = (
            self.null_option is not None
            and value == settings.FILTERS_NULL_CHOICE_VALUE
        )
        return None if null_selected else super().clean(value)
class DynamicModelMultipleChoiceField(DynamicModelChoiceMixin, forms.ModelMultipleChoiceField):
    """
    A multiple-choice version of DynamicModelChoiceField.
    """
    # Multi-value counterparts of the mixin's single-value filter/widget
    filter = django_filters.ModelMultipleChoiceFilter
    widget = widgets.APISelectMultiple
|
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'elip'
import time
import os
import jinja2
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from cloudify.celery import celery as celery_client
from cloudify import manager
from cloudify import utils
from worker_installer import init_worker_installer
from worker_installer.utils import is_on_management_worker
# Dotted module paths of the task modules every agent's celery app must import
PLUGIN_INSTALLER_PLUGIN_PATH = 'plugin_installer.tasks'
AGENT_INSTALLER_PLUGIN_PATH = 'worker_installer.tasks'
WINDOWS_AGENT_INSTALLER_PLUGIN_PATH = 'windows_agent_installer.tasks'
WINDOWS_PLUGIN_INSTALLER_PLUGIN_PATH = 'windows_plugin_installer.tasks'
DEFAULT_WORKFLOWS_PLUGIN_PATH = 'cloudify.plugins.workflows'

CELERY_INCLUDES_LIST = [
    AGENT_INSTALLER_PLUGIN_PATH, PLUGIN_INSTALLER_PLUGIN_PATH,
    WINDOWS_AGENT_INSTALLER_PLUGIN_PATH, WINDOWS_PLUGIN_INSTALLER_PLUGIN_PATH,
    DEFAULT_WORKFLOWS_PLUGIN_PATH
]

# Default fileserver path templates; '{0}' is filled with the target distro
CELERY_CONFIG_PATH = '/packages/templates/{0}-celeryd-cloudify.conf.template'
CELERY_INIT_PATH = '/packages/templates/{0}-celeryd-cloudify.init.template'
AGENT_PACKAGE_PATH = '/packages/agents/{0}-agent.tar.gz'
DISABLE_REQUIRETTY_SCRIPT_PATH = \
    '/packages/scripts/{0}-agent-disable-requiretty.sh'
def get_agent_resource_url(ctx, agent_config, resource):
    """Return the manager fileserver URL for an agent resource.

    :param ctx: cloudify context (used for the blueprint id)
    :param agent_config: agent configuration dict
    :param resource: resource key (e.g. 'agent_package_path')
    :return: the full URL of the resource on the fileserver
    """
    if agent_config.get(resource):
        # An explicit path was configured; it lives under the blueprint's folder
        return '{0}/{1}/{2}'.format(
            utils.get_manager_file_server_blueprints_root_url(),
            ctx.blueprint.id, agent_config[resource])
    else:
        # BUG FIX: the default path templates (AGENT_PACKAGE_PATH etc.) are module
        # globals, so they must be looked up in globals(); locals() only contains
        # this function's own variables and raised KeyError.
        # NOTE(review): resource 'disable_requiretty_script' upper-cases to
        # DISABLE_REQUIRETTY_SCRIPT, but the module constant is named
        # DISABLE_REQUIRETTY_SCRIPT_PATH -- confirm the intended key.
        resource_path = globals()[resource.upper()]
        return '{0}/{1}'.format(
            utils.get_manager_file_server_url(),
            resource_path.format(agent_config['distro']))
def get_celery_includes_list():
    """Return the list of task modules celery should import on the agent."""
    return CELERY_INCLUDES_LIST
def download_resource_on_host(logger, runner, url, destination_path):
    """Download a resource from the fileserver onto the agent's host.

    Tries wget first, then curl. Returns the command result of the download,
    or a falsy value when neither tool is available.

    :param logger: logger exposing debug()/warn()
    :param runner: command runner; run() results expose a `succeeded` flag
    :param url: the URL to fetch
    :param destination_path: target path for the file on the host
    """
    logger.debug('attempting to download {0} to {1}'.format(
        url, destination_path))
    logger.debug('checking if wget exists on the host machine')
    r = runner.run('which wget')
    if r.succeeded:
        logger.debug('wget-ing {0} to {1}'.format(url, destination_path))
        return runner.run('wget -T 30 {0} -O {1}'.format(
            url, destination_path))
    logger.debug('checking if curl exists on the host machine')
    r = runner.run('which curl')
    if r.succeeded:
        logger.debug('curl-ing {0} to {1}'.format(url, destination_path))
        # BUG FIX: curl writes to an explicit path with lowercase -o; uppercase -O
        # takes no argument (it saves under the remote name), so the previous
        # 'curl {url} -O {dest}' treated the destination as a second URL.
        return runner.run('curl {0} -o {1}'.format(
            url, destination_path))
    logger.warn('could not download resource')
    # Neither tool is available; r is the failed `which curl` result (falsy)
    return r.succeeded
@operation
@init_worker_installer
def install(ctx, runner, agent_config, **kwargs):
    """Install the celery agent worker on the target host.

    Downloads and extracts the agent package, repairs the bundled virtualenv,
    writes the celery configuration, and optionally disables requiretty.
    """
    try:
        agent_package_url = get_agent_resource_url(
            ctx, agent_config, 'agent_package_path')
    except Exception as ex:
        raise NonRecoverableError(
            'failed to retrieve agent package url ({0})'.format(ex))

    ctx.logger.debug("Pinging agent installer target")
    runner.ping()

    ctx.logger.info(
        "installing celery worker {0}".format(agent_config['name']))
    if worker_exists(runner, agent_config):
        ctx.logger.info("Worker for deployment {0} is already installed. "
                        "nothing to do.".format(ctx.deployment_id))
        return

    ctx.logger.info(
        'Installing celery worker [cloudify_agent={0}]'.format(agent_config))
    runner.run('mkdir -p {0}'.format(agent_config['base_dir']))

    ctx.logger.debug(
        'Downloading agent package from: {0}'.format(agent_package_url))
    r = download_resource_on_host(
        ctx.logger, runner, agent_package_url, '{0}/{1}'.format(
            agent_config['base_dir'], 'agent.tar.gz'))
    if not r:
        raise NonRecoverableError('failed to download agent package')

    ctx.logger.debug('extracting agent package on host')
    # BUG FIX: the format string referenced index {2} with only two arguments,
    # which raised IndexError at runtime; the extraction directory is argument {1}.
    runner.run(
        'tar xzvf {0}/agent.tar.gz --strip=2 -C {1}'.format(
            agent_config['base_dir'], agent_config['base_dir']))

    ctx.logger.debug('configuring virtualenv')
    for link in ['archives', 'bin', 'include', 'lib']:
        link_path = '{0}/env/local/{1}'.format(agent_config['base_dir'], link)
        try:
            runner.run('unlink {0}'.format(link_path))
            runner.run('ln -s {0}/env/{1} {2}'.format(
                agent_config['base_dir'], link, link_path))
        except Exception as e:
            ctx.logger.warn('Error processing link: {0} [error={1}] - '
                            'ignoring..'.format(link_path, str(e)))

    create_celery_configuration(
        ctx, runner, agent_config, manager.get_resource)
    runner.run('sudo chmod +x {0}'.format(agent_config['init_file']))

    # This is for fixing virtualenv included in package paths
    runner.run("sed -i '1 s|.*/bin/python.*$|#!{0}/env/bin/python|g' "
               "{0}/env/bin/*".format(agent_config['base_dir']))

    # Remove downloaded agent package
    runner.run('rm {0}/agent.tar.gz'.format(agent_config['base_dir']))

    # Disable requiretty so sudo works over non-interactive SSH sessions
    if agent_config['disable_requiretty']:
        try:
            disable_requiretty_script_url = get_agent_resource_url(
                ctx, agent_config, 'disable_requiretty_script')
        except Exception:
            # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
            raise NonRecoverableError(
                'failed to retrieve disable-requiretty script url')
        ctx.logger.debug("Removing requiretty in sudoers file")
        disable_requiretty_script = '{0}/disable-requiretty.sh'.format(
            agent_config['base_dir'])
        r = download_resource_on_host(
            ctx.logger, runner, disable_requiretty_script_url,
            disable_requiretty_script)
        if not r:
            raise NonRecoverableError(
                'failed to download disable-requiretty script')
        runner.run('chmod +x {0}'.format(disable_requiretty_script))
        runner.run('sudo {0}'.format(disable_requiretty_script))
@operation
@init_worker_installer
def uninstall(ctx, runner, agent_config, **kwargs):
    """Remove the worker's init/config files and its base directory from the host."""
    ctx.logger.info(
        'Uninstalling celery worker [cloudify_agent={0}]'.format(agent_config))
    delete_files_if_exist(
        ctx, agent_config, runner,
        [agent_config['init_file'], agent_config['config_file']])
    delete_folders_if_exist(
        ctx, agent_config, runner, [agent_config['base_dir']])
def delete_files_if_exist(ctx, agent_config, runner, files):
    """Delete each given file on the host via `sudo rm`, logging any that are absent."""
    not_found = []
    for path in files:
        if runner.exists(path):
            runner.run("sudo rm {0}".format(path))
        else:
            not_found.append(path)
    if not_found:
        ctx.logger.debug(
            "Could not find files {0} while trying to uninstall worker {1}"
            .format(not_found, agent_config['name']))
def delete_folders_if_exist(ctx, agent_config, runner, folders):
    """Delete each given folder on the host via `sudo rm -rf`, logging any that are absent."""
    not_found = []
    for path in folders:
        if runner.exists(path):
            runner.run('sudo rm -rf {0}'.format(path))
        else:
            not_found.append(path)
    if not_found:
        ctx.logger.debug(
            'Could not find folders {0} while trying to uninstall worker {1}'
            .format(not_found, agent_config['name']))
@operation
@init_worker_installer
def stop(ctx, runner, agent_config, **kwargs):
    """Stop the worker's celeryd service if its init file is present."""
    ctx.logger.info("stopping celery worker {0}".format(agent_config['name']))
    if not runner.exists(agent_config['init_file']):
        ctx.logger.debug(
            "Could not find any workers with name {0}. nothing to do."
            .format(agent_config["name"]))
        return
    runner.run(
        "sudo service celeryd-{0} stop".format(agent_config["name"]))
@operation
@init_worker_installer
def start(ctx, runner, agent_config, **kwargs):
    """Start the worker's celeryd service and block until it reports as started."""
    ctx.logger.info("starting celery worker {0}".format(agent_config['name']))
    runner.run("sudo service celeryd-{0} start".format(agent_config["name"]))
    _wait_for_started(runner, agent_config)
@operation
@init_worker_installer
def restart(ctx, runner, agent_config, **kwargs):
    """Restart the worker's celeryd service and wait until it is up again."""
    ctx.logger.info(
        "restarting celery worker {0}".format(agent_config['name']))
    restart_celery_worker(runner, agent_config)
def get_agent_ip(ctx, agent_config):
    """Return the manager's IP when running on the management worker, else the agent host IP."""
    return utils.get_manager_ip() if is_on_management_worker(ctx) \
        else agent_config['host']
def create_celery_configuration(ctx, runner, agent_config, resource_loader):
    """Render and install the celery config and init files on the agent host.

    :param ctx: cloudify operation context
    :param runner: remote command runner for the agent host
    :param agent_config: agent configuration dict
    :param resource_loader: callable that loads a template by path
        (e.g. manager.get_resource)
    """
    create_celery_includes_file(ctx, runner, agent_config)
    loader = jinja2.FunctionLoader(resource_loader)
    env = jinja2.Environment(loader=loader)
    config_template = env.get_template(CELERY_CONFIG_PATH.format(
        agent_config['distro']))
    config_template_values = {
        'includes_file_path': agent_config['includes_file'],
        'celery_base_dir': agent_config['celery_base_dir'],
        'worker_modifier': agent_config['name'],
        'management_ip': utils.get_manager_ip(),
        # the broker is local when this is the management worker itself
        'broker_ip': '127.0.0.1' if is_on_management_worker(ctx)
        else utils.get_manager_ip(),
        'agent_ip': get_agent_ip(ctx, agent_config),
        'celery_user': agent_config['user'],
        'celery_group': agent_config['user'],
        'worker_autoscale': '{0},{1}'.format(agent_config['max_workers'],
                                             agent_config['min_workers'])
    }

    ctx.logger.debug(
        'Populating celery config jinja2 template with the following '
        'values: {0}'.format(config_template_values))
    config = config_template.render(config_template_values)
    init_template = env.get_template(CELERY_INIT_PATH.format(
        agent_config['distro']))
    init_template_values = {
        'celery_base_dir': agent_config['celery_base_dir'],
        'worker_modifier': agent_config['name']
    }

    ctx.logger.debug(
        'Populating celery init.d jinja2 template with the following '
        'values: {0}'.format(init_template_values))
    init = init_template.render(init_template_values)

    ctx.logger.debug(
        'Creating celery config and init files [cloudify_agent={0}]'.format(
            agent_config))
    # runner.put(agent_config['config_file'], config, use_sudo=True)
    # runner.put(agent_config['init_file'], init, use_sudo=True)
    celery_config_url = get_agent_resource_url(
        ctx, agent_config, 'celery_config_path')
    celery_init_url = get_agent_resource_url(
        ctx, agent_config, 'celery_init_path')
    # NOTE(review): the rendered template text (`config`/`init`) is passed as the
    # *destination_path* argument of download_resource_on_host, whereas the
    # commented-out runner.put calls wrote it to agent_config['config_file'] /
    # ['init_file']. This looks suspicious -- confirm the intended destination.
    r = download_resource_on_host(
        ctx.logger, runner, celery_config_url, config)
    if not r:
        raise NonRecoverableError('failed to download celery config file')
    r = download_resource_on_host(ctx.logger, runner, celery_init_url, init)
    if not r:
        raise NonRecoverableError('failed to download celery init file')
def create_celery_includes_file(ctx, runner, agent_config):
    """Write the INCLUDES file listing the task modules celery must import."""
    includes = get_celery_includes_list()
    content = 'INCLUDES={0}\n'.format(','.join(includes))
    runner.put(agent_config['includes_file'], content)
    ctx.logger.debug('Created celery includes file [file=%s, content=%s]',
                     agent_config['includes_file'],
                     includes)
def worker_exists(runner, agent_config):
    """A worker is considered installed if its base directory exists on the host."""
    return runner.exists(agent_config['base_dir'])
def restart_celery_worker(runner, agent_config):
    """Restart the worker's celeryd service and wait until it reports as started."""
    runner.run("sudo service celeryd-{0} restart".format(
        agent_config['name']))
    _wait_for_started(runner, agent_config)
def _verify_no_celery_error(runner, agent_config):
    """Raise NonRecoverableError if the worker left a crash report on the host."""
    error_file = os.path.join(
        agent_config['base_dir'], 'work/celery_error.out')
    # The worker's custom exception handler (see celery.py) dumps any uncaught
    # exception into this file.
    if not runner.exists(error_file):
        return
    output = runner.get(error_file)
    runner.run('rm {0}'.format(error_file))
    raise NonRecoverableError(
        'Celery worker failed to start:\n{0}'.format(output))
def _wait_for_started(runner, agent_config):
    """Poll celery until the worker reports stats, or raise on timeout/crash."""
    _verify_no_celery_error(runner, agent_config)
    worker_name = 'celery.{}'.format(agent_config['name'])
    inspect = celery_client.control.inspect(destination=[worker_name])
    wait_started_timeout = agent_config['wait_started_timeout']
    deadline = time.time() + wait_started_timeout
    interval = agent_config['wait_started_interval']
    while time.time() < deadline:
        # A non-empty stats payload means the worker is up
        if (inspect.stats() or {}).get(worker_name):
            return
        time.sleep(interval)
        # Bail out early if the worker crashed while we were waiting
        _verify_no_celery_error(runner, agent_config)
    raise NonRecoverableError('Failed starting agent. waited for {} seconds.'
                              .format(wait_started_timeout))
fixed agent resource reference
#########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'elip'
import time
import os
import jinja2
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from cloudify.celery import celery as celery_client
from cloudify import manager
from cloudify import utils
from worker_installer import init_worker_installer
from worker_installer.utils import is_on_management_worker
# Dotted module paths of the task modules every agent's celery app must import
PLUGIN_INSTALLER_PLUGIN_PATH = 'plugin_installer.tasks'
AGENT_INSTALLER_PLUGIN_PATH = 'worker_installer.tasks'
WINDOWS_AGENT_INSTALLER_PLUGIN_PATH = 'windows_agent_installer.tasks'
WINDOWS_PLUGIN_INSTALLER_PLUGIN_PATH = 'windows_plugin_installer.tasks'
DEFAULT_WORKFLOWS_PLUGIN_PATH = 'cloudify.plugins.workflows'

CELERY_INCLUDES_LIST = [
    AGENT_INSTALLER_PLUGIN_PATH, PLUGIN_INSTALLER_PLUGIN_PATH,
    WINDOWS_AGENT_INSTALLER_PLUGIN_PATH, WINDOWS_PLUGIN_INSTALLER_PLUGIN_PATH,
    DEFAULT_WORKFLOWS_PLUGIN_PATH
]

# Default fileserver path templates; '{0}' is filled with the target distro
CELERY_CONFIG_PATH = '/packages/templates/{0}-celeryd-cloudify.conf.template'
CELERY_INIT_PATH = '/packages/templates/{0}-celeryd-cloudify.init.template'
AGENT_PACKAGE_PATH = '/packages/agents/{0}-agent.tar.gz'
DISABLE_REQUIRETTY_SCRIPT_PATH = \
    '/packages/scripts/{0}-agent-disable-requiretty.sh'
def get_agent_resource_url(ctx, agent_config, resource):
    """Return the manager fileserver URL for an agent resource.

    :param ctx: cloudify context (used for the blueprint id)
    :param agent_config: agent configuration dict
    :param resource: resource key (e.g. 'agent_package_path')
    :return: the full URL of the resource on the fileserver
    """
    if agent_config.get(resource):
        # An explicit path was configured; it lives under the blueprint's folder
        return '{0}/{1}/{2}'.format(
            utils.get_manager_file_server_blueprints_root_url(),
            ctx.blueprint.id, agent_config[resource])
    else:
        resource_path = globals()[resource.upper()]
        # BUG FIX: these are URLs, not filesystem paths. os.path.join drops the
        # base entirely when the second component starts with '/' -- and all the
        # default *_PATH templates do -- so the manager URL was being discarded.
        # Plain string formatting preserves the base.
        return '{0}{1}'.format(
            utils.get_manager_file_server_url(),
            resource_path.format(agent_config['distro']))
def get_celery_includes_list():
    """Return the list of task modules celery should import on the agent."""
    return CELERY_INCLUDES_LIST
def download_resource_on_host(logger, runner, url, destination_path):
    """Download a resource from the fileserver onto the agent's host.

    Tries wget first, then curl. Returns the command result of the download,
    or a falsy value when neither tool is available.

    :param logger: logger exposing debug()/warn()
    :param runner: command runner; run() results expose a `succeeded` flag
    :param url: the URL to fetch
    :param destination_path: target path for the file on the host
    """
    logger.debug('attempting to download {0} to {1}'.format(
        url, destination_path))
    logger.debug('checking if wget exists on the host machine')
    r = runner.run('which wget')
    if r.succeeded:
        logger.debug('wget-ing {0} to {1}'.format(url, destination_path))
        return runner.run('wget -T 30 {0} -O {1}'.format(
            url, destination_path))
    logger.debug('checking if curl exists on the host machine')
    r = runner.run('which curl')
    if r.succeeded:
        logger.debug('curl-ing {0} to {1}'.format(url, destination_path))
        # BUG FIX: curl writes to an explicit path with lowercase -o; uppercase -O
        # takes no argument (it saves under the remote name), so the previous
        # 'curl {url} -O {dest}' treated the destination as a second URL.
        return runner.run('curl {0} -o {1}'.format(
            url, destination_path))
    logger.warn('could not download resource')
    # Neither tool is available; r is the failed `which curl` result (falsy)
    return r.succeeded
@operation
@init_worker_installer
def install(ctx, runner, agent_config, **kwargs):
    """Install the celery agent worker on the target host.

    Downloads and extracts the agent package, repairs the bundled virtualenv,
    writes the celery configuration, and optionally disables requiretty.
    """
    # BUG FIX: the error handling here had been commented out, leaving any
    # resource-lookup failure to surface as a raw exception; restore it.
    try:
        agent_package_url = get_agent_resource_url(
            ctx, agent_config, 'agent_package_path')
    except Exception as ex:
        raise NonRecoverableError(
            'failed to retrieve agent package url ({0})'.format(ex))

    ctx.logger.debug("Pinging agent installer target")
    runner.ping()

    ctx.logger.info(
        "installing celery worker {0}".format(agent_config['name']))
    if worker_exists(runner, agent_config):
        ctx.logger.info("Worker for deployment {0} is already installed. "
                        "nothing to do.".format(ctx.deployment_id))
        return

    ctx.logger.info(
        'Installing celery worker [cloudify_agent={0}]'.format(agent_config))
    runner.run('mkdir -p {0}'.format(agent_config['base_dir']))

    ctx.logger.debug(
        'Downloading agent package from: {0}'.format(agent_package_url))
    r = download_resource_on_host(
        ctx.logger, runner, agent_package_url, '{0}/{1}'.format(
            agent_config['base_dir'], 'agent.tar.gz'))
    if not r:
        raise NonRecoverableError('failed to download agent package')

    ctx.logger.debug('extracting agent package on host')
    # BUG FIX: the format string referenced index {2} with only two arguments,
    # which raised IndexError at runtime; the extraction directory is argument {1}.
    runner.run(
        'tar xzvf {0}/agent.tar.gz --strip=2 -C {1}'.format(
            agent_config['base_dir'], agent_config['base_dir']))

    ctx.logger.debug('configuring virtualenv')
    for link in ['archives', 'bin', 'include', 'lib']:
        link_path = '{0}/env/local/{1}'.format(agent_config['base_dir'], link)
        try:
            runner.run('unlink {0}'.format(link_path))
            runner.run('ln -s {0}/env/{1} {2}'.format(
                agent_config['base_dir'], link, link_path))
        except Exception as e:
            ctx.logger.warn('Error processing link: {0} [error={1}] - '
                            'ignoring..'.format(link_path, str(e)))

    create_celery_configuration(
        ctx, runner, agent_config, manager.get_resource)
    runner.run('sudo chmod +x {0}'.format(agent_config['init_file']))

    # This is for fixing virtualenv included in package paths
    runner.run("sed -i '1 s|.*/bin/python.*$|#!{0}/env/bin/python|g' "
               "{0}/env/bin/*".format(agent_config['base_dir']))

    # Remove downloaded agent package
    runner.run('rm {0}/agent.tar.gz'.format(agent_config['base_dir']))

    # Disable requiretty so sudo works over non-interactive SSH sessions
    if agent_config['disable_requiretty']:
        try:
            disable_requiretty_script_url = get_agent_resource_url(
                ctx, agent_config, 'disable_requiretty_script')
        except Exception:
            # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
            raise NonRecoverableError(
                'failed to retrieve disable-requiretty script url')
        ctx.logger.debug("Removing requiretty in sudoers file")
        disable_requiretty_script = '{0}/disable-requiretty.sh'.format(
            agent_config['base_dir'])
        r = download_resource_on_host(
            ctx.logger, runner, disable_requiretty_script_url,
            disable_requiretty_script)
        if not r:
            raise NonRecoverableError(
                'failed to download disable-requiretty script')
        runner.run('chmod +x {0}'.format(disable_requiretty_script))
        runner.run('sudo {0}'.format(disable_requiretty_script))
@operation
@init_worker_installer
def uninstall(ctx, runner, agent_config, **kwargs):
    """Remove the worker's init/config files and its base directory."""
    ctx.logger.info(
        'Uninstalling celery worker [cloudify_agent={0}]'.format(agent_config))
    # Individual files first, then the whole base directory tree.
    delete_files_if_exist(
        ctx, agent_config, runner,
        [agent_config['init_file'], agent_config['config_file']])
    delete_folders_if_exist(
        ctx, agent_config, runner, [agent_config['base_dir']])
def delete_files_if_exist(ctx, agent_config, runner, files):
    """Delete each file in *files* on the remote host.

    Files that are absent are not an error: they are collected and
    reported in a single debug log line instead.
    """
    missing = []
    for path in files:
        if not runner.exists(path):
            missing.append(path)
            continue
        runner.run("sudo rm {0}".format(path))
    if missing:
        ctx.logger.debug(
            "Could not find files {0} while trying to uninstall worker {1}"
            .format(missing, agent_config['name']))
def delete_folders_if_exist(ctx, agent_config, runner, folders):
    """Recursively delete each folder in *folders* on the remote host.

    Folders that are absent are not an error: they are collected and
    reported in a single debug log line instead.
    """
    missing = []
    for path in folders:
        if not runner.exists(path):
            missing.append(path)
            continue
        runner.run('sudo rm -rf {0}'.format(path))
    if missing:
        ctx.logger.debug(
            'Could not find folders {0} while trying to uninstall worker {1}'
            .format(missing, agent_config['name']))
@operation
@init_worker_installer
def stop(ctx, runner, agent_config, **kwargs):
    """Stop the worker's init service, if its init script is installed."""
    worker_name = agent_config['name']
    ctx.logger.info("stopping celery worker {0}".format(worker_name))
    if not runner.exists(agent_config['init_file']):
        # No init script on the host: nothing was ever installed here.
        ctx.logger.debug(
            "Could not find any workers with name {0}. nothing to do."
            .format(worker_name))
        return
    runner.run(
        "sudo service celeryd-{0} stop".format(worker_name))
@operation
@init_worker_installer
def start(ctx, runner, agent_config, **kwargs):
    """Start the worker's init service and wait until celery reports it up."""
    worker_name = agent_config['name']
    ctx.logger.info("starting celery worker {0}".format(worker_name))
    runner.run("sudo service celeryd-{0} start".format(worker_name))
    # Block until the worker registers with celery (raises on timeout).
    _wait_for_started(runner, agent_config)
@operation
@init_worker_installer
def restart(ctx, runner, agent_config, **kwargs):
    """Restart the celery worker and wait for it to report started."""
    worker_name = agent_config['name']
    ctx.logger.info(
        "restarting celery worker {0}".format(worker_name))
    restart_celery_worker(runner, agent_config)
def get_agent_ip(ctx, agent_config):
    """Return the IP address the agent is reachable at.

    Management workers live on the manager machine itself, so for them
    the manager's IP is returned; otherwise the configured agent host.
    """
    on_manager = is_on_management_worker(ctx)
    return utils.get_manager_ip() if on_manager else agent_config['host']
def create_celery_configuration(ctx, runner, agent_config, resource_loader):
    """Render and place the worker's celery config and init.d files.

    Renders distro-specific jinja2 templates (looked up via
    *resource_loader*) and materializes them on the target host, after
    first creating the celery includes file.

    :raises NonRecoverableError: if either rendered file cannot be
        placed on the host.
    """
    create_celery_includes_file(ctx, runner, agent_config)
    # Templates are fetched through the manager's resource loader.
    loader = jinja2.FunctionLoader(resource_loader)
    env = jinja2.Environment(loader=loader)
    config_template = env.get_template(CELERY_CONFIG_PATH.format(
        agent_config['distro']))
    config_template_values = {
        'includes_file_path': agent_config['includes_file'],
        'celery_base_dir': agent_config['celery_base_dir'],
        'worker_modifier': agent_config['name'],
        'management_ip': utils.get_manager_ip(),
        # Management workers talk to the broker over loopback.
        'broker_ip': '127.0.0.1' if is_on_management_worker(ctx)
        else utils.get_manager_ip(),
        'agent_ip': get_agent_ip(ctx, agent_config),
        'celery_user': agent_config['user'],
        'celery_group': agent_config['user'],
        'worker_autoscale': '{0},{1}'.format(agent_config['max_workers'],
                                             agent_config['min_workers'])
    }
    ctx.logger.debug(
        'Populating celery config jinja2 template with the following '
        'values: {0}'.format(config_template_values))
    config = config_template.render(config_template_values)
    init_template = env.get_template(CELERY_INIT_PATH.format(
        agent_config['distro']))
    init_template_values = {
        'celery_base_dir': agent_config['celery_base_dir'],
        'worker_modifier': agent_config['name']
    }
    ctx.logger.debug(
        'Populating celery init.d jinja2 template with the following '
        'values: {0}'.format(init_template_values))
    init = init_template.render(init_template_values)
    ctx.logger.debug(
        'Creating celery config and init files [cloudify_agent={0}]'.format(
            agent_config))
    # runner.put(agent_config['config_file'], config, use_sudo=True)
    # runner.put(agent_config['init_file'], init, use_sudo=True)
    celery_config_url = get_agent_resource_url(
        ctx, agent_config, 'celery_config_path')
    celery_init_url = get_agent_resource_url(
        ctx, agent_config, 'celery_init_path')
    # NOTE(review): elsewhere in this module the 4th argument to
    # download_resource_on_host is a destination *path*, but here the
    # rendered template *content* is passed — confirm the intended
    # signature of download_resource_on_host.
    r = download_resource_on_host(
        ctx.logger, runner, celery_config_url, config)
    if not r:
        raise NonRecoverableError('failed to download celery config file')
    r = download_resource_on_host(ctx.logger, runner, celery_init_url, init)
    if not r:
        raise NonRecoverableError('failed to download celery init file')
def create_celery_includes_file(ctx, runner, agent_config):
    """Write the INCLUDES=... file listing the worker's task modules.

    :raises NonRecoverableError: if the file cannot be placed on the host.
    """
    # build initial includes
    includes_list = get_celery_includes_list()
    # NOTE(review): the arguments look swapped relative to other call
    # sites — here the includes file *path* is passed where a URL goes
    # and the file *content* where a destination path goes. Confirm
    # download_resource_on_host's signature.
    r = download_resource_on_host(
        ctx.logger, runner, agent_config['includes_file'],
        'INCLUDES={0}\n'.format(','.join(includes_list)))
    if not r:
        raise NonRecoverableError('failed to download celery includes file')
    ctx.logger.debug('Created celery includes file [file=%s, content=%s]',
                     agent_config['includes_file'],
                     includes_list)
def worker_exists(runner, agent_config):
    """Tell whether the worker's base directory is present on the host."""
    base_dir = agent_config['base_dir']
    return runner.exists(base_dir)
def restart_celery_worker(runner, agent_config):
    """Restart the worker's init service and wait for it to report started."""
    worker_name = agent_config['name']
    runner.run("sudo service celeryd-{0} restart".format(worker_name))
    _wait_for_started(runner, agent_config)
def _verify_no_celery_error(runner, agent_config):
celery_error_out = os.path.join(
agent_config['base_dir'], 'work/celery_error.out')
# this means the celery worker had an uncaught
# exception and it wrote its content
# to the file above because of our custom exception handler (see celery.py)
if runner.exists(celery_error_out):
output = runner.get(celery_error_out)
runner.run('rm {0}'.format(celery_error_out))
raise NonRecoverableError(
'Celery worker failed to start:\n{0}'.format(output))
def _wait_for_started(runner, agent_config):
    """Block until the celery worker registers, or raise on timeout.

    Polls ``celery_client.control.inspect`` for the worker's stats every
    ``wait_started_interval`` seconds, checking between polls that the
    worker did not crash and leave an error dump behind.

    :raises NonRecoverableError: if the worker dumped an error or did
        not show up within ``wait_started_timeout`` seconds.
    """
    # Fail fast if the worker already crashed during service start.
    _verify_no_celery_error(runner, agent_config)
    worker_name = 'celery.{}'.format(agent_config['name'])
    inspect = celery_client.control.inspect(destination=[worker_name])
    wait_started_timeout = agent_config['wait_started_timeout']
    timeout = time.time() + wait_started_timeout
    interval = agent_config['wait_started_interval']
    while time.time() < timeout:
        # stats() returns None when no worker answered the broadcast.
        stats = (inspect.stats() or {}).get(worker_name)
        if stats:
            return
        time.sleep(interval)
        # Re-check for a crash dump after each unsuccessful poll.
        _verify_no_celery_error(runner, agent_config)
    raise NonRecoverableError('Failed starting agent. waited for {} seconds.'
                              .format(wait_started_timeout))
|
from datetime import datetime
from peewee import *
# Tell peewee what the database file is
# We use capital letters for this variable name according to custom, as it indicates
# something that will not change
DATABASE = "guestbook.db"
DATE = datetime.now().strftime("%H:%M - %d/%m/%y")
# Tell peewee to create a sqlite database called guestbook.db
database = SqliteDatabase(DATABASE)
# All models will inherit from this BaseModel, it saves us defining the database
# to use every time we create a new model
class BaseModel(Model):
    """Base model binding every subclass to the guestbook database."""
    class Meta:
        # peewee reads Meta.database to know which database to persist to
        database = database
# This is the model that lists all the information the guestbook form will collect
class Post(BaseModel):
    """A single guestbook entry submitted through the form."""
    name = CharField()
    email = CharField(null=True)    # optional
    website = CharField(null=True)  # optional
    comment = TextField()
    date = DateTimeField()
# We should only need to run the create_tables function once (and therefore,
# this database.py script once), each time a new model is created
# If a new field is added to the model, drop and recreate the table
def create_tables():
    """Create the Post table (run once, or after dropping a changed table)."""
    database.connect()
    database.create_tables([Post])
    database.close()
# create_tables()
# Connect to the database so we can insert rows
database.connect()

# Add a couple of example posts to the database.
# The backslash line continuations were dropped: inside parentheses
# Python continues lines implicitly.
post_one = Post.create(
    name="Jamiroquai",
    website="http://www.jamiroquai.co.uk",
    comment="Charlotte this guestbook is off the chain! You are 2kool4skool. Love Jam.",
    date=DATE)
post_two = Post.create(name="Satan", comment="666lol", date=DATE)

# Close the database
database.close()
# The old message claimed a table was created; this run populates the
# database, so report that instead (matches the revised wording).
print("Created the database!")
Update database.py comments
# Peewee allows us to worth with the database
from peewee import *
# Import datetime to use nicely formatted dates in our database
from datetime import datetime
# Tell peewee what the database file is
DATABASE = "guestbook.db"
# The dates will look like: 20:52 - 28/04/1991
DATE = datetime.now().strftime("%H:%M - %d/%m/%y")
# Tell peewee to create a sqlite database called guestbook.db
database = SqliteDatabase(DATABASE)
# All models will inherit from this BaseModel, it saves us defining the database
# to use every time we create a new model
class BaseModel(Model):
    """Base model binding every subclass to the guestbook database."""
    class Meta:
        # peewee reads Meta.database to know which database to persist to
        database = database
# This is the model that lists all the information the guestbook form will collect
class Post(BaseModel):
    """A single guestbook entry submitted through the form."""
    name = CharField()
    email = CharField(null=True)    # optional
    website = CharField(null=True)  # optional
    comment = TextField()
    date = DateTimeField()
# We should only need to run the create_tables function once (and therefore,
# this database.py script once), each time a new model is created
# If a new field is added to the model, drop and recreate the table
def create_tables():
    """Create the Post table (run once, or after dropping a changed table)."""
    database.connect()
    database.create_tables([Post])
    database.close()
# create_tables()
# Open a connection for the insert statements below
database.connect()

# Seed the guestbook with some dummy posts, feel free to change or
# delete this code (implicit continuation inside the parentheses
# replaces the old backslashes)
post_one = Post.create(
    name="Jamiroquai",
    website="http://www.jamiroquai.co.uk",
    comment="Charlotte this guestbook is off the chain! You are 2kool4skool. Love Jam.",
    date=DATE)
post_two = Post.create(name="Satan", comment="666lol", date=DATE)

# Close the database
database.close()
print("Created the database!")
|
'''
Image
=====
The :class:`Image` widget is used to display an image::
wimg = Image(source='mylogo.png')
Asynchronous Loading
--------------------
To load an image asynchronously (for example from an external webserver), use
the :class:`AsyncImage` subclass::
aimg = AsyncImage(source='http://mywebsite.com/logo.png')
This can be useful as it prevents your application from waiting until the image
is loaded. If you want to display large images or retrieve them from URL's,
using :class:`AsyncImage` will allow these resources to be retrieved on a
background thread without blocking your application.
Alignment
---------
By default, the image is centered and fits inside the widget bounding box.
If you don't want that, you can set `allow_stretch` to True and `keep_ratio`
to False.
You can also inherit from Image and create your own style.
For example, if you want your image to be greater than the size of your widget,
you could do::
class FullImage(Image):
pass
And in your kivy language file::
<-FullImage>:
canvas:
Color:
rgb: (1, 1, 1)
Rectangle:
texture: self.texture
size: self.width + 20, self.height + 20
pos: self.x - 10, self.y - 10
'''
__all__ = ('Image', 'AsyncImage')
from kivy.uix.widget import Widget
from kivy.core.image import Image as CoreImage
from kivy.resources import resource_find
from kivy.properties import StringProperty, ObjectProperty, ListProperty, \
AliasProperty, BooleanProperty, NumericProperty
from kivy.logger import Logger
# delayed imports
Loader = None
class Image(Widget):
    '''Image class, see module documentation for more information.
    '''

    source = StringProperty(None)
    '''Filename / source of your image.

    :attr:`source` is a :class:`~kivy.properties.StringProperty` and
    defaults to None.
    '''

    texture = ObjectProperty(None, allownone=True)
    '''Texture object of the image. The texture represents the original, loaded
    image texture. It is stretched and positioned during rendering according to
    the :attr:`allow_stretch` and :attr:`keep_ratio` properties.

    Depending of the texture creation, the value will be a
    :class:`~kivy.graphics.texture.Texture` or a
    :class:`~kivy.graphics.texture.TextureRegion` object.

    :attr:`texture` is a :class:`~kivy.properties.ObjectProperty` and defaults
    to None.
    '''

    texture_size = ListProperty([0, 0])
    '''Texture size of the image. This represents the original, loaded image
    texture size.

    .. warning::

        The texture size is set after the texture property. So if you listen to
        the change on :attr:`texture`, the property texture_size will not be
        up-to-date. Use self.texture.size instead.
    '''

    def get_image_ratio(self):
        # Width / height of the loaded texture; 1.0 while nothing is loaded.
        if self.texture:
            return self.texture.width / float(self.texture.height)
        return 1.

    mipmap = BooleanProperty(False)
    '''Indicate if you want OpenGL mipmapping to be applied to the texture.
    Read :ref:`mipmap` for more information.

    .. versionadded:: 1.0.7

    :attr:`mipmap` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to False.
    '''

    image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))
    '''Ratio of the image (width / float(height)).

    :attr:`image_ratio` is a :class:`~kivy.properties.AliasProperty` and is
    read-only.
    '''

    color = ListProperty([1, 1, 1, 1])
    '''Image color, in the format (r, g, b, a). This attribute can be used to
    'tint' an image. Be careful: if the source image is not gray/white, the
    color will not really work as expected.

    .. versionadded:: 1.0.6

    :attr:`color` is a :class:`~kivy.properties.ListProperty` and defaults to
    [1, 1, 1, 1].
    '''

    allow_stretch = BooleanProperty(False)
    '''If True, the normalized image size will be maximized to fit in the image
    box. Otherwise, if the box is too tall, the image will not be
    stretched more than 1:1 pixels.

    .. versionadded:: 1.0.7

    :attr:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False.
    '''

    keep_ratio = BooleanProperty(True)
    '''If False along with allow_stretch being True, the normalized image
    size will be maximized to fit in the image box and ignores the aspect
    ratio of the image.
    Otherwise, if the box is too tall, the image will not be stretched more
    than 1:1 pixels.

    .. versionadded:: 1.0.8

    :attr:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to True.
    '''

    keep_data = BooleanProperty(False)
    '''If True, the underlaying _coreimage will store the raw image data.
    This is useful when performing pixel based collision detection.

    .. versionadded:: 1.3.0

    :attr:`keep_data` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False.
    '''

    anim_delay = NumericProperty(.25)
    '''Delay the animation if the image is sequenced (like an animated gif).
    If anim_delay is set to -1, the animation will be stopped.

    .. versionadded:: 1.0.8

    :attr:`anim_delay` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.25 (4 FPS).
    '''

    anim_loop = NumericProperty(0)
    '''Number of loops to play then stop animating. 0 means keep animating.

    .. versionadded:: 1.9.0

    :attr:`anim_loop` is a :class:`~kivy.properties.NumericProperty` defaults
    to 0.
    '''

    nocache = BooleanProperty(False)
    '''If this property is set True, the image will not be added to the
    internal cache. The cache will simply ignore any calls trying to
    append the core image.

    .. versionadded:: 1.6.0

    :attr:`nocache` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to False.
    '''

    def get_norm_image_size(self):
        if not self.texture:
            return self.size
        ratio = self.image_ratio
        w, h = self.size
        tw, th = self.texture.size
        # ensure that the width is always maximized to the container width
        if self.allow_stretch:
            if not self.keep_ratio:
                return w, h
            iw = w
        else:
            iw = min(w, tw)
        # calculate the appropriate height
        ih = iw / ratio
        # if the height is too high, take the height of the container
        # and calculate appropriate width. no need to test further. :)
        if ih > h:
            if self.allow_stretch:
                ih = h
            else:
                ih = min(h, th)
            iw = ih * ratio
        return iw, ih

    norm_image_size = AliasProperty(get_norm_image_size, None, bind=(
        'texture', 'size', 'image_ratio', 'allow_stretch'))
    '''Normalized image size within the widget box.

    This size will always fit the widget size and will preserve the image
    ratio.

    :attr:`norm_image_size` is a :class:`~kivy.properties.AliasProperty` and is
    read-only.
    '''

    def __init__(self, **kwargs):
        self._coreimage = None
        self._loops = 0
        super(Image, self).__init__(**kwargs)
        fbind = self.fbind
        update = self.texture_update
        fbind('source', update)
        fbind('mipmap', update)
        if self.source:
            update()
        # Dispatch the anim_delay handler once by hand for the initial value.
        self.on_anim_delay(self, kwargs.get('anim_delay', .25))

    def texture_update(self, *largs):
        if not self.source:
            self.texture = None
        else:
            filename = resource_find(self.source)
            self._loops = 0
            if filename is None:
                # Fixed: the message had no '{filename}' placeholder, so
                # the offending source was silently dropped from the log.
                return Logger.error(
                    'Image: Error reading file {filename}'.format(
                        filename=self.source))
            mipmap = self.mipmap
            if self._coreimage is not None:
                self._coreimage.unbind(on_texture=self._on_tex_change)
            try:
                self._coreimage = ci = CoreImage(filename, mipmap=mipmap,
                                                 anim_delay=self.anim_delay,
                                                 keep_data=self.keep_data,
                                                 nocache=self.nocache)
            except Exception:
                # Narrowed from a bare except: only loading failures should
                # be swallowed, not e.g. KeyboardInterrupt/SystemExit.
                self._coreimage = ci = None
            if ci:
                ci.bind(on_texture=self._on_tex_change)
                self.texture = ci.texture

    def on_anim_delay(self, instance, value):
        # Fixed: this previously wrote 'self._loop' (a typo that created a
        # new, never-read attribute) instead of resetting the loop counter
        # used by __init__ and _on_tex_change.
        self._loops = 0
        if self._coreimage is None:
            return
        self._coreimage.anim_delay = value
        if value < 0:
            self._coreimage.anim_reset(False)

    def on_texture(self, instance, value):
        if value is not None:
            self.texture_size = list(value.size)

    def _on_tex_change(self, *largs):
        # update texture from core image
        self.texture = self._coreimage.texture
        ci = self._coreimage
        # Count completed loops and stop the animation once anim_loop
        # loops have played.
        if self.anim_loop and ci._anim_index == len(ci._image.textures) - 1:
            self._loops += 1
            if self.anim_loop == self._loops:
                ci.anim_reset(False)
                self._loops = 0

    def reload(self):
        '''Reload image from disk. This facilitates re-loading of
        images from disk in case the image content changes.

        .. versionadded:: 1.3.0

        Usage::

            im = Image(source = '1.jpg')
            # -- do something --
            im.reload()
            # image will be re-loaded from disk
        '''
        try:
            self._coreimage.remove_from_cache()
        except AttributeError:
            pass
        # Re-assigning source forces texture_update to run again.
        olsource = self.source
        self.source = ''
        self.source = olsource

    def on_nocache(self, *args):
        if self.nocache and self._coreimage:
            self._coreimage.remove_from_cache()
            self._coreimage._nocache = True
class AsyncImage(Image):
    '''Asynchronous Image class. See the module documentation for more
    information.
    .. note::
        The AsyncImage is a specialized form of the Image class. You may
        want to refer to the :mod:`~kivy.loader` documentation and in
        particular, the :class:`~kivy.loader.ProxyImage` for more detail
        on how to handle events around asynchronous image loading.
    '''
    def __init__(self, **kwargs):
        self._coreimage = None
        super(AsyncImage, self).__init__(**kwargs)
        # Import the loader lazily, into the module-level placeholder,
        # the first time an AsyncImage is created.
        global Loader
        if not Loader:
            from kivy.loader import Loader
        self.fbind('source', self._load_source)
        if self.source:
            self._load_source()
        # Dispatch the anim_delay handler once by hand for the initial value.
        self.on_anim_delay(self, kwargs.get('anim_delay', .25))
    def _load_source(self, *args):
        # Start (or cancel) an asynchronous load whenever source changes.
        source = self.source
        if not source:
            # Source cleared: detach from the old core image and blank out.
            if self._coreimage is not None:
                self._coreimage.unbind(on_texture=self._on_tex_change)
            self.texture = None
            self._coreimage = None
        else:
            # Local (non-URI) sources still go through resource resolution.
            if not self.is_uri(source):
                source = resource_find(source)
            self._coreimage = image = Loader.image(source,
                nocache=self.nocache, mipmap=self.mipmap,
                anim_delay=self.anim_delay)
            image.bind(on_load=self._on_source_load)
            image.bind(on_texture=self._on_tex_change)
            self.texture = image.texture
    def _on_source_load(self, value):
        # Loader finished: swap in the real texture (value is presumably
        # the proxy image dispatching on_load — confirm against Loader).
        image = self._coreimage.image
        if not image:
            return
        self.texture = image.texture
    def is_uri(self, filename):
        # Anything with one of these scheme prefixes is a remote resource.
        proto = filename.split('://', 1)[0]
        return proto in ('http', 'https', 'ftp', 'smb')
    def _on_tex_change(self, *largs):
        if self._coreimage:
            self.texture = self._coreimage.texture
    def texture_update(self, *largs):
        # Loading is driven by the Loader, not by the base-class updater.
        pass
pep8 fix
'''
Image
=====
The :class:`Image` widget is used to display an image::
wimg = Image(source='mylogo.png')
Asynchronous Loading
--------------------
To load an image asynchronously (for example from an external webserver), use
the :class:`AsyncImage` subclass::
aimg = AsyncImage(source='http://mywebsite.com/logo.png')
This can be useful as it prevents your application from waiting until the image
is loaded. If you want to display large images or retrieve them from URL's,
using :class:`AsyncImage` will allow these resources to be retrieved on a
background thread without blocking your application.
Alignment
---------
By default, the image is centered and fits inside the widget bounding box.
If you don't want that, you can set `allow_stretch` to True and `keep_ratio`
to False.
You can also inherit from Image and create your own style.
For example, if you want your image to be greater than the size of your widget,
you could do::
class FullImage(Image):
pass
And in your kivy language file::
<-FullImage>:
canvas:
Color:
rgb: (1, 1, 1)
Rectangle:
texture: self.texture
size: self.width + 20, self.height + 20
pos: self.x - 10, self.y - 10
'''
__all__ = ('Image', 'AsyncImage')
from kivy.uix.widget import Widget
from kivy.core.image import Image as CoreImage
from kivy.resources import resource_find
from kivy.properties import StringProperty, ObjectProperty, ListProperty, \
AliasProperty, BooleanProperty, NumericProperty
from kivy.logger import Logger
# delayed imports
Loader = None
class Image(Widget):
    '''Image class, see module documentation for more information.
    '''

    source = StringProperty(None)
    '''Filename / source of your image.

    :attr:`source` is a :class:`~kivy.properties.StringProperty` and
    defaults to None.
    '''

    texture = ObjectProperty(None, allownone=True)
    '''Texture object of the image. The texture represents the original, loaded
    image texture. It is stretched and positioned during rendering according to
    the :attr:`allow_stretch` and :attr:`keep_ratio` properties.

    Depending of the texture creation, the value will be a
    :class:`~kivy.graphics.texture.Texture` or a
    :class:`~kivy.graphics.texture.TextureRegion` object.

    :attr:`texture` is a :class:`~kivy.properties.ObjectProperty` and defaults
    to None.
    '''

    texture_size = ListProperty([0, 0])
    '''Texture size of the image. This represents the original, loaded image
    texture size.

    .. warning::

        The texture size is set after the texture property. So if you listen to
        the change on :attr:`texture`, the property texture_size will not be
        up-to-date. Use self.texture.size instead.
    '''

    def get_image_ratio(self):
        # Width / height of the loaded texture; 1.0 while nothing is loaded.
        if self.texture:
            return self.texture.width / float(self.texture.height)
        return 1.

    mipmap = BooleanProperty(False)
    '''Indicate if you want OpenGL mipmapping to be applied to the texture.
    Read :ref:`mipmap` for more information.

    .. versionadded:: 1.0.7

    :attr:`mipmap` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to False.
    '''

    image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))
    '''Ratio of the image (width / float(height)).

    :attr:`image_ratio` is a :class:`~kivy.properties.AliasProperty` and is
    read-only.
    '''

    color = ListProperty([1, 1, 1, 1])
    '''Image color, in the format (r, g, b, a). This attribute can be used to
    'tint' an image. Be careful: if the source image is not gray/white, the
    color will not really work as expected.

    .. versionadded:: 1.0.6

    :attr:`color` is a :class:`~kivy.properties.ListProperty` and defaults to
    [1, 1, 1, 1].
    '''

    allow_stretch = BooleanProperty(False)
    '''If True, the normalized image size will be maximized to fit in the image
    box. Otherwise, if the box is too tall, the image will not be
    stretched more than 1:1 pixels.

    .. versionadded:: 1.0.7

    :attr:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False.
    '''

    keep_ratio = BooleanProperty(True)
    '''If False along with allow_stretch being True, the normalized image
    size will be maximized to fit in the image box and ignores the aspect
    ratio of the image.
    Otherwise, if the box is too tall, the image will not be stretched more
    than 1:1 pixels.

    .. versionadded:: 1.0.8

    :attr:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to True.
    '''

    keep_data = BooleanProperty(False)
    '''If True, the underlaying _coreimage will store the raw image data.
    This is useful when performing pixel based collision detection.

    .. versionadded:: 1.3.0

    :attr:`keep_data` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False.
    '''

    anim_delay = NumericProperty(.25)
    '''Delay the animation if the image is sequenced (like an animated gif).
    If anim_delay is set to -1, the animation will be stopped.

    .. versionadded:: 1.0.8

    :attr:`anim_delay` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.25 (4 FPS).
    '''

    anim_loop = NumericProperty(0)
    '''Number of loops to play then stop animating. 0 means keep animating.

    .. versionadded:: 1.9.0

    :attr:`anim_loop` is a :class:`~kivy.properties.NumericProperty` defaults
    to 0.
    '''

    nocache = BooleanProperty(False)
    '''If this property is set True, the image will not be added to the
    internal cache. The cache will simply ignore any calls trying to
    append the core image.

    .. versionadded:: 1.6.0

    :attr:`nocache` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to False.
    '''

    def get_norm_image_size(self):
        if not self.texture:
            return self.size
        ratio = self.image_ratio
        w, h = self.size
        tw, th = self.texture.size
        # ensure that the width is always maximized to the container width
        if self.allow_stretch:
            if not self.keep_ratio:
                return w, h
            iw = w
        else:
            iw = min(w, tw)
        # calculate the appropriate height
        ih = iw / ratio
        # if the height is too high, take the height of the container
        # and calculate appropriate width. no need to test further. :)
        if ih > h:
            if self.allow_stretch:
                ih = h
            else:
                ih = min(h, th)
            iw = ih * ratio
        return iw, ih

    norm_image_size = AliasProperty(get_norm_image_size, None, bind=(
        'texture', 'size', 'image_ratio', 'allow_stretch'))
    '''Normalized image size within the widget box.

    This size will always fit the widget size and will preserve the image
    ratio.

    :attr:`norm_image_size` is a :class:`~kivy.properties.AliasProperty` and is
    read-only.
    '''

    def __init__(self, **kwargs):
        self._coreimage = None
        self._loops = 0
        super(Image, self).__init__(**kwargs)
        fbind = self.fbind
        update = self.texture_update
        fbind('source', update)
        fbind('mipmap', update)
        if self.source:
            update()
        # Dispatch the anim_delay handler once by hand for the initial value.
        self.on_anim_delay(self, kwargs.get('anim_delay', .25))

    def texture_update(self, *largs):
        if not self.source:
            self.texture = None
        else:
            filename = resource_find(self.source)
            self._loops = 0
            if filename is None:
                # Fixed: the message had no '{filename}' placeholder, so
                # the offending source was silently dropped from the log.
                return Logger.error(
                    'Image: Error reading file {filename}'.format(
                        filename=self.source))
            mipmap = self.mipmap
            if self._coreimage is not None:
                self._coreimage.unbind(on_texture=self._on_tex_change)
            try:
                self._coreimage = ci = CoreImage(filename, mipmap=mipmap,
                                                 anim_delay=self.anim_delay,
                                                 keep_data=self.keep_data,
                                                 nocache=self.nocache)
            except Exception:
                # Narrowed from a bare except: only loading failures should
                # be swallowed, not e.g. KeyboardInterrupt/SystemExit.
                self._coreimage = ci = None
            if ci:
                ci.bind(on_texture=self._on_tex_change)
                self.texture = ci.texture

    def on_anim_delay(self, instance, value):
        # Fixed: this previously wrote 'self._loop' (a typo that created a
        # new, never-read attribute) instead of resetting the loop counter
        # used by __init__ and _on_tex_change.
        self._loops = 0
        if self._coreimage is None:
            return
        self._coreimage.anim_delay = value
        if value < 0:
            self._coreimage.anim_reset(False)

    def on_texture(self, instance, value):
        if value is not None:
            self.texture_size = list(value.size)

    def _on_tex_change(self, *largs):
        # update texture from core image
        self.texture = self._coreimage.texture
        ci = self._coreimage
        # Count completed loops and stop the animation once anim_loop
        # loops have played.
        if self.anim_loop and ci._anim_index == len(ci._image.textures) - 1:
            self._loops += 1
            if self.anim_loop == self._loops:
                ci.anim_reset(False)
                self._loops = 0

    def reload(self):
        '''Reload image from disk. This facilitates re-loading of
        images from disk in case the image content changes.

        .. versionadded:: 1.3.0

        Usage::

            im = Image(source = '1.jpg')
            # -- do something --
            im.reload()
            # image will be re-loaded from disk
        '''
        try:
            self._coreimage.remove_from_cache()
        except AttributeError:
            pass
        # Re-assigning source forces texture_update to run again.
        olsource = self.source
        self.source = ''
        self.source = olsource

    def on_nocache(self, *args):
        if self.nocache and self._coreimage:
            self._coreimage.remove_from_cache()
            self._coreimage._nocache = True
class AsyncImage(Image):
    '''Asynchronous Image class. See the module documentation for more
    information.
    .. note::
        The AsyncImage is a specialized form of the Image class. You may
        want to refer to the :mod:`~kivy.loader` documentation and in
        particular, the :class:`~kivy.loader.ProxyImage` for more detail
        on how to handle events around asynchronous image loading.
    '''
    def __init__(self, **kwargs):
        self._coreimage = None
        super(AsyncImage, self).__init__(**kwargs)
        # Import the loader lazily, into the module-level placeholder,
        # the first time an AsyncImage is created.
        global Loader
        if not Loader:
            from kivy.loader import Loader
        self.fbind('source', self._load_source)
        if self.source:
            self._load_source()
        # Dispatch the anim_delay handler once by hand for the initial value.
        self.on_anim_delay(self, kwargs.get('anim_delay', .25))
    def _load_source(self, *args):
        # Start (or cancel) an asynchronous load whenever source changes.
        source = self.source
        if not source:
            # Source cleared: detach from the old core image and blank out.
            if self._coreimage is not None:
                self._coreimage.unbind(on_texture=self._on_tex_change)
            self.texture = None
            self._coreimage = None
        else:
            # Local (non-URI) sources still go through resource resolution.
            if not self.is_uri(source):
                source = resource_find(source)
            self._coreimage = image = Loader.image(source,
                nocache=self.nocache, mipmap=self.mipmap,
                anim_delay=self.anim_delay)
            image.bind(on_load=self._on_source_load)
            image.bind(on_texture=self._on_tex_change)
            self.texture = image.texture
    def _on_source_load(self, value):
        # Loader finished: swap in the real texture (value is presumably
        # the proxy image dispatching on_load — confirm against Loader).
        image = self._coreimage.image
        if not image:
            return
        self.texture = image.texture
    def is_uri(self, filename):
        # Anything with one of these scheme prefixes is a remote resource.
        proto = filename.split('://', 1)[0]
        return proto in ('http', 'https', 'ftp', 'smb')
    def _on_tex_change(self, *largs):
        if self._coreimage:
            self.texture = self._coreimage.texture
    def texture_update(self, *largs):
        # Loading is driven by the Loader, not by the base-class updater.
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.