#Check if cython code has been compiled
import os
import subprocess
use_extrapolation=False #experimental correlation code
if use_extrapolation:
print("Importing AfterImage Cython Library")
if not os.path.isfile("AfterImage.c"): #has not yet been compiled, so try to do so...
cmd = "python setup.py build_ext --inplace"
subprocess.call(cmd,shell=True)
#Import dependencies
import src.featureextractor.netStat as ns
import csv
import numpy as np
print("Importing Scapy Library")
from scapy.all import *
import os.path
import platform
import subprocess
import sys
#Extracts Kitsune features from given pcap file one packet at a time using "get_next_vector()"
# If wireshark is installed (tshark) it is used to parse (it's faster), otherwise, scapy is used (much slower).
# If wireshark is used, then a tsv file (a parsed version of the pcap) will be made - which you can use as your input next time
class FE:
def __init__(self,file_path,limit=np.inf):
self.path = file_path
self.limit = limit
self.parse_type = None #unknown
self.curPacketIndx = 0
self.tsvin = None #used for parsing TSV file
self.scapyin = None #used for parsing pcap with scapy
### Prep pcap ##
self.__prep__()
### Prep Feature extractor (AfterImage) ###
maxHost = 100000000000
maxSess = 100000000000
self.nstat = ns.netStat(np.nan, maxHost, maxSess)
def _get_tshark_path(self):
if platform.system() == 'Windows':
return 'C:\\Program Files\\Wireshark\\tshark.exe'
else:
system_path = os.environ['PATH']
for path in system_path.split(os.pathsep):
filename = os.path.join(path, 'tshark')
if os.path.isfile(filename):
return filename
return ''
def __prep__(self):
### Find file: ###
if not os.path.isfile(self.path): # file does not exist
print("File: " + self.path + " does not exist")
raise Exception()
### check file type ###
type = self.path.split('.')[-1]
self._tshark = self._get_tshark_path()
##If file is TSV (pre-parsed by wireshark script)
if type == "tsv":
self.parse_type = "tsv"
##If file is pcap
elif type == "pcap" or type == 'pcapng':
# Try parsing via tshark dll of wireshark (faster)
if os.path.isfile(self._tshark):
self.pcap2tsv_with_tshark() # creates local tsv file
self.path += ".tsv"
self.parse_type = "tsv"
else: # Otherwise, parse with scapy (slower)
print("tshark not found. Trying scapy...")
self.parse_type = "scapy"
else:
print("File: " + self.path + " is not a tsv or pcap file")
raise Exception()
### open readers ##
if self.parse_type == "tsv":
maxInt = sys.maxsize
decrement = True
while decrement:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
decrement = False
try:
csv.field_size_limit(maxInt)
except OverflowError:
maxInt = int(maxInt / 10)
decrement = True
print("counting lines in file...")
num_lines = sum(1 for line in open(self.path))
print("There are " + str(num_lines) + " Packets.")
self.limit = min(self.limit, num_lines-1)
self.tsvinf = open(self.path, 'rt', encoding="utf8")
self.tsvin = csv.reader(self.tsvinf, delimiter='\t')
row = self.tsvin.__next__() #move iterator past header
else: # scapy
print("Reading PCAP file via Scapy...")
self.scapyin = rdpcap(self.path)
self.limit = len(self.scapyin)
print("Loaded " + str(len(self.scapyin)) + " Packets.")
def get_next_vector(self):
if self.curPacketIndx == self.limit:
if self.parse_type == 'tsv':
self.tsvinf.close()
return []
### Parse next packet ###
if self.parse_type == "tsv":
row = self.tsvin.__next__()
IPtype = np.nan
timestamp = row[0]
framelen = row[1]
srcIP = ''
dstIP = ''
if row[4] != '': # IPv4
srcIP = row[4]
dstIP = row[5]
IPtype = 0
elif row[17] != '': # ipv6
srcIP = row[17]
dstIP = row[18]
IPtype = 1
srcproto = row[6] + row[8]  # UDP or TCP port: concatenating the two port strings results in an OR "[tcp|udp]"
dstproto = row[7] + row[9] # UDP or TCP port
srcMAC = row[2]
dstMAC = row[3]
if srcproto == '': # it's a L2/L1 level protocol
if row[12] != '': # is ARP
srcproto = 'arp'
dstproto = 'arp'
srcIP = row[14] # src IP (ARP)
dstIP = row[16] # dst IP (ARP)
IPtype = 0
elif row[10] != '': # is ICMP
srcproto = 'icmp'
dstproto = 'icmp'
IPtype = 0
elif srcIP + srcproto + dstIP + dstproto == '': # some other protocol
srcIP = row[2] # src MAC
dstIP = row[3] # dst MAC
elif self.parse_type == "scapy":
packet = self.scapyin[self.curPacketIndx]
IPtype = np.nan
timestamp = packet.time
framelen = len(packet)
if packet.haslayer(IP): # IPv4
srcIP = packet[IP].src
dstIP = packet[IP].dst
IPtype = 0
elif packet.haslayer(IPv6): # ipv6
srcIP = packet[IPv6].src
dstIP = packet[IPv6].dst
IPtype = 1
else:
srcIP = ''
dstIP = ''
if packet.haslayer(TCP):
srcproto = str(packet[TCP].sport)
dstproto = str(packet[TCP].dport)
elif packet.haslayer(UDP):
srcproto = str(packet[UDP].sport)
dstproto = str(packet[UDP].dport)
else:
srcproto = ''
dstproto = ''
srcMAC = packet.src
dstMAC = packet.dst
if srcproto == '': # it's a L2/L1 level protocol
if packet.haslayer(ARP): # is ARP
srcproto = 'arp'
dstproto = 'arp'
srcIP = packet[ARP].psrc # src IP (ARP)
dstIP = packet[ARP].pdst # dst IP (ARP)
IPtype = 0
elif packet.haslayer(ICMP): # is ICMP
srcproto = 'icmp'
dstproto = 'icmp'
IPtype = 0
elif srcIP + srcproto + dstIP + dstproto == '': # some other protocol
srcIP = packet.src # src MAC
dstIP = packet.dst # dst MAC
else:
return []
self.curPacketIndx = self.curPacketIndx + 1
### Extract Features
try:
return self.nstat.updateGetStats(IPtype, srcMAC, dstMAC, srcIP, srcproto, dstIP, dstproto, int(framelen), float(timestamp))
except Exception as e:
print(e)
return []
def pcap2tsv_with_tshark(self):
print('Parsing with tshark...')
fields = "-e frame.time_epoch -e frame.len -e eth.src -e eth.dst -e ip.src -e ip.dst -e tcp.srcport -e tcp.dstport -e udp.srcport -e udp.dstport -e icmp.type -e icmp.code -e arp.opcode -e arp.src.hw_mac -e arp.src.proto_ipv4 -e arp.dst.hw_mac -e arp.dst.proto_ipv4 -e ipv6.src -e ipv6.dst"
cmd = '"' + self._tshark + '" -r '+ self.path +' -T fields '+ fields +' -E header=y -E occurrence=f > '+self.path+".tsv"
subprocess.call(cmd,shell=True)
print("tshark parsing complete. File saved as: "+self.path +".tsv")
def get_num_features(self):
return len(self.nstat.getNetStatHeaders())
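# Usage sketch (not part of the original module; "capture.pcap" is a hypothetical
# input path). get_next_vector() yields one Kitsune feature vector per packet and
# returns an empty list once the limit is reached (or a packet fails to parse).
if __name__ == '__main__':
    fe = FE("capture.pcap")
    count = 0
    while True:
        x = fe.get_next_vector()
        if len(x) == 0:
            break  # done (or the current packet could not be processed)
        count += 1
    print("Extracted %d feature vectors of length %d" % (count, fe.get_num_features()))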
|
from django.apps import AppConfig
class ProfilebyjimmyConfig(AppConfig):
name = 'profilebyjimmy'
|
# Clip a video
import os
import cv2
import math
def read_video():
"""
Get the path of the input video and build the save path.
:return:
"""
#video_path = input(r'Enter the video path [e.g. D:\Video\66.mp4]:')
video_path='./测试.flv'
all_info = video_path.split('/')
file_name = all_info[-1].split('.')[0]
save_path = '/'.join(all_info[:-1]) + '/data' + '/' + file_name + '.avi'
try:
if not os.path.exists(save_path):
os.mkdir('/'.join(all_info[:-1]) + '/data')
except FileExistsError as e:
print(u'The save directory has already been created......')
return video_path, save_path
def clip_video():
"""
Cut an arbitrary time segment out of the video.
:return:
"""
video_path, save_path = read_video()
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
print('video is not opened')
else:
success, frame = cap.read()
f_shape = frame.shape
f_height = f_shape[0] # height of the original video frames
f_width = f_shape[1]
fps = cap.get(5) # frame rate
frame_number = cap.get(7) # number of frames in the video file
duration = frame_number / fps # total frames divided by frame rate gives the duration in seconds
print('Note: the total video duration is %s seconds' % str(duration))
start = input('Enter the start time in seconds, e.g. 0 [start clipping from second 0]:')
while True:
if int(start) > int(math.ceil(duration)):
start = input('The start time exceeds the total video duration, please re-enter:')
else:
break
start_time = fps * float(start)
end = input('Enter the end time in seconds, e.g. 10 [stop clipping at second 10]:')
while True:
if int(end) > int(math.ceil(duration)):
end = input('The end time exceeds the total video duration, please re-enter:')
else:
break
end_time = fps * float(end)
# Output AVI encoded with XVID
# The codec (FOURCC code) can be changed
four_cc = cv2.VideoWriter_fourcc(*'XVID')
video_writer = cv2.VideoWriter(save_path, four_cc, fps, (int(f_width), int(f_height)))
num = 0
while True:
success, frame = cap.read()
if int(start_time) <= int(num) <= int(end_time):
if success:
video_writer.write(frame)
else:
break
num += 1
if num > frame_number:
break
cap.release()
if __name__ == '__main__':
clip_video()
|
from sys import argv
MORSE_CODE_DICT = {'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.',
'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---',
'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---',
'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-',
'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--',
'Z': '--..', '1': '.----', '2': '..---', '3': '...--', '4': '....-',
'5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.',
'0': '-----', ', ': '--..--', '.': '.-.-.-', '?': '..--..',
'-': '-....-', '(': '-.--.', ')': '-.--.-'}
def encrypt(message):
cipher = ''
for c in message:
if c != ' ':
cipher += MORSE_CODE_DICT[c] + ' '
else:
cipher += ' '
return cipher
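# Example (illustrative, not part of the original script): encrypt('SOS') returns
# '... --- ... ' - each letter's Morse code followed by a space; a space in the
# input message adds one extra separating space between words.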
if __name__ == '__main__':
if len(argv) >= 2:
for i in range(1, len(argv)):
argv[i] = argv[i].split()
for elem in argv[i]:
elem = elem.upper()
for car in elem:
if car not in MORSE_CODE_DICT.keys():
print('ERROR')
exit()
for i in range(1, len(argv) - 1):
for elem in argv[i]:
elem = elem.upper()
message_encrypted = encrypt(elem)
if message_encrypted == 'ERROR':
print('ERROR')
exit()
else:
print(message_encrypted, end='/ ')
for elem in argv[len(argv) - 1]:
elem = elem.upper()
message_encrypted = encrypt(elem)
if message_encrypted == 'ERROR':
print('ERROR')
exit()
else:
print(message_encrypted)
|
from .to_jsonic import ToJsonicConverter, ToJsonicConverterError, from_json # noqa
|
"""
Split a PDF file into multiple files
"""
import argparse
import os
import sys
from typing import List, NamedTuple
from csv import DictReader
from pathlib import Path
from PyPDF3 import PdfFileWriter, PdfFileReader
Chapter = NamedTuple('Chapter',
[
('name', str),
('start', int),
('stop', int)
])
def _parse_splits_file(filename: str, newline='') -> List[Chapter]:
assert os.path.exists(filename)
output: List[Chapter] = []
with open(filename, newline='') as file:
reader = DictReader(file)
for row in reader:
name = row['name']
start = int(row['start'])
stop = int(row['end'])
entry = Chapter(name, start, stop)
output.append(entry)
return output
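# Expected CSV layout for the splits file (illustrative example, not shipped with
# this script): a header row with 'name', 'start' and 'end' columns, e.g.
#   name,start,end
#   chapter1.pdf,1,10
#   chapter2.pdf,11,25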
def split(filename: str, chapters: List[Chapter], directory: str) -> None:
assert os.path.exists(filename)
for chapter in chapters:
print(f'Writing to {chapter.name} pages {chapter.start} to {chapter.stop}')
# The file should have been opened before the previous loop,
# however, there is a bug in the library, and this is the only way to
# get around it.
with open(filename, 'rb') as input_stream:
input_pdf = PdfFileReader(input_stream)
output = PdfFileWriter()
for page_number in range(chapter.start-1, chapter.stop):
page = input_pdf.getPage(page_number)
output.addPage(page)
output_filename = os.path.join(directory, chapter.name)
with open(output_filename, "wb") as output_stream:
output.write(output_stream)
output_stream.flush()
if __name__ == '__main__':
_parser = argparse.ArgumentParser(description='Split pages of a PDF file')
_parser.add_argument('filename', type=str, help='File to split')
_parser.add_argument('chapters', type=str, help='List of splits to create (in CSV format)')
args = _parser.parse_args()
if os.path.exists(args.filename) is False:
sys.stderr.write(f'Input file {args.filename} does not exist')
exit(-1)
if os.path.exists(args.chapters) is False:
sys.stderr.write(f'Chapter file {args.chapters} does not exist')
exit(-1)
_directory = Path(args.filename).parent.absolute()
_chapters = _parse_splits_file(args.chapters)
split(args.filename, _chapters, _directory)
|
from panda3d.core import *
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPGlobals
class FriendManager(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('FriendManager')
neverDisable = 1
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.__available = 0
self.otherToon = 0
def setAvailable(self, available):
self.__available = available
def getAvailable(self):
return self.__available
def generate(self):
if base.cr.friendManager != None:
base.cr.friendManager.delete()
base.cr.friendManager = self
DistributedObject.DistributedObject.generate(self)
return
def disable(self):
base.cr.friendManager = None
DistributedObject.DistributedObject.disable(self)
return
def delete(self):
base.cr.friendManager = None
DistributedObject.DistributedObject.delete(self)
return
def up_friendQuery(self, inviteeId):
self.otherToon = inviteeId
self.sendUpdate('friendQuery', [inviteeId])
self.notify.debug('Client: friendQuery(%d)' % inviteeId)
def up_cancelFriendQuery(self, context):
self.sendUpdate('cancelFriendQuery', [context])
self.notify.debug('Client: cancelFriendQuery(%d)' % context)
def up_inviteeFriendConsidering(self, yesNo, context):
self.sendUpdate('inviteeFriendConsidering', [yesNo, context])
self.notify.debug('Client: inviteeFriendConsidering(%d, %d)' % (yesNo, context))
def up_inviteeFriendResponse(self, yesNoMaybe, context):
if yesNoMaybe == 1:
base.cr.ttsFriendsManager.friendOnline(self.otherToon)
self.sendUpdate('inviteeFriendResponse', [yesNoMaybe, context])
self.notify.debug('Client: inviteeFriendResponse(%d, %d)' % (yesNoMaybe, context))
def up_inviteeAcknowledgeCancel(self, context):
self.sendUpdate('inviteeAcknowledgeCancel', [context])
self.notify.debug('Client: inviteeAcknowledgeCancel(%d)' % context)
def friendConsidering(self, yesNoAlready, context):
self.notify.info('Roger Client: friendConsidering(%d, %d)' % (yesNoAlready, context))
messenger.send('friendConsidering', [yesNoAlready, context])
def friendResponse(self, yesNoMaybe, context):
if yesNoMaybe == 1:
base.cr.ttsFriendsManager.friendOnline(self.otherToon)
self.notify.debug('Client: friendResponse(%d, %d)' % (yesNoMaybe, context))
messenger.send('friendResponse', [yesNoMaybe, context])
def inviteeFriendQuery(self, inviterId, inviterName, inviterDna, context):
self.notify.debug('Client: inviteeFriendQuery(%d, %s, dna, %d)' % (inviterId, inviterName, context))
if not hasattr(base, 'localAvatar'):
self.up_inviteeFriendConsidering(0, context)
return
if base.localAvatar.isIgnored(inviterId):
self.up_inviteeFriendConsidering(4, context)
return
if not base.localAvatar.acceptingNewFriends:
self.up_inviteeFriendConsidering(6, context)
return
self.up_inviteeFriendConsidering(self.__available, context)
self.otherToon = inviterId
if self.__available:
messenger.send('friendInvitation', [inviterId,
inviterName,
inviterDna,
context])
def inviteeCancelFriendQuery(self, context):
self.notify.debug('Client: inviteeCancelFriendQuery(%d)' % context)
messenger.send('cancelFriendInvitation', [context])
self.up_inviteeAcknowledgeCancel(context)
def requestTFCode(self, callback):
self.tfCallback = callback
self.sendUpdate('requestTFCode')
def redeemTFCode(self, code, callback):
self.tfCallback = callback
self.sendUpdate('redeemTFCode', [code])
def tfResponse(self, response, code):
self.tfCallback(response, code)
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Implementation of the 'aea interact' subcommand."""
import codecs
import os
from typing import Optional, TYPE_CHECKING, Type, Union
import click
from aea.cli.utils.constants import STUB_CONNECTION
from aea.cli.utils.decorators import check_aea_project
from aea.cli.utils.exceptions import InterruptInputException
from aea.common import Address
from aea.configurations.base import ConnectionConfig, PackageType, PublicId
from aea.configurations.constants import (
CONNECTIONS,
DEFAULT_AEA_CONFIG_FILE,
DEFAULT_PROTOCOL,
PROTOCOLS,
SIGNING_PROTOCOL,
STATE_UPDATE_PROTOCOL,
VENDOR,
)
from aea.configurations.loader import ConfigLoader
from aea.connections.base import Connection
from aea.crypto.wallet import CryptoStore
from aea.helpers.io import open_file
from aea.identity.base import Identity
from aea.mail.base import Envelope, Message
from aea.multiplexer import InBox, Multiplexer, OutBox
from aea.protocols.base import Protocol
from aea.protocols.dialogue.base import Dialogue as BaseDialogue
from aea.protocols.dialogue.base import Dialogues
if TYPE_CHECKING: # pragma: nocover
from packages.fetchai.connections.stub.connection import ( # noqa: F401
DEFAULT_INPUT_FILE_NAME,
DEFAULT_OUTPUT_FILE_NAME,
StubConnection,
)
from packages.fetchai.protocols.default.dialogues import ( # noqa: F401
DefaultDialogue,
DefaultDialogues,
)
from packages.fetchai.protocols.default.message import DefaultMessage # noqa: F401
@click.command()
@click.pass_context
@check_aea_project
def interact(
click_context: click.core.Context, # pylint: disable=unused-argument
) -> None:
"""Interact with the running agent via the stub connection."""
click.echo("Starting AEA interaction channel...")
_run_interaction_channel()
def _load_packages(agent_identity: Identity) -> None:
"""Load packages in the current interpreter."""
default_protocol_id = PublicId.from_str(DEFAULT_PROTOCOL)
Protocol.from_dir(
os.path.join(
VENDOR, default_protocol_id.author, PROTOCOLS, default_protocol_id.name
)
)
signing_protocol_id = PublicId.from_str(SIGNING_PROTOCOL)
Protocol.from_dir(
os.path.join(
VENDOR, signing_protocol_id.author, PROTOCOLS, signing_protocol_id.name
)
)
state_update_protocol_id = PublicId.from_str(STATE_UPDATE_PROTOCOL)
Protocol.from_dir(
os.path.join(
VENDOR,
state_update_protocol_id.author,
PROTOCOLS,
state_update_protocol_id.name,
)
)
stub_connection_id = PublicId.from_str(STUB_CONNECTION)
Connection.from_dir(
os.path.join(
VENDOR, stub_connection_id.author, CONNECTIONS, stub_connection_id.name,
),
agent_identity,
CryptoStore(),
os.getcwd(),
)
def _run_interaction_channel() -> None:
loader = ConfigLoader.from_configuration_type(PackageType.AGENT)
agent_configuration = loader.load(open_file(DEFAULT_AEA_CONFIG_FILE))
agent_name = agent_configuration.name
identity_stub = Identity(agent_name + "_interact", "interact")
_load_packages(identity_stub)
# load agent configuration file
from packages.fetchai.connections.stub.connection import ( # noqa: F811 # pylint: disable=import-outside-toplevel
DEFAULT_INPUT_FILE_NAME,
DEFAULT_OUTPUT_FILE_NAME,
StubConnection,
)
from packages.fetchai.protocols.default.dialogues import ( # noqa: F811 # pylint: disable=import-outside-toplevel
DefaultDialogue,
DefaultDialogues,
)
from packages.fetchai.protocols.default.message import ( # noqa: F811 # pylint: disable=import-outside-toplevel
DefaultMessage,
)
# load stub connection
configuration = ConnectionConfig(
input_file=DEFAULT_OUTPUT_FILE_NAME,
output_file=DEFAULT_INPUT_FILE_NAME,
connection_id=StubConnection.connection_id,
)
stub_connection = StubConnection(
configuration=configuration, data_dir=os.getcwd(), identity=identity_stub
)
multiplexer = Multiplexer([stub_connection])
inbox = InBox(multiplexer)
outbox = OutBox(multiplexer)
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: Address
) -> BaseDialogue.Role:
"""Infer the role of the agent from an incoming/outgoing first message
:param message: an incoming/outgoing first message
:param receiver_address: the address of the receiving agent
:return: The role of the agent
"""
return DefaultDialogue.Role.AGENT
dialogues = DefaultDialogues(identity_stub.name, role_from_first_message)
try:
multiplexer.connect()
while True: # pragma: no cover
_process_envelopes(agent_name, inbox, outbox, dialogues, DefaultMessage)
except KeyboardInterrupt:
click.echo("Interaction interrupted!")
except BaseException as e: # pylint: disable=broad-except # pragma: no cover
click.echo(e)
finally:
multiplexer.disconnect()
def _process_envelopes(
agent_name: str,
inbox: InBox,
outbox: OutBox,
dialogues: Dialogues,
message_class: Type[Message],
) -> None:
"""
Process envelopes.
:param agent_name: name of an agent.
:param inbox: an inbox object.
:param outbox: an outbox object.
:param dialogues: the dialogues object.
:param message_class: the message class.
:return: None.
"""
envelope = _try_construct_envelope(agent_name, dialogues, message_class)
if envelope is None:
_check_for_incoming_envelope(inbox, message_class)
else:
outbox.put(envelope)
click.echo(_construct_message("sending", envelope, message_class))
def _check_for_incoming_envelope(inbox: InBox, message_class: Type[Message]) -> None:
if not inbox.empty():
envelope = inbox.get_nowait()
if envelope is None:
raise ValueError("Could not recover envelope from inbox.")
click.echo(_construct_message("received", envelope, message_class))
else:
click.echo("Received no new envelope!")
def _construct_message(
action_name: str, envelope: Envelope, message_class: Type[Message]
) -> str:
action_name = action_name.title()
msg = (
message_class.serializer.decode(envelope.message)
if isinstance(envelope.message, bytes)
else envelope.message
)
message = (
"\n{} envelope:\nto: "
"{}\nsender: {}\nprotocol_specification_id: {}\nmessage: {}\n".format(
action_name,
envelope.to,
envelope.sender,
envelope.protocol_specification_id,
msg,
)
)
return message
def _try_construct_envelope(
agent_name: str, dialogues: Dialogues, message_class: Type[Message]
) -> Optional[Envelope]:
"""Try construct an envelope from user input."""
envelope = None # type: Optional[Envelope]
try:
performative_str = "bytes"
performative = message_class.Performative(performative_str)
click.echo(
f"Provide message of protocol '{str(message_class.protocol_id)}' for performative {performative_str}:"
)
message_escaped = input() # nosec
message_escaped = message_escaped.strip()
if message_escaped == "":
raise InterruptInputException
message_decoded = codecs.decode(
message_escaped.encode("utf-8"), "unicode-escape"
)
message = message_decoded.encode("utf-8") # type: Union[str, bytes]
msg, _ = dialogues.create(
counterparty=agent_name, performative=performative, content=message,
)
envelope = Envelope(to=msg.to, sender=msg.sender, message=msg,)
except InterruptInputException:
click.echo("Interrupting input, checking inbox ...")
except KeyboardInterrupt:
raise
except BaseException as e: # pylint: disable=broad-except # pragma: no cover
click.echo(e)
return envelope
|
import nltk #Import Natural Language tool Kit library
from nltk.stem import WordNetLemmatizer #Converts words to their root words Ex: Believing to Belief
lemmatizer = WordNetLemmatizer()
import json
import pickle
import numpy as np
from keras import *
from keras.models import Sequential # plain stack of layers where each layer has exactly one input tensor and one output tensor.
from keras.layers import Dense # Create deep layers in the neural network
from keras.layers import Activation #Activate neural network layer
from keras.layers import Dropout # Drop out neurons randomly to avoid overfitting
#from keras.optimizers import SGD
import random #Select random neurons
#Initialize variables
words=[]
classes = []
documents = []
ignore_words = ['?', '!']
data_file = open('intents.json').read()
intents = json.loads(data_file)
for i in intents['intents']:
for pattern in i['patterns']:
#tokenize each word
w = nltk.word_tokenize(pattern)
words.extend(w)
#add words to the main pool
documents.append((w, i['tag']))
# add to our classes list
if i['tag'] not in classes:
classes.append(i['tag'])
# lemmatize, lower each word, remove duplicates and sort words
words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]
words = sorted(list(set(words)))
# sort classes
classes = sorted(list(set(classes)))
# documents = combination between patterns and intents
print (len(documents), "documents\n")
# classes
print (len(classes), "classes:\n", classes, '\n')
# words = all words, vocabulary
print (len(words), "unique lemmatized words:\n", words,"\n")
pickle.dump(words,open('words.pkl','wb'))
pickle.dump(classes,open('classes.pkl','wb'))
training = [] # create training data
output_empty = [0] * len(classes) # create an empty array for output
# training set, collect bag of words from each sentence
for doc in documents:
bag = [] # initialize our bag of words
pattern_words = doc[0] # list of tokenized words for the pattern
pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words] # lemmatize each word - create base word, in order to represent related words
for w in words:
bag.append(1) if w in pattern_words else bag.append(0) # create our bag of words array with 1, if word match found in current pattern
# output is a '0' for each tag and '1' for current tag (for each pattern)
output_row = list(output_empty)
output_row[classes.index(doc[1])] = 1
training.append([bag, output_row]) #Append all bag of words as one training set
random.shuffle(training) # shuffle our features randomly
training = np.array(training, dtype=object) # assign the shuffled training set as an np.array; rows are ragged (bag, output_row) pairs, so use an object array
# create train and test lists. X - patterns i.e., understand the conversation flow of the user, Y - intents i.e., task or action that the chatbot user wants to perform
train_x = list(training[:,0])
train_y = list(training[:,1])
print("Training data created")
# Create model - 3 layers.
#First layer with 128 neurons, second layer with 64 neurons, and a third (output) layer with as many neurons as there are intents, using softmax to predict the output intent.
#The layer sizes are powers of 2, which suits the matrix multiplications at the heart of deep learning.
model = Sequential() #Create 3 linear stack of layers
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu')) #Add 1st dense layer with a Rectified Linear Unit activation. A linear activation function is simply the sum of the weighted input to the node, required as input for any activation function
model.add(Dropout(0.5)) #create a dropout layer with a 50% chance of setting inputs to zero to avoid overfitting
model.add(Dense(64, activation='relu')) #Add 2nd dense layer with a Rectified Linear Unit activation. A linear activation function is simply the sum of the weighted input to the node, required as input for any activation function
model.add(Dropout(0.5))#create a dropout layer with a 50% chance of setting inputs to zero to avoid overfitting
model.add(Dense(len(train_y[0]), activation='softmax')) #Add 3rd dense layer with Softmax function i.e., a softened version of the argmax function that returns the index of the largest value in a list. The max node array value will be the output of the layer and all other nodes output will be 0
# Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model
#opt= SGD(learning_rate=0.01,decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
#loss = categorical crossentropy - Computes the cross-entropy (Average number of bits required to send the message from distribution A to Distribution B) loss between true labels and predicted labels
#optimizer= SGD - Stochastic Gradient Descent (Default Optimizer) - uses a randomly selected instance from the training data to estimate the gradient. This generally leads to faster convergence, but the steps are noisier because each step is an estimate. Gradient descent refers to the steepest rate of descent down a gradient or slope to minimize the value of the loss function as the machine learning model iterates through more and more epochs
#Metrics = Accuracy - Calculates how often predictions equal labels
#fitting and saving the model
hist = model.fit(np.array(train_x), np.array(train_y), epochs=250, batch_size=5, verbose=1)
#np.array(train_x) - Numpy array training patterns
#np.array(train_y) - Numpy array training intents
#epochs - Integer- Number of epochs to train the model. An epoch is an iteration over the entire x and y data provided
#Batchsize - Number of samples per gradient update; Default: 32
#Verbose=1; Default value- logs the training progress of each epochs
model.save('chatbot_model.h5', hist)
print('No of Patterns:',(len(train_x[0])))
print('No of Intents:',(len(train_y[0])))
print("model created")
|
class Defuzzy:
@staticmethod
def centroid(fuzzy):
a = sum([x * fuzzy.function(x) for x in fuzzy.domain])
b = sum([fuzzy.function(x) for x in fuzzy.domain])
return a/b
#Todo
@staticmethod
def bisectriz(fuzzy):
area = 0
image = [fuzzy.function(x) for x in fuzzy.domain]
pairs = zip(fuzzy.domain, image)
for x,y in pairs:
area += x*y
area_acc = 0
index = 0
while(area/2 >= area_acc):
area_acc += fuzzy.domain[index] * image[index]
index += 1
return fuzzy.domain[index-1]
@staticmethod
def central_max(fuzzy):
values = _find_maxs(fuzzy)
return values[int(len(values)/2)]
@staticmethod
def smallest_max(fuzzy):
return min(_find_maxs(fuzzy))
@staticmethod
def bigger_max(fuzzy):
return max(_find_maxs(fuzzy))
def _find_maxs(fuzzy):
_max = 0
for i in fuzzy.domain:
if fuzzy.function(i) > _max:
_max = fuzzy.function(i)
return [ i for i in fuzzy.domain if fuzzy.function(i) == _max]
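# Usage sketch (not part of the original module): a minimal fuzzy-set stand-in with
# a numeric `domain` and a membership `function`, which is all the methods above need.
if __name__ == '__main__':
    class TriangularFuzzy:
        domain = [x / 10 for x in range(0, 101)]          # 0.0 .. 10.0 in steps of 0.1
        @staticmethod
        def function(x):
            return max(0.0, 1.0 - abs(x - 5.0) / 5.0)     # triangle peaking at 5
    fuzzy_set = TriangularFuzzy()
    print('centroid:', Defuzzy.centroid(fuzzy_set))       # ~5.0 for a symmetric set
    print('central max:', Defuzzy.central_max(fuzzy_set)) # 5.0, the single maximum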
|
"""This module implements functions related to the usage of AWS Sagemaker"""
import json
import logging
import time
import sagemaker
from sagemaker import ModelPackage
logger = logging.getLogger(__name__)
class ModelPackageArnProvider:
"""This class provides ARNs to SSD and YOLOv3 models for different regions of AWS Sagemaker."""
@staticmethod
def get_yolov3_model_package_arn(current_region: str) -> str:
"""Returns ARN for YOLOv3 model in the specified region.
Args:
current_region (str): AWS region
Returns:
str: ARN for YOLOv3 in the specified region.
"""
mapping = {
"sa-east-1": "arn:aws:sagemaker:sa-east-1:270155090741:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"ap-south-1": "arn:aws:sagemaker:ap-south-1:077584701553:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"ap-northeast-2": "arn:aws:sagemaker:ap-northeast-2:745090734665:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"ap-southeast-1": "arn:aws:sagemaker:ap-southeast-1:192199979996:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"ap-southeast-2": "arn:aws:sagemaker:ap-southeast-2:666831318237:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"ap-northeast-1": "arn:aws:sagemaker:ap-northeast-1:977537786026:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"ca-central-1": "arn:aws:sagemaker:ca-central-1:470592106596:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"eu-central-1": "arn:aws:sagemaker:eu-central-1:446921602837:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"eu-west-1": "arn:aws:sagemaker:eu-west-1:985815980388:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"eu-west-2": "arn:aws:sagemaker:eu-west-2:856760150666:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"eu-west-3": "arn:aws:sagemaker:eu-west-3:843114510376:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"eu-north-1": "arn:aws:sagemaker:eu-north-1:136758871317:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"us-east-1": "arn:aws:sagemaker:us-east-1:865070037744:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"us-east-2": "arn:aws:sagemaker:us-east-2:057799348421:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"us-west-1": "arn:aws:sagemaker:us-west-1:382657785993:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
"us-west-2": "arn:aws:sagemaker:us-west-2:594846645681:model-package/gluoncv-yolo3-darknet531547760-bdf604d6d9c12bf6194b6ae534a638b2",
}
return mapping[current_region]
@staticmethod
def get_ssd_model_package_arn(current_region: str) -> str:
"""Returns ARN for SSD-Resnet50 model in the specified region.
Args:
current_region (str): AWS region
Returns:
str: ARN for SSD-Resnet50 in the specified region.
"""
mapping = {
"sa-east-1": "arn:aws:sagemaker:sa-east-1:270155090741:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"ap-south-1": "arn:aws:sagemaker:ap-south-1:077584701553:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"ap-northeast-2": "arn:aws:sagemaker:ap-northeast-2:745090734665:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"ap-southeast-1": "arn:aws:sagemaker:ap-southeast-1:192199979996:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"ap-southeast-2": "arn:aws:sagemaker:ap-southeast-2:666831318237:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"ap-northeast-1": "arn:aws:sagemaker:ap-northeast-1:977537786026:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"ca-central-1": "arn:aws:sagemaker:ca-central-1:470592106596:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"eu-central-1": "arn:aws:sagemaker:eu-central-1:446921602837:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"eu-west-1": "arn:aws:sagemaker:eu-west-1:985815980388:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"eu-west-2": "arn:aws:sagemaker:eu-west-2:856760150666:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"eu-west-3": "arn:aws:sagemaker:eu-west-3:843114510376:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"eu-north-1": "arn:aws:sagemaker:eu-north-1:136758871317:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"us-east-1": "arn:aws:sagemaker:us-east-1:865070037744:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"us-east-2": "arn:aws:sagemaker:us-east-2:057799348421:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"us-west-1": "arn:aws:sagemaker:us-west-1:382657785993:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
"us-west-2": "arn:aws:sagemaker:us-west-2:594846645681:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0",
}
return mapping[current_region]
def deploy_model(
role,
num_instances,
model_arn,
instance_type,
model_name,
output_path,
max_concurrent_transforms=2,
):
model = ModelPackage(
role=role, model_package_arn=model_arn, sagemaker_session=sagemaker.Session()
)
# model.deploy(num_instances, instance_type, endpoint_name=model_name)
transformer = model.transformer(
instance_count=num_instances,
instance_type=instance_type,
output_path=output_path,
max_concurrent_transforms=max_concurrent_transforms,
)
return model, transformer
def batch_transform(data, transformer, batch_output, content_type):
ts0 = time.time()
transformer.transform(
data=data,
data_type="S3Prefix",
content_type=content_type,
input_filter="$",
join_source="None",
output_filter="$",
)
ts_create = time.time() - ts0
ts0 = time.time()
transformer.wait()
ts_exec = time.time() - ts0
logger.info(
f"Batch Transform job created in {ts_create:.2f} seconds and executed in {ts_exec:.2f} seconds."
)
assert batch_output == transformer.output_path
output = transformer.output_path
return output
def invoke_DL_endpoint(
image_path, runtime, endpoint_name, content_type="image/png", bounding_box="no"
):
img = open(image_path, "rb").read()
response = runtime.invoke_endpoint(
EndpointName=endpoint_name,
Body=bytearray(img),
ContentType=content_type,
CustomAttributes='{"threshold": 0.2}',
Accept="json",
)
result = json.loads(response["Body"].read().decode("utf-8"))
return result
def get_default_bucket() -> str:
"""Returns default bucket of the Sagemaker session.
Returns:
str: default bucket in s3 of the Sagemaker session.
"""
return sagemaker.Session().default_bucket()
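# Usage sketch (commented out; the role ARN, S3 prefixes and instance type below are
# placeholders, not values from the original module):
# region = sagemaker.Session().boto_region_name
# model_arn = ModelPackageArnProvider.get_yolov3_model_package_arn(region)
# output_path = f"s3://{get_default_bucket()}/yolo-output/"
# model, transformer = deploy_model(
#     role="arn:aws:iam::123456789012:role/SageMakerRole",  # hypothetical role
#     num_instances=1,
#     model_arn=model_arn,
#     instance_type="ml.m5.xlarge",
#     model_name="yolov3-batch",
#     output_path=output_path,
# )
# batch_transform("s3://my-bucket/images/", transformer, output_path, "image/png")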
|
# -*- coding: utf-8 -*-
import time
import logging
from tic_toc import Timer
log_fmt = '[%(asctime)s:%(msecs)04d] - %(name)s - %(levelname)s - %(message)s'
datefmt = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(format=log_fmt, datefmt=datefmt, level=logging.INFO)
log = logging.getLogger('asyncio')
with Timer('NAME', to=log.info) as timer:
log.info('Scope: ' + timer.name)
log.info('...')
time.sleep(1)
# [2018-10-21 14:30:31:0332] - asyncio - INFO - > NAME ...
# [2018-10-21 14:30:31:0333] - asyncio - INFO - Scope: NAME
# [2018-10-21 14:30:31:0333] - asyncio - INFO - ...
# [2018-10-21 14:30:32:0337] - asyncio - INFO - < NAME [WALL: 1.0042s] [CPU: 0.0003s]
|
# qubit number=5
# total number=41
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += H(1) # number=4
prog += H(2) # number=5
prog += H(3) # number=6
prog += H(4) # number=21
prog += X(2) # number=26
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += Y(3) # number=25
prog += X(0) # number=9
prog += H(1) # number=32
prog += CZ(0,1) # number=33
prog += H(1) # number=34
prog += CNOT(0,1) # number=35
prog += CNOT(0,1) # number=38
prog += X(1) # number=39
prog += CNOT(0,1) # number=40
prog += CNOT(0,1) # number=37
prog += CNOT(0,1) # number=30
prog += CNOT(0,2) # number=22
prog += X(2) # number=23
prog += Y(3) # number=27
prog += CNOT(0,2) # number=24
prog += X(3) # number=12
prog += CNOT(1,2) # number=31
prog += X(0) # number=13
prog += X(1) # number=14
prog += X(2) # number=15
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil964.csv","w")
print(summarise_results(bitstrings),file=writefile)
writefile.close()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example extension package of TVM."""
from __future__ import absolute_import
import os
import ctypes
# Import TVM first to get library symbols
import tvm
from tvm import te
def load_lib():
"""Load library, the functions will be registered into TVM"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
# load in as global so the global extern symbol is visible to other dll.
lib = ctypes.CDLL(os.path.join(curr_path, "../../lib/libtvm_ext.so"), ctypes.RTLD_GLOBAL)
return lib
_LIB = load_lib()
# Expose two functions into python
bind_add = tvm.get_global_func("tvm_ext.bind_add")
sym_add = tvm.get_global_func("tvm_ext.sym_add")
ivec_create = tvm.get_global_func("tvm_ext.ivec_create")
ivec_get = tvm.get_global_func("tvm_ext.ivec_get")
@tvm.register_object("tvm_ext.IntVector")
class IntVec(tvm.Object):
"""Example for using extension class in c++ """
@property
def _tvm_handle(self):
return self.handle.value
def __getitem__(self, idx):
return ivec_get(self, idx)
nd_create = tvm.get_global_func("tvm_ext.nd_create")
nd_add_two = tvm.get_global_func("tvm_ext.nd_add_two")
nd_get_additional_info = tvm.get_global_func("tvm_ext.nd_get_additional_info")
@tvm.register_object("tvm_ext.NDSubClass")
class NDSubClass(tvm.nd.NDArrayBase):
"""Example for subclassing TVM's NDArray infrastructure.
By inheriting TVM's NDArray, external libraries could
leverage TVM's FFI without any modification.
"""
@staticmethod
def create(additional_info):
return nd_create(additional_info)
@property
def additional_info(self):
return nd_get_additional_info(self)
def __add__(self, other):
return nd_add_two(self, other)
|
"""
Client Example: Count Objects
--------------------------------------------------------------------
Count all available Something objects connecting to simple-example app
"""
from typing import Optional, List
from base64 import b64decode
from hopeit.app.api import event_api
from hopeit.app.context import EventContext
from hopeit.app.logger import app_extra_logger
from hopeit.app.client import app_call, app_call_list
from hopeit.basic_auth import AuthInfo
from hopeit.dataobjects import dataobject, dataclass
from hopeit.toolkit import auth
from hopeit.server.web import Unauthorized
from model import Something, SomethingParams
from client_example import CountAndSaveResult
__steps__ = ['ensure_login', 'count_objects', 'save_object']
__api__ = event_api(
summary="Client Example: Count Objects and Save new one",
query_args=[
('wildcard', Optional[str], "Wildcard to filter objects by name")
],
responses={
200: (CountAndSaveResult, "Count of Something objects returned by simple-example call")
}
)
logger, extra = app_extra_logger()
@dataobject
@dataclass
class ListOptions:
wildcard: str
async def ensure_login(payload: None, context: EventContext, wildcard: str = '*') -> ListOptions:
"""
Using Basic auth credentials in context attempts login to server side app and validates
login response comes from attempted source using public keys. Then, the following steps
will execute using the client Bearer token.
This example shows how to ensure both client and server apps can trust each other by having
installed the counterpart's public key on their running environments.
"""
auth_response = await app_call(
"simple_example_auth_conn",
event="login", datatype=AuthInfo, payload=None, context=context
)
auth_info = auth.validate_token(auth_response.access_token, context)
if auth_info is None:
raise Unauthorized("Client app does not recognize server login response (using public key)")
logger.info(context, "Logged in to app", extra=extra(app=auth_info['app'], user=auth_info['user']))
return ListOptions(wildcard=wildcard)
async def count_objects(options: ListOptions, context: EventContext) -> int:
response: List[Something] = await app_call_list(
"simple_example_conn",
event="list_somethings", datatype=Something,
payload=None, context=context, wildcard=options.wildcard
)
return len(response)
async def save_object(count: int, context: EventContext, wildcard: str = '*') -> CountAndSaveResult:
user = b64decode(context.auth_info['payload'].encode()).decode().split(':', maxsplit=1)[0]
params = SomethingParams(id=f"id{count}", user=user)
saved: str = await app_call(
"simple_example_conn",
event="save_something", datatype=str,
payload=params, context=context
)
return CountAndSaveResult(count=count, save_path=saved)
|
from typing import Tuple
import pygame
from pygame_gui import UIManager
import pygame_gui
from pygame_gui.elements.ui_window import UIWindow
from pygame_gui.elements.ui_text_box import UITextBox
from talktown.person.person import Person
from talktown.place import Building
class CharacterInfoWindow(UIWindow):
"""
Wraps a pygame_ui panel to display information
about a given character
"""
__slots__ = 'character', 'ui_manager', 'text'
def __init__(self, character: 'Person', position: Tuple[int, int], ui_manager: 'UIManager') -> None:
super().__init__(
pygame.Rect(position, (320, 240)),
ui_manager,
window_display_title=character.name,
object_id=f'{character.id}')
self.character = character
self.ui_manager = ui_manager
self.text = UITextBox(
f"name: {character.name}<br>"
f"age: {round(character.age)}<br>"
f"gender: {character.gender}<br>"
f"occupation: {character.occupation if character.occupation else 'None'}<br>",
pygame.Rect(0, 0, 320, 240),
manager=ui_manager,
container=self,
parent_element=self,
)
def process_event(self, event: pygame.event.Event) -> bool:
handled = super().process_event(event)
if (
event.type == pygame.USEREVENT and
event.user_type == pygame_gui.UI_BUTTON_PRESSED and
event.ui_object_id == "#character_window.#title_bar" and
event.ui_element == self.title_bar
):
handled = True
event_data = {
'user_type': 'character_window_selected',
'ui_element': self,
'ui_object_id': self.most_specific_combined_id
}
window_selected_event = pygame.event.Event(
pygame.USEREVENT, event_data)
pygame.event.post(window_selected_event)
return handled
class BuildingInfoWindow(UIWindow):
"""
Wraps a pygame_ui panel to display information
about a given building
"""
def __init__(self, building: 'Building', ui_manager: 'UIManager') -> None:
super().__init__(
pygame.Rect((10, 10), (320, 240)),
ui_manager,
window_display_title=building.building_type,
object_id=f'building_win')
#
# class BusinessInfoWindow(UIWindow):
# """
# Wraps a pygame_ui panel to display information
# about a given business
# """
#
# def __init__(self, business: 'Business', sim: 'Simulation') -> None:
# pass
#
#
# class ResidenceInfoWindow(UIWindow):
# """
# Wraps a pygame_ui panel to display information
# about a given residence
# """
#
# def __init__(self, residence: 'Residence') -> None:
# pass
def show_building_window(building: 'Building', ui_manager: UIManager) -> UIWindow:
"""Creates a UIWindow for the given building"""
return BuildingInfoWindow(building, ui_manager)
|
'''
XlPy/matched/Proteome_Discoverer/base
_____________________________________
Inheritable objects with methods to calculate Proteome Discoverer
peptide ID formulas and PPMs, and to standardize the peptide sequences
(use of mixed case).
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules
import copy
from xldlib import chemical
from xldlib.utils import logger, masstools
# load objects/functions
from libs.definitions import ZIP
from ..base import MatchedPeptideBase
from ..core import get_engine, unpack_mods
class ProteomeDiscovererUtils(object):
'''Provides useful methods for formula and PPM calculation'''
def calculate_formula(self, peptide, mods):
'''
Calculates the exact formula for the peptide without XLer
modification
'''
atom_counts = chemical.Molecule(peptide=peptide)
mods = unpack_mods(mods)
for name, pos in mods.items():
if name not in self.fragments and name in self.engine['mods']:
formula = self.engine['mods'][name][0]
count = len(pos)
atom_counts.update_formula(formula, count=count)
return atom_counts
def calculate_ppm(self, atom_counts, mods, exper, charge):
'''Calculates the theoretical PPM from the exper mass'''
atom_counts = copy.deepcopy(atom_counts)
mods = unpack_mods(mods)
for name, pos in mods.items():
if name in self.fragments:
formula = self.fragments[name]
count = len(pos)
atom_counts.update_formula(formula, count=count)
theor = masstools.mz(atom_counts.mass, charge, 0)
return (theor - exper) / exper * 1e6
def upper_sequences(self):
'''Converts all the annotated, lowercase residues to uppercase'''
self.data['peptide'] = [i.upper() for i in self.data['peptide']]
def calculate_ppms(self):
'''Calculates the theoretical PPMs for each entry'''
self.data.setdefault('formula', [])
keys = ['peptide', 'mods', 'm/z', 'z']
for peptide, mod, exper, charge in ZIP(*map(self.data.get, keys)):
formula = self.calculate_formula(peptide, mod)
self.data['formula'].append(formula)
ppm = self.calculate_ppm(formula, mod, exper, charge)
self.data['ppm'].append(ppm)
class ProteomeDiscovererBase(MatchedPeptideBase, ProteomeDiscovererUtils):
'''Base class of the Proteome Discoverer parser'''
def __init__(self, source):
super(ProteomeDiscovererBase, self).__init__(source)
logger.Logging.info("Initializing base class for "
"Proteome Discoverer....")
self.fragments = self.worker.modifications['fragments']
self.engine = get_engine(self.data)
# self.matched = self.get_matched("proteome_discoverer", "1.3SQLite")
# self.scoring = self.matched['scoring']
def store_search(self):
'''Only has the raw file name, which will be assumed to be project'''
fraction = None
if self.data['fraction']:
fraction = self.data['fraction'][0]
self.data['project'] = fraction
self.data['search'] = None
|
'''
/django_api/Cas/models.py
-------------------------
Model of Cas
'''
from django.db import models
from django.utils import timezone
# Cas model
class Cas(models.Model):
# Account
username = models.CharField(max_length=100, default='None')
# PWD
password = models.TextField()
# Role
role = models.CharField(max_length=20, default='editor')
# role = ArrayField(ArrayField(models.CharField()))
# creation time
created = models.DateTimeField(default=timezone.now)
# update time
updated = models.DateTimeField(auto_now=True)
class Meta:
db_table = 'Cas'
def __str__(self):
return 'User {}'.format(self.id)
|
from flask_sqlalchemy import SQLAlchemy
import inspect
import traceback
db_functions = []
def db_function(name):
"""
Use as decorator, append f to db_functions.
"""
def d(f):
def w(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
traceback.print_exc()
raise
db_functions.append((name, len(inspect.getfullargspec(f).args), w))  # record name, argument count and wrapped callable
return f
return d
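# Usage sketch (illustrative name, not part of the original module). A function
# decorated with db_function is registered on the SQLite connection by setup_db(),
# after which it can be called from SQL:
#
# @db_function('lower_trim')
# def lower_trim(value):
#     return value.strip().lower()
#
# Once setup_db(app) has run, queries such as  SELECT lower_trim(name) FROM user
# can call the Python function directly.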
db = SQLAlchemy()
def setup_db(app):
"""
Create database connection and import models.
"""
# initialize Flask-SQLAlchemy following app factory pattern
# http://flask.pocoo.org/docs/0.11/patterns/appfactories/
db.init_app(app)
# Create the SQLAlchemy connection.
with app.app_context():
@db.event.listens_for(db.engine, 'begin')
def register_functions(conn):
for f in db_functions:
conn.connection.create_function(f[0], f[1], f[2])
# Register models, functions and views.
from .. import redeclipse, views # noqa
from . import models # noqa
with app.app_context():
redeclipse.functions.build_precache()
redeclipse.versions.build_precache()
|
from .did_doc_alice import (
DID_DOC_ALICE_WITH_NO_SECRETS,
DID_DOC_ALICE_SPEC_TEST_VECTORS,
)
from .did_doc_bob import DID_DOC_BOB_SPEC_TEST_VECTORS, DID_DOC_BOB_WITH_NO_SECRETS
from .did_doc_charlie import DID_DOC_CHARLIE
from .did_doc_mediator1 import DID_DOC_MEDIATOR1
from .did_doc_mediator2 import DID_DOC_MEDIATOR2
__all__ = [
"DID_DOC_ALICE_WITH_NO_SECRETS",
"DID_DOC_ALICE_SPEC_TEST_VECTORS",
"DID_DOC_BOB_SPEC_TEST_VECTORS",
"DID_DOC_BOB_WITH_NO_SECRETS",
"DID_DOC_CHARLIE",
"DID_DOC_MEDIATOR1",
"DID_DOC_MEDIATOR2",
]
|
# -*- coding: utf-8 -*-
""" Sahana Eden Setup Model:
* Installation of a Deployment
* Configuration of a Deployment
* Managing a Deployment (Start/Stop/Clean instances)
* Monitoring of a Deployment
* Upgrading a Deployment (tbc)
@copyright: 2015-2020 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3DNSModel",
"S3GandiDNSModel",
"S3GoDaddyDNSModel",
"S3CloudModel",
"S3AWSCloudModel",
"S3OpenStackCloudModel",
"S3EmailProviderModel",
"S3GoogleEmailModel",
"S3SMTPModel",
"S3SetupDeploymentModel",
"S3SetupMonitorModel",
"setup_instance_settings_read",
"setup_monitor_run_task",
"setup_monitor_task_restart",
"setup_monitor_check_email_reply",
#"setup_write_playbook",
"setup_run_playbook",
#"setup_DeploymentRepresent",
#"setup_MonitorTaskRepresent",
#"Storage2",
"setup_rheader",
)
import json
import os
import random
import string
import sys
import time
from gluon import *
from ..s3 import *
from s3compat import basestring
TIME_FORMAT = "%b %d %Y %H:%M:%S"
MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n"
WEB_SERVERS = {#1: "apache",
2: "cherokee",
3: "nginx",
}
DB_SERVERS = {#1: "mysql",
2: "postgresql",
##3: "sqlite",
}
INSTANCE_TYPES = {1: "prod",
2: "setup",
3: "test",
4: "demo",
}
# =============================================================================
class S3DNSModel(S3Model):
"""
Domain Name System (DNS) Providers
- super-entity
"""
names = ("setup_dns",
"setup_dns_id",
)
def model(self):
T = current.T
db = current.db
#----------------------------------------------------------------------
# Super entity
#
dns_types = Storage(setup_gandi_dns = T("Gandi LiveDNS"),
setup_godaddy_dns = T("GoDaddy"),
)
tablename = "setup_dns"
self.super_entity(tablename, "dns_id",
dns_types,
Field("name",
#label = T("Name"),
),
Field("description",
#label = T("Description"),
),
#Field("enabled", "boolean",
# default = True,
# #label = T("Enabled?")
# #represent = s3_yes_no_represent,
# ),
#on_define = lambda table: \
# [table.instance_type.set_attributes(readable = True),
# ],
)
# Reusable Field
represent = S3Represent(lookup = tablename)
dns_id = S3ReusableField("dns_id", "reference %s" % tablename,
label = T("DNS Provider"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "setup_dns.dns_id",
represent,
sort = True
),
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("DNS Provider"),
T("If you use a DNS Provider configuration then you can create/update the DNS entry automatically as part of the deployment.")
),
),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return {"setup_dns_id": dns_id,
}
# =============================================================================
class S3GandiDNSModel(S3DNSModel):
"""
Gandi LiveDNS
- DNS Provider Instance
https://doc.livedns.gandi.net/
"""
names = ("setup_gandi_dns",)
def model(self):
#T = current.T
# ---------------------------------------------------------------------
tablename = "setup_gandi_dns"
self.define_table(tablename,
self.super_link("dns_id", "setup_dns"),
Field("name",
requires = IS_NOT_EMPTY(),
),
Field("description"),
#Field("enabled", "boolean",
# default = True,
# #label = T("Enabled?"),
# represent = s3_yes_no_represent,
# ),
Field("api_key", "password",
readable = False,
requires = IS_NOT_EMPTY(),
widget = S3PasswordWidget(),
),
# Currently only supports a single Domain per DNS configuration
Field("domain", # Name
requires = IS_NOT_EMPTY(),
),
# Currently only supports a single Zone per DNS configuration
Field("zone", # UUID
requires = IS_NOT_EMPTY(),
),
*s3_meta_fields())
self.configure(tablename,
super_entity = "setup_dns",
)
# ---------------------------------------------------------------------
return {}
# =============================================================================
class S3GoDaddyDNSModel(S3DNSModel):
"""
GoDaddy DNS
- DNS Provider Instance
https://developer.godaddy.com/
"""
names = ("setup_godaddy_dns",)
def model(self):
#T = current.T
# ---------------------------------------------------------------------
tablename = "setup_godaddy_dns"
self.define_table(tablename,
self.super_link("dns_id", "setup_dns"),
Field("name",
requires = IS_NOT_EMPTY(),
),
Field("description"),
#Field("enabled", "boolean",
# default = True,
# #label = T("Enabled?"),
# represent = s3_yes_no_represent,
# ),
# Currently only supports a single Domain per DNS configuration
Field("domain", # Name
requires = IS_NOT_EMPTY(),
),
Field("api_key",
requires = IS_NOT_EMPTY(),
),
Field("secret", "password",
readable = False,
requires = IS_NOT_EMPTY(),
widget = S3PasswordWidget(),
),
*s3_meta_fields())
self.configure(tablename,
super_entity = "setup_dns",
)
# ---------------------------------------------------------------------
return {}
# =============================================================================
class S3CloudModel(S3Model):
"""
Clouds
- super-entity
"""
names = ("setup_cloud",
"setup_cloud_id",
)
def model(self):
T = current.T
db = current.db
#----------------------------------------------------------------------
# Super entity
#
cloud_types = Storage(setup_aws_cloud = T("Amazon Web Services"),
setup_openstack_cloud = T("OpenStack"),
)
tablename = "setup_cloud"
self.super_entity(tablename, "cloud_id",
cloud_types,
Field("name",
#label = T("Name"),
),
Field("description",
#label = T("Description"),
),
#Field("enabled", "boolean",
# default = True,
# #label = T("Enabled?")
# #represent = s3_yes_no_represent,
# ),
#on_define = lambda table: \
# [table.instance_type.set_attributes(readable = True),
# ],
)
# Reusable Field
represent = S3Represent(lookup = tablename)
cloud_id = S3ReusableField("cloud_id", "reference %s" % tablename,
label = T("Cloud"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "setup_cloud.cloud_id",
represent,
sort = True
),
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Cloud"),
T("If you use a Cloud configuration then you can create the server(s) automatically as part of the deployment.")
),
),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return {"setup_cloud_id": cloud_id,
}
# =============================================================================
class S3AWSCloudModel(S3CloudModel):
"""
Amazon Web Services
- Cloud Instance
https://docs.ansible.com/ansible/latest/scenario_guides/guide_aws.html
https://docs.ansible.com/ansible/latest/modules/ec2_module.html
"""
names = ("setup_aws_cloud",
"setup_aws_server",
)
def model(self):
#T = current.T
configure = self.configure
define_table = self.define_table
# ---------------------------------------------------------------------
# AWS Cloud Configuration
#
tablename = "setup_aws_cloud"
define_table(tablename,
# Instance of Super-Entity
self.super_link("cloud_id", "setup_cloud"),
Field("name",
requires = IS_NOT_EMPTY(),
),
Field("description"),
#Field("enabled", "boolean",
# default = True,
# #label = T("Enabled?"),
# represent = s3_yes_no_represent,
# ),
Field("access_key", "password",
readable = False,
requires = IS_NOT_EMPTY(),
widget = S3PasswordWidget(),
),
Field("secret_key", "password",
readable = False,
requires = IS_NOT_EMPTY(),
widget = S3PasswordWidget(),
),
*s3_meta_fields())
configure(tablename,
super_entity = "setup_cloud",
)
# ---------------------------------------------------------------------
# AWS Server Details
#
#aws_instance_types = ["t3.micro",
# ]
#aws_regions = {"eu-west-2": "Europe (London)",
# }
tablename = "setup_aws_server"
define_table(tablename,
self.setup_server_id(),
Field("region",
default = "eu-west-2", # Europe (London)
#label = T("Region"),
#requires = IS_IN_SET(aws_regions),
#represent = S3Represent(options = aws_regions)
),
Field("instance_type",
default = "t3.micro",
#label = T("Instance Type"),
#requires = IS_IN_SET(aws_instance_types),
),
Field("image",
# https://wiki.debian.org/Cloud/AmazonEC2Image/Buster
default = "ami-042796b8e41bb5fad", # Debian 10 in London
#label = T("Image"), # AMI ID
),
Field("reserved_instance", "boolean",
default = False,
#label = T("Reserved Instance"),
),
Field("security_group",
default = "default",
#label = T("Security Group"),
),
Field("instance_id",
#label = T("Instance ID"),
# Normally populated automatically:
writable = False,
),
*s3_meta_fields())
configure(tablename,
ondelete = self.setup_aws_server_ondelete,
)
# ---------------------------------------------------------------------
return {}
# -------------------------------------------------------------------------
@staticmethod
def setup_aws_server_ondelete(row):
"""
Cleanup Tasks when a Server is Deleted
- AWS Instance
- AWS Keypair
"""
db = current.db
s3db = current.s3db
stable = s3db.setup_server
cstable = s3db.setup_aws_server
dtable = s3db.setup_deployment
ctable = s3db.setup_aws_cloud
# Only deleted_fks are in the row object
aws_server = db(cstable.id == row.id).select(cstable.region,
cstable.instance_id,
limitby = (0, 1)
).first()
region = aws_server.region
query = (stable.id == row.server_id) & \
(dtable.id == stable.deployment_id) & \
(dtable.cloud_id == ctable.cloud_id)
deployment = db(query).select(ctable.access_key,
ctable.secret_key,
stable.name,
limitby = (0, 1)
).first()
server_name = deployment["setup_server.name"]
cloud = deployment["setup_aws_cloud"]
access_key = cloud.access_key
secret_key = cloud.secret_key
playbook = [{"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": [# Terminate AWS Instance
{"ec2": {"aws_access_key": access_key,
"aws_secret_key": secret_key,
"region": region,
"instance_ids": aws_server.instance_id,
"state": "absent",
},
},
# Delete Keypair
{"ec2_key": {"aws_access_key": access_key,
"aws_secret_key": secret_key,
"region": region,
"name": server_name,
"state": "absent",
},
},
],
},
                    ]
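        # A minimal sketch of the YAML this structure is serialised to for Ansible
        # (illustrative only; the real values are substituted at runtime):
        #
        # - hosts: localhost
        #   connection: local
        #   gather_facts: no
        #   tasks:
        #     - ec2:
        #         aws_access_key: <access_key>
        #         aws_secret_key: <secret_key>
        #         region: <region>
        #         instance_ids: <instance_id>
        #         state: absent
        #     - ec2_key:
        #         aws_access_key: <access_key>
        #         aws_secret_key: <secret_key>
        #         region: <region>
        #         name: <server_name>
        #         state: absent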
# Write Playbook
name = "aws_server_ondelete_%d" % int(time.time())
task_vars = setup_write_playbook("%s.yml" % name,
playbook,
)
# Run Playbook
#task_vars["instance_id"] = instance_id # To Upload Logs to Instance record
current.s3task.schedule_task(name,
vars = task_vars,
function_name = "setup_run_playbook",
repeats = None,
timeout = 6000,
#sync_output = 300
)
# =============================================================================
class S3OpenStackCloudModel(S3CloudModel):
"""
OpenStack
- Cloud Instance
https://www.openstack.org
https://docs.ansible.com/ansible/latest/modules/os_server_module.html
"""
names = ("setup_openstack_cloud",
"setup_openstack_server",
)
def model(self):
#T = current.T
configure = self.configure
define_table = self.define_table
# ---------------------------------------------------------------------
# OpenStack Cloud Configuration
#
tablename = "setup_openstack_cloud"
define_table(tablename,
# Instance of Super-Entity
self.super_link("cloud_id", "setup_cloud"),
Field("name",
requires = IS_NOT_EMPTY(),
),
Field("description"),
#Field("enabled", "boolean",
# default = True,
# #label = T("Enabled?"),
# represent = s3_yes_no_represent,
# ),
Field("auth_url",
requires = IS_URL(),
),
Field("username",
requires = IS_NOT_EMPTY(),
),
Field("password", "password",
readable = False,
requires = IS_NOT_EMPTY(),
widget = S3PasswordWidget(),
),
Field("project_name",
requires = IS_NOT_EMPTY(),
),
Field("domain_name",
default = "Default",
requires = IS_NOT_EMPTY(),
),
*s3_meta_fields())
configure(tablename,
super_entity = "setup_cloud",
)
# ---------------------------------------------------------------------
# OpenStack Server Details
#
tablename = "setup_openstack_server"
define_table(tablename,
self.setup_server_id(),
Field("instance_type",
default = "m1.small", # Varies by Deployment, this matches OSUOSL
#label = T("Flavor"),
#requires = IS_IN_SET(openstack_instance_types), # Varies by Deployment
),
Field("image",
default = "Debian 10.1", # Varies by Deployment, this matches OSUOSL
#label = T("Image"), # Image Name or ID
),
Field("volume_size", "integer",
default = 8, # Gb
#label = T("Volume Size (Gb)"),
),
Field("network",
default = "general_servers1", # Varies by Deployment, this matches OSUOSL
#label = T("Security Group"),
),
Field("security_group",
default = "default",
#label = T("Security Group"),
),
Field("region",
default = "RegionOne", # Varies by Deployment, this matches OSUOSL
#label = T("Region"),
#requires = IS_IN_SET(openstack_regions), # Varies by Deployment
                           #represent = S3Represent(options = openstack_regions)
),
Field("availability_zone",
default = "nova", # Varies by Deployment, this matches OSUOSL
#label = T("Availability Zone"),
),
*s3_meta_fields())
configure(tablename,
ondelete = self.setup_openstack_server_ondelete,
)
# ---------------------------------------------------------------------
return {}
# -------------------------------------------------------------------------
@staticmethod
def setup_openstack_server_ondelete(row):
"""
Cleanup Tasks when a Server is Deleted
- OpenStack Instance
- OpenStack Keypair
"""
db = current.db
s3db = current.s3db
stable = s3db.setup_server
dtable = s3db.setup_deployment
ctable = s3db.setup_openstack_cloud
query = (stable.id == row.server_id) & \
(dtable.id == stable.deployment_id) & \
(dtable.cloud_id == ctable.cloud_id)
deployment = db(query).select(ctable.auth_url,
ctable.username,
ctable.password,
ctable.project_name,
ctable.domain_name,
stable.name,
limitby = (0, 1)
).first()
server_name = deployment["setup_server.name"]
cloud = deployment["setup_openstack_cloud"]
auth = {"auth_url": cloud.auth_url,
"username": cloud.username,
"password": cloud.password,
"project_name": cloud.project_name,
"domain_name": cloud.domain_name,
}
playbook = [{"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": [# Terminate OpenStack Instance
{"os_server": {"auth": auth,
"name": server_name,
"state": "absent",
},
},
# Delete Keypair
{"os_keypair": {"auth": auth,
"name": server_name,
"state": "absent",
},
},
],
},
]
# Write Playbook
name = "openstack_server_ondelete_%d" % int(time.time())
task_vars = setup_write_playbook("%s.yml" % name,
playbook,
)
# Run Playbook
#task_vars["instance_id"] = instance_id # To Upload Logs to Instance record
current.s3task.schedule_task(name,
vars = task_vars,
function_name = "setup_run_playbook",
repeats = None,
timeout = 6000,
#sync_output = 300
)
# =============================================================================
class S3EmailProviderModel(S3Model):
"""
Email Providers (we just use Groups currently)
- super-entity
"""
names = ("setup_email",
"setup_email_id",
)
def model(self):
T = current.T
db = current.db
#----------------------------------------------------------------------
# Super entity
#
email_types = Storage(setup_google_email = T("Google"),
)
tablename = "setup_email"
self.super_entity(tablename, "email_id",
email_types,
Field("name",
#label = T("Name"),
),
Field("description",
#label = T("Description"),
),
#Field("enabled", "boolean",
# default = True,
# #label = T("Enabled?")
# #represent = s3_yes_no_represent,
# ),
#on_define = lambda table: \
# [table.instance_type.set_attributes(readable = True),
# ],
)
# Reusable Field
represent = S3Represent(lookup = tablename)
email_id = S3ReusableField("email_id", "reference %s" % tablename,
label = T("Email Group Provider"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "setup_email.email_id",
represent,
sort = True
),
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Email Group Provider"),
T("If you use an Email Group Provider configuration then you can create/update the Email Sender entry automatically as part of the deployment.")
),
),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return {"setup_email_id": email_id,
}
# =============================================================================
class S3GoogleEmailModel(S3EmailProviderModel):
"""
Google
- Email Group Provider Instance
https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups
https://github.com/googleapis/google-api-python-client
# NB Only supports Python 3.x
"""
names = ("setup_google_email",
"setup_google_instance",
)
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
# ---------------------------------------------------------------------
tablename = "setup_google_email"
define_table(tablename,
self.super_link("email_id", "setup_email"),
Field("name",
requires = IS_NOT_EMPTY(),
),
Field("description"),
#Field("enabled", "boolean",
# default = True,
# #label = T("Enabled?"),
# represent = s3_yes_no_represent,
# ),
Field("credentials", "json",
requires = IS_JSONS3(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Credentials"),
T("JSON of the Service Account.")
),
),
),
Field("email",
requires = IS_EMAIL(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Email"),
T("A User account with Administrative access.")
),
),
),
*s3_meta_fields())
configure(tablename,
super_entity = "setup_email",
)
# ---------------------------------------------------------------------
# Google Instance Details
#
tablename = "setup_google_instance"
define_table(tablename,
self.setup_instance_id(),
Field("name",
label = T("Group Name"),
requires = IS_NOT_EMPTY(),
),
Field("email",
label = T("Group Email"),
requires = IS_EMAIL(),
),
Field("member",
label = T("Member Email"),
requires = IS_EMAIL(),
),
Field("group_id",
# Normally populated automatically
writable = False,
),
*s3_meta_fields())
configure(tablename,
ondelete = self.setup_google_instance_ondelete,
)
# ---------------------------------------------------------------------
return {}
# -------------------------------------------------------------------------
@staticmethod
def setup_google_instance_ondelete(row):
"""
            Cleanup Tasks when an Instance is Deleted
- Google Group
"""
from google.oauth2 import service_account
from googleapiclient.discovery import build
db = current.db
s3db = current.s3db
itable = s3db.setup_instance
gitable = s3db.setup_google_instance
dtable = s3db.setup_deployment
gtable = s3db.setup_google_email
# Only deleted_fks are in the row object
instance_id = row.instance_id
instance = db(gitable.id == row.id).select(gitable.group_id,
limitby = (0, 1)
).first()
query = (itable.id == instance_id) & \
(dtable.id == itable.deployment_id) & \
                (dtable.email_id == gtable.email_id)
deployment = db(query).select(gtable.credentials,
gtable.email,
limitby = (0, 1)
).first()
creds_path = os.path.join("/", "tmp", "credentials-%s.json" % instance_id)
with open(creds_path, "w") as creds_file:
creds_file.write(json.dumps(deployment.credentials))
credentials = service_account.Credentials.from_service_account_file(
creds_path,
scopes = ["https://www.googleapis.com/auth/admin.directory.group"]
)
credentials = credentials.with_subject(deployment.email)
service = build("admin", "directory_v1", credentials=credentials)
        results = service.groups().delete(groupKey = instance.group_id).execute()
        # The Directory API returns an empty body on success
        if results:
            current.session.warning = "Couldn't delete Email Group: %s" % results
        os.unlink(creds_path)
# =============================================================================
class S3SMTPModel(S3Model):
"""
SMTP Smart Hosts
- tested with:
* AWS SES - free for 62,000 mails/month if hosting on AWS
* SendGrid - free for 100 mails/day
"""
names = ("setup_smtp",
"setup_smtp_id",
)
def model(self):
T = current.T
db = current.db
#----------------------------------------------------------------------
# SMTP Smart Host configurations
#
tablename = "setup_smtp"
self.define_table(tablename,
Field("name",
label = T("Name"),
requires = IS_NOT_EMPTY(),
),
Field("description",
label = T("Description"),
),
#Field("enabled", "boolean",
# default = True,
# label = T("Enabled?")
# #represent = s3_yes_no_represent,
# ),
Field("hostname",
# https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-connect.html
default = "smtp.sendgrid.net",
label = T("Host name"),
),
# https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html
Field("username",
default = "apikey", # Sendgrid
label = T("User name"),
requires = IS_NOT_EMPTY(),
),
Field("password", "password",
label = T("Password"),
readable = False,
requires = IS_NOT_EMPTY(),
widget = S3PasswordWidget(),
),
)
# Reusable Field
represent = S3Represent(lookup = tablename)
smtp_id = S3ReusableField("smtp_id", "reference %s" % tablename,
label = T("SMTP Smart Host"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "setup_smtp.id",
represent,
sort = True
),
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("SMTP Smart Host"),
T("If you use an SMTP Smart Host, then you can configure your deployment to use it.")
)
),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return {"setup_smtp_id": smtp_id,
}
# =============================================================================
class S3SetupDeploymentModel(S3Model):
names = ("setup_deployment",
"setup_deployment_id",
"setup_server",
"setup_server_id",
"setup_instance",
"setup_instance_id",
"setup_setting",
)
def model(self):
T = current.T
db = current.db
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
set_method = self.set_method
uploadfolder = os.path.join(current.request.folder, "uploads")
# ---------------------------------------------------------------------
# Deployments
#
tablename = "setup_deployment"
define_table(tablename,
# @ToDo: Add ability to get a specific hash/tag
Field("repo_url",
#default = "https://github.com/sahana/eden",
default = "https://github.com/sahana/eden-stable",
label = T("Eden Repository"),
requires = IS_URL(),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Eden Repository"),
T("If you wish to switch to Trunk, or use your own Fork, then you can set this here")
)
),
),
# @ToDo: Make this a multi-select (How to handle order?)
Field("country", length=2,
label = T("Country"),
requires = IS_EMPTY_OR(
# We provide a full list of countries here
# - we then check if there are appropriate locale or sub-templates to include when we deploy
IS_IN_SET_LAZY(lambda: current.gis.get_countries(key_type = "code"),
zero = current.messages.SELECT_LOCATION,
)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Country"),
T("Selecting your country means that the appropriate locale settings can be applied. If you need to support multiple countries then leave this blank.")
)
),
),
Field("template",
default = "default",
label = T("Template"),
requires = IS_IN_SET_LAZY(lambda: self.setup_get_templates(),
zero = None,
),
),
Field("template_manual",
label = T("...or enter manually"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Template (Manual Entry)"),
T("If you want to use different template(s) than the ones available in the dropdown, then you can enter the list here as e.g. 'Template,Template.SubTemplate' (locations.Country will be prepended automatically, if set and available).")
)
),
),
Field("webserver_type", "integer",
default = 3,
label = T("Web Server"),
represent = S3Represent(options = WEB_SERVERS),
requires = IS_IN_SET(WEB_SERVERS),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Web Server"),
T("Currently only Nginx and Cherokee is supported by this tool, although Apache should be possible with a little work.")
)
),
),
Field("db_type", "integer",
default = 2,
label = T("Database"),
represent = S3Represent(options = DB_SERVERS),
requires = IS_IN_SET(DB_SERVERS),
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Database"),
T("Currently only PostgreSQL is supported by this tool, although MySQL should be possible with a little work.")
)
),
),
# Set automatically
Field("db_password", "password",
readable = False,
writable = False,
),
self.setup_cloud_id(),
self.setup_dns_id(),
self.setup_smtp_id(),
self.setup_email_id(),
*s3_meta_fields()
)
crud_strings[tablename] = Storage(
label_create = T("Create Deployment"),
title_display = T("Deployment Details"),
title_list = T("Deployments"),
title_update = T("Edit Deployment"),
label_list_button = T("List Deployments"),
label_delete_button = T("Delete Deployment"),
msg_record_created = T("Deployment added"),
msg_record_modified = T("Deployment updated"),
msg_record_deleted = T("Deployment deleted"),
msg_list_empty = T("No Deployments currently registered"))
configure(tablename,
#editable = False,
listadd = False, # Create method customises form
create_onaccept = self.setup_deployment_create_onaccept,
create_next = URL(c="setup", f="deployment",
args = ["[id]", "instance"],
),
list_fields = ["production.url",
"country",
"template",
"webserver_type",
"db_type",
],
update_onaccept = self.setup_deployment_update_onaccept,
)
add_components(tablename,
setup_instance = (# All instances:
"deployment_id",
# Production instance:
{"name": "production",
"joinby": "deployment_id",
"filterby": {
"type": 1,
},
"multiple": False,
},
),
setup_monitor_task = "deployment_id",
                       setup_server = (# All servers:
"deployment_id",
                                       # Production server:
{"name": "production_server",
"joinby": "deployment_id",
"filterby": {
"role": 1,
},
"multiple": False,
},
),
setup_setting = "deployment_id",
)
set_method("setup", "deployment",
method = "wizard",
action = self.setup_server_wizard)
represent = setup_DeploymentRepresent()
deployment_id = S3ReusableField("deployment_id", "reference %s" % tablename,
label = T("Deployment"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "setup_deployment.id",
represent,
sort=True
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Servers
#
SERVER_ROLES = {1: "all",
#2: "db",
#3: "webserver",
#4: "eden",
}
tablename = "setup_server"
define_table(tablename,
# @ToDo: Server Groups (e.g. 'all webservers', 'all debian 9')
#group_id(),
deployment_id(),
Field("name",
label = T("Name"),
# Can do this in templates if-required
#requires = IS_NOT_IN_DB(db, "setup_server.name"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Name"),
# If not defined then can be automated by the Cloud integration, if-present
T("Optional.")
)
),
),
Field("host_ip", length=24,
label = T("IP Address"),
# required for non-cloud deployments (set in controller)
requires = IS_EMPTY_OR(
IS_IPV4(),
),
#writable = False,
comment = DIV(_class="tooltip",
# If not defined then can be automated by the Cloud integration, if-present
_title="%s|%s" % (T("IP Address"),
T("Leave blank if using a Cloud configuration or deploying to this Server (where it will default to 127.0.0.1). Set to the IP address of the remote server if you have an SSH private key.")
)
),
),
Field("role", "integer",
default = 1,
label = T("Role"),
represent = S3Represent(options = SERVER_ROLES),
requires = IS_IN_SET(SERVER_ROLES),
writable = False,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Role"),
T("Currently only 'all' is supported by this tool, although others should be possible with a little work.")
)
),
),
Field("remote_user",
default = "admin",
label = T("Remote User"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Remote User"),
T("If you wish to configure a server other than this one then you need to provide the username that the SSH private key works with. For Debian OS, this is normally 'admin'.")
)
),
),
Field("private_key", "upload",
label = T("SSH Private Key"),
length = current.MAX_FILENAME_LENGTH,
requires = IS_EMPTY_OR(IS_UPLOAD_FILENAME()),
uploadfolder = uploadfolder,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("SSH Private Key"),
T("If you wish to configure a server other than this one then either you need to use a Cloud or provide a PEM-encoded SSH private key")
)
),
),
*s3_meta_fields()
)
configure(tablename,
create_onaccept = self.setup_server_create_onaccept,
ondelete = self.setup_server_ondelete,
)
crud_strings[tablename] = Storage(
label_create = T("Add Server"),
title_display = T("Server Details"),
title_list = T("Servers"),
title_update = T("Edit Server"),
label_list_button = T("List Servers"),
label_delete_button = T("Delete Server"),
msg_record_created = T("Server added"),
msg_record_modified = T("Server updated"),
msg_record_deleted = T("Server deleted"),
msg_list_empty = T("No Servers currently registered"))
add_components(tablename,
setup_aws_server = {"joinby": "server_id",
"multiple": False,
},
setup_openstack_server = {"joinby": "server_id",
"multiple": False,
},
setup_monitor_run = {"name": "monitor_log",
"joinby": "server_id",
},
setup_monitor_server = {"joinby": "server_id",
"multiple": False,
},
setup_monitor_task = "server_id",
)
# @ToDo: Add represented Deployment/Role
represent = S3Represent(lookup = tablename,
fields = ["name", "host_ip"])
server_id = S3ReusableField("server_id", "reference %s" % tablename,
label = T("Server"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "setup_server.id",
represent,
sort=True
)),
sortby = "name",
)
set_method("setup", "server",
method = "enable",
action = setup_monitor_server_enable_interactive)
set_method("setup", "server",
method = "disable",
action = setup_monitor_server_disable_interactive)
set_method("setup", "server",
method = "check",
action = setup_monitor_server_check)
# ---------------------------------------------------------------------
# Instances
#
# @ToDo: Allow a Test instance to source Prod data from a different deployment
# - to allow it to be run on different hosts (or even different cloud)
#
type_represent = S3Represent(options = INSTANCE_TYPES)
tablename = "setup_instance"
define_table(tablename,
deployment_id(),
Field("type", "integer",
default = 1,
label = T("Type"),
represent = type_represent,
requires = IS_IN_SET(INSTANCE_TYPES),
),
Field("url",
label = T("URL"),
requires = IS_URL(prepend_scheme = "https"),
represent = lambda opt: A(opt,
_href = opt,
_target="_blank",
),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("URL"),
T("The Public URL which will be used to access the instance")
)
),
),
# @ToDo: Allow upload of SSL as well as auto-generated Let's Encrypt
#Field("ssl_cert", "upload",
# label = T("SSL Certificate"),
# length = current.MAX_FILENAME_LENGTH,
# requires = IS_EMPTY_OR(IS_UPLOAD_FILENAME()),
# uploadfolder = uploadfolder,
# comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("SSL Certificate"),
# T("If not using Let's Encrypt e.g. you wish to use an OV or EV certificate")
# )
# ),
# ),
#Field("ssl_key", "upload",
# label = T("SSL Key"),
# length = current.MAX_FILENAME_LENGTH,
# requires = IS_EMPTY_OR(IS_UPLOAD_FILENAME()),
# uploadfolder = uploadfolder,
# comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("SSL Key"),
# T("If not using Let's Encrypt e.g. you wish to use an OV or EV certificate")
# )
# ),
# ),
Field("sender",
label = T("Email Sender"),
requires = IS_EMPTY_OR(
IS_EMAIL()),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Email Sender"),
T("The Address which you want Outbound Email to be From. Not setting this means that Outbound Email is Disabled unless you automate this with an Email Group Provider.")
)
),
),
Field("start", "boolean",
default = True, # default = False in Controller for additional instances
label = T("Start at Boot"),
represent = s3_yes_no_represent,
),
Field("task_id", "reference scheduler_task",
label = T("Scheduled Task"),
represent = lambda opt: \
A(opt,
_href = URL(c="appadmin", f="update",
args = ["db", "scheduler_task", opt]),
) if opt else current.messages["NONE"],
writable = False,
),
Field("log_file", "upload",
label = T("Log File"),
length = current.MAX_FILENAME_LENGTH,
requires = IS_EMPTY_OR(IS_UPLOAD_FILENAME()),
uploadfolder = uploadfolder,
writable = False,
),
# Has the Configuration Wizard been run?
Field("configured", "boolean",
default = False,
#represent = s3_yes_no_represent,
readable = False,
writable = False,
),
*s3_meta_fields()
)
crud_strings[tablename] = Storage(
label_create = T("Add Instance"),
title_display = T("Instance Details"),
title_list = T("Instances"),
title_update = T("Edit Instance"),
label_list_button = T("List Instances"),
label_delete_button = T("Delete Instance"),
msg_record_created = T("Instance added"),
msg_record_modified = T("Instance updated"),
msg_record_deleted = T("Instance deleted"),
msg_list_empty = T("No Instances currently registered"))
add_components(tablename,
setup_google_instance = {"joinby": "instance_id",
"multiple": False,
},
setup_setting = "instance_id",
)
configure(tablename,
list_fields = ["type",
"url",
"start",
"task_id",
"log_file",
],
ondelete = self.setup_instance_ondelete,
update_onaccept = self.setup_instance_update_onaccept,
)
set_method("setup", "deployment",
component_name = "instance",
method = "deploy",
action = self.setup_instance_deploy,
)
set_method("setup", "deployment",
component_name = "instance",
method = "settings",
action = self.setup_instance_settings,
)
set_method("setup", "deployment",
component_name = "instance",
method = "start",
action = self.setup_instance_start,
)
set_method("setup", "deployment",
component_name = "instance",
method = "stop",
action = self.setup_instance_stop,
)
set_method("setup", "deployment",
component_name = "instance",
method = "clean",
action = self.setup_instance_clean,
)
set_method("setup", "deployment",
component_name = "instance",
method = "wizard",
action = self.setup_instance_wizard,
)
represent = S3Represent(lookup = tablename,
fields = ["type"],
labels = lambda row: type_represent(row.type))
instance_id = S3ReusableField("instance_id", "reference %s" % tablename,
label = T("Instance"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "setup_instance.id",
represent,
sort = True
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Settings in models/000_config.py
#
tablename = "setup_setting"
define_table(tablename,
deployment_id(),
instance_id(),
Field("setting",
label = T("Setting"),
requires = IS_NOT_EMPTY(),
),
Field("current_value",
label = T("Current Value"),
writable = False,
),
Field("new_value",
label = T("New Value"),
),
*s3_meta_fields()
)
crud_strings[tablename] = Storage(
label_create = T("Add Setting"),
title_display = T("Setting Details"),
title_list = T("Settings"),
title_update = T("Edit Setting"),
label_list_button = T("List Settings"),
label_delete_button = T("Delete Setting"),
msg_record_created = T("Setting added"),
msg_record_modified = T("Setting updated"),
msg_record_deleted = T("Setting deleted"),
msg_list_empty = T("No Settings currently registered"))
set_method("setup", "deployment",
component_name = "setting",
method = "apply",
action = self.setup_setting_apply_interactive,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return {"setup_deployment_id": deployment_id,
"setup_server_id": server_id,
"setup_instance_id": instance_id,
}
# -------------------------------------------------------------------------
@staticmethod
def setup_deployment_create_onaccept(form):
"""
New Deployment:
- Assign a random DB password
"""
# Assign a random DB password
chars = string.ascii_letters + string.digits + string.punctuation
# Ensure that " isn't included otherwise we get a syntax error in 000_config.py
chars = chars.replace('"', "")
# Ensure that ' isn't included otherwise we get a syntax error in pgpass.sql
chars = chars.replace("'", "")
# Ensure that @ isn't included as Web2Py doesn't like this
chars = chars.replace("@", "")
# Ensure that \ isn't included as control characters can cause the settings.database.password to not match pgpass (e.g. \a -> ^G)
chars = chars.replace("\\", "")
password = "".join(random.choice(chars) for _ in range(12))
current.db(current.s3db.setup_deployment.id == form.vars.id).update(db_password = password)
current.session.information = current.T("Press 'Deploy' when you are ready")
# -------------------------------------------------------------------------
@staticmethod
def setup_deployment_update_onaccept(form):
"""
Process changed fields on server:
- smtp_id
"""
db = current.db
s3db = current.s3db
form_vars_get = form.vars.get
record = form.record
deployment_id = form_vars_get("id")
smtp_id = form_vars_get("smtp_id")
if smtp_id == record.smtp_id:
# Nothing more to do
return
# Adjust the Deployment's SMTP Smart Host
playbook = []
# Lookup Server Details
svtable = s3db.setup_server
query = (svtable.deployment_id == deployment_id) & \
(svtable.role.belongs((1, 4)))
server = db(query).select(svtable.name,
svtable.host_ip,
svtable.remote_user,
svtable.private_key,
limitby = (0, 1)
).first()
host_ip = server.host_ip
if host_ip == "127.0.0.1":
connection = "local"
else:
provided_key = server.private_key
if not provided_key:
# Abort
db.rollback()
current.response.error = current.T("Update failed: SSH Key needed when applying away from localhost")
return
connection = "smart"
tasks = []
# Copy the Private Key to where it will be used
provided_key = os.path.join(current.request.folder, "uploads", provided_key)
private_key = "/tmp/%s.pem" % server.name
tasks.append({"copy": {"src": provided_key,
"dest": private_key,
"mode": "0600",
},
})
# Add instance to host group (to associate private_key)
tasks.append({"add_host": {"hostname": host_ip,
"groupname": "launched",
"ansible_ssh_private_key_file": private_key,
},
})
playbook.append({"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": tasks,
})
host_ip = "launched"
if smtp_id is None:
# Reset to default configuration
tasks = [{"file": {"path": "/etc/exim4/exim4.conf",
"state": "absent",
},
},
{"service": {"name": "exim4",
"state": "restarted",
},
"become": "yes",
},
]
else:
# Apply Smart Host configuration
# - like roles/exim/smart_host.yml
itable = s3db.setup_instance
query = (itable.deployment_id == record.id) & \
(itable.type == 1)
instance = db(query).select(itable.url,
limitby = (0, 1)
).first()
url = instance.url
if "://" in url:
protocol, sitename = url.split("://", 1)
else:
sitename = url
stable = s3db.setup_smtp
smtp = db(stable.id == smtp_id).select(stable.hostname,
stable.username,
stable.password,
limitby = (0, 1)
).first()
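            # NB QuotedSingle / QuotedDouble / Literal are wrappers used when the
            #    playbook is dumped to YAML, to control string quoting and block
            #    style so that Jinja2 expressions and the multi-line exim config
            #    fragments below survive serialisation intact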
tasks = [{"copy": {"src": "/usr/share/doc/exim4-base/examples/example.conf.gz",
"dest": "/etc/exim4/example.conf.gz",
"remote_src": "yes",
},
"become": "yes",
},
{"command": "gunzip /etc/exim4/example.conf.gz",
"args": {"chdir": "/etc/exim4",
},
"become": "yes",
},
{"copy": {"src": "/etc/exim4/example.conf",
"dest": "/etc/exim4/exim4.conf",
"remote_src": "yes",
},
"become": "yes",
},
{"lineinfile": {"path": "/etc/exim4/exim4.conf",
"regexp": QuotedDouble("{{ item.regexp }}"),
"line": QuotedDouble("{{ item.line }}"),
"backrefs": "yes",
},
"loop": [{"regexp": QuotedSingle("^# primary_hostname ="),
"line": QuotedSingle("primary_hostname = %s" % sitename),
},
{"regexp": QuotedSingle("^# keep_environment ="),
"line": QuotedSingle("keep_environment ="),
},
{"regexp": QuotedSingle("^# tls_advertise_hosts = *"),
"line": QuotedSingle("tls_advertise_hosts ="),
},
],
"become": "yes",
},
{"blockinfile": {"path": "/etc/exim4/exim4.conf",
"insertafter": QuotedSingle("begin routers"),
"marker": QuotedDouble("# {mark} ANSIBLE MANAGED BLOCK Router"),
"block": Literal("""send_via_smart_host:
driver = manualroute
domains = ! +local_domains
transport = smart_host_smtp
route_list = * %s;""" % smtp.hostname),
},
"become": "yes",
},
{"blockinfile": {"path": "/etc/exim4/exim4.conf",
"insertafter": QuotedSingle("begin transports"),
"marker": QuotedDouble("# {mark} ANSIBLE MANAGED BLOCK Transport"),
"block": Literal("""smart_host_smtp:
driver = smtp
port = 587
hosts_require_auth = *
hosts_require_tls = *"""),
},
"become": "yes",
},
{"blockinfile": {"path": "/etc/exim4/exim4.conf",
"insertafter": QuotedSingle("begin authenticators"),
"marker": QuotedDouble("# {mark} ANSIBLE MANAGED BLOCK Authenticator"),
"block": Literal("""smarthost_login:
driver = plaintext
public_name = LOGIN
client_send = : %s : %s""" % (smtp.username, smtp.password)),
},
"become": "yes",
},
{"service": {"name": "exim4",
"state": "restarted",
},
"become": "yes",
},
]
# Build Playbook data structure:
playbook.append({"hosts": host_ip,
"connection": connection,
"remote_user": server.remote_user,
"become_method": "sudo",
#"become_user": "root",
"tasks": tasks,
})
# Write Playbook
name = "smtp_%d" % int(time.time())
task_vars = setup_write_playbook("%s.yml" % name,
playbook,
)
# Run the Playbook
#task_vars["instance_id"] = instance_id # To Upload Logs to Instance record
current.s3task.schedule_task(name,
vars = task_vars,
function_name = "setup_run_playbook",
repeats = None,
timeout = 6000,
#sync_output = 300
)
# -------------------------------------------------------------------------
@staticmethod
def setup_get_templates():
"""
Return a Dict of Templates for the user to select from
            NB The controller normally reads this from the remote repo; this is a fallback in case we are offline
"""
file_path = os.path.join(current.request.folder, "modules", "templates", "templates.json")
with open(file_path, "r") as file:
templates = json.loads(file.read())
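        # Illustrative shape only (the real file ships with the repo):
        # the JSON maps template names to human-readable labels, e.g.
        # {"default": "Default", "<template name>": "<label>"}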
return templates
# -------------------------------------------------------------------------
@staticmethod
def setup_server_create_onaccept(form):
"""
New Server:
- Enable Monitoring
"""
server_id = form.vars.id
table = current.s3db.setup_monitor_server
exists = current.db(table.server_id == server_id).select(table.id,
limitby = (0, 1)
).first()
if exists is None:
table.insert(server_id = server_id)
# -------------------------------------------------------------------------
@staticmethod
def setup_server_ondelete(row):
"""
Cleanup Tasks when a Server is Deleted
- ~/.ssh/known_hosts
"""
table = current.s3db.setup_server
server = current.db(table.id == row.id).select(table.host_ip,
limitby = (0, 1)
).first()
if server.host_ip != "127.0.0.1":
# Cleanup known_hosts as it will change for a new deployment
            import subprocess
            # Remove any existing host key for this IP from known_hosts
            # (expand ~ explicitly: subprocess performs no shell tilde expansion)
            command = ["ssh-keygen",
                       "-f",
                       os.path.expanduser("~/.ssh/known_hosts"),
                       "-R",
                       server.host_ip,
                       ]
            result = subprocess.run(command, stdout=subprocess.PIPE)
# -------------------------------------------------------------------------
@staticmethod
def setup_server_wizard(r, **attr):
"""
Custom S3Method to select an Instance to Configure
"""
db = current.db
deployment_id = r.id
itable = db.setup_instance
instances = db(itable.deployment_id == deployment_id).select(itable.id,
itable.type,
)
if len(instances) == 1:
# Redirect to the Instance Wizard
redirect(URL(c="setup", f="deployment",
args = [deployment_id, "instance", instances.first().id, "wizard"],
))
# Provide a Dropdown of Instances for the user to select
dropdown = SELECT(OPTION(_value=""),
_id = "instances",
)
for instance in instances:
dropdown.append(OPTION(INSTANCE_TYPES[instance.type],
_value="%s" % instance.id))
script = '''
var dropdown = $('#instances');
dropdown.change(function() {
var url = S3.Ap.concat('/setup/deployment/%s/instance/' + this.value + '/wizard');
$(location).prop('href', url);
})''' % deployment_id
response = current.response
response.s3.jquery_ready.append(script)
response.view = "simple.html"
output = {"item": DIV(P(current.T("Select the instance you wish to configure")),
dropdown),
}
return output
# -------------------------------------------------------------------------
def setup_instance_wizard(self, r, **attr):
"""
Custom S3Method to Configure an Instance
@ToDo: Support remote servers/instances
@ToDo: Option to Propagate settings from Prod to Demo &/or Test
- on by default
"""
from gluon import DIV, IS_IN_SET, SQLFORM
from gluon.sqlhtml import RadioWidget
from s3 import s3_mark_required
from s3dal import Field
T = current.T
response = current.response
settings = current.deployment_settings
has_module = settings.has_module
all_pages = settings.get_setup_wizard_questions()
# Filter out inactive pages (for disabled modules)
# - this only works for Local Server
active_pages = []
aappend = active_pages.append
for page in all_pages:
module = page.get("module")
if not module or has_module(module):
aappend(page)
current_page = int(r.get_vars.get("page", 1))
last_page = len(active_pages)
base_url = URL(c="setup", f="deployment",
args = r.args,
)
i = 0
tabs = DIV(_class = "tabs")
tappend = tabs.append
for page in active_pages:
i += 1
if i == current_page:
_class = "tab_here"
elif i == last_page:
_class = "tab_last"
else:
_class = "tab_other"
tappend(SPAN(A(T(page.get("title")),
_href = "%s?page=%s" % (base_url, i)
),
_class = _class,
))
page = active_pages[current_page - 1] # 0-indexed
questions = page.get("questions", [])
fields = []
fappend = fields.append
if len(questions):
QUESTIONS = True
for q in questions:
try:
fn = getattr(settings, q["fn"])
except:
default = None
else:
try:
default = fn() # This is only the case for Local Server
except:
default = None
setting = q["setting"]
fname = setting.replace(".", "_")
fappend(Field(fname,
default = default,
label = T(q["question"]),
requires = IS_IN_SET(q["options"]),
widget = RadioWidget.widget,
#widget = lambda f, v: RadioWidget.widget(f, v, style="divs"),
_id = "setting",
))
else:
QUESTIONS = False
modules = page.get("modules", [])
TRUE_FALSE = (True, False)
for m in modules:
module = m["module"]
default = has_module(module) # This is only the case for Local Server
label = T(m["label"])
fappend(Field(module,
default = default,
label = label,
requires = IS_IN_SET(TRUE_FALSE),
widget = RadioWidget.widget,
comment = DIV(_class="tooltip",
_title="%s|%s" % (label,
T(m["description"]),
),
),
_id = "module",
))
labels, required = s3_mark_required(fields)
response.s3.has_required = required
response.form_label_separator = ""
form = SQLFORM.factory(#formstyle = settings.get_ui_formstyle(),
submit_button = T("Submit"),
labels = labels,
separator = "",
table_name = "options", # Dummy table name
_id = "options",
*fields
)
if form.accepts(r.post_vars, current.session):
            # Process Form
if QUESTIONS:
result = setup_settings_apply(r.id, form.vars)
else:
# NB This won't display suitable tabs immediately as the enabling/disabling is async
result = setup_modules_apply(r.id, form.vars)
if result:
response.error = result
else:
response.confirmation = T("Settings Applied")
current.response.view = "setup/wizard.html"
output = {"item": form,
"tabs": tabs,
"title": T("Configuration Wizard"),
}
return output
# -------------------------------------------------------------------------
@staticmethod
def setup_instance_deploy(r, **attr):
"""
Custom S3Method to Deploy an Instance
"""
db = current.db
s3db = current.s3db
deployment_id = r.id
# Get Instance details
# - we read all instances for Certbot configuration
instance_id = r.component_id
itable = s3db.setup_instance
query = (itable.deployment_id == deployment_id) & \
(itable.deleted == False)
instances = db(query).select(itable.id,
itable.type,
itable.url,
itable.sender,
itable.start,
)
all_sites = []
all_append = all_sites.append
for instance in instances:
url = instance.url
if "://" in url:
protocol, url = url.split("://", 1)
all_append(url)
if str(instance.id) == instance_id:
sitename = url
sender = instance.sender
start = instance.start
instance_type = instance.type
if instance.type == 1:
sitename_prod = url
# Default to SSL
# (plain http requests will still work as automatically redirected to https)
protocol = "https"
# Get Deployment details
dtable = s3db.setup_deployment
deployment = db(dtable.id == deployment_id).select(dtable.repo_url,
dtable.webserver_type,
dtable.db_type,
dtable.db_password,
dtable.country,
dtable.template,
dtable.template_manual,
dtable.cloud_id,
dtable.dns_id,
dtable.email_id,
dtable.smtp_id,
limitby = (0, 1)
).first()
# Get Server(s) details
stable = s3db.setup_server
query = (stable.deployment_id == deployment_id)
cloud_id = deployment.cloud_id
if cloud_id:
# Lookup the Instance Type
ctable = s3db.setup_cloud
cloud = db(ctable.cloud_id == deployment.cloud_id).select(ctable.instance_type,
limitby = (0, 1)
).first()
cloud_type = cloud.instance_type
if cloud_type == "setup_aws_cloud":
# Get Cloud details
ctable = s3db.setup_aws_cloud
cloud = db(ctable.cloud_id == cloud_id).select(ctable.access_key,
ctable.secret_key,
limitby = (0, 1)
).first()
# Get Server(s) details
cstable = s3db.setup_aws_server
left = cstable.on(cstable.server_id == stable.id)
servers = db(query).select(stable.id,
stable.name,
stable.role,
stable.host_ip,
stable.remote_user,
stable.private_key,
cstable.region,
cstable.instance_type,
cstable.image,
cstable.reserved_instance,
cstable.security_group,
cstable.instance_id,
left = left,
)
elif cloud_type == "setup_openstack_cloud":
# Get Cloud details
ctable = s3db.setup_openstack_cloud
cloud = db(ctable.cloud_id == cloud_id).select(ctable.auth_url,
ctable.username,
ctable.password,
ctable.project_name,
ctable.domain_name,
limitby = (0, 1)
).first()
# Get Server(s) details
cstable = s3db.setup_openstack_server
left = cstable.on(cstable.server_id == stable.id)
servers = db(query).select(stable.id,
stable.name,
stable.role,
stable.host_ip,
stable.remote_user,
stable.private_key,
cstable.instance_type,
cstable.image,
cstable.volume_size,
cstable.network,
cstable.security_group,
cstable.region,
cstable.availability_zone,
left = left,
)
else:
raise NotImplementedError
else:
# Get Server(s) details
servers = db(query).select(stable.name,
stable.role,
stable.host_ip,
stable.remote_user,
stable.private_key,
)
# Build Playbook data structure
#roles_path = os.path.join(r.folder, "private", "eden_deploy", "roles")
appname = "eden" # @ToDo: Allow this to be configurable
hostname = sitename.split(".", 1)[0]
db_password = deployment.db_password
web_server = WEB_SERVERS[deployment.webserver_type]
db_type = DB_SERVERS[deployment.db_type]
instance_type = INSTANCE_TYPES[instance_type]
prod = instance_type == "prod"
parts = deployment.repo_url.split("/")
repo_owner = parts[3]
repo = parts[4]
repo_url = "git://github.com/%s/%s.git" % (repo_owner, repo)
template_manual = deployment.template_manual
if template_manual:
# Use this list
templates = template_manual.split(",")
template = []
for t in templates:
# Strip whitespace
template.append(t.strip())
else:
# Use the value from dropdown (& introspect the locale template(s))
template = deployment.template
email_id = deployment.email_id
if email_id:
# Email Group Provider
# - assume Google for now
getable = s3db.setup_google_email
            email_service = db(getable.email_id == email_id).select(getable.credentials,
                                                                    getable.email,
                                                                    limitby = (0, 1)
                                                                    ).first()
gitable = s3db.setup_google_instance
google_instance = db(gitable.instance_id == instance_id).select(gitable.id,
gitable.name,
gitable.email,
gitable.member,
limitby = (0, 1)
).first()
group_email = google_instance.email
from google.oauth2 import service_account
from googleapiclient.discovery import build
#creds_path = os.path.join("/", "tmp", "credentials-%s.json" % instance_id)
creds_path = os.path.join("\\", "temp", "credentials-%s.json" % instance_id)
with open(creds_path, "w") as creds_file:
creds_file.write(json.dumps(email_service.credentials))
credentials = service_account.Credentials.from_service_account_file(
creds_path,
scopes = ["https://www.googleapis.com/auth/admin.directory.group"]
)
credentials = credentials.with_subject(email_service.email)
service = build("admin", "directory_v1", credentials=credentials)
# Create Group
results = service.groups().insert(body = {"name": google_instance.name,
"email": group_email,
}).execute()
group_id = results.get("id")
# Store group_id
google_instance.update_record(group_id = group_id)
# Add Member
results = service.members().insert(groupKey = group_id,
body = {"email": google_instance.member,
}).execute()
# Cleanup
os.unlink(creds_path)
# Use newly-created group as the Sender
sender = group_email
smtp_id = deployment.smtp_id
if prod and smtp_id:
# SMTP Smart Host
stable = s3db.setup_smtp
smtp = db(stable.id == smtp_id).select(stable.hostname,
stable.username,
stable.password,
limitby = (0, 1)
).first()
smart_host = smtp.hostname
smtp_username = smtp.username
smtp_password = smtp.password
else:
smart_host = None
smtp_username = None
smtp_password = None
delete_ssh_key = True
if len(servers) == 1:
# All-in-one deployment
server = servers.first()
playbook = []
if prod and cloud_id:
tasks = []
connection = "smart"
request = current.request
if cloud_type == "setup_aws_cloud":
access_key = cloud.access_key
secret_key = cloud.secret_key
cloud_server = server["setup_aws_server"]
elif cloud_type == "setup_openstack_cloud":
auth = {"auth_url": cloud.auth_url,
"username": cloud.username,
"password": cloud.password,
"project_name": cloud.project_name,
"domain_name": cloud.domain_name,
}
cloud_server = server["setup_openstack_server"]
server = server["setup_server"]
remote_user = server.remote_user
server_name = server.name
private_key = "/tmp/%s" % server_name
public_key = "%s.pub" % private_key
provided_key = server.private_key
if provided_key:
provided_key = os.path.join(r.folder, "uploads", provided_key)
# Copy the Private Key to where it will be used
tasks.append({"copy": {"src": provided_key,
"dest": private_key,
"mode": "0600",
},
})
# Generate the Public Key
command = "openssl rsa -in %(private_key)s -pubout > %(public_key)s" % \
{"private_key": private_key,
"public_key": public_key,
}
tasks.append({"command": command,
})
else:
# Generate an OpenSSH keypair with the default values (4096 bits, rsa)
tasks.append({"openssh_keypair": {"path": private_key,
},
})
if cloud_type == "setup_aws_cloud":
region = cloud_server.region
# Upload Public Key to Cloud
tasks.append({"ec2_key": {"aws_access_key": access_key,
"aws_secret_key": secret_key,
"region": region,
"name": server_name,
"key_material": "{{ lookup('file', '%s') }}" % public_key,
},
})
if cloud_server.instance_id:
# Terminate old AWS instance
# @ToDo: Allow deployment on existing instances?
tasks.append({"ec2": {"aws_access_key": access_key,
"aws_secret_key": secret_key,
"region": region,
"instance_ids": cloud_server.instance_id,
"state": "absent",
},
})
# Launch Cloud instance
command = "python web2py.py -S %(appname)s -M -R %(appname)s/private/eden_deploy/tools/update_aws_server.py -A %(server_id)s %(server_name)s {{ item.public_ip }} {{ item.id }}" % \
{"appname": request.application,
"server_id": server.id,
"server_name": server_name,
}
tasks += [# Launch AWS Instance
{"ec2": {"aws_access_key": access_key,
"aws_secret_key": secret_key,
"key_name": server_name,
"region": region,
"instance_type": cloud_server.instance_type,
"image": cloud_server.image,
"group": cloud_server.security_group,
"wait": "yes",
"count": 1,
"instance_tags": {"Name": server_name,
},
},
"register": "ec2",
},
# Add new instance to host group (to associate private_key)
{"add_host": {"hostname": "{{ item.public_ip }}",
"groupname": "launched",
"ansible_ssh_private_key_file": "/tmp/%s" % server_name,
},
"loop": "{{ ec2.instances }}",
},
# Update Server record
{"command": {"cmd": command,
"chdir": request.env.web2py_path,
},
"loop": "{{ ec2.instances }}",
},
]
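                    # NB "register: ec2" stores the module result, so the follow-up
                    #    tasks can loop over "{{ ec2.instances }}" to pick up the
                    #    public IP and id of the newly-launched instance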
if cloud_server.reserved_instance:
try:
import awscli.clidriver
except ImportError:
current.session.warning = current.T("Cannot purchase reserved instance as awscli not installed")
else:
# Configure
creds_path = os.path.join("~", ".aws", "credentials")
with open(creds_path, "w") as creds_file:
creds_file.write("""[default]
aws_access_key_id = %s
aws_secret_access_key = %s""" % (access_key, secret_key))
conf_path = os.path.join("~", ".aws", "config")
with open(conf_path, "w") as conf_file:
conf_file.write("""[default]
region = %s
output = json""" % region)
import subprocess
# Lookup Offering ID
# https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-reserved-instances-offerings.html
command = ["aws",
"ec2",
"describe-reserved-instances-offerings",
"--instance-type",
cloud_server.instance_type,
"--max-duration",
"31536000", # 1 Year
"--offering-type",
"All Upfront",
"--product-description",
"Linux/UNIX (Amazon VPC)",
"--filters",
"Name=scope,Values=Region",
"--instance-tenancy",
"default",
"--offering-class",
"standard",
"--no-include-marketplace",
]
result = subprocess.run(command, stdout=subprocess.PIPE)
output = json.loads(result.stdout)
offering_id = output["ReservedInstancesOfferings"][0]["ReservedInstancesOfferingId"]
# Purchase a Reserved Instance
# https://docs.aws.amazon.com/cli/latest/reference/ec2/purchase-reserved-instances-offering.html
command = ["aws",
"ec2",
"purchase-reserved-instances-offering",
"--instance-count",
"1",
"--reserved-instances-offering-id",
offering_id,
#"--dry-run",
]
result = subprocess.run(command, stdout=subprocess.PIPE)
#output = json.loads(result.stdout)
elif cloud_type == "setup_openstack_cloud":
# Upload Public Key to Cloud
tasks.append({"os_keypair": {"auth": auth,
"name": server_name,
"public_key_file": public_key,
},
})
# Launch Cloud instance
command = "python web2py.py -S %(appname)s -M -R %(appname)s/private/eden_deploy/tools/update_server.py -A %(server_id)s %(server_name)s {{ openstack.openstack.public_v4 }}" % \
{"appname": request.application,
"server_id": server.id,
"server_name": server_name,
}
tasks += [# Launch OpenStack Instance
{"os_server": {"auth": auth,
"key_name": server_name,
"name": server_name,
"flavor": cloud_server.instance_type,
"image": cloud_server.image,
"volume_size": cloud_server.volume_size,
"boot_from_volume": "yes",
"terminate_volume": "yes",
"network": cloud_server.network,
"security_groups": cloud_server.security_group,
"region_name": cloud_server.region,
"availability_zone": cloud_server.availability_zone,
"wait": "yes",
},
"register": "openstack",
},
# Add new instance to host group (to associate private_key)
{"add_host": {"hostname": "{{ openstack.openstack.public_v4 }}",
"groupname": "launched",
"ansible_ssh_private_key_file": "/tmp/%s" % server_name,
},
},
# Update Server record
{"command": {"cmd": command,
"chdir": request.env.web2py_path,
},
},
]
dns_id = deployment.dns_id
if dns_id:
# Lookup the Instance Type
dtable = s3db.setup_dns
dns = db(dtable.dns_id == dns_id).select(dtable.instance_type,
limitby = (0, 1)
).first()
dns_type = dns.instance_type
if dns_type == "setup_gandi_dns":
gtable = s3db.setup_gandi_dns
gandi = db(gtable.dns_id == dns_id).select(gtable.api_key,
gtable.domain,
gtable.zone,
limitby = (0, 1)
).first()
gandi_api_key = gandi.api_key
url = "https://dns.api.gandi.net/api/v5/zones/%s/records" % gandi.zone
dns_record = sitename.split(".%s" % gandi.domain, 1)[0]
# Delete any existing record
tasks.append({"uri": {"url": "%s/%s" % (url, dns_record),
"method": "DELETE",
"headers": {"X-Api-Key": gandi_api_key,
},
"status_code": ["200", "204"],
},
# Don't worry if it didn't exist
"ignore_errors": "yes",
})
# Create new record
if cloud_type == "setup_aws_cloud":
tasks.append({"uri": {"url": url,
"method": "POST",
"headers": {"X-Api-Key": gandi_api_key,
},
"body_format": "json", # Content-Type: application/json
"body": '{"rrset_name": "%s", "rrset_type": "A", "rrset_ttl": 10800, "rrset_values": ["{{ item.public_ip }}"]}' % dns_record,
"status_code": ["200", "201"],
},
"loop": "{{ ec2.instances }}",
})
elif cloud_type == "setup_openstack_cloud":
tasks.append({"uri": {"url": url,
"method": "POST",
"headers": {"X-Api-Key": gandi_api_key,
},
"body_format": "json", # Content-Type: application/json
"body": '{"rrset_name": "%s", "rrset_type": "A", "rrset_ttl": 10800, "rrset_values": ["{{ openstack.openstack.public_v4 }}"]}' % dns_record,
"status_code": ["200", "201"],
},
})
elif dns_type == "setup_godaddy_dns":
gtable = s3db.setup_godaddy_dns
godaddy = db(gtable.dns_id == dns_id).select(gtable.domain,
gtable.api_key,
gtable.secret,
limitby = (0, 1)
).first()
domain = godaddy.domain
dns_record = sitename.split(".%s" % domain, 1)[0]
url = "https://api.godaddy.com/v1/domains/%s/records/A/%s" % (domain, dns_record)
# No need to delete existing record (can't anyway!)
# Create new record or replace existing
if cloud_type == "setup_aws_cloud":
tasks.append({"uri": {"url": url,
"method": "PUT",
"headers": {"Authorization": "sso-key %s:%s" % (godaddy.api_key, godaddy.secret),
},
"body_format": "json", # Content-Type: application/json
"body": '[{"name": "%s", "type": "A", "ttl": 10800, "data": "{{ item.public_ip }}"}]' % dns_record,
"status_code": ["200"],
},
"loop": "{{ ec2.instances }}",
})
elif cloud_type == "setup_openstack_cloud":
tasks.append({"uri": {"url": url,
"method": "PUT",
"headers": {"Authorization": "sso-key %s:%s" % (godaddy.api_key, godaddy.secret),
},
"body_format": "json", # Content-Type: application/json
"body": '[{"name": "%s", "type": "A", "ttl": 10800, "data": "{{ openstack.openstack.public_v4 }}"}]' % dns_record,
"status_code": ["200"],
},
})
else:
current.session.warning = current.T("Deployment will not have SSL: No DNS Provider configured to link to new server IP Address")
# @ToDo: Support Elastic IPs
protocol = "http"
playbook.append({"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": tasks,
})
host_ip = "launched"
# Wait for Server to become available
playbook.append({"hosts": "launched",
"connection": "smart",
"remote_user": remote_user,
"gather_facts": "no",
"tasks": [{"wait_for_connection": {"timeout": 300, # seconds
},
},
],
})
else:
# No Cloud or additional Instance on existing Host
remote_user = server.remote_user
host_ip = server.host_ip
# @ToDo Check that ip_addr is correct
# - if host_ip == "127.0.0.1" then we can check the contents
if host_ip == "127.0.0.1":
connection = "local"
delete_ssh_key = False
else:
# We will need the SSH key
private_key = server.private_key
if not private_key:
# Abort
current.session.error = current.T("Deployment failed: SSH Key needed when deploying away from localhost")
redirect(URL(c="setup", f="deployment",
args = [deployment_id, "instance"],
))
tasks = []
dns_id = deployment.dns_id
if dns_id:
# Lookup the Instance Type
dtable = s3db.setup_dns
dns = db(dtable.dns_id == dns_id).select(dtable.instance_type,
limitby = (0, 1)
).first()
dns_type = dns.instance_type
if dns_type == "setup_gandi_dns":
gtable = s3db.setup_gandi_dns
gandi = db(gtable.dns_id == dns_id).select(gtable.api_key,
gtable.domain,
gtable.zone,
limitby = (0, 1)
).first()
gandi_api_key = gandi.api_key
url = "https://dns.api.gandi.net/api/v5/zones/%s/records" % gandi.zone
dns_record = sitename.split(".%s" % gandi.domain, 1)[0]
# Delete any existing record
task = {"uri": {"url": "%s/%s" % (url, dns_record),
"method": "DELETE",
"headers": {"X-Api-Key": gandi_api_key,
},
"status_code": ["200", "204"],
},
# Don't worry if it didn't exist
"ignore_errors": "yes",
}
if not prod:
# only_tags
task["tags"] = [instance_type]
tasks.append(task)
# Create new record
task = {"uri": {"url": url,
"method": "POST",
"headers": {"X-Api-Key": gandi_api_key,
},
"body_format": "json", # Content-Type: application/json
"body": '{"rrset_name": "%s", "rrset_type": "A", "rrset_ttl": 10800, "rrset_values": ["%s"]}' % (dns_record, host_ip),
"status_code": ["200", "201"],
},
}
if not prod:
# only_tags
task["tags"] = [instance_type]
tasks.append(task)
elif dns_type == "setup_godaddy_dns":
gtable = s3db.setup_godaddy_dns
godaddy = db(gtable.dns_id == dns_id).select(gtable.domain,
gtable.api_key,
gtable.secret,
limitby = (0, 1)
).first()
domain = godaddy.domain
dns_record = sitename.split(".%s" % domain, 1)[0]
url = "https://api.godaddy.com/v1/domains/%s/records/A/%s" % (domain, dns_record)
# No need to delete existing record (can't anyway!)
# Create new record or replace existing
task = {"uri": {"url": url,
"method": "PUT",
"headers": {"Authorization": "sso-key %s:%s" % (godaddy.api_key, godaddy.secret),
},
"body_format": "json", # Content-Type: application/json
"body": '[{"name": "%s", "type": "A", "ttl": 10800, "data": "%s"}]' % (dns_record, host_ip),
"status_code": ["200"],
},
}
if not prod:
# only_tags
task["tags"] = [instance_type]
tasks.append(task)
else:
# Check if DNS is already configured properly
import socket
try:
ip_addr = socket.gethostbyname(sitename)
except socket.gaierror:
current.session.warning = current.T("Deployment will not have SSL: URL doesn't resolve in DNS")
protocol = "http"
#else:
# # We may wish to administer via a private IP, so shouldn't do this:
# if ip_addr != host_ip:
# current.session.warning = current.T("Deployment will not have SSL: URL doesn't match server IP Address")
# protocol = "http"
# Copy the Private Key to where it will be used
provided_key = os.path.join(r.folder, "uploads", private_key)
private_key = "/tmp/%s" % server.name
task = {"copy": {"src": provided_key,
"dest": private_key,
"mode": "0600",
},
}
if not prod:
# only_tags
task["tags"] = [instance_type]
tasks.append(task)
# Add instance to host group (to associate private_key)
task = {"add_host": {"hostname": host_ip,
"groupname": "launched",
"ansible_ssh_private_key_file": private_key,
},
}
if not prod:
# only_tags
task["tags"] = [instance_type]
tasks.append(task)
playbook.append({"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": tasks,
})
host_ip = "launched"
connection = "smart"
# Deploy to Server
playbook.append({"hosts": host_ip,
"connection": connection,
"remote_user": remote_user,
"become_method": "sudo",
#"become_user": "root",
"vars": {"appname": appname,
"all_sites": ",".join(all_sites),
"country": deployment.country,
"db_ip": "127.0.0.1",
"db_type": db_type,
"hostname": hostname,
"password": db_password,
"protocol": protocol,
"repo_url": repo_url,
"sender": sender,
"sitename": sitename,
"sitename_prod": sitename_prod,
"smart_host": smart_host,
"smtp_username": smtp_username,
"smtp_password": smtp_password,
"start": start,
"template": template,
"type": instance_type,
"web_server": web_server,
},
"roles": [{"role": "common" },
{"role": "ansible" },
{"role": "exim" },
{"role": db_type },
{"role": "uwsgi" },
{"role": web_server },
{"role": "final" },
]
})
if delete_ssh_key:
# Delete SSH private key from the filesystem
task = {"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": [{"file": {"path": private_key,
"state": "absent",
},
},
],
}
if not prod:
# only_tags
task["tags"] = [instance_type]
playbook.append(task)
else:
# Separate Database
# @ToDo: Needs completing
# Abort
current.session.error = current.T("Deployment failed: Currently only All-in-one deployments supported with this tool")
redirect(URL(c="setup", f="deployment",
args = [deployment_id, "instance"],
))
for server in servers:
if server.role == 2:
db_ip = server.host_ip
private_key = server.private_key
remote_user = server.remote_user
else:
webserver_ip = server.host_ip
playbook = [{"hosts": db_ip,
"remote_user": remote_user,
"become_method": "sudo",
#"become_user": "root",
"vars": {"db_type": db_type,
"password": db_password,
"type": instance_type
},
"roles": [{ "role": db_type }, # "%s/%s" % (roles_path, db_type)
]
},
{"hosts": webserver_ip,
#"remote_user": remote_user,
"become_method": "sudo",
#"become_user": "root",
"vars": {"appname": appname,
"all_sites": ",".join(all_sites),
"country": deployment.country,
"db_ip": db_ip,
"db_type": db_type,
"hostname": hostname,
"password": db_password,
"protocol": protocol,
"repo_url": repo_url,
"sender": sender,
"sitename": sitename,
"sitename_prod": sitename_prod,
"start": start,
"template": template,
"type": instance_type,
"web_server": web_server,
},
"roles": [{"role": "common" },
{"role": "ansible" },
{"role": "exim" },
{"role": "uwsgi" },
{"role": web_server },
{"role": "final" },
],
},
]
# Write Playbook
name = "deployment_%d" % int(time.time())
task_vars = setup_write_playbook("%s.yml" % name,
playbook,
)
# Run Playbook
task_vars["instance_id"] = instance_id # To Upload Logs to Instance record
if not prod:
# only_tags
task_vars["tags"] = [instance_type]
task_id = current.s3task.schedule_task(name,
vars = task_vars,
function_name = "setup_run_playbook",
repeats = None,
timeout = 6000,
#sync_output = 300
)
# Link scheduled task to current record
# = allows us to monitor deployment progress
db(itable.id == instance_id).update(task_id = task_id)
current.session.confirmation = current.T("Deployment initiated")
redirect(URL(c="setup", f="deployment",
args = [deployment_id, "instance"],
))
# -------------------------------------------------------------------------
@staticmethod
def setup_instance_settings(r, **attr):
"""
Custom interactive S3Method to Read the Settings for an instance
from models/000_config.py
"""
deployment_id = r.id
setup_instance_settings_read(r.component_id, deployment_id)
current.session.confirmation = current.T("Settings Read")
redirect(URL(c="setup", f="deployment",
args = [deployment_id, "setting"]),
)
# -------------------------------------------------------------------------
@staticmethod
def setup_instance_start(r, **attr):
"""
Custom interactive S3Method to Start an Instance
"""
setup_instance_method(r.component_id)
current.session.confirmation = current.T("Instance Started")
redirect(URL(c="setup", f="deployment",
args = [r.id, "instance"]),
)
# -------------------------------------------------------------------------
@staticmethod
def setup_instance_stop(r, **attr):
"""
Custom interactive S3Method to Stop an Instance
"""
setup_instance_method(r.component_id, "stop")
current.session.confirmation = current.T("Instance Stopped")
redirect(URL(c="setup", f="deployment",
args = [r.id, "instance"]),
)
# -------------------------------------------------------------------------
@staticmethod
def setup_instance_clean(r, **attr):
"""
Custom interactive S3Method to Clean an Instance
"""
setup_instance_method(r.component_id, "clean")
current.session.confirmation = current.T("Instance Clean Started")
redirect(URL(c="setup", f="deployment",
args = [r.id, "instance"]),
)
# -------------------------------------------------------------------------
@staticmethod
def setup_instance_ondelete(row):
"""
Cleanup Tasks when an Instance is Deleted
- DNS
"""
db = current.db
s3db = current.s3db
dtable = s3db.setup_deployment
deployment = db(dtable.id == row.deployment_id).select(dtable.dns_id,
limitby = (0, 1)
).first()
dns_id = deployment.dns_id
if dns_id is None:
# Nothing to cleanup
return
# Lookup the Instance Type
dtable = s3db.setup_dns
dns = db(dtable.dns_id == dns_id).select(dtable.instance_type,
limitby = (0, 1)
).first()
dns_type = dns.instance_type
if dns_type == "setup_godaddy_dns":
# No way currently to clean up a single record without replacing the entire domain's records (omitting this one), so skip
return
#elif dns_type == "setup_gandi_dns":
# Read URL (only deleted_fks are in the row object)
itable = s3db.setup_instance
instance = db(itable.id == row.id).select(itable.url,
limitby = (0, 1)
).first()
# Get Gandi details
gtable = s3db.setup_gandi_dns
gandi = db(gtable.dns_id == dns_id).select(gtable.api_key,
gtable.domain,
gtable.zone,
limitby = (0, 1)
).first()
gandi_api_key = gandi.api_key
domain = gandi.domain
url = "https://dns.api.gandi.net/api/v5/zones/%s/records" % gandi.zone
# Delete DNS record
parts = instance.url.split("://")
if len(parts) == 1:
sitename = parts[0]
else:
sitename = parts[1]
dns_record = sitename.split(".%s" % domain, 1)[0]
playbook = [{"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": [{"uri": {"url": "%s/%s" % (url, dns_record),
"method": "DELETE",
"headers": {"X-Api-Key": gandi_api_key,
},
},
# Don't worry if it didn't exist
"ignore_errors": "yes",
},
],
},
]
# Write Playbook
name = "instance_ondelete_%d" % int(time.time())
task_vars = setup_write_playbook("%s.yml" % name,
playbook,
)
# Run Playbook
#task_vars["instance_id"] = instance_id # To Upload Logs to Instance record
current.s3task.schedule_task(name,
vars = task_vars,
function_name = "setup_run_playbook",
repeats = None,
timeout = 6000,
#sync_output = 300
)
# -------------------------------------------------------------------------
@staticmethod
def setup_instance_update_onaccept(form):
"""
Process changed fields of the Instance:
- sender
- start
"""
db = current.db
s3db = current.s3db
form_vars_get = form.vars.get
record = form.record
deployment_id = record.deployment_id
instance_id = form_vars_get("id")
sender = form_vars_get("sender")
if sender != record.sender:
# Adjust the Instance's Email Sender
stable = s3db.setup_setting
query = (stable.instance_id == instance_id) & \
(stable.setting == "mail.sender")
setting = db(query).select(stable.id,
limitby = (0, 1)
).first()
if setting:
setting_id = setting.id
setting.update_record(new_value = sender)
else:
setting_id = stable.insert(deployment_id = deployment_id,
instance_id = instance_id,
setting = "mail.sender",
new_value = sender,
)
setup_setting_apply(setting_id)
if form_vars_get("start") is True:
if record.start is False:
# Start Instance at Boot
command = "enable"
else:
# Nothing more to do
return
elif record.start is True:
# Stop Instance at Boot
command = "disable"
else:
# Nothing more to do
return
playbook = []
# Lookup Server Details
svtable = s3db.setup_server
query = (svtable.deployment_id == deployment_id) & \
(svtable.role.belongs((1, 4)))
server = db(query).select(svtable.name,
svtable.host_ip,
svtable.remote_user,
svtable.private_key,
limitby = (0, 1)
).first()
host_ip = server.host_ip
if host_ip == "127.0.0.1":
connection = "local"
else:
provided_key = server.private_key
if not provided_key:
# Abort
db.rollback()
current.response.error = current.T("Update failed: SSH Key needed when applying away from localhost")
return
connection = "smart"
tasks = []
# Copy the Private Key to where it will be used
provided_key = os.path.join(current.request.folder, "uploads", provided_key)
private_key = "/tmp/%s.pem" % server.name
tasks.append({"copy": {"src": provided_key,
"dest": private_key,
"mode": "0600",
},
})
# Add instance to host group (to associate private_key)
tasks.append({"add_host": {"hostname": host_ip,
"groupname": "launched",
"ansible_ssh_private_key_file": private_key,
},
})
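# NB add_host places the server into Ansible's in-memory "launched" inventory group,
# which the following play targets once host_ip is set to "launched"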
playbook.append({"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": tasks,
})
host_ip = "launched"
appname = "eden" # @ToDo: Allow this to be configurable
itable = s3db.setup_instance
instance = db(itable.id == instance_id).select(itable.type,
limitby = (0, 1)
).first()
instance_type = INSTANCE_TYPES[instance.type]
# @ToDo: Lookup webserver_type from deployment once we support Apache
# Build Playbook data structure:
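# The loop below runs e.g. "update-rc.d uwsgi-<instance_type> enable 2" .. "enable 5"
# (or "disable") to toggle starting the instance at boot for runlevels 2-5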
playbook.append({"hosts": host_ip,
"connection": connection,
"remote_user": server.remote_user,
"become_method": "sudo",
#"become_user": "root",
"tasks": [{"name": "Modify Startup",
"command": "update-rc.d uwsgi-%s {{item}}" % instance_type,
"become": "yes",
"loop": ["%s 2" % command,
"%s 3" % command,
"%s 4" % command,
"%s 5" % command,
],
},
],
})
# Write Playbook
name = "boot_%d" % int(time.time())
task_vars = setup_write_playbook("%s.yml" % name,
playbook,
)
# Run the Playbook
task_vars["instance_id"] = instance_id # To Upload Logs to Instance record
current.s3task.schedule_task(name,
vars = task_vars,
function_name = "setup_run_playbook",
repeats = None,
timeout = 6000,
#sync_output = 300
)
# -------------------------------------------------------------------------
@staticmethod
def setup_setting_apply_interactive(r, **attr):
"""
Custom interactive S3Method to Apply a Setting to an instance
via models/000_config.py
"""
result = setup_setting_apply(r.component_id)
if result:
current.session.error = result
else:
current.session.confirmation = current.T("Setting Applied")
redirect(URL(c="setup", f="deployment",
args = [r.id, "setting"]),
)
# =============================================================================
class S3SetupMonitorModel(S3Model):
names = ("setup_monitor_server",
"setup_monitor_check",
"setup_monitor_task",
"setup_monitor_run",
"setup_monitor_alert",
)
def model(self):
T = current.T
db = current.db
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
set_method = self.set_method
server_id = self.setup_server_id
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
STATUS_OPTS = {0 : T("Unknown"),
1 : T("OK"),
2 : T("Warning"),
3 : T("Critical"),
}
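# NB The numeric ordering (0=Unknown .. 3=Critical) is relied upon when comparing
# Task & Server statuses in setup_monitor_run_task()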
status_id = S3ReusableField("status", "integer", notnull=True,
default = 0,
label = T("Status"),
represent = lambda opt: \
STATUS_OPTS.get(opt,
UNKNOWN_OPT),
requires = IS_IN_SET(STATUS_OPTS,
zero=None),
writable = False,
)
# =====================================================================
# Servers
# - extensions for Monitoring:
# Are checks Enabled?
# Overall Server Status
#
tablename = "setup_monitor_server"
define_table(tablename,
#Field("name", unique=True, length=255,
# label = T("Name"),
# ),
server_id(),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
status_id(),
s3_comments(),
*s3_meta_fields())
configure(tablename,
onaccept = self.setup_monitor_server_onaccept,
)
# =====================================================================
# Checks
# - monitoring scripts available
#
tablename = "setup_monitor_check"
define_table(tablename,
Field("name", unique=True, length=255,
label = T("Name"),
),
# Name of a function in modules.<settings.get_setup_monitor_template()>.monitor[.py]
# List populated in controllers/setup/monitor_check()
Field("function_name",
label = T("Function"),
comment = T("Functions defined in <template>.monitor.py")
),
Field("period", "integer",
default = 300,
label = T("Period"),
requires = IS_INT_IN_RANGE(60, 31536000), # Max 1 Year
represent = IS_INT_AMOUNT.represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Period"),
T("How many seconds between runs.")
)
),
),
# Default Options for this Check
Field("options", "json",
label = T("Options"),
requires = IS_EMPTY_OR(
IS_JSONS3()
),
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Check"),
title_display = T("Check Details"),
title_list = T("Checks"),
title_update = T("Edit Check"),
title_upload = T("Import Checks"),
label_list_button = T("List Checks"),
label_delete_button = T("Delete Check"),
msg_record_created = T("Check added"),
msg_record_modified = T("Check updated"),
msg_record_deleted = T("Check deleted"),
msg_list_empty = T("No Checks currently registered"))
represent = S3Represent(lookup = tablename)
check_id = S3ReusableField("check_id", "reference %s" % tablename,
label = T("Check"),
ondelete = "CASCADE",
represent = represent,
requires = IS_ONE_OF(db, "setup_monitor_check.id",
represent),
)
add_components(tablename,
setup_monitor_task = "check_id",
)
# =====================================================================
# Tasks
#
tablename = "setup_monitor_task"
define_table(tablename,
self.setup_deployment_id(readable = False,
writable = False,
),
server_id(),
check_id(),
Field("enabled", "boolean",
default = True,
label = T("Enabled?"),
represent = s3_yes_no_represent,
),
Field("period", "integer",
default = 300,
label = T("Period"),
requires = IS_INT_IN_RANGE(60, 31536000), # Max 1 Year
represent = IS_INT_AMOUNT.represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Period"),
T("How many seconds between runs.")
)
),
),
# Options for this Check on this Server
# - including any thresholds for non-Critical results
Field("options", "json",
label = T("Options"),
requires = IS_EMPTY_OR(
IS_JSONS3()
),
),
status_id(),
Field("result", "text",
label = T("Result"),
represent = lambda v: v.split("\n")[0] if v else \
current.messages["NONE"],
),
s3_datetime(label = T("Last Checked"),
writable = False,
),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Task"),
title_display = T("Task Details"),
title_list = T("Tasks"),
title_update = T("Edit Task"),
title_upload = T("Import Tasks"),
label_list_button = T("List Tasks"),
label_delete_button = T("Delete Task"),
msg_record_created = T("Task added"),
msg_record_modified = T("Task updated"),
msg_record_deleted = T("Task deleted"),
msg_list_empty = T("No Tasks currently registered"))
crud_form = S3SQLCustomForm("server_id",
"check_id",
"enabled",
"period",
"options",
S3SQLInlineComponent("monitor_alert",
label = T("Alerts"),
fields = [("", "person_id"),
],
),
"status",
"result",
"comments",
)
configure(tablename,
# Open the Log after creation
create_next = URL(c="setup", f="monitor_task",
args = ["[id]", "monitor_run"],
),
crud_form = crud_form,
list_fields = ["deployment_id",
"server_id",
"check_id",
"status",
"result",
"date",
],
onaccept = self.setup_monitor_task_onaccept,
)
represent = setup_MonitorTaskRepresent()
task_id = S3ReusableField("task_id", "reference %s" % tablename,
label = T("Task"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "setup_monitor_task.id",
represent)),
)
add_components(tablename,
setup_monitor_alert = "task_id",
setup_monitor_run = "task_id",
)
set_method("setup", "monitor_task",
method = "enable",
action = setup_monitor_task_enable_interactive)
set_method("setup", "monitor_task",
method = "disable",
action = setup_monitor_task_disable_interactive)
set_method("setup", "monitor_task",
method = "check",
action = setup_monitor_task_run)
# =====================================================================
# Runs
#
tablename = "setup_monitor_run"
define_table(tablename,
server_id(writable = False),
task_id(writable = False),
status_id(),
Field("result", "text",
label = T("Result"),
represent = lambda v: v.split("\n")[0] if v else \
current.messages["NONE"],
),
s3_comments(),
*s3_meta_fields()#,
#on_define = lambda table: \
# [table.created_on.set_attributes(represent = \
# lambda dt: S3DateTime.datetime_represent(dt, utc=True)),
# ]
)
crud_strings[tablename] = Storage(
#label_create = T("Create Log Entry"),
title_display = T("Log Entry Details"),
title_list = T("Log Entries"),
title_update = T("Edit Log Entry"),
#title_upload = T("Import Log Entries"),
label_list_button = T("List Log Entries"),
label_delete_button = T("Delete Log Entry"),
#msg_record_created = T("Log Entry added"),
msg_record_modified = T("Log Entry updated"),
msg_record_deleted = T("Log Entry deleted"),
msg_list_empty = T("No Log Entries currently registered"))
configure(tablename,
# Logs inserted automatically
insertable = False,
list_fields = ["created_on",
"server_id",
"task_id",
"status",
"result",
],
orderby = "setup_monitor_run.created_on desc",
)
# =============================================================================
# Alerts
# - people to alert when status != OK
#
tablename = "setup_monitor_alert"
define_table(tablename,
task_id(),
self.pr_person_id(comment = None,
empty = False,
ondelete = "CASCADE",
widget = None, # Dropdown, not Autocomplete
),
# Email-only for now
#self.pr_contact_id(),
s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def setup_monitor_server_onaccept(form):
"""
Process the Enabled Flag
"""
form_vars = form.vars
if form.record:
# Update form
# Process if changed
if form.record.enabled and not form_vars.enabled:
setup_monitor_server_disable(form_vars.id)
elif form_vars.enabled and not form.record.enabled:
setup_monitor_server_enable(form_vars.id)
else:
# Create form
# Process only if enabled
if form_vars.enabled:
setup_monitor_server_enable(form_vars.id)
# -------------------------------------------------------------------------
@staticmethod
def setup_monitor_task_onaccept(form):
"""
Process the Enabled Flag
Create Form:
Set deployment_id
PrePopulate Task Options/Period from Check Options/Period
"""
form_vars = form.vars
if form.record:
# Update form
# Process if changed
if form.record.enabled and not form_vars.enabled:
setup_monitor_task_disable(form_vars.id)
elif form_vars.enabled and not form.record.enabled:
setup_monitor_task_enable(form_vars.id)
else:
# Create form
db = current.db
record_id = form_vars.id
if form_vars.enabled:
# Process only if enabled
setup_monitor_task_enable(record_id)
# Read default check options
ctable = db.setup_monitor_check
check = db(ctable.id == form_vars.check_id).select(ctable.options,
ctable.period,
limitby = (0, 1)
).first()
# Read deployment_id
ttable = db.setup_monitor_task
server_id = form_vars.server_id
if server_id:
task = None
else:
# Read record
task = db(ttable.id == record_id).select(ttable.id,
ttable.server_id,
limitby = (0, 1)
).first()
server_id = task.server_id
stable = db.setup_server
server = db(stable.id == server_id).select(stable.deployment_id,
limitby = (0, 1)
).first()
deployment_id = server.deployment_id
# Update record
if task:
task.update_record(deployment_id = deployment_id,
options = check.options,
period = check.period,
)
else:
db(ttable.id == record_id).update(deployment_id = deployment_id,
options = check.options,
period = check.period,
)
# =============================================================================
def setup_monitor_server_enable(monitor_server_id):
"""
Enable Monitoring for a Server
- Schedule all enabled Tasks
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
mstable = current.s3db.setup_monitor_server
record = db(mstable.id == monitor_server_id).select(mstable.id,
mstable.enabled,
mstable.server_id,
limitby = (0, 1)
).first()
if not record.enabled:
# Flag it as enabled
record.update_record(enabled = True)
table = db.setup_monitor_task
query = (table.server_id == record.server_id) & \
(table.enabled == True) & \
(table.deleted == False)
tasks = db(query).select(table.id,
table.period,
)
# Do we have any Tasks already Scheduled?
args = []
for task in tasks:
args.append("[%s]" % task.id)
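# NB scheduler_task.args stores the args list as a JSON string, so "[<task_id>]"
# matches the rows scheduled for "setup_monitor_run_task"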
ttable = db.scheduler_task
query = ((ttable.function_name == "setup_monitor_run_task") & \
(ttable.args.belongs(args)) & \
(ttable.status.belongs(["RUNNING",
"QUEUED",
"ALLOCATED"])))
exists = db(query).select(ttable.id)
exists = [r.id for r in exists]
for task in tasks:
task_id = task.id
if task_id not in exists:
current.s3task.schedule_task("setup_monitor_run_task",
args = [task_id],
period = task.period,# seconds
timeout = 60, # seconds
repeats = 0, # unlimited
retry_failed = -1, # unlimited
)
return "Server Monitoring enabled"
# =============================================================================
def setup_monitor_server_enable_interactive(r, **attr):
"""
Enable Monitoring for a Server
- Schedule all enabled Tasks
S3Method for interactive requests
"""
server_id = r.id
table = current.s3db.setup_monitor_server
monitor_server = current.db(table.server_id == server_id).select(table.id,
limitby = (0, 1)
).first()
if monitor_server:
monitor_server_id = monitor_server.id
else:
monitor_server_id = table.insert(server_id = server_id)
result = setup_monitor_server_enable(monitor_server_id)
current.session.confirmation = result
redirect(URL(f = "server"))
# =============================================================================
def setup_monitor_server_disable(monitor_server_id):
"""
Disable Monitoring for a Server
- Remove all related Tasks
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
mstable = current.s3db.setup_monitor_server
record = db(mstable.id == monitor_server_id).select(mstable.id,
mstable.enabled,
mstable.server_id,
limitby = (0, 1)
).first()
if record.enabled:
# Flag it as disabled
record.update_record(enabled = False)
table = db.setup_monitor_task
query = (table.server_id == record.server_id) & \
(table.enabled == True) & \
(table.deleted == False)
tasks = db(query).select(table.id)
# Do we have any Tasks already Scheduled?
args = []
for task in tasks:
args.append("[%s]" % task.id)
ttable = db.scheduler_task
query = ((ttable.function_name == "setup_monitor_run_task") & \
(ttable.args.belongs(args)) & \
(ttable.status.belongs(["RUNNING",
"QUEUED",
"ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby = (0, 1)
).first()
if exists:
# Disable all
db(query).update(status = "STOPPED")
return "Server Monitoring disabled"
# =============================================================================
def setup_monitor_server_disable_interactive(r, **attr):
"""
Disable Monitoring for a Server
- Remove all related Tasks
S3Method for interactive requests
"""
table = current.s3db.setup_monitor_server
monitor_server = current.db(table.server_id == r.id).select(table.id,
limitby = (0, 1)
).first()
result = setup_monitor_server_disable(monitor_server.id)
current.session.confirmation = result
redirect(URL(f = "server"))
# =============================================================================
def setup_monitor_server_check(r, **attr):
"""
Run all enabled Tasks for this server
S3Method for interactive requests
"""
server_id = r.id
table = current.s3db.setup_monitor_task
query = (table.server_id == server_id) & \
(table.enabled == True) & \
(table.deleted == False)
tasks = current.db(query).select(table.id)
run_async = current.s3task.run_async
for task in tasks:
run_async("setup_monitor_run_task",
args = [task.id])
current.session.confirmation = \
current.T("The check requests have been submitted, so results should appear shortly - refresh to see them")
redirect(URL(c="setup", f="server",
args = [server_id, "monitor_log"],
))
# =============================================================================
def setup_monitor_task_enable(task_id):
"""
Enable a Task
- Schedule Check (if server enabled)
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
table = s3db.setup_monitor_task
record = db(table.id == task_id).select(table.id,
table.server_id,
table.enabled,
table.period,
limitby = (0, 1),
).first()
if not record.enabled:
# Flag it as enabled
record.update_record(enabled = True)
mstable = s3db.setup_monitor_server
monitor_server = db(mstable.server_id == record.server_id).select(mstable.enabled,
limitby = (0, 1),
).first()
if monitor_server.enabled:
# Is the task already Scheduled?
ttable = db.scheduler_task
args = "[%s]" % task_id
query = ((ttable.function_name == "setup_monitor_run_task") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING",
"QUEUED",
"ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby = (0, 1)
).first()
if not exists:
current.s3task.schedule_task("setup_monitor_run_task",
args = [task_id],
period = record.period,# seconds
timeout = 60, # seconds
repeats = 0, # unlimited
retry_failed = -1, # unlimited
)
return "Task enabled"
# =============================================================================
def setup_monitor_task_enable_interactive(r, **attr):
"""
Enable a Task
- Schedule Check
S3Method for interactive requests
"""
result = setup_monitor_task_enable(r.id)
current.session.confirmation = result
redirect(URL(f = "monitor_task"))
# =============================================================================
def setup_monitor_task_disable(task_id):
"""
Disable a Check
- Remove Schedule for Check
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
table = current.s3db.setup_monitor_task
record = db(table.id == task_id).select(table.id, # needed for update_record
table.enabled,
limitby = (0, 1),
).first()
if record.enabled:
# Flag it as disabled
record.update_record(enabled = False)
# Is the task already Scheduled?
ttable = db.scheduler_task
args = "[%s]" % task_id
query = ((ttable.function_name == "setup_monitor_run_task") & \
(ttable.args == args) & \
(ttable.status.belongs(["RUNNING",
"QUEUED",
"ALLOCATED"])))
exists = db(query).select(ttable.id,
limitby = (0, 1)
).first()
if exists:
# Disable all
db(query).update(status = "STOPPED")
return "Task disabled"
# =============================================================================
def setup_monitor_task_disable_interactive(r, **attr):
"""
Disable a Task
- Remove Schedule for Check
S3Method for interactive requests
"""
result = setup_monitor_task_disable(r.id)
current.session.confirmation = result
redirect(URL(f = "monitor_task"))
# =============================================================================
def setup_monitor_task_restart():
"""
Restart all Enabled Monitor Tasks
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
# Clear all current Tasks from the Scheduler
ttable = s3db.scheduler_task
db(ttable.function_name == "setup_monitor_run_task").delete()
# Schedule all Enabled Tasks on all Enabled Servers
stable = s3db.setup_monitor_server
query = (stable.enabled == True) & \
(stable.deleted == False)
servers = db(query).select(stable.server_id)
servers = [s.server_id for s in servers]
table = s3db.setup_monitor_task
query = (table.server_id.belongs(servers)) & \
(table.enabled == True) & \
(table.deleted == False)
tasks = db(query).select(table.id,
table.period,
)
schedule_task = current.s3task.schedule_task
for task in tasks:
schedule_task("setup_monitor_run_task",
args = [task.id],
period = task.period,# seconds
timeout = 60, # seconds
repeats = 0, # unlimited
retry_failed = -1, # unlimited
)
return "Monitor Tasks restarted"
# =============================================================================
def setup_monitor_task_run(r, **attr):
"""
Run a Task
S3Method for interactive requests
"""
task_id = r.id
current.s3task.run_async("setup_monitor_run_task",
args = [task_id])
current.session.confirmation = \
current.T("The check request has been submitted, so results should appear shortly - refresh to see them")
redirect(URL(c="setup", f="monitor_task",
args = [task_id, "monitor_run"],
))
# =============================================================================
def setup_monitor_run_task(task_id):
"""
Check a Service
Non-interactive function run by Scheduler
"""
db = current.db
s3db = current.s3db
request = current.request
settings = current.deployment_settings
table = s3db.setup_monitor_task
ctable = db.setup_monitor_check
query = (table.id == task_id) & \
(table.check_id == ctable.id)
row = db(query).select(table.server_id,
ctable.function_name,
limitby = (0, 1)
).first()
server_id = row["setup_monitor_task.server_id"]
function_name = row["setup_monitor_check.function_name"]
# Load the Monitor template for this deployment
template = settings.get_setup_monitor_template()
module_name = "applications.%s.modules.templates.%s.monitor" \
% (request.application, template)
__import__(module_name)
mymodule = sys.modules[module_name]
S3Monitor = mymodule.S3Monitor()
# Get the Check Script
try:
fn = getattr(S3Monitor, function_name)
except:
current.log.error("Check Script not found: %s" % function_name)
return None
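# The check function is expected to accept (task_id, run_id) and return a dict such as
# {"status": 1, "result": "..."} - status using the same 0-3 scale as STATUS_OPTS
# (assumption based on the handling below)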
# Create an entry in the monitor_run table
rtable = db.setup_monitor_run
run_id = rtable.insert(server_id = server_id,
task_id = task_id,
)
try:
# Run the script
result = fn(task_id, run_id)
except Exception:
import traceback
tb_parts = sys.exc_info()
tb_text = "".join(traceback.format_exception(tb_parts[0],
tb_parts[1],
tb_parts[2]))
result = tb_text
status = 3 # Critical
else:
try:
status = result.get("status")
except AttributeError:
status = 3 # Critical
try:
result = result.get("result")
except AttributeError:
result = ""
# Store the Result & Status
# ... in Run
db(rtable.id == run_id).update(result = result,
status = status)
# ...in Task
db(table.id == task_id).update(result = result,
status = status,
date = request.utcnow,
)
# ...in Host
check_lower = None
stable = db.setup_monitor_server
if status == 3:
# Task at Critical => Server -> Critical
db(stable.server_id == server_id).update(status = status)
else:
server = db(stable.server_id == server_id).select(stable.id,
stable.status,
limitby = (0, 1),
).first()
if status == server.status:
pass
elif status > server.status:
# Increase Server Status to match Task Status
server.update_record(status = status)
else:
# status < server.status
# Check if we should Lower the Server Status to match Task Status
query = (table.id != task_id) & \
(table.server_id == server_id) & \
(table.status > status) & \
(table.enabled == True) & \
(table.deleted == False)
higher = db(query).select(table.id,
limitby = (0, 1)
).first()
if higher is None:
server.update_record(status = status)
if status > 1:
# Send any Alerts
atable = db.setup_monitor_alert
ptable = s3db.pr_person
query = (atable.task_id == task_id) & \
(atable.person_id == ptable.id)
recipients = db(query).select(ptable.pe_id)
if len(recipients) > 0:
stable = s3db.setup_server
server = db(stable.id == server_id).select(stable.name,
stable.host_ip,
limitby = (0, 1)
).first()
if server.host_ip == "127.0.0.1":
server_name = settings.get_system_name_short()
else:
server_name = server.name
recipients = [p.pe_id for p in recipients]
subject = "%s: %s" % (server_name,
result.split("\n")[0],
)
current.msg.send_by_pe_id(recipients,
subject = subject,
message = result,
)
# Pass result back to scheduler_run
return result
# =============================================================================
def setup_monitor_check_email_reply(run_id):
"""
Check whether we have received a reply to an Email check
"""
db = current.db
s3db = current.s3db
rtable = s3db.setup_monitor_run
run = db(rtable.id == run_id).select(rtable.id,
rtable.task_id,
rtable.status,
rtable.server_id,
limitby = (0, 1)
).first()
try:
status = run.status
except:
result = "Critical: Can't find run record"
current.log.error(result)
# @ToDo: Send an Alert...however we can't find the details to do this
else:
task_id = run.task_id
ttable = s3db.setup_monitor_task
task = db(ttable.id == task_id).select(ttable.id,
ttable.options,
limitby = (0, 1)
).first()
result = "Critical: Reply not received after %s minutes" % task.options.get("wait", 60)
if status != 3:
# Make it go Critical
# ... in Run
run.update_record(result = result,
                  status = 3)
# ...in Task
task.update_record(result = result,
status = 3)
# ...in Host
db(s3db.setup_monitor_server.server_id == run.server_id).update(status = 3)
# Send Alert(s)
atable = db.setup_monitor_alert
ptable = s3db.pr_person
query = (atable.task_id == task_id) & \
(atable.person_id == ptable.id)
recipients = db(query).select(ptable.pe_id)
if len(recipients) > 0:
recipients = [p.pe_id for p in recipients]
subject = "%s: %s" % (current.deployment_settings.get_system_name_short(),
result,
)
current.msg.send_by_pe_id(recipients,
subject = subject,
message = result,
)
return result
# =============================================================================
def setup_write_playbook(playbook_name,
playbook_data,
):
"""
Write an Ansible Playbook file
"""
try:
import yaml
except ImportError:
error = "PyYAML module needed for Setup"
current.log.error(error)
current.response.error = error
return {}
# https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data#answer-8641732
def double_quoted_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
yaml.add_representer(QuotedDouble, double_quoted_presenter)
def single_quoted_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style="'")
yaml.add_representer(QuotedSingle, single_quoted_presenter)
def literal_presenter(dumper, data):
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
yaml.add_representer(Literal, literal_presenter)
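# QuotedDouble/QuotedSingle/Literal are assumed to be str subclasses defined elsewhere in this module;
# wrapping a string in one of them forces the corresponding YAML scalar style when dumped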
folder = current.request.folder
os_path = os.path
os_path_join = os_path.join
playbook_folder = os_path_join(folder, "uploads", "playbook")
if not os_path.isdir(playbook_folder):
os.mkdir(playbook_folder)
playbook_path = os_path_join(playbook_folder, playbook_name)
with open(playbook_path, "w") as yaml_file:
yaml_file.write(yaml.dump(playbook_data, default_flow_style=False))
task_vars = {"playbook": playbook_path,
}
return task_vars
# =============================================================================
def setup_run_playbook(playbook, instance_id=None, tags=None, hosts=None):
"""
Run an Ansible Playbook & return the result
- designed to be run as a Scheduled Task
http://docs.ansible.com/ansible/latest/dev_guide/developing_api.html
https://serversforhackers.com/c/running-ansible-2-programmatically
"""
# No try/except here as we want ImportErrors to raise
import shutil
import yaml
from ansible.module_utils.common.collections import ImmutableDict
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.playbook.play import Play
from ansible.playbook.task_include import TaskInclude
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase
from ansible import context
#W2P_TASK = current.W2P_TASK
if hosts is None:
# NB This is the only current use case as we always start on localhost
# - remote servers are then accessed once we have the SSH private_key available
hosts = ["127.0.0.1"]
# Logging
class PlayLogger:
"""
Store log output in a single String object.
We create a new object per Ansible run
"""
def __init__(self):
self.log = ""
def append(self, log_line):
""" Append to log """
self.log += log_line + "\n\n"
logger = PlayLogger()
class ResultCallback(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = "stored"
CALLBACK_NAME = "database"
def __init__(self):
self._last_task_banner = None
self._last_task_name = None
self._task_type_cache = {}
super(ResultCallback, self).__init__()
@staticmethod
def _handle_exception(result):
# Catch an exception
# This may never be called because default handler deletes
# the exception, since Ansible thinks it knows better
traceback = result.get("exception")
if traceback:
# Extract the error message and log it
#error = traceback.strip().split("\n")[-1]
#logger.append(error)
# Log the whole Traceback
logger.append(traceback)
# Remove the exception from the result so it's not shown every time
del result["exception"]
#current.s3task.scheduler.stop_task(W2P_TASK.id)
# @ToDo: If this happens during a deploy from co-app and after nginx has replaced co-app on Port 80 then revert to co-app
def _print_task_banner(self, task):
args = u", ".join(u"%s=%s" % a for a in task.args.items())
prefix = self._task_type_cache.get(task._uuid, "TASK")
# Use cached task name
task_name = self._last_task_name
if task_name is None:
task_name = task.get_name().strip()
logger.append(u"%s: %s\n[%s]" % (prefix, task_name, args))
def v2_runner_on_failed(self, result, ignore_errors=False):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result)
if result._task.loop and "results" in result._result:
self._process_items(result)
else:
logger.append("fatal: [%s]: FAILED!\n%s" % \
(result._host.get_name(),
self._dump_results(result._result, indent=4)))
def v2_runner_on_ok(self, result):
if isinstance(result._task, TaskInclude):
return
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._result.get("changed", False):
msg = "changed: [%s]" % result._host.get_name()
else:
msg = "ok: [%s]" % result._host.get_name()
if result._task.loop and "results" in result._result:
self._process_items(result)
else:
self._clean_results(result._result, result._task.action)
msg += "\n%s" % self._dump_results(result._result, indent=4)
logger.append(msg)
def v2_runner_on_unreachable(self, result):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
logger.append("fatal: [%s]: UNREACHABLE!\n%s" % \
(result._host.get_name(),
self._dump_results(result._result, indent=4)))
def v2_runner_item_on_failed(self, result):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result)
msg = "failed: [%s]" % (result._host.get_name())
logger.append(msg + " (item=%s)\n%s" % \
(self._get_item_label(result._result),
self._dump_results(result._result, indent=4)))
def v2_runner_item_on_ok(self, result):
if isinstance(result._task, TaskInclude):
return
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._result.get("changed", False):
msg = "changed"
else:
msg = "ok"
msg += ": [%s] (item=%s)\n%s" % \
(result._host.get_name(),
self._get_item_label(result._result),
self._dump_results(result._result, indent=4))
logger.append(msg)
# Copy the current working directory to revert back to later
cwd = os.getcwd()
# Change working directory
request = current.request
roles_path = os.path.join(request.env.applications_parent, request.folder, "private", "eden_deploy", "roles")
os.chdir(roles_path)
# Since the API is constructed for CLI, it expects certain options to always be set in the context object
if tags is None:
tags = [] # Needs to be an iterable
tmp_path = os.path.join("/", "tmp", "ansible")
context.CLIARGS = ImmutableDict(become = None,
become_method = None,
become_user = None,
check = False,
diff = False,
#extra_vars = {"ansible_local_temp": tmp_path,
# "ansible_local_tmp": tmp_path,
# "ansible_ssh_control_path_dir": tmp_path,
# },
forks = 10,
module_path = [roles_path],
tags = tags,
verbosity = 1,
)
# Initialize needed objects
loader = DataLoader() # Takes care of finding and reading yaml, json and ini files
# Instantiate Logging for handling results as they come in
results_callback = ResultCallback()
# Create Inventory and pass to Var manager
if len(hosts) == 1:
# Ensure that we have a comma to tell Ansible that this is a list of hosts not a file to read from
sources = "%s," % hosts[0]
else:
sources = ",".join(hosts)
inventory = InventoryManager(loader = loader,
sources = sources)
variable_manager = VariableManager(loader = loader,
inventory = inventory)
# Load Playbook
with open(playbook, "r") as yaml_file:
# https://msg.pyyaml.org/load
playbooks = yaml.full_load(yaml_file)
for play_source in playbooks:
# Create play object, playbook objects use .load instead of init or new methods,
# this will also automatically create the task objects from the info provided in play_source
play = Play().load(play_source,
variable_manager = variable_manager,
loader = loader)
# Run it - instantiate task queue manager, which takes care of forking and setting up all objects to iterate over host list and tasks
tqm = None
try:
tqm = TaskQueueManager(inventory = inventory,
variable_manager = variable_manager,
loader = loader,
passwords = None,
# Use our custom callback instead of the ``default`` callback plugin, which prints to stdout
stdout_callback = results_callback,
)
result = tqm.run(play) # Most interesting data for a play is actually sent to the callback's methods
finally:
# we always need to cleanup child procs and the structures we use to communicate with them
if tqm is not None:
tqm.cleanup()
# Remove ansible tmpdir
shutil.rmtree(tmp_path, True)
# Change working directory back
os.chdir(cwd)
# Dump Logs to File
# NB Since playbook is an absolute path, the os.path.join below discards the "/tmp" component,
# so the log actually lands alongside the playbook in uploads/playbook - which works fine
log_file_name = "%s.log" % playbook.split(".")[0]
log_path = os.path.join("/", "tmp", log_file_name)
with open(log_path, "w") as log_file:
log_file.write(logger.log)
# Dump Logs to Database
# This gets deleted:
#current.db(current.s3db.scheduler_run.id == W2P_TASK.run_id).update(run_output = logger.log)
if instance_id:
# Upload logs to Database
table = current.s3db.setup_instance
field = table.log_file
with open(log_path, "rb") as log_file:
newfilename = field.store(log_file,
log_file_name,
field.uploadfolder)
current.db(table.id == instance_id).update(log_file = newfilename)
return result
# =============================================================================
def setup_instance_settings_read(instance_id, deployment_id):
"""
Read the Settings for an instance from models/000_config.py
- called onaccept from instance creation
- called by interactive method to read
"""
from gluon.cfs import getcfs
from gluon.compileapp import build_environment
from gluon.globals import Request, Response, Session
from gluon.restricted import restricted
# Read current settings from file
folder = current.request.folder
model = "%s/models/000_config.py" % folder
code = getcfs(model, model, None)
request = Request({})
request.controller = "dontrunany"
request.folder = folder
response = Response()
session = Session()
#session.connect(request, response)
environment = build_environment(request, response, session, store_current=False)
environment["settings"] = Storage2()
restricted(code, environment, layer=model)
nested_settings = environment["settings"]
# Flatten settings
file_settings = {}
for section in nested_settings:
if section == "database":
# Filter out DB settings as these need special handling
continue
subsection = nested_settings[section]
for setting in subsection:
if setting in ("hmac_key", "template"):
# Filter out settings which need special handling
continue
file_settings["%s.%s" % (section, setting)] = subsection[setting]
# Read Settings currently in Database
db = current.db
stable = current.s3db.setup_setting
id_field = stable.id
query = (stable.instance_id == instance_id) & \
(stable.deleted == False)
db_settings = db(query).select(id_field,
stable.setting,
#stable.current_value,
stable.new_value,
).as_dict(key = "setting")
db_get = db_settings.get
# Ensure that database looks like file
checked_settings = []
cappend = checked_settings.append
from gluon.serializers import json as jsons # Need support for T()
for setting in file_settings:
current_value = file_settings[setting]
if not isinstance(current_value, basestring):
# NB Storage & OrderedDict will come out as dict
current_value = jsons(current_value)
s = db_get(setting)
if s:
# We update even if not changed so as to update modified_on
db(id_field == s["id"]).update(current_value = current_value)
else:
stable.insert(deployment_id = deployment_id,
instance_id = instance_id,
setting = setting,
current_value = current_value,
)
cappend(setting)
# Handle db_settings not in file_settings
for setting in db_settings:
if setting in checked_settings:
continue
s = db_get(setting)
if s["new_value"] is not None:
db(id_field == s["id"]).update(current_value = None)
else:
db(id_field == s["id"]).update(deleted = True)
# =============================================================================
def setup_instance_method(instance_id, method="start"):
"""
Run individual Ansible Roles ('methods')
e.g. Start, Stop or Clean an Instance
- called by interactive method to start/stop
"""
db = current.db
s3db = current.s3db
folder = current.request.folder
playbook = []
# Get Instance details
itable = s3db.setup_instance
instance = db(itable.id == instance_id).select(itable.deployment_id,
itable.type,
limitby = (0, 1)
).first()
deployment_id = instance.deployment_id
# Get Server(s) details
stable = s3db.setup_server
query = (stable.deployment_id == deployment_id) & \
(stable.role == 1)
server = db(query).select(stable.name,
stable.host_ip,
stable.private_key,
stable.remote_user,
limitby = (0, 1)
).first()
host_ip = server.host_ip
if host_ip == "127.0.0.1":
connection = "local"
else:
provided_key = server.private_key
if not provided_key:
# Abort
return(current.T("Method failed: SSH Key needed when running away from localhost"))
connection = "smart"
tasks = []
# Copy the Private Key to where it will be used
provided_key = os.path.join(current.request.folder, "uploads", provided_key)
private_key = "/tmp/%s.pem" % server.name
tasks.append({"copy": {"src": provided_key,
"dest": private_key,
"mode": "0600",
},
})
# Add instance to host group (to associate private_key)
tasks.append({"add_host": {"hostname": host_ip,
"groupname": "launched",
"ansible_ssh_private_key_file": private_key,
},
})
playbook.append({"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": tasks,
})
host_ip = "launched"
# Get Deployment details
dtable = s3db.setup_deployment
deployment = db(dtable.id == deployment_id).select(dtable.db_type,
dtable.webserver_type,
limitby = (0, 1)
).first()
# Build Playbook data structure
#roles_path = os.path.join(folder, "private", "eden_deploy", "roles")
playbook.append({"hosts": host_ip,
"connection": connection,
"remote_user": server.remote_user,
"become_method": "sudo",
#"become_user": "root",
"vars": {"db_type": DB_SERVERS[deployment.db_type],
"web_server": WEB_SERVERS[deployment.webserver_type],
"type": INSTANCE_TYPES[instance.type],
},
"roles": [{ "role": method }, #"%s/%s" % (roles_path, method)
]
})
# Write Playbook
name = "%s_%d" % (method, int(time.time()))
task_vars = setup_write_playbook("%s.yml" % name,
playbook,
)
# Run the Playbook
task_vars["instance_id"] = instance_id # To Upload Logs to Instance record
current.s3task.schedule_task(name,
vars = task_vars,
function_name = "setup_run_playbook",
repeats = None,
timeout = 6000,
#sync_output = 300
)
# =============================================================================
def setup_modules_apply(instance_id, modules):
"""
Method to enable/disable Modules in an instance
via models/000_config.py
"""
db = current.db
s3db = current.s3db
playbook = []
# Lookup Instance details
itable = s3db.setup_instance
instance = db(itable.id == instance_id).select(itable.id,
itable.deployment_id,
itable.type,
limitby = (0, 1)
).first()
deployment_id = instance.deployment_id
# Lookup Server Details
# @ToDo: Support multiple Eden servers used as Load-balancers
svtable = s3db.setup_server
query = (svtable.deployment_id == deployment_id) & \
(svtable.role.belongs((1, 4)))
server = db(query).select(svtable.name,
svtable.host_ip,
svtable.remote_user,
svtable.private_key,
limitby = (0, 1)
).first()
host_ip = server.host_ip
if host_ip == "127.0.0.1":
connection = "local"
else:
provided_key = server.private_key
if not provided_key:
# Abort
return(current.T("Apply failed: SSH Key needed when applying away from localhost"))
connection = "smart"
tasks = []
# Copy the Private Key to where it will be used
provided_key = os.path.join(current.request.folder, "uploads", provided_key)
private_key = "/tmp/%s.pem" % server.name
tasks.append({"copy": {"src": provided_key,
"dest": private_key,
"mode": "0600",
},
})
# Add instance to host group (to associate private_key)
tasks.append({"add_host": {"hostname": host_ip,
"groupname": "launched",
"ansible_ssh_private_key_file": private_key,
},
})
playbook.append({"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": tasks,
})
host_ip = "launched"
appname = "eden" # @ToDo: Allow this to be configurable
instance_type = INSTANCE_TYPES[instance.type]
dest = "/home/%s/applications/%s/models/000_config.py" % (instance_type, appname)
# @ToDo: Lookup webserver_type from deployment once we support Apache
#service_name = "apache2"
#service_name = "uwsgi-%s" % instance_type
settings = current.deployment_settings
has_module = settings.has_module
all_pages = settings.get_setup_wizard_questions()
modules_page = all_pages[0]
dependencies = {}
labels = {}
for m in modules_page["modules"]:
labels[m["module"]] = m["label"]
d = m.get("dependencies")
if d is not None:
dependencies[m["module"]] = d
# Build List of Tasks
# This currently only works for Local Server!
tasks = []
tappend = tasks.append
for module in modules:
new_value = modules[module]
if new_value == "True":
if has_module(module):
# No changes required
# This is only the case for Local Server
continue
tappend({"name": "If we disabled the module, then remove the disabling",
"become": "yes",
"lineinfile": {"dest": dest,
"regexp": '^del settings.modules\["%s"\]' % module,
"state": "absent",
},
"register": "default",
})
label = labels.get(module)
tappend({"name": "Enable the Module",
"become": "yes",
"lineinfile": {"dest": dest,
"regexp": '^settings.modules\["%s"\]' % module,
"line": 'settings.modules["%s"] = {"name_nice": T("%s"), "module_type": 10}' % (module, label),
},
"when": "not default.found",
})
deps = dependencies.get(module, {})
for d in deps:
m = d.get("module")
if has_module(m):
# No changes required
# This is only the case for Local Server
continue
tappend({"name": "Handle Dependency: If we disabled the module, then remove the disabling",
"become": "yes",
"lineinfile": {"dest": dest,
"regexp": '^del settings.modules\["%s"\]' % m,
"state": "absent",
},
"register": "default",
})
tappend({"name": "Handle Dependency: Enable the Module",
"become": "yes",
"lineinfile": {"dest": dest,
"regexp": '^settings.modules\["%s"\]' % module,
"line": 'settings.modules["%s"] = {"name_nice": T("%s"), "module_type": 10}' % (m, d.get("label")),
},
"when": "not default.found",
})
else:
if not has_module(module):
# No changes required
# This is only the case for Local Server
continue
tappend({"name": "If we enabled the module, then remove the enabling",
"become": "yes",
"lineinfile": {"dest": dest,
"regexp": '^settings.modules\["%s"\]' % module,
"state": "absent",
},
"register": "default",
})
tappend({"name": "Disable the module",
"become": "yes",
"lineinfile": {"dest": dest,
"regexp": '^del settings.modules\["%s"\]' % module,
"line": 'del settings.modules["%s"]' % module,
},
"when": "not default.found",
})
tasks += [# @ToDo: Handle case where need to restart multiple webservers
{"name": "Migrate & Restart WebServer",
# We don't want to restart the UWSGI process running the Task until after the Task has completed
"shell": 'echo "/usr/local/bin/migrate %s" | at now + 1 minutes' % instance_type,
"become": "yes",
},
]
playbook.append({"hosts": host_ip,
"connection": connection,
"remote_user": server.remote_user,
"become_method": "sudo",
#"become_user": "root",
"tasks": tasks,
})
# Write Playbook
name = "apply_%d" % int(time.time())
task_vars = setup_write_playbook("%s.yml" % name,
playbook,
)
# Run the Playbook
task_vars["instance_id"] = instance_id # To Upload Logs to Instance record
current.s3task.schedule_task(name,
vars = task_vars,
function_name = "setup_run_playbook",
repeats = None,
timeout = 6000,
#sync_output = 300
)
# =============================================================================
def setup_setting_apply(setting_id):
"""
Apply a Setting to an instance via models/000_config.py
CLI API for shell scripts & to be called by S3Method
"""
db = current.db
s3db = current.s3db
playbook = []
# Lookup Setting Details
stable = s3db.setup_setting
setting = db(stable.id == setting_id).select(stable.id,
stable.deployment_id,
stable.instance_id,
stable.setting,
stable.new_value,
limitby = (0, 1)
).first()
# Lookup Server Details
# @ToDo: Support multiple Eden servers used as Load-balancers
svtable = s3db.setup_server
query = (svtable.deployment_id == setting.deployment_id) & \
(svtable.role.belongs((1, 4)))
server = db(query).select(svtable.name,
svtable.host_ip,
svtable.remote_user,
svtable.private_key,
limitby = (0, 1)
).first()
host_ip = server.host_ip
if host_ip == "127.0.0.1":
connection = "local"
else:
provided_key = server.private_key
if not provided_key:
# Abort
return(current.T("Apply failed: SSH Key needed when applying away from localhost"))
connection = "smart"
tasks = []
# Copy the Private Key to where it will be used
provided_key = os.path.join(current.request.folder, "uploads", provided_key)
private_key = "/tmp/%s.pem" % server.name
tasks.append({"copy": {"src": provided_key,
"dest": private_key,
"mode": "0600",
},
})
# Add instance to host group (to associate private_key)
tasks.append({"add_host": {"hostname": host_ip,
"groupname": "launched",
"ansible_ssh_private_key_file": private_key,
},
})
playbook.append({"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": tasks,
})
host_ip = "launched"
appname = "eden" # @ToDo: Allow this to be configurable
new_value = setting.new_value
instance_id = setting.instance_id
itable = s3db.setup_instance
instance = db(itable.id == instance_id).select(itable.type,
limitby = (0, 1)
).first()
instance_type = INSTANCE_TYPES[instance.type]
# @ToDo: Lookup webserver_type from deployment once we support Apache
#service_name = "apache2"
service_name = "uwsgi-%s" % instance_type
# Build Playbook data structure:
the_setting = setting.setting
if new_value is True or new_value is False:
new_line = "settings.%s = %s" % (the_setting, new_value)
else:
# @ToDo: Handle lists/dicts (load into JSONS3?)
new_line = 'settings.%s = "%s"' % (the_setting, new_value)
playbook.append({"hosts": host_ip,
"connection": connection,
"remote_user": server.remote_user,
"become_method": "sudo",
#"become_user": "root",
"tasks": [{"name": "Edit 000_config.py",
"become": "yes",
"lineinfile": {"dest": "/home/%s/applications/%s/models/000_config.py" % \
(instance_type, appname),
"regexp": "^settings.%s =" % the_setting,
"line": new_line,
},
},
{"name": "Compile",
"command": "python web2py.py -S %s -M -R applications/%s/static/scripts/tools/compile.py" % \
(appname, appname),
"args": {"chdir": "/home/%s" % instance_type,
},
"become": "yes",
# Admin scripts do this as root, so we need to be able to over-write
#"become_user": "web2py",
},
{"name": "Restart WebServer",
# We don't want to restart the WSGI process running the Task until after the Task has completed
#"service": {"name": service_name,
# "state": "restarted",
# },
"shell": 'echo "service %s restart" | at now + 1 minutes' % service_name,
"become": "yes",
},
]
})
# Write Playbook
name = "apply_%d" % int(time.time())
task_vars = setup_write_playbook("%s.yml" % name,
playbook,
)
# Run the Playbook
task_vars["instance_id"] = instance_id # To Upload Logs to Instance record
current.s3task.schedule_task(name,
vars = task_vars,
function_name = "setup_run_playbook",
repeats = None,
timeout = 6000,
#sync_output = 300
)
# Update the DB to show that the setting has been applied
# @ToDo: Do this as a callback from the async task
setting.update_record(current_value = new_value,
new_value = None,
)
# =============================================================================
def setup_settings_apply(instance_id, settings):
"""
Method to Apply Settings to an instance
via models/000_config.py
"""
db = current.db
s3db = current.s3db
playbook = []
# Lookup Instance details
itable = s3db.setup_instance
instance = db(itable.id == instance_id).select(itable.id,
itable.deployment_id,
itable.type,
limitby = (0, 1)
).first()
deployment_id = instance.deployment_id
# Lookup Server Details
# @ToDo: Support multiple Eden servers used as Load-balancers
svtable = s3db.setup_server
query = (svtable.deployment_id == deployment_id) & \
(svtable.role.belongs((1, 4)))
server = db(query).select(svtable.name,
svtable.host_ip,
svtable.remote_user,
svtable.private_key,
limitby = (0, 1)
).first()
host_ip = server.host_ip
if host_ip == "127.0.0.1":
connection = "local"
else:
provided_key = server.private_key
if not provided_key:
# Abort
return(current.T("Apply failed: SSH Key needed when applying away from localhost"))
connection = "smart"
tasks = []
# Copy the Private Key to where it will be used
provided_key = os.path.join(current.request.folder, "uploads", provided_key)
private_key = "/tmp/%s.pem" % server.name
tasks.append({"copy": {"src": provided_key,
"dest": private_key,
"mode": "0600",
},
})
# Add instance to host group (to associate private_key)
tasks.append({"add_host": {"hostname": host_ip,
"groupname": "launched",
"ansible_ssh_private_key_file": private_key,
},
})
playbook.append({"hosts": "localhost",
"connection": "local",
"gather_facts": "no",
"tasks": tasks,
})
host_ip = "launched"
appname = "eden" # @ToDo: Allow this to be configurable
instance_type = INSTANCE_TYPES[instance.type]
dest = "/home/%s/applications/%s/models/000_config.py" % (instance_type, appname)
# @ToDo: Lookup webserver_type from deployment once we support Apache
#service_name = "apache2"
service_name = "uwsgi-%s" % instance_type
# Build List of Tasks
tasks = []
tappend = tasks.append
for setting in settings:
the_setting = setting.replace("_", ".", 1)
new_value = settings[setting]
if new_value == "True" or new_value == "False":
new_line = "settings.%s = %s" % (the_setting, new_value)
else:
# @ToDo: Handle lists/dicts (load into JSONS3?)
new_line = 'settings.%s = "%s"' % (the_setting, new_value)
tappend({"name": "Edit 000_config.py",
"become": "yes",
"lineinfile": {"dest": dest,
"regexp": "^settings.%s =" % the_setting,
"line": new_line,
},
})
tasks += [{"name": "Compile",
"command": "python web2py.py -S %s -M -R applications/%s/static/scripts/tools/compile.py" % \
(appname, appname),
"args": {"chdir": "/home/%s" % instance_type,
},
"become": "yes",
# Admin scripts do this as root, so we need to be able to over-write
#"become_user": "web2py",
},
# @ToDo: Handle case where need to restart multiple webservers
{"name": "Restart WebServer",
# We don't want to restart the UWSGI process running the Task until after the Task has completed
#"service": {"name": service_name,
# "state": "restarted",
# },
"shell": 'echo "service %s restart" | at now + 1 minutes' % service_name,
"become": "yes",
},
]
playbook.append({"hosts": host_ip,
"connection": connection,
"remote_user": server.remote_user,
"become_method": "sudo",
#"become_user": "root",
"tasks": tasks,
})
# Write Playbook
name = "apply_%d" % int(time.time())
task_vars = setup_write_playbook("%s.yml" % name,
playbook,
)
# Run the Playbook
task_vars["instance_id"] = instance_id # To Upload Logs to Instance record
current.s3task.schedule_task(name,
vars = task_vars,
function_name = "setup_run_playbook",
repeats = None,
timeout = 6000,
#sync_output = 300
)
# Update the DB to show that the settings have been applied
# @ToDo: Do this as a callback from the async task
instance.update_record(configured = True)
stable = s3db.setup_setting
q = (stable.instance_id == instance_id)
for setting in settings:
the_setting = setting.replace("_", ".", 1)
new_value = settings[setting]
db(q & (stable.setting == the_setting)).update(current_value = new_value,
new_value = None)
# =============================================================================
class QuotedDouble(str):
"""
Ensure that strings are double-quoted when output in YAML
https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data#answer-8641732
"""
pass
# =============================================================================
class QuotedSingle(str):
"""
Ensure that strings are single-quoted when output in YAML
https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data#answer-8641732
"""
pass
# =============================================================================
class Literal(str):
"""
Ensure that multiline strings are output as a block literal in YAML
https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data#answer-8641732
"""
pass
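# A minimal sketch of how these marker classes are typically registered with
# PyYAML (following the StackOverflow answer linked above); the representer
# names below are illustrative, and it assumes the standard `yaml` package:
#
#   import yaml
#
#   def _quoted_double_representer(dumper, data):
#       return dumper.represent_scalar("tag:yaml.org,2002:str", data, style='"')
#
#   def _quoted_single_representer(dumper, data):
#       return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="'")
#
#   def _literal_representer(dumper, data):
#       return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
#
#   yaml.add_representer(QuotedDouble, _quoted_double_representer)
#   yaml.add_representer(QuotedSingle, _quoted_single_representer)
#   yaml.add_representer(Literal, _literal_representer)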
# =============================================================================
class Storage2(Storage):
"""
Read settings.x.y without needing to first create settings.x
"""
def __getattr__(self, key):
value = dict.get(self, key)
if value is None:
self[key] = value = Storage2()
return value
def __call__(self):
"""
settings.import_template()
"""
return
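# A small usage sketch (hypothetical values) of what Storage2 allows:
#
#   settings = Storage2()
#   settings.L10n.default_language = "en"  # no need to create settings.L10n first
#   settings.import_template()             # missing attribute resolves to a callable Storage2; returns None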
# =============================================================================
class setup_DeploymentRepresent(S3Represent):
def __init__(self):
"""
Constructor
"""
super(setup_DeploymentRepresent, self).__init__(lookup = "setup_deployment",
)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom look-up of rows
@param key: the key field
@param values: the values to look up
@param fields: unused (retained for API compatibility)
"""
dtable = self.table
itable = current.s3db.setup_instance
count = len(values)
if count == 1:
query = (dtable.id == values[0])
else:
query = (dtable.id.belongs(values))
left = itable.on((itable.deployment_id == dtable.id) & (itable.type == 1))
rows = current.db(query).select(dtable.id,
itable.url,
left = left,
limitby = (0, count),
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
if not hasattr(row, "setup_instance"):
return str(row.id)
return row.setup_instance.url
# =============================================================================
class setup_MonitorTaskRepresent(S3Represent):
def __init__(self):
"""
Constructor
"""
super(setup_MonitorTaskRepresent, self).__init__(lookup = "setup_monitor_task",
)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom look-up of rows
@param key: the key field
@param values: the values to look up
@param fields: unused (retained for API compatibility)
"""
db = current.db
table = self.table
#stable = db.setup_server
ctable = db.setup_monitor_check
count = len(values)
if count == 1:
query = (table.id == values[0])
else:
query = (table.id.belongs(values))
left = [#stable.on(stable.id == table.server_id),
ctable.on(ctable.id == table.check_id),
]
rows = db(query).select(table.id,
#stable.name,
#stable.host_ip,
ctable.name,
left = left,
limitby = (0, count),
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
#return "%s (%s): %s" % (row["setup_server.name"],
# row["setup_server.host_ip"],
# row["setup_monitor_check.name"],
# )
return row["setup_monitor_check.name"]
# =============================================================================
def setup_rheader(r, tabs=None):
""" Resource component page header """
rheader = None
if r.representation == "html" and r.id:
T = current.T
r_name = r.name
if r_name == "deployment":
tabs = [(T("Deployment Details"), None),
(T("Servers"), "server"),
(T("Instances"), "instance"),
(T("Settings"), "setting"),
(T("Monitoring"), "monitor_task"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
button = A(T("Configuration Wizard"),
_class="action-btn",
_href=URL(c="setup", f="deployment",
args = [r.id, "wizard"],
),
)
rheader = DIV(button,
rheader_tabs)
if r_name == "server":
tabs = [(T("Server Details"), None),
# Inline form instead of Tab
#(T("Monitoring"), "monitor_server"),
(T("Monitor Tasks"), "monitor_task"),
(T("Monitor Logs"), "monitor_log"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(rheader_tabs)
elif r_name == "monitor_check":
#tabs = [(T("Check Details"), None),
# ]
#rheader_tabs = s3_rheader_tabs(r, tabs)
#record = r.record
#table = r.table
#rheader = DIV(TABLE(TR(TH("%s: " % table.name.label),
# record.name),
# TR(TH("%s: " % table.function_name.label),
# record.function_name),
# TR(TH("%s: " % table.comments.label),
# record.comments or ""),
# ), rheader_tabs)
# No tabs => No need for rheader
rheader = None
elif r_name == "monitor_task":
tabs = [(T("Task Details"), None),
(T("Logs"), "monitor_run"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
record = r.record
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % table.server_id.label),
table.server_id.represent(record.server_id)),
TR(TH("%s: " % table.check_id.label),
table.check_id.represent(record.check_id)),
TR(TH("%s: " % table.status.label),
table.status.represent(record.status)),
TR(TH("%s: " % table.enabled.label),
record.enabled),
TR(TH("%s: " % table.comments.label),
record.comments or ""),
), rheader_tabs)
return rheader
# END =========================================================================
|
#!/usr/bin/env python
from os.path import dirname, abspath
import sys
from django.conf import settings
if not settings.configured:
settings_dict = dict(
INSTALLED_APPS=["filepages", ],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
},
},
SILENCED_SYSTEM_CHECKS=["1_7.W001"],
)
settings.configure(**settings_dict)
import django
django.setup()
def runtests(test_labels):
sys.path.insert(0, dirname(abspath(__file__)))
from django.test.runner import DiscoverRunner
failures = DiscoverRunner(
verbosity=1,
interactive=True,
failfast=False,
).run_tests(test_labels)
sys.exit(failures)
if __name__ == "__main__":
labels = sys.argv[1:] or [
"filepages",
]
runtests(labels)
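# Typical invocation (assumed), if this script is saved as runtests.py:
#   python runtests.py                # runs the default "filepages" tests
#   python runtests.py filepages.tests.SomeTestCase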
|
# Copyright (c) 2018, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.setup.doctype.company.company import install_country_fixtures
def execute():
frappe.reload_doc('regional', 'report', 'fichier_des_ecritures_comptables_[fec]')
for d in frappe.get_all('Company', filters = {'country': 'France'}):
install_country_fixtures(d.name)
|
import random
import numpy as np
# A modification of Detective. Instead of using Tit for Tat when the opponent
# betrays you, it uses the much more aggressive Forgiving Tit for Tat, which will
# only forgive you when you are nice for two consecutive turns.
#
# Better DETECTIVE: First: I analyze you. I start:
# Cooperate, Defect, Cooperate, Cooperate.
# If you defect back, I'll act like [Forgiving Tit for Tat].
# If you never defect back, I'll act like [alwaysDefect],
# to exploit you. Elementary, my dear Watson.
# - nobody5050
# Reminder: For the history array, "cooperate" = 1, "defect" = 0
def strategy(history, memory):
testingSchedule = ["cooperate", "defect", "cooperate", "cooperate"]
gameLength = history.shape[1]
shallIExploit = memory
choice = None
if gameLength < 4: # We're still in that initial testing stage.
choice = testingSchedule[gameLength]
elif (
gameLength == 4
): # Time to analyze the testing stage and decide what to do based on what the opponent did in that time!
opponentsActions = history[1]
if (
np.count_nonzero(opponentsActions - 1) == 0
): # The opponent cooperated all 4 turns! Never defected!
shallIExploit = True # Let's exploit forever.
else:
shallIExploit = False # Let's switch to Forgiving Tit For Tat.
if gameLength >= 4:
if shallIExploit:
choice = "defect"
else:
choice = "cooperate"
if (
history.shape[1] >= 2 and history[1, -1] == 0 and history[1, -2] == 0
): # We check the TWO most recent turns to see if BOTH were defections, and only then do we defect too.
choice = "defect"
return choice, shallIExploit
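# A minimal usage sketch, assuming the tournament framework passes a 2xN numpy
# array (row 0 = our moves, row 1 = opponent's moves; 1 = cooperate, 0 = defect)
# and threads `memory` between calls:
#
#   import numpy as np
#   history = np.array([[1, 0, 1, 1],    # our first four moves (the test schedule)
#                       [1, 1, 1, 1]])   # opponent cooperated every turn
#   choice, memory = strategy(history, None)
#   # -> choice == "defect" (opponent never defected back, so we switch to exploiting)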
|
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Antonin Bas (antonin@barefootnetworks.com)
#
#
# -*- coding: utf-8 -*-
# JSON format documentation available at:
# https://github.com/p4lang/behavioral-model/blob/master/docs/JSON_format.md
from collections import defaultdict, OrderedDict
from util.topo_sorting import Graph
import re
from copy import copy
import logging
import sys
p4 = None
_STATIC_VARS = []
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def LOG_CRITICAL(msg, *args, **kwargs): # pragma: no cover
logger.critical(msg, *args, **kwargs)
logging.shutdown()
sys.exit(1)
def LOG_WARNING(msg, *args, **kwargs): # pragma: no cover
logger.warning(msg, *args, **kwargs)
def LOG_INFO(msg, *args, **kwargs): # pragma: no cover
logger.info(msg, *args, **kwargs)
def static_var(varname, value):
def decorate(func):
_STATIC_VARS.append((func, varname, value))
setattr(func, varname, copy(value))
return func
return decorate
def reset_static_vars():
for(func, varname, value) in _STATIC_VARS:
setattr(func, varname, copy(value))
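# A brief usage sketch of the @static_var decorator above (hypothetical function
# name): it attaches a mutable attribute to the function object so the value
# persists across calls, and reset_static_vars() restores the initial copy:
#
#   @static_var("counter", 0)
#   def _next_id():
#       _next_id.counter += 1
#       return _next_id.counter
#
#   _next_id()            # -> 1
#   _next_id()            # -> 2
#   reset_static_vars()   # _next_id.counter is reset to 0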
def header_length_exp_format(p4_expression, fields):
def find_idx(name):
for idx, field in enumerate(fields):
if name == field:
return idx
return -1
if type(p4_expression) is p4.p4_expression:
new_expr = p4.p4_expression(op=p4_expression.op)
new_expr.left = header_length_exp_format(p4_expression.left, fields)
new_expr.right = header_length_exp_format(p4_expression.right, fields)
return new_expr
elif type(p4_expression) is str: # refers to field in same header
idx = find_idx(p4_expression)
assert(idx >= 0)
# trick so that dump_expression uses local for this
return p4.p4_signature_ref(idx)
else:
return p4_expression
def add_pragmas(json_item, p4_object):
json_item["pragmas"] = list(p4_object._pragmas)
def dump_header_types(json_dict, hlir, keep_pragmas=False):
header_types = []
id_ = 0
for name, p4_header in hlir.p4_headers.items():
header_type_dict = OrderedDict()
header_type_dict["name"] = name
header_type_dict["id"] = id_
id_ += 1
fixed_width = 0
for field, bit_width in p4_header.layout.items():
if bit_width != p4.P4_AUTO_WIDTH:
fixed_width += bit_width
fields = []
for field, bit_width in p4_header.layout.items():
if bit_width == p4.P4_AUTO_WIDTH:
bit_width = "*"
signed = False
saturating = p4.P4_SATURATING in p4_header.attributes[field]
# to avoid too many differences with previous JSON outputs
if saturating:
fields.append([field, bit_width, signed, saturating])
else:
fields.append([field, bit_width])
header_type_dict["fields"] = fields
length_exp = None
max_length = None
if p4_header.flex_width:
length_exp = header_length_exp_format(p4_header.length,
zip(*fields)[0])
# bm expects a length in bits
length_exp = p4.p4_expression(length_exp, "*", 8)
length_exp = p4.p4_expression(length_exp, "-", fixed_width)
length_exp = dump_expression(length_exp)
max_length = p4_header.max_length
header_type_dict["length_exp"] = length_exp
header_type_dict["max_length"] = max_length
if keep_pragmas:
add_pragmas(header_type_dict, p4_header)
header_types.append(header_type_dict)
json_dict["header_types"] = header_types
def dump_headers(json_dict, hlir, keep_pragmas=False):
headers = []
id_ = 0
for name, p4_header_instance in hlir.p4_header_instances.items():
if p4_header_instance.virtual:
continue
header_instance_dict = OrderedDict()
header_instance_dict["name"] = name
header_instance_dict["id"] = id_
id_ += 1
header_instance_dict["header_type"] =\
p4_header_instance.header_type.name
header_instance_dict["metadata"] = p4_header_instance.metadata
if keep_pragmas:
add_pragmas(header_instance_dict, p4_header_instance)
headers.append(header_instance_dict)
json_dict["headers"] = headers
def dump_header_stacks(json_dict, hlir, keep_pragmas=False):
header_stacks = []
class HST:
def __init__(self, name, size, header_type):
self.name = name
self.size = size
self.header_type = header_type
self.ids = []
def add_header_id(self, header_id):
self.ids.append(header_id)
my_stacks = {}
header_id = 0
for name, p4_header_instance in hlir.p4_header_instances.items():
if p4_header_instance.virtual:
continue
header_id += 1
if p4_header_instance.max_index is None:
continue
base_name = p4_header_instance.base_name
if base_name not in my_stacks:
my_stacks[base_name] = HST(base_name,
p4_header_instance.max_index + 1,
p4_header_instance.header_type.name)
my_stacks[base_name]._pragmas = p4_header_instance._pragmas
my_stacks[base_name].add_header_id(header_id - 1)
id_ = 0
for base_name, hst in my_stacks.items():
header_stack_dict = OrderedDict()
header_stack_dict["name"] = base_name
header_stack_dict["id"] = id_
id_ += 1
header_stack_dict["size"] = hst.size
header_stack_dict["header_type"] = hst.header_type
header_stack_dict["header_ids"] = hst.ids
if keep_pragmas:
add_pragmas(header_stack_dict, hst)
header_stacks.append(header_stack_dict)
json_dict["header_stacks"] = header_stacks
def field_suffix(p4_field):
suffix = p4_field.name
if suffix == "valid":
suffix = "$valid$"
return suffix
def format_field_ref(p4_field):
header = p4_field.instance
prefix = header.name
if header.virtual:
prefix = header.base_name
suffix = field_suffix(p4_field)
return [prefix, suffix]
def header_type_field_offset(p4_header_type, fname):
for idx, f in enumerate(p4_header_type.layout):
if f == fname:
return idx
LOG_CRITICAL("No field {} in header type {}".format( # pragma: no cover
fname, p4_header_type.name))
def format_field_ref_expression(p4_field, in_expression=True):
header = p4_field.instance
suffix = field_suffix(p4_field)
expr = OrderedDict()
# support for hs[last].f in expressions
if header.virtual:
assert(header.index in {p4.P4_NEXT, p4.P4_LAST})
if header.index == p4.P4_NEXT: # pragma: no cover
LOG_CRITICAL(
"'next' is not supported as a stack index in expressions")
def make_expression(op, L, R):
e = OrderedDict(
[("type", "expression"), ("value", OrderedDict(
[("op", op), ("left", L), ("right", R)]))])
return e
hs = OrderedDict(
[("type", "header_stack"), ("value", header.base_name)])
e = make_expression(
"access_field",
make_expression(
"dereference_stack",
hs,
make_expression("last_stack_index", None, hs)),
header_type_field_offset(header.header_type, suffix))
if not in_expression:
expr["type"] = "expression"
expr["value"] = e
else:
expr = e
else:
expr["type"] = "field"
expr["value"] = format_field_ref(p4_field)
return expr
def format_hexstr(i):
# Python appends a L at the end of a long number representation, which we
# need to remove
return hex(i).rstrip("L")
# for p4 v1.1
def is_register_ref(obj):
try:
return (type(obj) is p4.p4_register_ref)
except AttributeError:
return False
def format_register_ref(p4_register_ref):
return [p4_register_ref.register_name, dump_expression(p4_register_ref.idx)]
def build_match_value(widths, value):
res = ""
for width in reversed(widths):
mask = (1 << width) - 1
val = value & mask
num_bytes = (width + 7) // 8
res = "{0:0{1}x}".format(val, 2 * num_bytes) + res
value = value >> width
return "0x" + res
def get_match_value_width(widths):
return sum([(width + 7) // 8 for width in widths])
@static_var("parse_state_id", 0)
def dump_one_parser(parser_name, parser_id, p4_start_state, keep_pragmas=False):
parser_dict = OrderedDict()
parser_dict["name"] = parser_name
parser_dict["id"] = parser_id
parser_dict["init_state"] = p4_start_state.name
parse_states = []
accessible_parse_states = set()
accessible_parse_states_ordered = []
def find_accessible_parse_states(parse_state):
if parse_state in accessible_parse_states:
return
accessible_parse_states.add(parse_state)
accessible_parse_states_ordered.append(parse_state)
for _, next_state in parse_state.branch_to.items():
if isinstance(next_state, p4.p4_parse_state):
find_accessible_parse_states(next_state)
find_accessible_parse_states(p4_start_state)
for p4_parse_state in accessible_parse_states_ordered:
parse_state_dict = OrderedDict()
parse_state_dict["name"] = p4_parse_state.name
parse_state_dict["id"] = dump_one_parser.parse_state_id
dump_one_parser.parse_state_id += 1
parser_ops = []
for parser_op in p4_parse_state.call_sequence:
parser_op_dict = OrderedDict()
op_type = parser_op[0]
parameters = []
if op_type == p4.parse_call.extract:
parser_op_dict["op"] = "extract"
header = parser_op[1]
param_dict = OrderedDict()
if header.virtual:
param_dict["type"] = "stack"
param_dict["value"] = header.base_name
else:
param_dict["type"] = "regular"
param_dict["value"] = header.name
parameters.append(param_dict)
elif op_type == p4.parse_call.set:
parser_op_dict["op"] = "set"
dest_field, src = parser_op[1], parser_op[2]
assert(type(dest_field) is p4.p4_field and
"parser assignment target should be a field")
dest_dict = OrderedDict()
src_dict = OrderedDict()
dest_dict["type"] = "field"
dest_dict["value"] = format_field_ref(dest_field)
parameters.append(dest_dict)
if type(src) is int or type(src) is long:
src_dict["type"] = "hexstr"
src_dict["value"] = format_hexstr(src)
elif type(src) is p4.p4_field:
src_dict = format_field_ref_expression(src, False)
elif type(src) is tuple:
src_dict["type"] = "lookahead"
src_dict["value"] = list(src)
elif type(src) is p4.p4_expression:
src_dict["type"] = "expression"
src_dict["value"] = dump_expression(src)
else: # pragma: no cover
LOG_CRITICAL("invalid src type for set_metadata: %s",
type(src))
parameters.append(src_dict)
else: # pragma: no cover
LOG_CRITICAL("invalid parser operation: %s", op_type)
parser_op_dict["parameters"] = parameters
parser_ops.append(parser_op_dict)
parse_state_dict["parser_ops"] = parser_ops
transition_key = []
field_widths = []
for switch_ref in p4_parse_state.branch_on:
switch_ref_dict = OrderedDict()
if type(switch_ref) is p4.p4_field:
field_widths.append(switch_ref.width)
header = switch_ref.instance
if header.virtual:
switch_ref_dict["type"] = "stack_field"
else:
switch_ref_dict["type"] = "field"
switch_ref_dict["value"] = format_field_ref(switch_ref)
elif type(switch_ref) is tuple:
field_widths.append(switch_ref[1])
switch_ref_dict["type"] = "lookahead"
switch_ref_dict["value"] = list(switch_ref)
else: # pragma: no cover
LOG_CRITICAL("not supported")
transition_key.append(switch_ref_dict)
parse_state_dict["transition_key"] = transition_key
transitions = []
for branch_case, next_state in p4_parse_state.branch_to.items():
transition_dict = OrderedDict()
value, mask, type_ = None, None, None
if branch_case == p4.P4_DEFAULT:
type_ = "default"
elif type(branch_case) is int:
type_ = "hexstr"
value = build_match_value(field_widths, branch_case)
elif type(branch_case) is tuple:
type_ = "hexstr"
value, mask = (build_match_value(field_widths, branch_case[0]),
build_match_value(field_widths, branch_case[1]))
elif type(branch_case) is p4.p4_parse_value_set:
type_ = "parse_vset"
value = branch_case.name
# mask not supported yet in compiler, even though it is
# supported in bmv2
mask = None
vset_bits = sum(field_widths)
if value in dump_parsers.vset_widths:
curr_bits = dump_parsers.vset_widths[value]
if curr_bits != vset_bits: # pragma: no cover
LOG_CRITICAL("when parser value set used multiple "
"times, widths cannot clash")
else:
dump_parsers.vset_widths[value] = vset_bits
else: # pragma: no cover
LOG_CRITICAL("invalid parser branching")
transition_dict["type"] = type_
transition_dict["value"] = value
transition_dict["mask"] = mask
if isinstance(next_state, p4.p4_parse_state):
transition_dict["next_state"] = next_state.name
else:
# we do not support control flows here anymore
transition_dict["next_state"] = None
transitions.append(transition_dict)
parse_state_dict["transitions"] = transitions
if keep_pragmas:
add_pragmas(parse_state_dict, p4_parse_state)
parse_states.append(parse_state_dict)
parser_dict["parse_states"] = parse_states
return parser_dict
@static_var("vset_widths", {})
def dump_parsers(json_dict, hlir, keep_pragmas=False):
parsers = []
parser_id = 0
for name, p4_parse_state in hlir.p4_parse_states.items():
# print 'DBG|P4C_BM|gen_json|dump_parsers:', name, p4_parse_state
new_name = None
if name == "start":
new_name = "parser"
elif "packet_entry" in p4_parse_state._pragmas:
new_name = name
if new_name:
parsers.append(dump_one_parser(
new_name, parser_id, p4_parse_state, keep_pragmas=keep_pragmas))
parser_id += 1
json_dict["parsers"] = parsers
def dump_parse_vsets(json_dict, hlir, keep_pragmas=False):
vsets = []
vset_id = 0
for name, vset in hlir.p4_parse_value_sets.items():
if name not in dump_parsers.vset_widths: # pragma: no cover
LOG_WARNING("Parser value set {} not used, cannot infer width; "
"removing it".format(name))
continue
vset_dict = OrderedDict()
vset_dict["name"] = name
vset_dict["id"] = vset_id
vset_id += 1
vset_dict["compressed_bitwidth"] = dump_parsers.vset_widths[name]
if keep_pragmas:
add_pragmas(vset_dict, vset)
vsets.append(vset_dict)
json_dict["parse_vsets"] = vsets
def process_forced_header_ordering(hlir, ordering):
p4_ordering = []
for hdr_name in ordering:
if hdr_name in hlir.p4_header_instances:
p4_ordering.append(hlir.p4_header_instances[hdr_name])
elif hdr_name + "[0]" in hlir.p4_header_instances:
hdr_0 = hlir.p4_header_instances[hdr_name + "[0]"]
for index in xrange(hdr_0.max_index + 1):
indexed_name = hdr_name + "[" + str(index) + "]"
p4_ordering.append(hlir.p4_header_instances[indexed_name])
else:
return None
return p4_ordering
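# For reference, this consumes the words of a '@pragma header_ordering ...'
# annotation (picked up in produce_parser_topo_sorting below). E.g., assuming
# instances named ethernet, vlan_tag[...] and ipv4 exist, a pragma such as
#   @pragma header_ordering ethernet vlan_tag ipv4
# expands vlan_tag into vlan_tag[0], vlan_tag[1], ... and fixes the deparse order.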
def produce_parser_topo_sorting(hlir, p4_start_state):
header_graph = Graph()
# Helps reduce the running time of this function by caching visited
# states. I claim that new edges cannot be added to the graph if I end up at
# the same parse state, with the same previous node and the same tag stacks
# indices.
class State:
def __init__(self, parse_state, prev_hdr_node, tag_stacks_index):
self.current_state = parse_state
self.prev_hdr_node = prev_hdr_node
self.stacks = frozenset(tag_stacks_index.items())
def __eq__(self, other):
return (self.current_state == other.current_state)\
and (self.prev_hdr_node == other.prev_hdr_node)\
and (self.stacks == other.stacks)
def __hash__(self):
return hash((self.current_state, self.prev_hdr_node, self.stacks))
def __ne__(self, other): # pragma: no cover
return not (self == other)
def __str__(self): # pragma: no cover
return "{}, {}, {}".format(
self.current_state, self.prev_hdr_node, self.stacks)
# Now that I have recursion_states, do I still need visited?
def walk_rec(hlir, parse_state, prev_hdr_node, tag_stacks_index, visited,
recursion_states):
assert(isinstance(parse_state, p4.p4_parse_state))
rec_state = State(parse_state, prev_hdr_node, tag_stacks_index)
if rec_state in recursion_states:
return
recursion_states.add(rec_state)
for call in parse_state.call_sequence:
call_type = call[0]
if call_type == p4.parse_call.extract:
hdr = call[1]
if hdr.virtual:
base_name = hdr.base_name
current_index = tag_stacks_index[base_name]
if current_index > hdr.max_index:
return
tag_stacks_index[base_name] += 1
name = base_name + "[%d]" % current_index
hdr = hlir.p4_header_instances[name]
# takes care of loops in parser (e.g. for TLV parsing)
elif parse_state in visited:
return
if hdr not in header_graph:
header_graph.add_node(hdr)
hdr_node = header_graph.get_node(hdr)
if prev_hdr_node:
prev_hdr_node.add_edge_to(hdr_node)
else:
header_graph.root = hdr
prev_hdr_node = hdr_node
for branch_case, next_state in parse_state.branch_to.items():
if not next_state:
continue
if not isinstance(next_state, p4.p4_parse_state):
continue
walk_rec(hlir, next_state, prev_hdr_node,
tag_stacks_index.copy(), visited | {parse_state},
recursion_states)
for pragma in p4_start_state._pragmas:
try:
words = pragma.split()
if words[0] != "header_ordering":
continue
except: # pragma: no cover
continue
sorting = process_forced_header_ordering(hlir, words[1:])
if sorting is None: # pragma: no cover
LOG_CRITICAL("invalid 'header_ordering' pragma")
return sorting
walk_rec(hlir, p4_start_state, None, defaultdict(int), set(), set())
header_topo_sorting = header_graph.produce_topo_sorting()
if header_topo_sorting is None: # pragma: no cover
LOG_CRITICAL("could not produce topo sorting because of cycles")
return header_topo_sorting
@static_var("header_set", set())
def dump_one_deparser(deparser_name, deparser_id, p4_start_state, hlir):
deparser_dict = OrderedDict()
deparser_dict["name"] = deparser_name
deparser_dict["id"] = deparser_id
header_topo_sorting = produce_parser_topo_sorting(hlir, p4_start_state)
deparser_order = [hdr.name for hdr in header_topo_sorting]
deparser_dict["order"] = deparser_order
dump_one_deparser.header_set.update(set(header_topo_sorting))
return deparser_dict
def check_added_headers_in_parse_graph(hlir, parsed_header_set, p4_v1_1=False):
# In P4 v1.1 the push primitive is handled a little differently; since that
# version is deprecated, it is not worth implementing that check for it.
if p4_v1_1:
return
table_actions_set = get_p4_action_set(hlir)
for action in table_actions_set:
for call in action.flat_call_sequence:
primitive_name = call[0].name
# In the HLIR, the first argument to 'push' which is a header stack
# in the P4 program is replaced by a reference to the first header
# instance in the stack, which is why we can use the same code for
# add_header and push
if primitive_name not in {"add_header", "push"}:
continue
hdr = call[1][0]
assert(isinstance(hdr, p4.p4_header_instance))
if hdr not in parsed_header_set:
LOG_WARNING("Header '{}' is added by the control flow but "
"is not part of any parse graph, so it cannot be "
"deparsed".format(hdr.name))
def dump_deparsers(json_dict, hlir, p4_v1_1=False):
deparsers = []
deparser_id = 0
for name, p4_parse_state in hlir.p4_parse_states.items():
new_name = None
if name == "start":
new_name = "deparser"
elif "packet_entry" in p4_parse_state._pragmas:
new_name = name
if new_name:
deparsers.append(
dump_one_deparser(new_name, deparser_id, p4_parse_state, hlir))
deparser_id += 1
check_added_headers_in_parse_graph(hlir, dump_one_deparser.header_set,
p4_v1_1=p4_v1_1)
json_dict["deparsers"] = deparsers
def dump_expression(p4_expression):
if p4_expression is None:
return None
expression_dict = OrderedDict()
if type(p4_expression) is int:
expression_dict["type"] = "hexstr"
expression_dict["value"] = format_hexstr(p4_expression)
elif type(p4_expression) is p4.p4_sized_integer:
expression_dict["type"] = "hexstr"
expression_dict["value"] = format_hexstr(p4_expression)
elif type(p4_expression) is bool:
expression_dict["type"] = "bool"
expression_dict["value"] = p4_expression
elif type(p4_expression) is p4.p4_header_instance:
expression_dict["type"] = "header"
expression_dict["value"] = p4_expression.name
elif type(p4_expression) is p4.p4_field:
expression_dict = format_field_ref_expression(p4_expression, True)
elif type(p4_expression) is p4.p4_signature_ref:
expression_dict["type"] = "local"
expression_dict["value"] = p4_expression.idx
elif is_register_ref(p4_expression):
expression_dict["type"] = "register"
expression_dict["value"] = format_register_ref(p4_expression)
else:
expression_dict["type"] = "expression"
expression_dict["value"] = OrderedDict()
if type(p4_expression.op) is p4.p4_expression: # ternary operator
expression_dict["value"]["op"] = "?"
expression_dict["value"]["cond"] = dump_expression(p4_expression.op)
else:
expression_dict["value"]["op"] = p4_expression.op
expression_dict["value"]["left"] = dump_expression(p4_expression.left)
expression_dict["value"]["right"] = dump_expression(p4_expression.right)
# expression_dict["op"] = p4_expression.op
# expression_dict["left"] = dump_expression(p4_expression.left)
# expression_dict["right"] = dump_expression(p4_expression.right)
return expression_dict
def get_nodes(pipe_ptr, node_set):
if pipe_ptr is None:
return
if pipe_ptr in node_set:
return
node_set.add(pipe_ptr)
for next_node in pipe_ptr.next_.values():
get_nodes(next_node, node_set)
def match_type_to_str(p4_match_type):
match_types_map = {
p4.p4_match_type.P4_MATCH_EXACT: "exact",
p4.p4_match_type.P4_MATCH_LPM: "lpm",
p4.p4_match_type.P4_MATCH_TERNARY: "ternary",
p4.p4_match_type.P4_MATCH_VALID: "valid",
p4.p4_match_type.P4_MATCH_RANGE: "range"
}
if p4_match_type not in match_types_map: # pragma: no cover
LOG_CRITICAL("found invalid match type")
return match_types_map[p4_match_type]
def get_table_match_type(p4_table):
match_types = []
for _, m_type, _ in p4_table.match_fields:
match_types.append(match_type_to_str(m_type))
if len(match_types) == 0:
match_type = "exact"
elif "range" in match_types:
match_type = "range"
elif "ternary" in match_types:
match_type = "ternary"
elif match_types.count("lpm") >= 2: # pragma: no cover
LOG_CRITICAL("cannot have 2 different lpm matches in a single table")
elif "lpm" in match_types:
match_type = "lpm"
else:
# that includes the case when we only have one valid match and
# nothing else
match_type = "exact"
return match_type
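# Illustrative precedence (taken from the branches above): any range key makes the
# table "range"; otherwise any ternary key makes it "ternary"; a single lpm key
# makes it "lpm"; exact/valid-only (or no keys) falls back to "exact".
# E.g. keys [exact, lpm] -> "lpm"; keys [lpm, ternary] -> "ternary".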
def get_table_type(p4_table):
act_prof = p4_table.action_profile
if act_prof is None:
table_type = "simple"
elif act_prof.selector is None:
table_type = "indirect"
else:
table_type = "indirect_ws"
return table_type
@static_var("referenced", {})
@static_var("act_prof_id", 0)
def dump_action_profile(pipe_name, action_profiles, p4_action_profile,
keep_pragmas=False):
# check that the same action profile is not referenced across multiple
# control flows. This is somewhat of an artificial restriction imposed by the
# pipeline abstraction in the JSON
if p4_action_profile in dump_action_profile.referenced:
if dump_action_profile.referenced[p4_action_profile] != pipe_name:
LOG_CRITICAL("action profile {} cannot be referenced in different "
"control flows".format(p4_action_profile.name))
else:
dump_action_profile.referenced[p4_action_profile] = pipe_name
act_prof_dict = OrderedDict()
act_prof_dict["name"] = p4_action_profile.name
act_prof_dict["id"] = dump_action_profile.act_prof_id
dump_action_profile.act_prof_id += 1
act_prof_dict["max_size"] = p4_action_profile.size
if p4_action_profile.selector is not None:
p4_selector = p4_action_profile.selector
selector = OrderedDict()
selector["algo"] = p4_selector.selection_key.algorithm
elements = []
assert(len(p4_selector.selection_key.input) == 1)
for field in p4_selector.selection_key.input[0].fields:
element_dict = OrderedDict()
if type(field) is not p4.p4_field: # pragma: no cover
LOG_CRITICAL("only fields supported in field lists")
element_dict["type"] = "field"
element_dict["value"] = format_field_ref(field)
elements.append(element_dict)
selector["input"] = elements
act_prof_dict["selector"] = selector
if keep_pragmas:
add_pragmas(act_prof_dict, p4_action_profile)
action_profiles.append(act_prof_dict)
@static_var("pipeline_id", 0)
@static_var("table_id", 0)
@static_var("condition_id", 0)
def dump_one_pipeline(json_dict, pipe_name, pipe_ptr, hlir, keep_pragmas=False):
def get_table_name(p4_table):
if not p4_table:
return None
return p4_table.name
def table_has_counters(p4_table):
for name, counter in hlir.p4_counters.items():
if counter.binding == (p4.P4_DIRECT, p4_table):
return True
return False
def table_direct_meters(p4_table):
for name, meter in hlir.p4_meters.items():
if meter.binding == (p4.P4_DIRECT, p4_table):
return name
return None
pipeline_dict = OrderedDict()
pipeline_dict["name"] = pipe_name
pipeline_dict["id"] = dump_one_pipeline.pipeline_id
dump_one_pipeline.pipeline_id += 1
pipeline_dict["init_table"] = get_table_name(pipe_ptr)
node_set = set()
get_nodes(pipe_ptr, node_set)
LOG_INFO("Gen json for pipeline, node_set: %s", node_set)
tables = []
action_profiles = []
for name, table in hlir.p4_tables.items():
LOG_INFO("gen json, p4 tables, name and table: %s %s", name, table)
if table not in node_set:
continue
table_dict = OrderedDict()
table_dict["name"] = name
table_dict["id"] = dump_one_pipeline.table_id
dump_one_pipeline.table_id += 1
match_type = get_table_match_type(table)
table_dict["match_type"] = match_type
table_dict["type"] = get_table_type(table)
if table_dict["type"] == "indirect" or\
table_dict["type"] == "indirect_ws":
table_dict["action_profile"] = table.action_profile.name
dump_action_profile(pipe_name, action_profiles,
table.action_profile, keep_pragmas=keep_pragmas)
table_dict["max_size"] = table.max_size if table.max_size else 16384
# TODO(antonin): update counters to be the same as direct meters, but
# that would make the JSON non-backwards compatible
table_dict["with_counters"] = table_has_counters(table)
table_dict["direct_meters"] = table_direct_meters(table)
table_dict["support_timeout"] = table.support_timeout
key = []
for field_ref, m_type, mask in table.match_fields:
key_field = OrderedDict()
match_type = match_type_to_str(m_type)
key_field["match_type"] = match_type
if(match_type == "valid"):
if isinstance(field_ref, p4.p4_field):
header_ref = field_ref.instance
else:
header_ref = field_ref
assert(type(header_ref) is p4.p4_header_instance)
key_field["target"] = header_ref.name
else:
key_field["target"] = format_field_ref(field_ref)
if mask:
if match_type == "valid":
LOG_WARNING("a field mask does not make much sense for a "
"valid match")
field_width = 1
else:
assert(isinstance(field_ref, p4.p4_field))
field_width = field_ref.width
# re-using this function (used by parser)
mask = build_match_value([field_width], mask)
LOG_INFO("you are using a mask in a match table, "
"this is still an experimental feature")
else:
mask = None # should already be the case
key_field["mask"] = mask
key.append(key_field)
table_dict["key"] = key
table_dict["actions"] = [a.name for a in table.actions]
next_tables = OrderedDict()
if "hit" in table.next_:
next_tables["__HIT__"] = get_table_name(table.next_["hit"])
next_tables["__MISS__"] = get_table_name(table.next_["miss"])
else:
for a, nt in table.next_.items():
next_tables[a.name] = get_table_name(nt)
table_dict["next_tables"] = next_tables
# temporarily not covered by tests, because not part of P4 spec
if hasattr(table, "default_action") and\
table.default_action is not None:
LOG_INFO("you are using the default_entry table attribute, "
"this is still an experimental feature")
action, data = table.default_action
default_entry = OrderedDict()
for j_action in json_dict["actions"]:
if j_action["name"] == action.name:
default_entry["action_id"] = j_action["id"]
default_entry["action_const"] = True
if data is not None:
default_entry["action_data"] = [format_hexstr(i) for i in data]
default_entry["action_entry_const"] = False
table_dict["default_entry"] = default_entry
# TODO: temporary, to ensure backwards compatibility
if hasattr(table, "base_default_next"):
table_dict["base_default_next"] = get_table_name(
table.base_default_next)
else: # pragma: no cover
LOG_WARNING("Your 'p4-hlir' is out-of-date, consider updating")
if keep_pragmas:
add_pragmas(table_dict, table)
tables.append(table_dict)
# print 'LOG|Gen json for pipeline|tables:', tables
pipeline_dict["tables"] = tables
pipeline_dict["action_profiles"] = action_profiles
conditionals = []
for name, cnode in hlir.p4_conditional_nodes.items():
if cnode not in node_set:
continue
conditional_dict = OrderedDict()
conditional_dict["name"] = name
conditional_dict["id"] = dump_one_pipeline.condition_id
dump_one_pipeline.condition_id += 1
conditional_dict["expression"] = dump_expression(cnode.condition)
conditional_dict["true_next"] = get_table_name(cnode.next_[True])
conditional_dict["false_next"] = get_table_name(cnode.next_[False])
if keep_pragmas:
add_pragmas(conditional_dict, cnode)
conditionals.append(conditional_dict)
pipeline_dict["conditionals"] = conditionals
return pipeline_dict
def dump_pipelines(json_dict, hlir, keep_pragmas=False):
pipelines = []
# 2 pipelines: ingress and egress
assert(len(hlir.p4_ingress_ptr) == 1 and "only one ingress ptr supported")
ingress_ptr = hlir.p4_ingress_ptr.keys()[0]
pipelines.append(dump_one_pipeline(
json_dict, "ingress", ingress_ptr, hlir, keep_pragmas=keep_pragmas))
egress_ptr = hlir.p4_egress_ptr
pipelines.append(dump_one_pipeline(
json_dict, "egress", egress_ptr, hlir, keep_pragmas=keep_pragmas))
json_dict["pipelines"] = pipelines
def index_OrderedDict(self, kf):
idx = 0
for k, v in self.items():
if(k == kf):
return idx
idx += 1
OrderedDict.index = index_OrderedDict
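# Illustrative use of the OrderedDict.index monkey-patch above: it returns the
# positional index of a key in insertion order (and falls off the loop, returning
# None, for a missing key):
#
#   d = OrderedDict([("a", 1), ("b", 2)])
#   d.index("b")   # -> 1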
# TODO: unify with method below
@static_var("ids", {})
def field_list_to_learn_id(p4_field_list):
ids = field_list_to_learn_id.ids
if p4_field_list in ids:
return ids[p4_field_list]
idx = len(ids) + 1
ids[p4_field_list] = idx
return idx
@static_var("ids", {})
def field_list_to_id(p4_field_list):
ids = field_list_to_id.ids
if p4_field_list in ids:
return ids[p4_field_list]
idx = len(ids) + 1
ids[p4_field_list] = idx
return idx
def get_p4_action_set(hlir):
table_actions_set = set()
for _, table in hlir.p4_tables.items():
for action in table.actions:
table_actions_set.add(action)
return table_actions_set
def dump_actions(json_dict, hlir, p4_v1_1=False, keep_pragmas=False):
actions = []
action_id = 0
table_actions_set = get_p4_action_set(hlir)
for action in table_actions_set:
action_dict = OrderedDict()
action_dict["name"] = action.name
action_dict["id"] = action_id
action_id += 1
runtime_data = []
param_with_bit_widths = OrderedDict()
for param, width in zip(action.signature, action.signature_widths):
if not width: # pragma: no cover
LOG_CRITICAL("unused parameter in action def")
param_with_bit_widths[param] = width
param_dict = OrderedDict()
param_dict["name"] = param
param_dict["bitwidth"] = width
runtime_data.append(param_dict)
action_dict["runtime_data"] = runtime_data
def is_stack_ref(call_idx, arg_idx, primitive_name):
# legacy case
if not hasattr(action, "stack_indices"): # pragma: no cover
return (primitive_name in {"push", "pop"} and arg_idx == 0)
stack_indices = action.stack_indices[call_idx]
return (arg_idx in stack_indices)
primitives = []
for call_idx, call in enumerate(action.flat_call_sequence):
primitive_dict = OrderedDict()
if p4_v1_1 and type(call[0]) is p4.p4_extern_method:
primitive_name = "_" + call[0].parent.extern_type.name \
+ "_" + call[0].name
primitive_dict["op"] = primitive_name
args = [call[0].parent.name] + call[1]
else:
primitive_name = call[0].name
primitive_dict["op"] = primitive_name
args = call[1]
# backwards compatibility with older P4 programs
if primitive_name == "modify_field" and len(args) == 3:
LOG_WARNING(
"Your P4 program uses the modify_field() action primitive "
"with 3 arguments (aka masked modify), bmv2 does not "
"support it anymore and this compiler will replace your "
"modify_field(a, b, c) with "
"modify_field(a, (a & ~c) | (b & c))")
Lexpr = p4.p4_expression(args[0], "&",
p4.p4_expression(None, "~", args[2]))
Rexpr = p4.p4_expression(args[1], "&", args[2])
new_arg = p4.p4_expression(Lexpr, "|", Rexpr)
args = [args[0], new_arg]
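# Worked check of the masked-modify rewrite above, with small illustrative values:
# with a = 0b1010, b = 0b0101 and mask c = 0b0011,
#   (a & ~c) | (b & c) = 0b1000 | 0b0001 = 0b1001
# i.e. only the bits selected by c are taken from b, the remaining bits keep a's value.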
primitive_args = []
for arg_idx, arg in enumerate(args):
arg_dict = OrderedDict()
if type(arg) is int or type(arg) is long:
arg_dict["type"] = "hexstr"
arg_dict["value"] = format_hexstr(arg)
elif type(arg) is p4.p4_sized_integer:
# TODO(antonin)
arg_dict["type"] = "hexstr"
arg_dict["value"] = format_hexstr(arg)
elif type(arg) is p4.p4_field:
arg_dict["type"] = "field"
arg_dict["value"] = format_field_ref(arg)
elif type(arg) is p4.p4_header_instance:
arg_dict["type"] = "header"
arg_dict["value"] = arg.name
elif p4_v1_1 and type(arg) is p4.p4_header_stack:
arg_dict["type"] = "header_stack"
arg_dict["value"] = re.sub(r'\[.*\]', '', arg.name)
elif type(arg) is p4.p4_signature_ref:
arg_dict["type"] = "runtime_data"
arg_dict["value"] = arg.idx
elif type(arg) is p4.p4_field_list:
# hack for generate_digest calls
if primitive_name == "generate_digest":
id_ = field_list_to_learn_id(arg)
elif "clone" in primitive_name or\
primitive_name in {"resubmit", "recirculate"}:
id_ = field_list_to_id(arg)
arg_dict["type"] = "hexstr"
arg_dict["value"] = format_hexstr(id_)
elif type(arg) is p4.p4_field_list_calculation:
arg_dict["type"] = "calculation"
arg_dict["value"] = arg.name
elif type(arg) is p4.p4_meter:
arg_dict["type"] = "meter_array"
arg_dict["value"] = arg.name
elif type(arg) is p4.p4_counter:
arg_dict["type"] = "counter_array"
arg_dict["value"] = arg.name
elif type(arg) is p4.p4_register:
arg_dict["type"] = "register_array"
arg_dict["value"] = arg.name
elif type(arg) is p4.p4_expression:
arg_dict["type"] = "expression"
arg_dict["value"] = dump_expression(arg)
elif is_register_ref(arg):
arg_dict["type"] = "register"
arg_dict["value"] = format_register_ref(arg)
elif p4_v1_1 and type(call[0]) is p4.p4_extern_method:
if arg == call[0].parent.name:
arg_dict["type"] = "extern"
arg_dict["value"] = arg
else: # pragma: no cover
LOG_CRITICAL("action arg type is not supported: %s",
type(arg))
if (not p4_v1_1)\
and is_stack_ref(call_idx, arg_idx, primitive_name):
assert(arg_dict["type"] == "header")
arg_dict["type"] = "header_stack"
arg_dict["value"] = re.sub(r'\[.*\]', '', arg_dict["value"])
primitive_args.append(arg_dict)
primitive_dict["parameters"] = primitive_args
primitives.append(primitive_dict)
action_dict["primitives"] = primitives
if keep_pragmas:
add_pragmas(action_dict, action)
actions.append(action_dict)
json_dict["actions"] = actions
def dump_calculations(json_dict, hlir, keep_pragmas):
calculations = []
id_ = 0
for name, p4_calculation in hlir.p4_field_list_calculations.items():
calc_dict = OrderedDict()
calc_dict["name"] = name
calc_dict["id"] = id_
id_ += 1
inputs = p4_calculation.input
assert(len(inputs) == 1)
input_ = inputs[0]
my_input = []
last_header = None
sum_bitwidths = 0
with_payload = False
has_var_width = False
for field in input_.fields:
if type(field) is p4.p4_field:
field_dict = OrderedDict()
field_dict["type"] = "field"
field_dict["value"] = format_field_ref(field)
last_header = field.instance
my_input.append(field_dict)
if field.width == p4.P4_AUTO_WIDTH:
has_var_width = True
else:
sum_bitwidths += field.width
elif type(field) is p4.p4_sized_integer:
field_dict = OrderedDict()
if field.width % 8 != 0: # pragma: no cover
LOG_INFO("you are using a p4 sized integer in '{}' with a "
"bitwidth which is not a multiple of 8, this is "
"still an experimental feature".format(name))
# recycling function I wrote for parser
# TODO: find a better name for it
s = build_match_value([field.width], field)
field_dict["type"] = "hexstr"
field_dict["value"] = s
field_dict["bitwidth"] = field.width
my_input.append(field_dict)
sum_bitwidths += field.width
elif field is p4.P4_PAYLOAD:
with_payload = True
# this case is treated in a somewhat special way. We look at the
# header topo sorting and add them to the calculation
# input. This is not exactly what is described in P4. This is
# obviously not optimal but payload needs to change in P4 anyway
# (it is incorrect).
# for now we hard-code "start" here; it is unsure how we want to
# handle this in the multi-parser / deparser case
topo_sorting = produce_parser_topo_sorting(
hlir, hlir.p4_parse_states["start"])
for i, h in enumerate(topo_sorting):
if h == last_header:
break
for h in topo_sorting[(i + 1):]:
field_dict = OrderedDict()
field_dict["type"] = "header"
field_dict["value"] = h.name
my_input.append(field_dict)
field_dict = OrderedDict()
field_dict["type"] = "payload"
my_input.append(field_dict)
else: # pragma: no cover
LOG_CRITICAL("field lists can only include fields")
with_byte_boundary = (sum_bitwidths % 8) == 0
if (not has_var_width)\
and with_payload\
and (not with_byte_boundary): # pragma: no cover
LOG_CRITICAL("Field list calculation '{}' is not correct; "
"it includes the packet payload but the rest of the "
"fields do not sum up to a bitwidth which is a "
"multiple of 8".format(name))
if (not has_var_width) and (not with_byte_boundary): # pragma: no cover
LOG_WARNING("Field list calculation '{}' computes over a field "
"list whose total bitwidth is not a multiple of 8; "
"this is not recommended as it can lead to undefined "
"behavior; consider adding paddding".format(name))
calc_dict["input"] = my_input
calc_dict["algo"] = p4_calculation.algorithm
# ignored in bmv2, is it a good idea?
# calc_dict["output_width"] = calculation.output_width
if keep_pragmas:
add_pragmas(calc_dict, p4_calculation)
calculations.append(calc_dict)
json_dict["calculations"] = calculations
def dump_checksums(json_dict, hlir):
checksums = []
id_ = 0
for name, p4_header_instance in hlir.p4_header_instances.items():
for field_instance in p4_header_instance.fields:
field_ref = format_field_ref(field_instance)
field_name = '.'.join(field_ref)
for calculation in field_instance.calculation:
checksum_dict = OrderedDict()
type_, calc, if_cond = calculation
if type_ == "verify": # pragma: no cover
LOG_WARNING(
"The P4 program defines a checksum verification on "
"field '{}'; as of now bmv2 ignores all checksum "
"verifications; checksum updates are processed "
"correctly.".format(field_name))
continue
different_width = (calc.output_width != field_instance.width)
if different_width: # pragma: no cover
LOG_CRITICAL(
"For checksum on field '{}', the field width is "
"different from the calulation output width."
.format(field_name))
# if we want the name to be unique, it has to (at least) include
# the name of the calculation; however, do we really need the
# name to be unique
checksum_dict["name"] = "|".join([field_name, calc.name])
checksum_dict["id"] = id_
id_ += 1
checksum_dict["target"] = field_ref
checksum_dict["type"] = "generic"
checksum_dict["calculation"] = calc.name
checksum_dict["if_cond"] = None
if if_cond is not None:
assert(type(if_cond) is p4.p4_expression)
checksum_dict["if_cond"] = dump_expression(if_cond)
checksums.append(checksum_dict)
json_dict["checksums"] = checksums
# TODO: deprecate this function and merge with the one below
def dump_learn_lists(json_dict, hlir):
learn_lists = []
learn_list_ids = field_list_to_learn_id.ids
for p4_field_list, id_ in learn_list_ids.items():
learn_list_dict = OrderedDict()
learn_list_dict["id"] = id_
learn_list_dict["name"] = p4_field_list.name
elements = []
for field in p4_field_list.fields:
element_dict = OrderedDict()
if type(field) is not p4.p4_field: # pragma: no cover
LOG_CRITICAL("only fields supported in field lists for now")
element_dict["type"] = "field"
element_dict["value"] = format_field_ref(field)
elements.append(element_dict)
learn_list_dict["elements"] = elements
learn_lists.append(learn_list_dict)
learn_lists.sort(key=lambda field_list: field_list["id"])
json_dict["learn_lists"] = learn_lists
def dump_field_lists(json_dict, hlir):
field_lists = []
list_ids = field_list_to_id.ids
for p4_field_list, id_ in list_ids.items():
field_list_dict = OrderedDict()
field_list_dict["id"] = id_
field_list_dict["name"] = p4_field_list.name
elements = []
for field in p4_field_list.fields:
element_dict = OrderedDict()
if type(field) is not p4.p4_field: # pragma: no cover
LOG_CRITICAL("only fields supported in field lists for now")
element_dict["type"] = "field"
element_dict["value"] = format_field_ref(field)
elements.append(element_dict)
field_list_dict["elements"] = elements
field_lists.append(field_list_dict)
field_lists.sort(key=lambda field_list: field_list["id"])
json_dict["field_lists"] = field_lists
def dump_meters(json_dict, hlir, keep_pragmas=False):
meters = []
id_ = 0
for name, p4_meter in hlir.p4_meters.items():
meter_dict = OrderedDict()
meter_dict["name"] = name
meter_dict["id"] = id_
id_ += 1
if p4_meter.binding and (p4_meter.binding[0] == p4.P4_DIRECT):
meter_dict["is_direct"] = True
meter_dict["binding"] = p4_meter.binding[1].name
meter_dict["size"] = p4_meter.binding[1].max_size
meter_dict["result_target"] = format_field_ref(p4_meter.result)
else:
meter_dict["is_direct"] = False
meter_dict["size"] = p4_meter.instance_count
meter_dict["rate_count"] = 2 # 2 rate, 3 colors
if p4_meter.type == p4.P4_COUNTER_BYTES:
type_ = "bytes"
elif p4_meter.type == p4.P4_COUNTER_PACKETS:
type_ = "packets"
else: # pragma: no cover
LOG_CRITICAL("invalid meter type")
meter_dict["type"] = type_
if keep_pragmas:
add_pragmas(meter_dict, p4_meter)
meters.append(meter_dict)
json_dict["meter_arrays"] = meters
def dump_counters(json_dict, hlir, keep_pragmas=False):
counters = []
id_ = 0
for name, p4_counter in hlir.p4_counters.items():
counter_dict = OrderedDict()
counter_dict["name"] = name
counter_dict["id"] = id_
id_ += 1
if p4_counter.binding and (p4_counter.binding[0] == p4.P4_DIRECT):
counter_dict["is_direct"] = True
counter_dict["binding"] = p4_counter.binding[1].name
counter_dict["size"] = p4_counter.binding[1].max_size
else:
counter_dict["is_direct"] = False
counter_dict["size"] = p4_counter.instance_count
if keep_pragmas:
add_pragmas(counter_dict, p4_counter)
counters.append(counter_dict)
json_dict["counter_arrays"] = counters
def dump_registers(json_dict, hlir, keep_pragmas=False):
registers = []
id_ = 0
for name, p4_register in hlir.p4_registers.items():
if p4_register.binding and (p4_register.binding[0] == p4.P4_DIRECT):
LOG_CRITICAL("'{}' is a direct register; direct registers are not "
"supported by bmv2".format(name))
register_dict = OrderedDict()
register_dict["name"] = name
register_dict["id"] = id_
id_ += 1
if p4_register.layout is not None: # pragma: no cover
LOG_CRITICAL("registers with layout not supported")
register_dict["bitwidth"] = p4_register.width
register_dict["size"] = p4_register.instance_count
if keep_pragmas:
add_pragmas(register_dict, p4_register)
registers.append(register_dict)
json_dict["register_arrays"] = registers
# TODO: what would be a better solution than this
def dump_force_arith(json_dict, hlir):
force_arith = []
headers = ["standard_metadata", "intrinsic_metadata"]
for header_name in headers:
if header_name not in hlir.p4_header_instances:
continue
p4_header_instance = hlir.p4_header_instances[header_name]
p4_header_type = p4_header_instance.header_type
for field, _ in p4_header_type.layout.items():
force_arith.append([header_name, field])
json_dict["force_arith"] = force_arith
def dump_field_aliases(json_dict, hlir, path_field_aliases):
aliases_dict = OrderedDict()
with open(path_field_aliases, 'r') as f:
for l in f.readlines():
l = l.strip() # remove new line character at the end
try:
alias, field = l.split()
header_name, field_name = field.split(".")
except:
LOG_CRITICAL(
"invalid alias in '{}': '{}'".format(path_field_aliases, l))
if field not in hlir.p4_fields:
LOG_CRITICAL(
"file '{}' defines an alias for '{}', "
"which is not a valid field in the P4 program".format(
path_field_aliases, field))
if alias in aliases_dict:
LOG_WARNING(
"file '{}' contains a duplicate alias: '{}'; "
"latest definition will be used".format(
path_field_aliases, alias))
aliases_dict[alias] = [header_name, field_name]
# TODO: should I use the dictionary directly instead?
field_aliases = [[a, v] for a, v in aliases_dict.items()]
json_dict["field_aliases"] = field_aliases
def dump_extern_instances(json_dict, hlir):
extern_instances = []
id_ = 0
for name, p4_extern_instance in hlir.p4_extern_instances.items():
extern_instance_dict = OrderedDict()
extern_instance_dict["name"] = name
extern_instance_dict["id"] = id_
extern_instance_dict["type"] = p4_extern_instance.extern_type.name
id_ += 1
attributes = []
for attribute, attr in p4_extern_instance.attributes.items():
attr_type = p4_extern_instance.extern_type.attributes[attribute].\
value_type.type_name
if attr_type != "bit" and attr_type != "int": # pragma: no cover
LOG_CRITICAL(
"Attribute type '{}' not supported for the "
"extern type '{}'. Supported values are bit and int".
format(attr_type, p4_extern_instance.extern_type.name))
attribute_dict = OrderedDict()
attribute_dict["name"] = attribute
attribute_dict["type"] = "hexstr"
attribute_dict["value"] = hex(attr)
attributes.append(attribute_dict)
extern_instance_dict["attribute_values"] = attributes
extern_instances.append(extern_instance_dict)
json_dict["extern_instances"] = extern_instances
def add_meta(json_dict):
meta_dict = OrderedDict()
# major and minor version numbers, a change in minor version number does not
# break backward-compatibility
meta_dict["version"] = [2, 5]
meta_dict["compiler"] = "ShadowP4 compiler: https://github.com/Lineson/SP4c-bm.git"
json_dict["__meta__"] = meta_dict
def json_dict_create(hlir, path_field_aliases=None, p4_v1_1=False,
keep_pragmas=False):
# a bit hacky: import the correct HLIR based on the P4 version
import importlib
global p4
if p4_v1_1:
p4 = importlib.import_module("p4_hlir_v1_1.hlir.p4")
else:
p4 = importlib.import_module("p4_hlir.hlir.p4")
# mostly needed for unit tests, I could write a more elegant solution...
reset_static_vars()
json_dict = OrderedDict()
add_meta(json_dict)
dump_header_types(json_dict, hlir, keep_pragmas=keep_pragmas)
dump_headers(json_dict, hlir, keep_pragmas=keep_pragmas)
dump_header_stacks(json_dict, hlir, keep_pragmas=keep_pragmas)
dump_parsers(json_dict, hlir, keep_pragmas=keep_pragmas)
dump_parse_vsets(json_dict, hlir, keep_pragmas=keep_pragmas)
dump_deparsers(json_dict, hlir, p4_v1_1=p4_v1_1)
dump_meters(json_dict, hlir, keep_pragmas=keep_pragmas)
dump_actions(json_dict, hlir, p4_v1_1=p4_v1_1, keep_pragmas=keep_pragmas)
dump_pipelines(json_dict, hlir, keep_pragmas=keep_pragmas)
dump_calculations(json_dict, hlir, keep_pragmas=keep_pragmas)
dump_checksums(json_dict, hlir)
dump_learn_lists(json_dict, hlir)
dump_field_lists(json_dict, hlir)
dump_counters(json_dict, hlir, keep_pragmas=keep_pragmas)
dump_registers(json_dict, hlir, keep_pragmas=keep_pragmas)
dump_force_arith(json_dict, hlir)
if p4_v1_1 and hlir.p4_extern_instances:
LOG_WARNING("Initial support for extern types: be aware!")
dump_extern_instances(json_dict, hlir)
if path_field_aliases:
dump_field_aliases(json_dict, hlir, path_field_aliases)
return json_dict
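# A minimal usage sketch (hypothetical driver code, not part of this module): given an
# HLIR object produced by the p4-hlir frontend, the returned OrderedDict can be written
# out with the standard json module to obtain the bmv2 configuration file.
#
#   import json
#   json_dict = json_dict_create(hlir, keep_pragmas=True)
#   with open('program.json', 'w') as f:
#       json.dump(json_dict, f, indent=4)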
|
# -*- coding: utf-8 -*-
import six
from ecl.tests.functional import base
class TestExtension(base.BaseFunctionalTest):
def test_list(self):
sots = self.conn.block_store.extensions()
self.assertGreaterEqual(len(sots), 0)
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the DataflowRunner class."""
# pytype: skip-file
from __future__ import absolute_import
import json
import sys
import unittest
from builtins import object
from builtins import range
from datetime import datetime
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import mock
import apache_beam as beam
import apache_beam.transforms as ptransform
from apache_beam.coders import BytesCoder
from apache_beam.coders import coders
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import AppliedPTransform
from apache_beam.pipeline import Pipeline
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.pvalue import PCollection
from apache_beam.runners import DataflowRunner
from apache_beam.runners import TestDataflowRunner
from apache_beam.runners import create_runner
from apache_beam.runners.dataflow.dataflow_runner import DataflowPipelineResult
from apache_beam.runners.dataflow.dataflow_runner import DataflowRuntimeException
from apache_beam.runners.dataflow.internal.clients import dataflow as dataflow_api
from apache_beam.runners.runner import PipelineState
from apache_beam.testing.extra_assertions import ExtraAssertionsMixin
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.transforms import environments
from apache_beam.transforms import window
from apache_beam.transforms.core import Windowing
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.typehints import typehints
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
apiclient = None # type: ignore
# pylint: enable=wrong-import-order, wrong-import-position
# SpecialParDo and SpecialDoFn are used in test_remote_runner_display_data.
# Due to BEAM-8482, these need to be declared outside of the test method.
# TODO: Should not subclass ParDo. Switch to PTransform as soon as
# composite transforms support display data.
class SpecialParDo(beam.ParDo):
def __init__(self, fn, now):
super(SpecialParDo, self).__init__(fn)
self.fn = fn
self.now = now
# Make this a list to be accessible within closure
def display_data(self):
return {
'asubcomponent': self.fn, 'a_class': SpecialParDo, 'a_time': self.now
}
class SpecialDoFn(beam.DoFn):
def display_data(self):
return {'dofn_value': 42}
def process(self):
pass
@unittest.skipIf(apiclient is None, 'GCP dependencies are not installed')
class DataflowRunnerTest(unittest.TestCase, ExtraAssertionsMixin):
def setUp(self):
self.default_properties = [
'--dataflow_endpoint=ignored',
'--job_name=test-job',
'--project=test-project',
'--staging_location=ignored',
'--temp_location=/dev/null',
'--no_auth',
'--dry_run=True',
'--sdk_location=container'
]
@mock.patch('time.sleep', return_value=None)
def test_wait_until_finish(self, patched_time_sleep):
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
class MockDataflowRunner(object):
def __init__(self, states):
self.dataflow_client = mock.MagicMock()
self.job = mock.MagicMock()
self.job.currentState = values_enum.JOB_STATE_UNKNOWN
self._states = states
self._next_state_index = 0
def get_job_side_effect(*args, **kwargs):
self.job.currentState = self._states[self._next_state_index]
if self._next_state_index < (len(self._states) - 1):
self._next_state_index += 1
return mock.DEFAULT
self.dataflow_client.get_job = mock.MagicMock(
return_value=self.job, side_effect=get_job_side_effect)
self.dataflow_client.list_messages = mock.MagicMock(
return_value=([], None))
with self.assertRaisesRegex(DataflowRuntimeException,
'Dataflow pipeline failed. State: FAILED'):
failed_runner = MockDataflowRunner([values_enum.JOB_STATE_FAILED])
failed_result = DataflowPipelineResult(failed_runner.job, failed_runner)
failed_result.wait_until_finish()
succeeded_runner = MockDataflowRunner([values_enum.JOB_STATE_DONE])
succeeded_result = DataflowPipelineResult(
succeeded_runner.job, succeeded_runner)
result = succeeded_result.wait_until_finish()
self.assertEqual(result, PipelineState.DONE)
# Time array has duplicate items, because some logging implementations also
# call time.
with mock.patch('time.time', mock.MagicMock(side_effect=[1, 1, 2, 2, 3])):
duration_succeeded_runner = MockDataflowRunner(
[values_enum.JOB_STATE_RUNNING, values_enum.JOB_STATE_DONE])
duration_succeeded_result = DataflowPipelineResult(
duration_succeeded_runner.job, duration_succeeded_runner)
result = duration_succeeded_result.wait_until_finish(5000)
self.assertEqual(result, PipelineState.DONE)
with mock.patch('time.time', mock.MagicMock(side_effect=[1, 9, 9, 20, 20])):
duration_timedout_runner = MockDataflowRunner(
[values_enum.JOB_STATE_RUNNING])
duration_timedout_result = DataflowPipelineResult(
duration_timedout_runner.job, duration_timedout_runner)
result = duration_timedout_result.wait_until_finish(5000)
self.assertEqual(result, PipelineState.RUNNING)
with mock.patch('time.time', mock.MagicMock(side_effect=[1, 1, 2, 2, 3])):
with self.assertRaisesRegex(DataflowRuntimeException,
'Dataflow pipeline failed. State: CANCELLED'):
duration_failed_runner = MockDataflowRunner(
[values_enum.JOB_STATE_CANCELLED])
duration_failed_result = DataflowPipelineResult(
duration_failed_runner.job, duration_failed_runner)
duration_failed_result.wait_until_finish(5000)
@mock.patch('time.sleep', return_value=None)
def test_cancel(self, patched_time_sleep):
values_enum = dataflow_api.Job.CurrentStateValueValuesEnum
class MockDataflowRunner(object):
def __init__(self, state, cancel_result):
self.dataflow_client = mock.MagicMock()
self.job = mock.MagicMock()
self.job.currentState = state
self.dataflow_client.get_job = mock.MagicMock(return_value=self.job)
self.dataflow_client.modify_job_state = mock.MagicMock(
return_value=cancel_result)
self.dataflow_client.list_messages = mock.MagicMock(
return_value=([], None))
with self.assertRaisesRegex(DataflowRuntimeException,
'Failed to cancel job'):
failed_runner = MockDataflowRunner(values_enum.JOB_STATE_RUNNING, False)
failed_result = DataflowPipelineResult(failed_runner.job, failed_runner)
failed_result.cancel()
succeeded_runner = MockDataflowRunner(values_enum.JOB_STATE_RUNNING, True)
succeeded_result = DataflowPipelineResult(
succeeded_runner.job, succeeded_runner)
succeeded_result.cancel()
terminal_runner = MockDataflowRunner(values_enum.JOB_STATE_DONE, False)
terminal_result = DataflowPipelineResult(
terminal_runner.job, terminal_runner)
terminal_result.cancel()
def test_create_runner(self):
self.assertTrue(isinstance(create_runner('DataflowRunner'), DataflowRunner))
self.assertTrue(
isinstance(create_runner('TestDataflowRunner'), TestDataflowRunner))
def test_environment_override_translation(self):
self.default_properties.append('--experiments=beam_fn_api')
self.default_properties.append('--worker_harness_container_image=FOO')
remote_runner = DataflowRunner()
with Pipeline(remote_runner,
options=PipelineOptions(self.default_properties)) as p:
( # pylint: disable=expression-not-assigned
p | ptransform.Create([1, 2, 3])
| 'Do' >> ptransform.FlatMap(lambda x: [(x, x)])
| ptransform.GroupByKey())
self.assertEqual(
list(remote_runner.proto_pipeline.components.environments.values()),
[
beam_runner_api_pb2.Environment(
urn=common_urns.environments.DOCKER.urn,
payload=beam_runner_api_pb2.DockerPayload(
container_image='FOO').SerializeToString(),
capabilities=environments.python_sdk_capabilities())
])
def test_remote_runner_translation(self):
remote_runner = DataflowRunner()
with Pipeline(remote_runner,
options=PipelineOptions(self.default_properties)) as p:
( # pylint: disable=expression-not-assigned
p | ptransform.Create([1, 2, 3])
| 'Do' >> ptransform.FlatMap(lambda x: [(x, x)])
| ptransform.GroupByKey())
def test_streaming_create_translation(self):
remote_runner = DataflowRunner()
self.default_properties.append("--streaming")
with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
p | ptransform.Create([1]) # pylint: disable=expression-not-assigned
job_dict = json.loads(str(remote_runner.job))
self.assertEqual(len(job_dict[u'steps']), 3)
self.assertEqual(job_dict[u'steps'][0][u'kind'], u'ParallelRead')
self.assertEqual(
job_dict[u'steps'][0][u'properties'][u'pubsub_subscription'],
'_starting_signal/')
self.assertEqual(job_dict[u'steps'][1][u'kind'], u'ParallelDo')
self.assertEqual(job_dict[u'steps'][2][u'kind'], u'ParallelDo')
def test_bigquery_read_streaming_fail(self):
remote_runner = DataflowRunner()
self.default_properties.append("--streaming")
with self.assertRaisesRegex(ValueError,
r'source is not currently available'):
with Pipeline(remote_runner,
PipelineOptions(self.default_properties)) as p:
_ = p | beam.io.Read(beam.io.BigQuerySource('some.table'))
def test_biqquery_read_fn_api_fail(self):
remote_runner = DataflowRunner()
for flag in ['beam_fn_api', 'use_unified_worker', 'use_runner_v2']:
self.default_properties.append("--experiments=%s" % flag)
with self.assertRaisesRegex(
ValueError,
'The Read.BigQuerySource.*is not supported.*'
'apache_beam.io.gcp.bigquery.ReadFromBigQuery.*'):
with Pipeline(remote_runner,
PipelineOptions(self.default_properties)) as p:
_ = p | beam.io.Read(beam.io.BigQuerySource('some.table'))
def test_remote_runner_display_data(self):
remote_runner = DataflowRunner()
p = Pipeline(
remote_runner, options=PipelineOptions(self.default_properties))
now = datetime.now()
# pylint: disable=expression-not-assigned
(
p | ptransform.Create([1, 2, 3, 4, 5])
| 'Do' >> SpecialParDo(SpecialDoFn(), now))
# TODO(BEAM-366) Enable runner API on this test.
p.run(test_runner_api=False)
job_dict = json.loads(str(remote_runner.job))
steps = [
step for step in job_dict['steps']
if len(step['properties'].get('display_data', [])) > 0
]
step = steps[1]
disp_data = step['properties']['display_data']
nspace = SpecialParDo.__module__ + '.'
expected_data = [{
'type': 'TIMESTAMP',
'namespace': nspace + 'SpecialParDo',
'value': DisplayDataItem._format_value(now, 'TIMESTAMP'),
'key': 'a_time'
},
{
'type': 'STRING',
'namespace': nspace + 'SpecialParDo',
'value': nspace + 'SpecialParDo',
'key': 'a_class',
'shortValue': 'SpecialParDo'
},
{
'type': 'INTEGER',
'namespace': nspace + 'SpecialDoFn',
'value': 42,
'key': 'dofn_value'
}]
self.assertUnhashableCountEqual(disp_data, expected_data)
def test_no_group_by_key_directly_after_bigquery(self):
remote_runner = DataflowRunner()
with self.assertRaises(ValueError,
msg=('Coder for the GroupByKey operation'
'"GroupByKey" is not a key-value coder: '
'RowAsDictJsonCoder')):
with beam.Pipeline(runner=remote_runner,
options=PipelineOptions(self.default_properties)) as p:
# pylint: disable=expression-not-assigned
p | beam.io.Read(
beam.io.BigQuerySource('dataset.faketable')) | beam.GroupByKey()
def test_group_by_key_input_visitor_with_valid_inputs(self):
p = TestPipeline()
pcoll1 = PCollection(p)
pcoll2 = PCollection(p)
pcoll3 = PCollection(p)
pcoll1.element_type = None
pcoll2.element_type = typehints.Any
pcoll3.element_type = typehints.KV[typehints.Any, typehints.Any]
for pcoll in [pcoll1, pcoll2, pcoll3]:
applied = AppliedPTransform(None, beam.GroupByKey(), "label", [pcoll])
applied.outputs[None] = PCollection(None)
DataflowRunner.group_by_key_input_visitor().visit_transform(applied)
self.assertEqual(
pcoll.element_type, typehints.KV[typehints.Any, typehints.Any])
def test_group_by_key_input_visitor_with_invalid_inputs(self):
p = TestPipeline()
pcoll1 = PCollection(p)
pcoll2 = PCollection(p)
pcoll1.element_type = str
pcoll2.element_type = typehints.Set
err_msg = (
r"Input to 'label' must be compatible with KV\[Any, Any\]. "
"Found .*")
for pcoll in [pcoll1, pcoll2]:
with self.assertRaisesRegex(ValueError, err_msg):
DataflowRunner.group_by_key_input_visitor().visit_transform(
AppliedPTransform(None, beam.GroupByKey(), "label", [pcoll]))
def test_group_by_key_input_visitor_for_non_gbk_transforms(self):
p = TestPipeline()
pcoll = PCollection(p)
for transform in [beam.Flatten(), beam.Map(lambda x: x)]:
pcoll.element_type = typehints.Any
DataflowRunner.group_by_key_input_visitor().visit_transform(
AppliedPTransform(None, transform, "label", [pcoll]))
self.assertEqual(pcoll.element_type, typehints.Any)
def test_flatten_input_with_visitor_with_single_input(self):
self._test_flatten_input_visitor(typehints.KV[int, int], typehints.Any, 1)
def test_flatten_input_with_visitor_with_multiple_inputs(self):
self._test_flatten_input_visitor(
typehints.KV[int, typehints.Any], typehints.Any, 5)
def _test_flatten_input_visitor(self, input_type, output_type, num_inputs):
p = TestPipeline()
inputs = []
for _ in range(num_inputs):
input_pcoll = PCollection(p)
input_pcoll.element_type = input_type
inputs.append(input_pcoll)
output_pcoll = PCollection(p)
output_pcoll.element_type = output_type
flatten = AppliedPTransform(None, beam.Flatten(), "label", inputs)
flatten.add_output(output_pcoll, None)
DataflowRunner.flatten_input_visitor().visit_transform(flatten)
for _ in range(num_inputs):
self.assertEqual(inputs[0].element_type, output_type)
def test_gbk_then_flatten_input_visitor(self):
p = TestPipeline(
runner=DataflowRunner(),
options=PipelineOptions(self.default_properties))
none_str_pc = p | 'c1' >> beam.Create({None: 'a'})
none_int_pc = p | 'c2' >> beam.Create({None: 3})
flat = (none_str_pc, none_int_pc) | beam.Flatten()
_ = flat | beam.GroupByKey()
# This may change if type inference changes, but we assert it here
# to make sure the check below is not vacuous.
self.assertNotIsInstance(flat.element_type, typehints.TupleConstraint)
p.visit(DataflowRunner.group_by_key_input_visitor())
p.visit(DataflowRunner.flatten_input_visitor())
# The dataflow runner requires gbk input to be tuples *and* flatten
# inputs to be equal to their outputs. Assert both hold.
self.assertIsInstance(flat.element_type, typehints.TupleConstraint)
self.assertEqual(flat.element_type, none_str_pc.element_type)
self.assertEqual(flat.element_type, none_int_pc.element_type)
def test_serialize_windowing_strategy(self):
# This just tests the basic path; more complete tests
# are in window_test.py.
strategy = Windowing(window.FixedWindows(10))
self.assertEqual(
strategy,
DataflowRunner.deserialize_windowing_strategy(
DataflowRunner.serialize_windowing_strategy(strategy, None)))
def test_side_input_visitor(self):
p = TestPipeline()
pc = p | beam.Create([])
transform = beam.Map(
lambda x,
y,
z: (x, y, z),
beam.pvalue.AsSingleton(pc),
beam.pvalue.AsMultiMap(pc))
applied_transform = AppliedPTransform(None, transform, "label", [pc])
DataflowRunner.side_input_visitor(
use_fn_api=True).visit_transform(applied_transform)
self.assertEqual(2, len(applied_transform.side_inputs))
for side_input in applied_transform.side_inputs:
self.assertEqual(
common_urns.side_inputs.MULTIMAP.urn,
side_input._side_input_data().access_pattern)
def test_min_cpu_platform_flag_is_propagated_to_experiments(self):
remote_runner = DataflowRunner()
self.default_properties.append('--min_cpu_platform=Intel Haswell')
with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
p | ptransform.Create([1]) # pylint: disable=expression-not-assigned
self.assertIn(
'min_cpu_platform=Intel Haswell',
remote_runner.job.options.view_as(DebugOptions).experiments)
def test_streaming_engine_flag_adds_windmill_experiments(self):
remote_runner = DataflowRunner()
self.default_properties.append('--streaming')
self.default_properties.append('--enable_streaming_engine')
self.default_properties.append('--experiment=some_other_experiment')
with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
p | ptransform.Create([1]) # pylint: disable=expression-not-assigned
experiments_for_job = (
remote_runner.job.options.view_as(DebugOptions).experiments)
self.assertIn('enable_streaming_engine', experiments_for_job)
self.assertIn('enable_windmill_service', experiments_for_job)
self.assertIn('some_other_experiment', experiments_for_job)
def test_upload_graph_experiment(self):
remote_runner = DataflowRunner()
self.default_properties.append('--experiment=upload_graph')
with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
p | ptransform.Create([1]) # pylint: disable=expression-not-assigned
experiments_for_job = (
remote_runner.job.options.view_as(DebugOptions).experiments)
self.assertIn('upload_graph', experiments_for_job)
def test_dataflow_worker_jar_flag_non_fnapi_noop(self):
remote_runner = DataflowRunner()
self.default_properties.append('--experiment=some_other_experiment')
self.default_properties.append('--dataflow_worker_jar=test.jar')
with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
p | ptransform.Create([1]) # pylint: disable=expression-not-assigned
experiments_for_job = (
remote_runner.job.options.view_as(DebugOptions).experiments)
self.assertIn('some_other_experiment', experiments_for_job)
self.assertNotIn('use_staged_dataflow_worker_jar', experiments_for_job)
def test_dataflow_worker_jar_flag_adds_use_staged_worker_jar_experiment(self):
remote_runner = DataflowRunner()
self.default_properties.append('--experiment=beam_fn_api')
self.default_properties.append('--dataflow_worker_jar=test.jar')
with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
p | ptransform.Create([1]) # pylint: disable=expression-not-assigned
experiments_for_job = (
remote_runner.job.options.view_as(DebugOptions).experiments)
self.assertIn('beam_fn_api', experiments_for_job)
self.assertIn('use_staged_dataflow_worker_jar', experiments_for_job)
def test_use_fastavro_experiment_is_added_on_py3_and_onwards(self):
remote_runner = DataflowRunner()
with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
p | ptransform.Create([1]) # pylint: disable=expression-not-assigned
self.assertEqual(
sys.version_info[0] > 2,
remote_runner.job.options.view_as(DebugOptions).lookup_experiment(
'use_fastavro', False))
def test_use_fastavro_experiment_is_not_added_when_use_avro_is_present(self):
remote_runner = DataflowRunner()
self.default_properties.append('--experiment=use_avro')
with Pipeline(remote_runner, PipelineOptions(self.default_properties)) as p:
p | ptransform.Create([1]) # pylint: disable=expression-not-assigned
debug_options = remote_runner.job.options.view_as(DebugOptions)
self.assertFalse(debug_options.lookup_experiment('use_fastavro', False))
def test_unsupported_fnapi_features(self):
remote_runner = DataflowRunner()
self.default_properties.append('--experiment=beam_fn_api')
self.default_properties.append('--experiment=use_runner_v2')
with self.assertRaisesRegex(RuntimeError, 'Unsupported merging'):
with Pipeline(remote_runner,
options=PipelineOptions(self.default_properties)) as p:
# pylint: disable=expression-not-assigned
p | beam.Create([]) | beam.WindowInto(CustomMergingWindowFn())
with self.assertRaisesRegex(RuntimeError, 'Unsupported window coder'):
with Pipeline(remote_runner,
options=PipelineOptions(self.default_properties)) as p:
# pylint: disable=expression-not-assigned
p | beam.Create([]) | beam.WindowInto(CustomWindowTypeWindowFn())
@mock.patch('os.environ.get', return_value=None)
@mock.patch('apache_beam.utils.processes.check_output', return_value=b'')
def test_get_default_gcp_region_no_default_returns_none(
self, patched_environ, patched_processes):
runner = DataflowRunner()
result = runner.get_default_gcp_region()
self.assertIsNone(result)
@mock.patch('os.environ.get', return_value='some-region1')
@mock.patch('apache_beam.utils.processes.check_output', return_value=b'')
def test_get_default_gcp_region_from_environ(
self, patched_environ, patched_processes):
runner = DataflowRunner()
result = runner.get_default_gcp_region()
self.assertEqual(result, 'some-region1')
@mock.patch('os.environ.get', return_value=None)
@mock.patch(
'apache_beam.utils.processes.check_output',
return_value=b'some-region2\n')
def test_get_default_gcp_region_from_gcloud(
self, patched_environ, patched_processes):
runner = DataflowRunner()
result = runner.get_default_gcp_region()
self.assertEqual(result, 'some-region2')
@mock.patch('os.environ.get', return_value=None)
@mock.patch(
'apache_beam.utils.processes.check_output',
side_effect=RuntimeError('Executable gcloud not found'))
def test_get_default_gcp_region_ignores_error(
self, patched_environ, patched_processes):
runner = DataflowRunner()
result = runner.get_default_gcp_region()
self.assertIsNone(result)
def test_combine_values_translation(self):
runner = DataflowRunner()
with beam.Pipeline(runner=runner,
options=PipelineOptions(self.default_properties)) as p:
( # pylint: disable=expression-not-assigned
p
| beam.Create([('a', [1, 2]), ('b', [3, 4])])
| beam.CombineValues(lambda v, _: sum(v)))
job_dict = json.loads(str(runner.job))
self.assertIn(
u'CombineValues', set(step[u'kind'] for step in job_dict[u'steps']))
def _find_step(self, job, step_name):
job_dict = json.loads(str(job))
maybe_step = [
s for s in job_dict[u'steps']
if s[u'properties'][u'user_name'] == step_name
]
self.assertTrue(maybe_step, 'Could not find step {}'.format(step_name))
return maybe_step[0]
def expect_correct_override(self, job, step_name, step_kind):
"""Expects that a transform was correctly overriden."""
# If the typing information isn't being forwarded correctly, the component
# encodings here will be incorrect.
expected_output_info = [{
"encoding": {
"@type": "kind:windowed_value",
"component_encodings": [{
"@type": "kind:bytes"
}, {
"@type": "kind:global_window"
}],
"is_wrapper": True
},
"output_name": "out",
"user_name": step_name + ".out"
}]
step = self._find_step(job, step_name)
self.assertEqual(step[u'kind'], step_kind)
# The display data here is forwarded because the replace transform is
# subclassed from iobase.Read.
self.assertGreater(len(step[u'properties']['display_data']), 0)
self.assertEqual(step[u'properties']['output_info'], expected_output_info)
def test_read_create_translation(self):
runner = DataflowRunner()
with beam.Pipeline(runner=runner,
options=PipelineOptions(self.default_properties)) as p:
# pylint: disable=expression-not-assigned
p | beam.Create([b'a', b'b', b'c'])
self.expect_correct_override(runner.job, u'Create/Read', u'ParallelRead')
def test_read_bigquery_translation(self):
runner = DataflowRunner()
with beam.Pipeline(runner=runner,
options=PipelineOptions(self.default_properties)) as p:
# pylint: disable=expression-not-assigned
p | beam.io.Read(beam.io.BigQuerySource('some.table', coder=BytesCoder()))
self.expect_correct_override(runner.job, u'Read', u'ParallelRead')
def test_read_pubsub_translation(self):
runner = DataflowRunner()
self.default_properties.append("--streaming")
with beam.Pipeline(runner=runner,
options=PipelineOptions(self.default_properties)) as p:
# pylint: disable=expression-not-assigned
p | beam.io.ReadFromPubSub(topic='projects/project/topics/topic')
self.expect_correct_override(
runner.job, u'ReadFromPubSub/Read', u'ParallelRead')
def test_gbk_translation(self):
runner = DataflowRunner()
with beam.Pipeline(runner=runner,
options=PipelineOptions(self.default_properties)) as p:
# pylint: disable=expression-not-assigned
p | beam.Create([(1, 2)]) | beam.GroupByKey()
expected_output_info = [{
"encoding": {
"@type": "kind:windowed_value",
"component_encodings": [{
"@type": "kind:pair",
"component_encodings": [{
"@type": "kind:varint"
},
{
"@type": "kind:stream",
"component_encodings": [{
"@type": "kind:varint"
}],
"is_stream_like": True
}],
"is_pair_like": True
}, {
"@type": "kind:global_window"
}],
"is_wrapper": True
},
"output_name": "out",
"user_name": "GroupByKey.out"
}] # yapf: disable
gbk_step = self._find_step(runner.job, u'GroupByKey')
self.assertEqual(gbk_step[u'kind'], u'GroupByKey')
self.assertEqual(
gbk_step[u'properties']['output_info'], expected_output_info)
def test_write_bigquery_translation(self):
runner = DataflowRunner()
self.default_properties.append('--experiments=use_legacy_bq_sink')
with beam.Pipeline(runner=runner,
options=PipelineOptions(self.default_properties)) as p:
# pylint: disable=expression-not-assigned
p | beam.Create([1]) | beam.io.WriteToBigQuery('some.table')
job_dict = json.loads(str(runner.job))
expected_step = {
"kind": "ParallelWrite",
"name": "s2",
"properties": {
"create_disposition": "CREATE_IF_NEEDED",
"dataset": "some",
"display_data": [],
"encoding": {
"@type": "kind:windowed_value",
"component_encodings": [{
"component_encodings": [],
"pipeline_proto_coder_id": "ref_Coder_RowAsDictJsonCoder_4"
}, {
"@type": "kind:global_window"
}],
"is_wrapper": True
},
"format": "bigquery",
"parallel_input": {
"@type": "OutputReference",
"output_name": "out",
"step_name": "s1"
},
"table": "table",
"user_name": "WriteToBigQuery/Write/NativeWrite",
"write_disposition": "WRITE_APPEND"
}
}
job_dict = json.loads(str(runner.job))
write_step = [
s for s in job_dict[u'steps']
if s[u'properties'][u'user_name'].startswith('WriteToBigQuery')
][0]
# Delete the @type field because in this case it is a hash which may change
# depending on the pickling version.
step_encoding = write_step[u'properties'][u'encoding']
del step_encoding[u'component_encodings'][0][u'@type']
self.assertEqual(expected_step, write_step)
def test_write_bigquery_failed_translation(self):
"""Tests that WriteToBigQuery cannot have any consumers if replaced."""
runner = DataflowRunner()
self.default_properties.append('--experiments=use_legacy_bq_sink')
with self.assertRaises(Exception):
with beam.Pipeline(runner=runner,
options=PipelineOptions(self.default_properties)) as p:
# pylint: disable=expression-not-assigned
out = p | beam.Create([1]) | beam.io.WriteToBigQuery('some.table')
out['destination_file_pairs'] | 'MyTransform' >> beam.Map(lambda _: _)
class CustomMergingWindowFn(window.WindowFn):
def assign(self, assign_context):
return []
def merge(self, merge_context):
pass
def get_window_coder(self):
return coders.IntervalWindowCoder()
class CustomWindowTypeWindowFn(window.NonMergingWindowFn):
def assign(self, assign_context):
return []
def get_window_coder(self):
return coders.BytesCoder()
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 21:31:58 2020
@author: hexx
"""
# -*- coding: utf-8 -*-
"""
Created on Sat May 9 18:19:50 2020
@author: hexx
"""
import pandas as pd
import numpy as np
import os
from scipy.optimize import minimize, Bounds
from myFunctions import def_add_datashift, createFolder
import matplotlib.pyplot as plt
'''
Data preparation
'''
#weekly fuel demand
today = pd.to_datetime('today')
today = today.strftime("%Y-%m-%d")
# today = '2020-07-02'
PODA_Model = np.load("./PODA_Model_"+today+".npy", allow_pickle=True).item()
# Model_Date = np.load("./Model_Parameter.npy",allow_pickle='TRUE').item()
google_Mobility_Day = PODA_Model['ML_File_Date']
start_Date = '2-15-2020'
end_Date = PODA_Model['ML_File_Date'] #'5-2-2020'
# google_Mobility_Day='2020-05-17'
fuel_Demand_EIA = pd.read_excel('https://www.eia.gov/dnav/pet/xls/PET_CONS_WPSUP_K_W.xls', sheet_name = 'Data 1', header=2)
fuel_Demand_EIA['Date'] = pd.to_datetime(fuel_Demand_EIA['Date'])
fuel_Demand_EIA.rename(columns={'Weekly U.S. Product Supplied of Finished Motor Gasoline (Thousand Barrels per Day)':'Gasoline'}, inplace=True)
fuel_Demand_EIA = fuel_Demand_EIA.drop(columns=['Weekly U.S. Product Supplied of Petroleum Products (Thousand Barrels per Day)',
'Weekly U.S. Product Supplied of Kerosene-Type Jet Fuel (Thousand Barrels per Day)',
'Weekly U.S. Product Supplied of Distillate Fuel Oil (Thousand Barrels per Day)',
'Weekly U.S. Product Supplied of Residual Fuel Oil (Thousand Barrels per Day)',
'Weekly U.S. Product Supplied of Propane and Propylene (Thousand Barrels per Day)',
'Weekly U.S. Product Supplied of Other Oils (Thousand Barrels per Day)'])
fuel_Demand_EIA = fuel_Demand_EIA[(fuel_Demand_EIA['Date'] > pd.to_datetime(start_Date)) & (fuel_Demand_EIA['Date'] < pd.to_datetime(end_Date))]
fuel_Demand_EIA = fuel_Demand_EIA.set_index('Date')
PODA_Model['Fuel_Demand_EIA'] = fuel_Demand_EIA
case = 'mean'
cwd = os.getcwd()
# datafile = './ML Files/State_Level_Data_forML_'+google_Mobility_Day+'.xlsx'
# datafile =str(Path(cwd)) + datafile
# pd_all = pd.read_excel(datafile)
# pd_all = PODA_Model['ML_Data'].reset_index()
# projectionFile ='/MObility_Projection_'+google_Mobility_End_Day+'_'+case+'.xlsx'
# projectionFile = str(Path(cwd))+projectionFile
# mobility_Proj_Data = pd.read_excel(projectionFile)
# data_used = pd_all[['date', 'WeekDay', 'State Name', 'retail_and_recreation', 'grocery_and_pharmacy', 'workplaces', 'parks',
# 'EmergDec', 'SchoolClose', 'NEBusinessClose',
# 'RestaurantRestrict', 'StayAtHome']]
# del pd_all
data_used = PODA_Model['Google_Apple_Mobility_Projection_mean']
data_used = data_used[(data_used['date']> (pd.to_datetime(start_Date)-pd.DateOffset(days=7))) & (data_used['date'] < pd.to_datetime(end_Date))]
data_used = data_used.set_index('date')
NHTS_Category_Share = pd.read_excel('NHTS.xlsx', sheet_name='Category Share')
NHTS_State_Fuel_Share = pd.read_excel('NHTS.xlsx', sheet_name='State Fuel Share')
PODA_Model['NHTS Category Share'] = NHTS_Category_Share
PODA_Model['NHTS State Fuel Share'] = NHTS_State_Fuel_Share
df_StateName_Code = pd.read_excel(cwd+'/US_StateCode_List.xlsx', sheet_name='Sheet1', header=0)
cols = ['State Name']
data_used = data_used.join(df_StateName_Code.set_index(cols), on=cols, how='left')
data_used = data_used.join(NHTS_Category_Share.set_index('State Code'), on='State Code', how='left')
factor = PODA_Model['Google_Mobility_EIA_Factor']
data_used['work factor'] = 1 + data_used['Workplaces']/100*factor[0]
data_used['school factor'] = 1 + data_used['Workplaces']/100*factor[1]
data_used['medical factor'] = 1 + data_used['Grocery and Pharmacy']/100*factor[2]
data_used['shopping factor'] = 1 + data_used['Grocery and Pharmacy']/100*factor[3]
data_used['social factor'] = 1 + data_used['Retail and Recreation']/100*factor[4]
data_used['park factor'] = 1 + data_used['Parks']/100*factor[5]
data_used['transport someone factor'] = 1+ data_used['Retail and Recreation']/100*factor[7] #Workplaces
data_used['meals factor'] = 1 + data_used['Retail and Recreation']/100*factor[6]
data_used['else factor'] = 1+ data_used['Retail and Recreation']/100*factor[7]
data_used['accumulated factor'] = (data_used['Work']*data_used['work factor'] + \
data_used['School/Daycare/Religious activity']*data_used['school factor'] + \
data_used['Medical/Dental services']*data_used['medical factor'] + \
data_used['Shopping/Errands']*data_used['shopping factor'] + \
data_used['Social/Recreational']*factor[8]*data_used['social factor'] + \
data_used['Social/Recreational']*(1-factor[8])*data_used['park factor'] + \
data_used['Meals']*data_used['meals factor'] +\
data_used['Transport someone']*data_used['transport someone factor'] + \
data_used['Something else']*data_used['else factor'])/100 + factor[9]
DayShift = int(factor[10])
aa = data_used.join(NHTS_State_Fuel_Share.set_index('State Name'), on='State Name', how='left')
aa['fuel factor'] = aa['accumulated factor']*aa['Percentage gasoline']
x = aa.sum(level='date')
x = x[['fuel factor','WeekDay']]
x['WeekDay'] = x['WeekDay']/50
# demand_factor = 0.93840494
baseline = 8722 #average of EIA between Jan 03-Feb 07(thousand bpd)
x['Shifted Date'] = x.index+pd.DateOffset(days=DayShift)
# EIA_Fuel = fuel_Demand_EIA.join(x.set_index('Date'), on='Date', how='left')
EIA_Fuel = fuel_Demand_EIA[['Gasoline']]
for i, date_i in enumerate(fuel_Demand_EIA.index):
# print(i, date_i)
Google_weekly = x[(x['Shifted Date']<=pd.to_datetime(date_i)) & (x['Shifted Date']>(pd.to_datetime(date_i)-pd.DateOffset(days=7)))]
EIA_Fuel.loc[date_i, 'Google'] = Google_weekly['fuel factor'].mean(axis =0)
EIA_Fuel = EIA_Fuel.dropna()
EIA_Fuel['fuelpred'] = EIA_Fuel['Google']*baseline
EIA_Fuel_Clean = EIA_Fuel[EIA_Fuel.index != pd.to_datetime('05-08-2020')]
EIA_Fuel_Clean['least_square'] = ((EIA_Fuel_Clean['Gasoline']-EIA_Fuel_Clean['fuelpred'])/EIA_Fuel_Clean['Gasoline'])**2
retu = EIA_Fuel_Clean['least_square'].sum()
print(retu)
fig1 = plt.figure(figsize=(6, 5))
ax1 = fig1.add_subplot(1, 1, 1)
ax1.plot(EIA_Fuel_Clean.index, EIA_Fuel_Clean['fuelpred'], '-o', label=['pred'])
ax1.plot(EIA_Fuel_Clean.index, EIA_Fuel_Clean['Gasoline'], '-o', label=['EIA'])
# ax1.plot(fuel_Demand_EIA.index, x, '-o', label=['orig'])
ax1.set_xlabel('Date')
ax1.set_ylabel('Y')
ax1.set_title('fuel demand: shift: '+str(DayShift)+' days, R2='+str(retu))
ax1.legend()
|
#
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
from ctypes import c_void_p, CFUNCTYPE, Structure, c_int
from .shared import SharedOcfObject
class CleanerOps(Structure):
INIT = CFUNCTYPE(c_int, c_void_p)
KICK = CFUNCTYPE(None, c_void_p)
STOP = CFUNCTYPE(None, c_void_p)
_fields_ = [("init", INIT), ("kick", KICK), ("stop", STOP)]
class Cleaner(SharedOcfObject):
_instances_ = {}
_fields_ = [("cleaner", c_void_p)]
def __init__(self):
self._as_parameter_ = self.cleaner
super().__init__()
@classmethod
def get_ops(cls):
return CleanerOps(init=cls._init, kick=cls._kick, stop=cls._stop)
@staticmethod
@CleanerOps.INIT
def _init(cleaner):
return 0
@staticmethod
@CleanerOps.KICK
def _kick(cleaner):
pass
@staticmethod
@CleanerOps.STOP
def _stop(cleaner):
pass
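# Minimal usage sketch (assumed caller code, not part of this module): the embedding
# test framework obtains the C-callable ops table from the class and hands it to the
# OCF engine when the cache is started.
#
#   cleaner_ops = Cleaner.get_ops()   # CleanerOps struct with init/kick/stop callbacks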
|
INPUT_FEEDBACK = 0
INPUT_BROADCAST_MESSAGE = 1
INPUT_DIRECT_MESSAGE = 2
INPUT_USER_CRITERIA = 3
|
import socket
import random
import time
HOST = '127.0.0.1'
PORT = 9999
sender_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sender_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sender_socket.bind((HOST, PORT))
sender_socket.listen()
m = 3
size = (m ** 2) - 1
def send(sender, addr):
packet = 0
print('Connect \nHost ip:', addr[0], '\nPort:', addr[1])
window = list()
for i in range(0, m + 1):
window.append(i)
while True:
print("---------------------------------------")
var = random.choice([True, False])
if var:
print("Send Packet ", packet)
time.sleep(1)
else:
print("Lost Packet")
print("Resend Packet")
time.sleep(1)
continue
time.sleep(1)
data = sender.recv(1024)
if data.decode() == "Final":
break
ACK = int(data.decode()) % m
print("receive ACK: ", ACK)
print("sliding window: ", window[0], window[1], window[2], ">>", window[1], window[2], window[3])
tmp = window[0]
window.pop(0)
window.append(tmp)
if not data:
print('Disconnect')
break
        # if the ACK has not caught up with the current packet, back off briefly
        # (at most 10 polls of 0.5 s each) before continuing
        count = 0
        while int(ACK) - 1 != packet:
            time.sleep(0.5)
            count += 1
            if count >= 10:
                break
sender.send(data)
packet += 1
if packet > m:
packet -= m
time.sleep(1)
return False
while True:
print('server start')
print('wait')
sender, addr = sender_socket.accept()
if send(sender, addr) == False:
sender.close()
print("Disconnect")
break
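# The matching receiver is not part of this file. A minimal companion sketch
# (illustrative only; the real peer may behave differently) that connects to
# HOST:PORT, ACKs each packet with an incrementing sequence number, and ends the
# session with "Final":
#
#   import socket
#   receiver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   receiver.connect(('127.0.0.1', 9999))
#   for ack in range(1, 20):
#       receiver.send(str(ack).encode())   # ACK for the packet just received
#       receiver.recv(1024)                # echo back from the sender
#   receiver.send(b"Final")
#   receiver.close()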
|
'Accumulate the ELBO from a list of utterances given from "stdin"'
import argparse
import pickle
import sys
import numpy as np
import beer
def setup(parser):
parser.add_argument('-a', '--alis', help='alignment graphs in a "npz" '
'archive')
parser.add_argument('-s', '--acoustic-scale', default=1., type=float,
                        help='scaling factor of the acoustic model')
parser.add_argument('model', help='hmm based model')
parser.add_argument('dataset', help='training data set')
parser.add_argument('out', help='output accumulated ELBO')
def main(args, logger):
logger.debug('load the model')
with open(args.model, 'rb') as f:
model = pickle.load(f)
logger.debug('load the dataset')
with open(args.dataset, 'rb') as f:
dataset = pickle.load(f)
alis = None
if args.alis:
logger.debug('loading alignment graphs')
alis = np.load(args.alis)
elbo = beer.evidence_lower_bound(datasize=dataset.size)
count = 0
for line in sys.stdin:
uttid = line.strip().split()[0]
utt = dataset[uttid]
aligraph = None
if alis:
try:
aligraph = alis[uttid][0]
except KeyError:
logger.warning(f'no alignment graph for utterance "{uttid}"')
logger.debug(f'processing utterance: {utt.id}')
elbo += beer.evidence_lower_bound(model, utt.features,
inference_graph=aligraph,
datasize=dataset.size,
scale=args.acoustic_scale)
count += 1
logger.debug('saving the accumulated ELBO...')
with open(args.out, 'wb') as f:
pickle.dump((elbo, count), f)
logger.info(f'accumulated ELBO over {count} utterances: {float(elbo) / (count * dataset.size) :.3f}.')
if __name__ == "__main__":
main()
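# Illustrative invocation (file names are placeholders): utterance ids are read from
# stdin, one per line, and the accumulated ELBO is pickled to the output path.
#
#   cat uttids.txt | python accumulate_elbo.py -s 0.3 final.mdl train.pkl elbo.pkl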
|
from circuit import *
from dec_base import *
import copy
import sys
import z3
import time
class pwrdipObj(dipObj):
def __init__(self):
super().__init__()
self.flips = []
self.pwrsig = -1
return
class CirDecryptSca(CirDecrypt):
def __init__(self, enc_cir, corrkey):
super().__init__(None, enc_cir, corrkey)
return
def query_pwr_di(self, pwdi):
simmap0 = dict()
simmap1 = dict()
ind = 0
for xid in self.enc_cir.inputs():
sxid = self.enc2sim[xid]
simmap0[sxid] = pwdi.inputs[ind]
simmap1[sxid] = pwdi.inputs[ind] ^ pwdi.flips[ind]
ind += 1
ind = 0
for kid in self.enc_cir.keys():
simmap0[kid] = self.corrkey[ind]
simmap1[kid] = self.corrkey[ind]
ind += 1
self.enc_cir.simulate(simmap0)
self.enc_cir.simulate(simmap1)
pwdi.pwrsig = 0
for wid in self.enc_cir.wires():
if self.enc_cir.is_input(wid) or self.enc_cir.is_key(wid):
continue
if simmap0[wid] != simmap1[wid]:
pwdi.pwrsig += 1
print('dis: pwr={0} '.format(pwdi.pwrsig), end='')
return
# side channel z3 attack
class CirDecryptScaZ3(CirDecryptSca, CirDecryptZ3):
    def __init__(self, sim_cir, enc_cir, corrkey, iteration_limit=20):
        # sim_cir is accepted to mirror the command-line driver below but is not
        # needed by the side-channel attack itself, which works from the encrypted
        # circuit and the correct key
        super().__init__(enc_cir, corrkey)
        self.iteration_limit = iteration_limit
        self.pwrcir = None
        self.pwr_mitter = None
        self.pwr_iteration = 0
        return
def build_pwr_mitter(self, enc_cir):
self.pwrcir = copy.deepcopy(enc_cir)
added2new = dict()
for xid in enc_cir.inputs():
xfliped = self.pwrcir.add_wire(ntype.INTER, enc_cir.name(xid) + '_fl')
xflipin = self.pwrcir.add_wire(ntype.IN, enc_cir.name(xid) + '_$f')
self.pwrcir.add_gate_wids(gfun.XOR, [xflipin, xid], xfliped)
added2new[xid] = xfliped
for kid in enc_cir.keys():
added2new[kid] = kid
self.pwrcir.add_circuit(enc_cir, added2new, '_$f')
#self.pwrcir.write_bench()
ind = 0
for oid in enc_cir.wires():
if enc_cir.is_input(oid) or enc_cir.is_key(oid):
continue
onm1 = enc_cir.name(oid)
onm2 = onm1 + '_$f'
oid1 = self.pwrcir.find_wcheck(onm1)
oid2 = self.pwrcir.find_wcheck(onm2)
pwrxor = self.pwrcir.add_wire(ntype.INTER, 'pwr_{0}'.format(ind))
self.pwrcir.add_gate_wids(gfun.XOR, {oid1, oid2}, pwrxor)
if enc_cir.is_output(oid1):
self.pwrcir.set_wiretype(oid1, ntype.INTER)
self.pwrcir.set_wiretype(oid2, ntype.INTER)
ind += 1
#self.pwrcir.write_bench()
self.pwr_mitter = copy.deepcopy(self.pwrcir)
added2new = dict()
for xid in self.pwrcir.inputs():
added2new[xid] = xid
self.pwr_mitter.add_circuit(self.pwrcir, added2new, '_$1')
self.pwr_mitter.write_bench()
# now add to solver and build non-boolean formula
self.pwrmitt2var = dict()
for kid in enc_cir.keys():
knm0 = enc_cir.name(kid)
knm1 = knm0 + '_$1'
kid0 = self.pwr_mitter.find_wcheck(knm0)
kid1 = self.pwr_mitter.find_wcheck(knm1)
mkid0 = self.mitter.find_wcheck(knm0)
mkid1 = self.mitter.find_wcheck(knm1)
self.pwrmitt2var[kid0] = self.mittwid2var[mkid0]
self.pwrmitt2var[kid1] = self.mittwid2var[mkid1]
add_circuit_to_solver(self.pwr_mitter, self.solver, self.pwrmitt2var)
# add accumulator and comparators
self.pwidvec0 = []
self.pwidvec1 = []
ind = 0
for oid in enc_cir.wires():
if enc_cir.is_input(oid) or enc_cir.is_key(oid):
continue
pwnm0 = 'pwr_{0}'.format(ind)
pwnm1 = pwnm0 + '_$1'
pwid0 = self.pwr_mitter.find_wcheck(pwnm0)
pwid1 = self.pwr_mitter.find_wcheck(pwnm1)
self.pwidvec0.append(pwid0)
self.pwidvec1.append(pwid1)
ind += 1
print(len(self.pwidvec0), len(self.pwidvec1))
self.accu0 = add_count_ones_to_solver(self.pwidvec0, self.solver, self.pwrmitt2var)
self.accu1 = add_count_ones_to_solver(self.pwidvec1, self.solver, self.pwrmitt2var)
self.pwr_dis_var = (self.accu0 != self.accu1)
return
def solve_for_pwr_di(self):
status = self.solver.check([self.pwr_dis_var, self.pwr_const_var])
if status == z3.unsat:
return None
elif status == z3.sat:
pwdi = pwrdipObj()
#print(self.solver.model())
for xid in self.enc_cir.inputs():
xid = self.pwr_mitter.find_wcheck(self.enc_cir.name(xid))
fid = self.pwr_mitter.find_wcheck(self.enc_cir.name(xid) + '_$f')
xv = self.pwrmitt2var[xid]
fv = self.pwrmitt2var[fid]
b = z3.is_true(self.solver.model()[xv])
fb = z3.is_true(self.solver.model()[fv])
#print(int(b), end='')
pwdi.inputs.append(b)
pwdi.flips.append(fb)
print('found power dip: ', end='')
print('x=', end='')
for x in pwdi.inputs:
print(int(x), end='')
print(' f=', end='')
for x in pwdi.flips:
print(int(x), end='')
print()
print(self.solver.model()[self.accu0], ' != ', self.solver.model()[self.accu1])
return pwdi
else:
assert(False)
def add_pwr_constraint(self, pwdi):
varmap = dict()
for kid in self.pwr_mitter.keys():
knm = self.pwr_mitter.name(kid)
mkid = self.mitter.find_wcheck(knm)
varmap[kid] = self.mittwid2var[mkid]
ind = 0
for xid in self.enc_cir.inputs():
xnm = self.enc_cir.name(xid)
pwrxid = self.pwr_mitter.find_wcheck(xnm)
pwrfid = self.pwr_mitter.find_wcheck(xnm + '_$f')
varmap[pwrxid] = self.solver_one if pwdi.inputs[ind] else self.solver_zero
varmap[pwrfid] = self.solver_one if pwdi.flips[ind] else self.solver_zero
ind += 1
add_circuit_to_solver(self.pwr_mitter, self.solver, varmap)
pwrsig0 = add_count_ones_to_solver(self.pwidvec0, self.solver, varmap)
pwrsig1 = add_count_ones_to_solver(self.pwidvec1, self.solver, varmap)
print('adding power constraint at ', pwdi.pwrsig)
self.pwr_const_var = z3.And(pwrsig0 == pwdi.pwrsig, pwrsig1 == pwdi.pwrsig, self.pwr_const_var)
return
def init_solver_pwr(self):
self.init_solver()
self.pwr_const_var = self.solver_one
return
def extract_key_pwr(self):
if not self.solver.check(self.pwr_const_var):
print('constraints not solvable')
exit(1)
extkey = []
for kid in self.enc_cir.keys():
knm = self.enc_cir.name(kid)
mittid = self.mitter.find_wcheck(knm)
kv = self.mittwid2var[mittid]
extkey.append(z3.is_true(self.solver.model()[kv]))
return extkey
def solve_pwr(self):
self.build_mitter(self.enc_cir)
self.build_iocir(self.enc_cir)
self.init_solver_pwr()
self.build_pwr_mitter(self.enc_cir)
while True:
di = self.solve_for_di()
if di is None:
print('functional mitter unsat')
break
pwdi = self.solve_for_pwr_di()
if pwdi is None:
print('power mitter unsat')
break
self.iteration += 1
if self.iteration_limit != -1 and self.iteration >= self.iteration_limit:
print('reached iteration limit')
break
self.query_pwr_di(pwdi)
self.add_pwr_constraint(pwdi)
self.curkey = self.extract_key_pwr()
print('last key=', end='')
for kb in self.curkey:
print(int(kb), end='')
print()
self.check_key(self.curkey)
return
if __name__ == '__main__':
if len(sys.argv) != 5:
print('usage: dec_z3.py <sim_cir> <enc_cir> <correct_key> <iterations>')
exit(1)
sim_cir = Circuit(sys.argv[1])
enc_cir = Circuit(sys.argv[2])
corrkey = [int(x) for x in (sys.argv[3].split('=')[1])]
cdec = CirDecryptScaZ3(sim_cir, enc_cir, corrkey, int(sys.argv[4]))
cdec.solve_pwr()
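# Illustrative command line (circuit file names and key bits are placeholders; the key
# argument must be given as "key=<bits>" because only the text after '=' is parsed):
#
#   python dec_z3.py c432.bench c432_enc.bench key=0110101 20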
|
import sys
import logging
import argparse
from materials_commons.api import get_all_projects
from ..utils.LoggingHelper import LoggingHelper
from ..internal_etl.BuildProjectExperimentWithETL import BuildProjectExperiment
def main(project, user_id, apikey, excel_file_path, data_dir_path):
main_log = logging.getLogger("main")
experiment_name = "Test from excel"
experiment_description = "An experiment built via etl from test data"
main_log.info("user_id = {}".format(user_id))
main_log.info("apikey = {}".format(apikey))
main_log.info("project = '{}' ({})".format(project.name, project.id))
main_log.info("experiment_name = {}".format(experiment_name))
main_log.info("experiment_description = {}".format(experiment_description))
main_log.info("excel_file_path = {}".format(excel_file_path))
main_log.info("data_dir_path = {}".format(data_dir_path))
handler = BuildProjectExperiment(apikey)
main_log.info("Starting ETL")
etl_status = handler.build(excel_file_path, data_dir_path, project.id,
experiment_name, exp_description=experiment_description)
main_log.info("ETL status = {}".format(etl_status))
if __name__ == "__main__":
LoggingHelper().set_root()
startup_log = logging.getLogger("main-setup")
argv = sys.argv
parser = argparse.ArgumentParser(
description='Run the ETL process with test data as loaded into a give project')
parser.add_argument('--user', type=str, help="Materials Commons user id")
parser.add_argument('--apikey', type=str, help="Materials Commons user's apikey")
parser.add_argument('--name', type=str, help="Name of project")
parser.add_argument('--input', type=str, help="Input Spreadsheet File (path relative to project)")
parser.add_argument('--data', type=str, help="Data Directory (path relative to project)")
args = parser.parse_args(argv[1:])
if not args.user:
print("You must specify Materials Commons user id. Argument not found.")
parser.print_help()
exit(-1)
if not args.apikey:
print("You must specify the user's apikey. Argument not found.")
parser.print_help()
exit(-1)
if not args.name:
print("You must specify the name of the project containing the spreadsheet and data. Argument not found.")
parser.print_help()
exit(-1)
if not args.input:
print("You must specify the path for the spreadsheet file (relative to the base path). Argument not found.")
parser.print_help()
exit(-1)
if not args.data:
print("You must specify the path for the data directory (relative to the base path). Argument not found.")
parser.print_help()
exit(-1)
startup_log.info("Searching for project with name-match = {}".format(args.name))
project_list = get_all_projects(apikey=args.apikey)
project_selected = None
for probe in project_list:
if args.name in probe.name:
if project_selected:
print("Found multiple matches for {}".format(args.name))
print("You must specify a unique project name, or name substring.")
parser.print_help()
exit(-1)
project_selected = probe
if not project_selected:
print("Found no matches for {}".format(args.name))
print("You must specify a unique project name, or name substring.")
parser.print_help()
exit(-1)
startup_log.info("Found match with name-match = {}; project.name = {}; id = {}".
format(args.name, project_selected.name, project_selected.id))
main(project_selected, args.user, args.apikey, args.input, args.data)
startup_log.info("Done; project name = {}".format(project_selected.name))
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Relaxed OneHotCategorical distribution classes."""
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import chain as chain_bijector
from tensorflow_probability.python.bijectors import exp as exp_bijector
from tensorflow_probability.python.bijectors import softmax_centered as softmax_centered_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
class ExpRelaxedOneHotCategorical(distribution.AutoCompositeTensorDistribution):
"""ExpRelaxedOneHotCategorical distribution with temperature and logits.
An ExpRelaxedOneHotCategorical distribution is a log-transformed
RelaxedOneHotCategorical distribution. The RelaxedOneHotCategorical is a
distribution over random probability vectors, vectors of positive real
values that sum to one, which continuously approximates a OneHotCategorical.
The degree of approximation is controlled by a temperature: as the temperature
goes to 0 the RelaxedOneHotCategorical becomes discrete with a distribution
described by the logits, as the temperature goes to infinity the
RelaxedOneHotCategorical becomes the constant distribution that is identically
the constant vector of (1/event_size, ..., 1/event_size).
Because computing log-probabilities of the RelaxedOneHotCategorical can
suffer from underflow issues, this class is one solution for loss
functions that depend on log-probabilities, such as the KL Divergence found
in the variational autoencoder loss. The KL divergence between two
distributions is invariant under invertible transformations, so evaluating
KL divergences of ExpRelaxedOneHotCategorical samples, which are always
followed by a `tf.exp` op, is equivalent to evaluating KL divergences of
RelaxedOneHotCategorical samples. See the appendix of Maddison et al., 2016
for more mathematical details, where this distribution is called the
ExpConcrete.
#### Examples
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution. If those samples
are followed by a `tf.exp` op, then they are distributed as a relaxed onehot
categorical.
```python
temperature = 0.5
p = [0.1, 0.5, 0.4]
dist = ExpRelaxedOneHotCategorical(temperature, probs=p)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
# RelaxedOneHotCategorical(temperature, probs=p)
```
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution.
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
  # RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. Because the temperature is very low, samples from
this distribution are almost discrete, with one component almost 0 and the
others very negative. The 2nd class is the most likely to be the largest
component in samples drawn from this distribution.
```python
temperature = 1e-5
logits = [-2, 2, 0]
dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
  # RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, whose exp approximates a 3-class one-hot
categorical distribution. Because the temperature is very high, samples from
this distribution are usually close to the (-log(3), -log(3), -log(3)) vector.
The 2nd class is still the most likely to be the largest component
in samples drawn from this distribution.
```python
temperature = 10
logits = [-2, 2, 0]
dist = ExpRelaxedOneHotCategorical(temperature, logits=logits)
samples = dist.sample()
exp_samples = tf.exp(samples)
# exp_samples has the same distribution as samples from
  # RelaxedOneHotCategorical(temperature, logits=logits)
```
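  As a further sketch (hypothetical `q` and `p` below), a Monte Carlo estimate
  of a KL divergence between two relaxations can be formed directly from
  `log_prob`, without exponentiating the samples:
  ```python
  temperature = 0.5
  q = ExpRelaxedOneHotCategorical(temperature, logits=[-2., 2., 0.])
  p = ExpRelaxedOneHotCategorical(temperature, logits=[0., 0., 0.])
  z = q.sample(100)  # samples live in log-space
  # Monte Carlo estimate of KL[q || p]; equal to the KL between the
  # corresponding RelaxedOneHotCategorical distributions, since exp is
  # invertible.
  kl_estimate = tf.reduce_mean(q.log_prob(z) - p.log_prob(z))
  ```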
Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
A Continuous Relaxation of Discrete Random Variables. 2016.
"""
def __init__(
self,
temperature,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name='ExpRelaxedOneHotCategorical'):
"""Initialize ExpRelaxedOneHotCategorical using class log-probabilities.
Args:
temperature: A `Tensor`, representing the temperature of one or more
distributions. The temperature values must be positive, and the shape
must broadcast against `(logits or probs)[..., 0]`.
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of one or many distributions. The first `N - 1` dimensions index into a
batch of independent distributions and the last dimension represents a
vector of logits for each class. Only one of `logits` or `probs` should
be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
of one or many distributions. The first `N - 1` dimensions index into a
batch of independent distributions and the last dimension represents a
vector of probabilities for each class. Only one of `logits` or `probs`
should be passed in.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([logits, probs, temperature], tf.float32)
self._temperature = tensor_util.convert_nonref_to_tensor(
temperature, dtype_hint=dtype, name='temperature')
self._logits = tensor_util.convert_nonref_to_tensor(
logits, dtype_hint=dtype, name='logits')
self._probs = tensor_util.convert_nonref_to_tensor(
probs, dtype_hint=dtype, name='probs')
if (self._probs is None) == (self._logits is None):
raise ValueError('Must pass `probs` or `logits`, but not both.')
super(ExpRelaxedOneHotCategorical, self).__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
temperature=parameter_properties.ParameterProperties(
shape_fn=lambda sample_shape: sample_shape[:-1],
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
logits=parameter_properties.ParameterProperties(event_ndims=1),
probs=parameter_properties.ParameterProperties(
event_ndims=1,
default_constraining_bijector_fn=softmax_centered_bijector
.SoftmaxCentered,
is_preferred=False))
# pylint: enable=g-long-lambda
@property
@deprecation.deprecated(
'2019-10-01', 'The `event_size` property is deprecated. Use '
'`tf.shape(self.probs if self.logits is None else self.logits)[-1]` '
'instead.')
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self._event_size()
def _event_size(self, logits=None):
param = logits
if param is None:
param = self._logits if self._logits is not None else self._probs
if param.shape is not None:
event_size = tf.compat.dimension_value(param.shape[-1])
if event_size is not None:
return event_size
return tf.shape(param)[-1]
@property
def temperature(self):
"""Batchwise temperature tensor of a RelaxedCategorical."""
return self._temperature
@property
def logits(self):
"""Input argument `logits`."""
return self._logits
@property
def probs(self):
"""Input argument `probs`."""
return self._probs
def _event_shape_tensor(self, logits=None):
param = logits
if param is None:
param = self._logits if self._logits is not None else self._probs
return ps.shape(param)[-1:]
def _event_shape(self):
param = self._logits if self._logits is not None else self._probs
return tensorshape_util.with_rank(param.shape[-1:], rank=1)
def _sample_n(self, n, seed=None):
temperature = tf.convert_to_tensor(self.temperature)
logits = self._logits_parameter_no_checks()
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use
# `np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny` because it is the
# smallest, positive, 'normal' number. A 'normal' number is such that the
# mantissa has an implicit leading 1. Normal, positive numbers x, y have the
# reasonable property that, `x + y >= max(x, y)`. In this case, a subnormal
# number (i.e., np.nextafter) can cause us to sample 0.
uniform_shape = ps.concat(
[[n],
self._batch_shape_tensor(temperature=temperature, logits=logits),
self._event_shape_tensor(logits=logits)], 0)
uniform = samplers.uniform(
shape=uniform_shape,
minval=np.finfo(dtype_util.as_numpy_dtype(self.dtype)).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
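    # Gumbel-max trick: adding Gumbel(0, 1) noise to the logits and taking an
    # argmax would produce an exact Categorical sample; dividing by the
    # temperature and taking log_softmax instead yields the relaxed
    # (ExpConcrete) sample.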
gumbel = -tf.math.log(-tf.math.log(uniform))
noisy_logits = (gumbel + logits) / temperature[..., tf.newaxis]
return tf.math.log_softmax(noisy_logits)
def _log_prob(self, x):
temperature = tf.convert_to_tensor(self.temperature)
logits = self._logits_parameter_no_checks()
# broadcast logits or x if need be.
if (not tensorshape_util.is_fully_defined(x.shape) or
not tensorshape_util.is_fully_defined(logits.shape) or
x.shape != logits.shape):
logits = tf.ones_like(x, dtype=logits.dtype) * logits
x = tf.ones_like(logits, dtype=x.dtype) * x
# compute the normalization constant
k = tf.cast(self._event_size(logits), x.dtype)
log_norm_const = (
tf.math.lgamma(k) + (k - 1.) * tf.math.log(temperature))
# compute the unnormalized density
log_softmax = tf.math.log_softmax(logits - x * temperature[..., tf.newaxis])
log_unnorm_prob = tf.reduce_sum(log_softmax, axis=[-1], keepdims=False)
# combine unnormalized density with normalization constant
return log_norm_const + log_unnorm_prob
def logits_parameter(self, name=None):
"""Logits vec computed from non-`None` input arg (`probs` or `logits`)."""
with self._name_and_control_scope(name or 'logits_parameter'):
return self._logits_parameter_no_checks()
def _logits_parameter_no_checks(self):
if self._logits is None:
return tf.math.log(self._probs)
return tensor_util.identity_as_tensor(self._logits)
def probs_parameter(self, name=None):
"""Probs vec computed from non-`None` input arg (`probs` or `logits`)."""
with self._name_and_control_scope(name or 'probs_parameter'):
return self._probs_parameter_no_checks()
def _probs_parameter_no_checks(self):
if self._logits is None:
return tensor_util.identity_as_tensor(self._probs)
return tf.math.softmax(self._logits)
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_non_positive(
x,
message=('Samples must be less than or equal to `0` for '
'`ExpRelaxedOneHotCategorical` or `1` for '
'`RelaxedOneHotCategorical`.')))
assertions.append(assert_util.assert_near(
tf.zeros([], dtype=self.dtype), tf.reduce_logsumexp(x, axis=[-1]),
        message=('Final dimension of samples must sum to `0` for '
                 '`ExpRelaxedOneHotCategorical` or `1` '
                 'for `RelaxedOneHotCategorical`.')))
return assertions
def _parameter_control_dependencies(self, is_init):
assertions = []
logits = self._logits
probs = self._probs
param, name = (probs, 'probs') if logits is None else (logits, 'logits')
# In init, we can always build shape and dtype checks because
# we assume shape doesn't change for Variable backed args.
if is_init:
if not dtype_util.is_floating(param.dtype):
        raise TypeError('Argument `{}` must have floating type.'.format(name))
msg = 'Argument `{}` must have rank at least 1.'.format(name)
shape_static = tensorshape_util.dims(param.shape)
if shape_static is not None:
if len(shape_static) < 1:
raise ValueError(msg)
elif self.validate_args:
param = tf.convert_to_tensor(param)
assertions.append(
assert_util.assert_rank_at_least(param, 1, message=msg))
msg1 = 'Argument `{}` must have final dimension >= 1.'.format(name)
msg2 = 'Argument `{}` must have final dimension <= {}.'.format(
name, dtype_util.max(tf.int32))
event_size = shape_static[-1] if shape_static is not None else None
if event_size is not None:
if event_size < 1:
raise ValueError(msg1)
if event_size > dtype_util.max(tf.int32):
raise ValueError(msg2)
elif self.validate_args:
param = tf.convert_to_tensor(param)
assertions.append(assert_util.assert_greater_equal(
tf.shape(param)[-1:], 1, message=msg1))
# NOTE: For now, we leave out a runtime assertion that
# `tf.shape(param)[-1] <= tf.int32.max`. An earlier `tf.shape` call
# will fail before we get to this point.
if not self.validate_args:
assert not assertions # Should never happen.
return []
if is_init != tensor_util.is_ref(self.temperature):
assertions.append(assert_util.assert_positive(self.temperature))
if probs is not None:
probs = param # reuse tensor conversion from above
if is_init != tensor_util.is_ref(probs):
probs = tf.convert_to_tensor(probs)
one = tf.ones([], dtype=probs.dtype)
assertions.extend([
assert_util.assert_non_negative(probs),
assert_util.assert_less_equal(probs, one),
assert_util.assert_near(
tf.reduce_sum(probs, axis=-1), one,
message='Argument `probs` must sum to 1.'),
])
return assertions
def _default_event_space_bijector(self):
# TODO(b/145620027) Finalize choice of bijector.
return chain_bijector.Chain([
exp_bijector.Log(validate_args=self.validate_args),
softmax_centered_bijector.SoftmaxCentered(
validate_args=self.validate_args),
], validate_args=self.validate_args)
class RelaxedOneHotCategorical(
transformed_distribution.TransformedDistribution,
distribution.AutoCompositeTensorDistribution):
"""RelaxedOneHotCategorical distribution with temperature and logits.
The RelaxedOneHotCategorical is a distribution over random probability
vectors, vectors of positive real values that sum to one, which continuously
  approximates a OneHotCategorical. The degree of approximation is controlled
  by a temperature: as the temperature goes to 0, the RelaxedOneHotCategorical
  becomes discrete with a distribution described by the `logits` or `probs`
  parameters; as the temperature goes to infinity, the RelaxedOneHotCategorical
  becomes the constant distribution that is identically the vector
  (1/event_size, ..., 1/event_size).
The RelaxedOneHotCategorical distribution was concurrently introduced as the
Gumbel-Softmax (Jang et al., 2016) and Concrete (Maddison et al., 2016)
distributions for use as a reparameterized continuous approximation to the
`Categorical` one-hot distribution. If you use this distribution, please cite
both papers.
#### Examples
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution.
```python
temperature = 0.5
p = [0.1, 0.5, 0.4]
dist = RelaxedOneHotCategorical(temperature, probs=p)
```
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. The 2nd class is the most likely to be the
largest component in samples drawn from this distribution.
```python
temperature = 0.5
logits = [-2, 2, 0]
dist = RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. Because the temperature is very low, samples from
this distribution are almost discrete, with one component almost 1 and the
others nearly 0. The 2nd class is the most likely to be the largest component
in samples drawn from this distribution.
```python
temperature = 1e-5
logits = [-2, 2, 0]
dist = RelaxedOneHotCategorical(temperature, logits=logits)
```
Creates a continuous distribution, which approximates a 3-class one-hot
categorical distribution. Because the temperature is very high, samples from
this distribution are usually close to the (1/3, 1/3, 1/3) vector. The 2nd
class is still the most likely to be the largest component
in samples drawn from this distribution.
```python
temperature = 10
logits = [-2, 2, 0]
dist = RelaxedOneHotCategorical(temperature, logits=logits)
```
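  Samples are probability vectors, so they can stand in for one-hot vectors
  while remaining differentiable (a minimal sketch):
  ```python
  temperature = 0.5
  logits = [-2., 2., 0.]
  dist = RelaxedOneHotCategorical(temperature, logits=logits)
  x = dist.sample()          # positive entries that sum to 1
  log_p = dist.log_prob(x)   # differentiable w.r.t. logits and temperature
  ```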
Eric Jang, Shixiang Gu, and Ben Poole. Categorical Reparameterization with
Gumbel-Softmax. 2016.
Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The Concrete Distribution:
A Continuous Relaxation of Discrete Random Variables. 2016.
"""
def __init__(
self,
temperature,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name='RelaxedOneHotCategorical'):
"""Initialize RelaxedOneHotCategorical using class log-probabilities.
Args:
      temperature: A 0-D `Tensor`, representing the temperature
of a set of RelaxedOneHotCategorical distributions. The temperature
should be positive.
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of RelaxedOneHotCategorical distributions. The first
`N - 1` dimensions index into a batch of independent distributions and
the last dimension represents a vector of logits for each class. Only
one of `logits` or `probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of RelaxedOneHotCategorical distributions. The first `N - 1`
dimensions index into a batch of independent distributions and the last
dimension represents a vector of probabilities for each class. Only one
of `logits` or `probs` should be passed in.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
allow_nan_stats: Python `bool`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: A name for this distribution (optional).
"""
parameters = dict(locals())
dist = ExpRelaxedOneHotCategorical(temperature,
logits=logits,
probs=probs,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
super(RelaxedOneHotCategorical, self).__init__(dist,
exp_bijector.Exp(),
validate_args=validate_args,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
temperature=parameter_properties.ParameterProperties(
shape_fn=lambda sample_shape: sample_shape[:-1],
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
logits=parameter_properties.ParameterProperties(event_ndims=1),
probs=parameter_properties.ParameterProperties(
event_ndims=1,
default_constraining_bijector_fn=softmax_centered_bijector
.SoftmaxCentered,
is_preferred=False))
# pylint: enable=g-long-lambda
@property
def temperature(self):
"""Batchwise temperature tensor of a RelaxedCategorical."""
return self.distribution.temperature
@property
@deprecation.deprecated(
'2019-10-01', 'The `event_size` property is deprecated. Use '
'`tf.shape(self.probs if self.logits is None else self.logits)[-1]` '
'instead.')
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self.distribution.event_size
@property
def probs(self):
"""Input argument `probs`."""
return self.distribution.probs
@property
def logits(self):
"""Input argument `logits`."""
return self.distribution.logits
experimental_is_sharded = False
def logits_parameter(self, name=None):
"""Logits vec computed from non-`None` input arg (`probs` or `logits`)."""
return self.distribution.logits_parameter(name)
def probs_parameter(self, name=None):
"""Probs vec computed from non-`None` input arg (`probs` or `logits`)."""
return self.distribution.probs_parameter(name)
def _default_event_space_bijector(self):
return softmax_centered_bijector.SoftmaxCentered(
validate_args=self.validate_args)
|
"""
DriverFactory class
NOTE: Change this class as you add support for:
1. SauceLabs/BrowserStack
2. More browsers like Opera
"""
import dotenv,os,sys,requests,json
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome import service
from selenium.webdriver.remote.webdriver import RemoteConnection
from appium import webdriver as mobile_webdriver
from conf import remote_credentials
from conf import opera_browser_conf
class DriverFactory():
def __init__(self,browser='ff',browser_version=None,os_name=None):
"Constructor for the Driver factory"
self.browser=browser
self.browser_version=browser_version
self.os_name=os_name
def get_web_driver(self,remote_flag,os_name,os_version,browser,browser_version,remote_project_name,remote_build_name):
"Return the appropriate driver"
if (remote_flag.lower() == 'y'):
try:
if remote_credentials.REMOTE_BROWSER_PLATFORM == 'BS':
web_driver = self.run_browserstack(os_name,os_version,browser,browser_version,remote_project_name,remote_build_name)
else:
web_driver = self.run_sauce_lab(os_name,os_version,browser,browser_version)
except Exception as e:
print("\nException when trying to get remote webdriver:%s"%sys.modules[__name__])
print("Python says:%s"%str(e))
print("SOLUTION: It looks like you are trying to use a cloud service provider (BrowserStack or Sauce Labs) to run your test. \nPlease make sure you have updated ./conf/remote_credentials.py with the right credentials and try again. \nTo use your local browser please run the test with the -M N flag.\n")
elif (remote_flag.lower() == 'n'):
web_driver = self.run_local(os_name,os_version,browser,browser_version)
else:
print("DriverFactory does not know the browser: ",browser)
web_driver = None
return web_driver
def run_browserstack(self,os_name,os_version,browser,browser_version,remote_project_name,remote_build_name):
"Run the test in browser stack when remote flag is 'Y'"
#Get the browser stack credentials from browser stack credentials file
USERNAME = remote_credentials.USERNAME
PASSWORD = remote_credentials.ACCESS_KEY
if browser.lower() == 'ff' or browser.lower() == 'firefox':
desired_capabilities = DesiredCapabilities.FIREFOX
elif browser.lower() == 'ie':
desired_capabilities = DesiredCapabilities.INTERNETEXPLORER
elif browser.lower() == 'chrome':
desired_capabilities = DesiredCapabilities.CHROME
elif browser.lower() == 'opera':
desired_capabilities = DesiredCapabilities.OPERA
elif browser.lower() == 'safari':
desired_capabilities = DesiredCapabilities.SAFARI
desired_capabilities['os'] = os_name
desired_capabilities['os_version'] = os_version
desired_capabilities['browser_version'] = browser_version
if remote_project_name is not None:
desired_capabilities['project'] = remote_project_name
if remote_build_name is not None:
desired_capabilities['build'] = remote_build_name+"_"+str(datetime.now().strftime("%c"))
return webdriver.Remote(RemoteConnection("http://%s:%s@hub-cloud.browserstack.com/wd/hub"%(USERNAME,PASSWORD),resolve_ip= False),
desired_capabilities=desired_capabilities)
def run_sauce_lab(self,os_name,os_version,browser,browser_version):
"Run the test in sauce labs when remote flag is 'Y'"
#Get the sauce labs credentials from sauce.credentials file
USERNAME = remote_credentials.USERNAME
PASSWORD = remote_credentials.ACCESS_KEY
if browser.lower() == 'ff' or browser.lower() == 'firefox':
desired_capabilities = DesiredCapabilities.FIREFOX
elif browser.lower() == 'ie':
desired_capabilities = DesiredCapabilities.INTERNETEXPLORER
elif browser.lower() == 'chrome':
desired_capabilities = DesiredCapabilities.CHROME
elif browser.lower() == 'opera':
desired_capabilities = DesiredCapabilities.OPERA
elif browser.lower() == 'safari':
desired_capabilities = DesiredCapabilities.SAFARI
desired_capabilities['version'] = browser_version
desired_capabilities['platform'] = os_name + ' '+os_version
return webdriver.Remote(command_executor="http://%s:%s@ondemand.saucelabs.com:80/wd/hub"%(USERNAME,PASSWORD),
desired_capabilities= desired_capabilities)
def run_local(self,os_name,os_version,browser,browser_version):
"Return the local driver"
local_driver = None
if browser.lower() == "ff" or browser.lower() == 'firefox':
local_driver = webdriver.Firefox()
elif browser.lower() == "ie":
local_driver = webdriver.Ie()
elif browser.lower() == "chrome":
local_driver = webdriver.Chrome()
elif browser.lower() == "opera":
opera_options = None
try:
opera_browser_location = opera_browser_conf.location
options = webdriver.ChromeOptions()
options.binary_location = opera_browser_location # path to opera executable
local_driver = webdriver.Opera(options=options)
except Exception as e:
print("\nException when trying to get remote webdriver:%s"%sys.modules[__name__])
print("Python says:%s"%str(e))
if 'no Opera binary' in str(e):
print("SOLUTION: It looks like you are trying to use Opera Browser. Please update Opera Browser location under conf/opera_browser_conf.\n")
elif browser.lower() == "safari":
local_driver = webdriver.Safari()
return local_driver
def run_mobile(self,mobile_os_name,mobile_os_version,device_name,app_package,app_activity,remote_flag,device_flag,app_name,app_path):
"Setup mobile device"
#Get the remote credentials from remote_credentials file
USERNAME = remote_credentials.USERNAME
PASSWORD = remote_credentials.ACCESS_KEY
desired_capabilities = {}
desired_capabilities['platformName'] = mobile_os_name
desired_capabilities['platformVersion'] = mobile_os_version
desired_capabilities['deviceName'] = device_name
if (remote_flag.lower() == 'y'):
desired_capabilities['idleTimeout'] = 300
desired_capabilities['name'] = 'Appium Python Test'
try:
if remote_credentials.REMOTE_BROWSER_PLATFORM == 'SL':
                    self.sauce_upload(app_path,app_name) #Sauce Labs expects the app to be uploaded to Sauce storage every time the test is run
                    #Strip any spaces from the app name before building the sauce-storage URL
if ' ' in app_name:
app_name = app_name.replace(' ','')
desired_capabilities['app'] = 'sauce-storage:'+app_name
desired_capabilities['autoAcceptAlert']= 'true'
driver = mobile_webdriver.Remote(command_executor="http://%s:%s@ondemand.saucelabs.com:80/wd/hub"%(USERNAME,PASSWORD),
desired_capabilities= desired_capabilities)
else:
desired_capabilities['realMobile'] = 'true'
desired_capabilities['app'] = self.browser_stack_upload(app_name,app_path) #upload the application to the Browserstack Storage
driver = mobile_webdriver.Remote(command_executor="http://%s:%s@hub.browserstack.com:80/wd/hub"%(USERNAME,PASSWORD),
desired_capabilities= desired_capabilities)
except Exception as e:
print ('\033[91m'+"\nException when trying to get remote webdriver:%s"%sys.modules[__name__]+'\033[0m')
print ('\033[91m'+"Python says:%s"%str(e)+'\033[0m')
print ('\033[92m'+"SOLUTION: It looks like you are trying to use a cloud service provider (BrowserStack or Sauce Labs) to run your test. \nPlease make sure you have updated ./conf/remote_credentials.py with the right credentials and try again. \nTo use your local browser please run the test with the -M N flag.\n"+'\033[0m')
else:
try:
desired_capabilities['appPackage'] = app_package
desired_capabilities['appActivity'] = app_activity
if device_flag.lower() == 'y':
driver = mobile_webdriver.Remote('http://localhost:4723/wd/hub', desired_capabilities)
else:
desired_capabilities['app'] = os.path.join(app_path,app_name)
driver = mobile_webdriver.Remote('http://localhost:4723/wd/hub', desired_capabilities)
except Exception as e:
print ('\033[91m'+"\nException when trying to get remote webdriver:%s"%sys.modules[__name__]+'\033[0m')
print ('\033[91m'+"Python says:%s"%str(e)+'\033[0m')
print ('\033[92m'+"SOLUTION: It looks like you are trying to run test cases with Local Appium Setup. \nPlease make sure to run Appium Server and try again.\n"+'\033[0m')
return driver
def sauce_upload(self,app_path,app_name):
"Upload the apk to the sauce temperory storage"
USERNAME = remote_credentials.USERNAME
PASSWORD = remote_credentials.ACCESS_KEY
result_flag=False
try:
headers = {'Content-Type':'application/octet-stream'}
params = os.path.join(app_path,app_name)
fp = open(params,'rb')
data = fp.read()
fp.close()
            #Strip any spaces from the app name before uploading
            if ' ' in app_name:
                app_name = app_name.replace(' ','')
                print ("The app file name contained spaces; removed them and using the file name:%s"%app_name)
response = requests.post('https://saucelabs.com/rest/v1/storage/%s/%s?overwrite=true'%(USERNAME,app_name),headers=headers,data=data,auth=(USERNAME,PASSWORD))
if response.status_code == 200:
result_flag=True
print ("App successfully uploaded to sauce storage")
except Exception as e:
print (str(e))
return result_flag
def browser_stack_upload(self,app_name,app_path):
"Upload the apk to the BrowserStack storage if its not done earlier"
USERNAME = remote_credentials.USERNAME
ACESS_KEY = remote_credentials.ACCESS_KEY
try:
#Upload the apk
apk_file = os.path.join(app_path,app_name)
files = {'file': open(apk_file,'rb')}
            post_response = requests.post("https://api.browserstack.com/app-automate/upload",files=files,auth=(USERNAME,ACCESS_KEY))
post_json_data = json.loads(post_response.text)
#Get the app url of the newly uploaded apk
app_url = post_json_data['app_url']
except Exception as e:
print(str(e))
return app_url
def get_firefox_driver(self):
"Return the Firefox driver"
driver = webdriver.Firefox(firefox_profile=self.get_firefox_profile())
return driver
def get_firefox_profile(self):
"Return a firefox profile"
return self.set_firefox_profile()
def set_firefox_profile(self):
"Setup firefox with the right preferences and return a profile"
try:
self.download_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','downloads'))
if not os.path.exists(self.download_dir):
os.makedirs(self.download_dir)
except Exception as e:
print("Exception when trying to set directory structure")
print(str(e))
profile = webdriver.firefox.firefox_profile.FirefoxProfile()
set_pref = profile.set_preference
set_pref('browser.download.folderList', 2)
set_pref('browser.download.dir', self.download_dir)
set_pref('browser.download.useDownloadDir', True)
set_pref('browser.helperApps.alwaysAsk.force', False)
set_pref('browser.helperApps.neverAsk.openFile', 'text/csv,application/octet-stream,application/pdf')
set_pref('browser.helperApps.neverAsk.saveToDisk', 'text/csv,application/vnd.ms-excel,application/pdf,application/csv,application/octet-stream')
set_pref('plugin.disable_full_page_plugin_for_types', 'application/pdf')
set_pref('pdfjs.disabled',True)
return profile
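# Usage sketch (hypothetical values; mirrors get_web_driver's remote_flag-based
# dispatch between a cloud provider and a local browser):
#   factory = DriverFactory(browser='chrome')
#   driver = factory.get_web_driver(remote_flag='n', os_name=None,
#                                   os_version=None, browser='chrome',
#                                   browser_version=None,
#                                   remote_project_name=None,
#                                   remote_build_name=None)
#   driver.get('https://example.com')
#   driver.quit()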
|
from __future__ import absolute_import
import mock
import pytest
from detect_secrets.plugins.core import initialize
from detect_secrets.plugins.high_entropy_strings import Base64HighEntropyString
from detect_secrets.plugins.high_entropy_strings import HexHighEntropyString
class TestFromPluginClassname(object):
def test_success(self):
plugin = initialize.from_plugin_classname(
'HexHighEntropyString',
hex_limit=4,
)
assert isinstance(plugin, HexHighEntropyString)
assert plugin.entropy_limit == 4
def test_fails_if_not_base_plugin(self):
with pytest.raises(TypeError):
initialize.from_plugin_classname(
'log',
)
def test_fails_on_bad_initialization(self):
with mock.patch.object(
HexHighEntropyString,
'__init__',
side_effect=TypeError,
), pytest.raises(
TypeError,
):
initialize.from_plugin_classname(
'HexHighEntropyString',
hex_limit=4,
)
class TestFromSecretType(object):
def setup(self):
self.settings = [
{
'name': 'Base64HighEntropyString',
'base64_limit': 3,
},
{
'name': 'PrivateKeyDetector',
},
]
def test_success(self):
plugin = initialize.from_secret_type(
'Base64 High Entropy String',
self.settings,
)
assert isinstance(plugin, Base64HighEntropyString)
assert plugin.entropy_limit == 3
def test_failure(self):
assert not initialize.from_secret_type(
'some random secret_type',
self.settings,
)
|
import re, sys
from rpython.jit.metainterp.resoperation import opname
from rpython.jit.tool.oparser import OpParser
from rpython.tool.logparser import parse_log_file, extract_category
from copy import copy
def parse_code_data(arg):
name = None
lineno = 0
filename = None
bytecode_no = 0
bytecode_name = None
m = re.search('<code object ([<>\w]+)[\.,] file \'(.+?)\'[\.,] line (\d+)> #(\d+) (\w+)',
arg)
if m is None:
# a non-code loop, like StrLiteralSearch or something
if arg:
bytecode_name = arg
else:
name, filename, lineno, bytecode_no, bytecode_name = m.groups()
return name, bytecode_name, filename, int(lineno), int(bytecode_no)
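# Example (sketch): for a typical debug_merge_point argument such as
#   "<code object f. file 'x.py'. line 3> #0 LOAD_FAST"
# parse_code_data returns ('f', 'LOAD_FAST', 'x.py', 3, 0); for a non-code
# argument such as 'StrLiteralSearch' it returns
# (None, 'StrLiteralSearch', None, 0, 0).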
class Op(object):
bridge = None
offset = None
asm = None
failargs = ()
def __init__(self, name, args, res, descr, failargs=None):
self.name = name
self.args = args
self.res = res
self.descr = descr
self._is_guard = name.startswith('guard_')
if self._is_guard:
self.guard_no = int(self.descr[len('<Guard0x'):-1], 16)
self.failargs = failargs
def as_json(self):
d = {
'name': self.name,
'args': self.args,
'res': self.res,
}
if self.descr is not None:
d['descr'] = self.descr
if self.bridge is not None:
d['bridge'] = self.bridge.as_json()
if self.asm is not None:
d['asm'] = self.asm
return d
def setfailargs(self, failargs):
self.failargs = failargs
def getarg(self, i):
return self.args[i]
def getargs(self):
return self.args[:]
def getres(self):
return self.res
def getdescr(self):
return self.descr
def is_guard(self):
return self._is_guard
def repr(self):
args = self.getargs()
if self.descr is not None:
args.append('descr=%s' % self.descr)
arglist = ', '.join(args)
if self.res is not None:
return '%s = %s(%s)' % (self.getres(), self.name, arglist)
else:
return '%s(%s)' % (self.name, arglist)
def __repr__(self):
return self.repr()
class SimpleParser(OpParser):
# factory method
Op = Op
use_mock_model = True
def postprocess(self, loop, backend_dump=None, backend_tp=None,
dump_start=0):
if backend_dump is not None:
raw_asm = self._asm_disassemble(backend_dump.decode('hex'),
backend_tp, dump_start)
asm = []
start = 0
for elem in raw_asm:
if len(elem.split("\t")) < 3:
continue
e = elem.split("\t")
adr = e[0]
v = elem # --- more compactly: " ".join(e[2:])
if not start:
start = int(adr.strip(":"), 16)
ofs = int(adr.strip(":"), 16) - start
if ofs >= 0:
asm.append((ofs, v.strip("\n")))
asm_index = 0
for i, op in enumerate(loop.operations):
end = 0
j = i + 1
while end == 0:
if j == len(loop.operations):
end = loop.last_offset
break
if loop.operations[j].offset is None:
j += 1
else:
end = loop.operations[j].offset
if op.offset is not None:
while asm[asm_index][0] < op.offset:
asm_index += 1
end_index = asm_index
while asm[end_index][0] < end and end_index < len(asm) - 1:
end_index += 1
op.asm = '\n'.join([asm[i][1] for i in range(asm_index, end_index)])
return loop
def _asm_disassemble(self, d, origin_addr, tp):
from rpython.jit.backend.tool.viewcode import machine_code_dump
return list(machine_code_dump(d, tp, origin_addr))
@classmethod
def parse_from_input(cls, input, **kwds):
parser = cls(input, None, {}, 'lltype', None,
nonstrict=True)
loop = parser.parse()
return parser.postprocess(loop, **kwds)
def parse_args(self, opname, argspec):
if not argspec.strip():
return [], None
if opname == 'debug_merge_point':
return argspec.split(", ", 2), None
else:
args = argspec.split(', ')
descr = None
if args[-1].startswith('descr='):
descr = args[-1][len('descr='):]
args = args[:-1]
if args == ['']:
args = []
return (args, descr)
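    # Example (sketch): parse_args('int_add', 'i0, i1, descr=<SomeDescr>')
    # returns (['i0', 'i1'], '<SomeDescr>'), while a debug_merge_point argspec
    # is split into at most three pieces so the quoted code location stays
    # intact.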
def box_for_var(self, res):
return res
def create_op(self, opnum, args, res, descr, fail_args):
return self.Op(intern(opname[opnum].lower()), args, res,
descr, fail_args)
def create_op_no_result(self, opnum, args, descr, fail_args):
return self.Op(intern(opname[opnum].lower()), args, None,
descr, fail_args)
def update_memo(self, val, name):
pass
class NonCodeError(Exception):
pass
class TraceForOpcode(object):
code = None
is_bytecode = True
inline_level = None
has_dmp = False
def __init__(self, operations, storage, loopname):
for op in operations:
if op.name == 'debug_merge_point':
self.inline_level = int(op.args[0])
parsed = parse_code_data(op.args[2][1:-1])
(self.name, self.bytecode_name, self.filename,
self.startlineno, self.bytecode_no) = parsed
break
else:
self.inline_level = 0
parsed = parse_code_data(loopname)
(self.name, self.bytecode_name, self.filename,
self.startlineno, self.bytecode_no) = parsed
self.operations = operations
self.storage = storage
self.code = storage.disassemble_code(self.filename, self.startlineno,
self.name)
def repr(self):
if self.filename is None:
return self.bytecode_name
return "%s, file '%s', line %d" % (self.name, self.filename,
self.startlineno)
def getcode(self):
return self.code
def has_valid_code(self):
return self.code is not None
def getopcode(self):
if self.code is None:
return None
return self.code.map[self.bytecode_no]
def getlineno(self):
code = self.getopcode()
if code is None:
return None
return code.lineno
lineno = property(getlineno)
def getline_starts_here(self):
return self.getopcode().line_starts_here
line_starts_here = property(getline_starts_here)
def __repr__(self):
return "[%s\n]" % "\n ".join([repr(op) for op in self.operations])
def pretty_print(self, out):
pass
class Function(object):
filename = None
name = None
startlineno = 0
_linerange = None
_lineset = None
is_bytecode = False
inline_level = None
bytecode_name = None
# factory method
TraceForOpcode = TraceForOpcode
def __init__(self, chunks, path, storage, inputargs=''):
self.path = path
self.inputargs = inputargs
self.chunks = chunks
for chunk in self.chunks:
if chunk.bytecode_name is not None:
self.startlineno = chunk.startlineno
self.filename = chunk.filename
self.name = chunk.name
self.inline_level = chunk.inline_level
break
self.storage = storage
@classmethod
def from_operations(cls, operations, storage, limit=None, inputargs='',
loopname=''):
""" Slice given operation list into a chain of TraceForOpcode chunks.
Also detect inlined functions and make them Function
"""
stack = []
def getpath(stack):
return ",".join([str(len(v)) for v in stack])
def append_to_res(bc):
if bc.inline_level is not None:
if bc.inline_level == len(stack) - 1:
pass
elif bc.inline_level > len(stack) - 1:
stack.append([])
else:
while bc.inline_level + 1 < len(stack):
last = stack.pop()
stack[-1].append(cls(last, getpath(stack), storage))
stack[-1].append(bc)
so_far = []
stack = []
nothing_yet = True
for op in operations:
if op.name == 'debug_merge_point':
if so_far:
opc = cls.TraceForOpcode(so_far, storage, loopname)
if nothing_yet:
nothing_yet = False
for i in xrange(opc.inline_level + 1):
stack.append([])
append_to_res(opc)
if limit:
break
so_far = []
so_far.append(op)
if so_far:
append_to_res(cls.TraceForOpcode(so_far, storage, loopname))
# wrap stack back up
if not stack:
# no ops whatsoever
return cls([], getpath(stack), storage, inputargs)
while True:
next = stack.pop()
if not stack:
return cls(next, getpath(stack), storage, inputargs)
stack[-1].append(cls(next, getpath(stack), storage))
def getlinerange(self):
if self._linerange is None:
self._compute_linerange()
return self._linerange
linerange = property(getlinerange)
def getlineset(self):
if self._lineset is None:
self._compute_linerange()
return self._lineset
lineset = property(getlineset)
def has_valid_code(self):
for chunk in self.chunks:
if chunk.has_valid_code():
return True
return False
def _compute_linerange(self):
self._lineset = set()
minline = sys.maxint
maxline = -1
for chunk in self.chunks:
if chunk.is_bytecode and chunk.has_valid_code():
lineno = chunk.lineno
minline = min(minline, lineno)
maxline = max(maxline, lineno)
if chunk.line_starts_here or len(chunk.operations) > 1:
self._lineset.add(lineno)
if minline == sys.maxint:
minline = 0
maxline = 0
self._linerange = minline, maxline
def repr(self):
if self.filename is None:
return self.chunks[0].bytecode_name
return "%s, file '%s', line %d" % (self.name, self.filename,
self.startlineno)
def __repr__(self):
return "[%s]" % ", ".join([repr(chunk) for chunk in self.chunks])
def pretty_print(self, out):
print >>out, "Loop starting at %s in %s at %d" % (self.name,
self.filename, self.startlineno)
lineno = -1
for chunk in self.chunks:
if chunk.filename is not None and chunk.lineno != lineno:
lineno = chunk.lineno
source = chunk.getcode().source[chunk.lineno -
chunk.startlineno]
print >>out, " ", source
chunk.pretty_print(out)
def adjust_bridges(loop, bridges):
""" Slice given loop according to given bridges to follow. Returns a plain
list of operations.
"""
ops = loop.operations
res = []
i = 0
while i < len(ops):
op = ops[i]
if op.is_guard() and bridges.get('loop-' + hex(op.guard_no)[2:], None):
res.append(op)
i = 0
if hasattr(op.bridge, 'force_asm'):
op.bridge.force_asm()
ops = op.bridge.operations
else:
res.append(op)
i += 1
return res
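# Example (sketch): with bridges = {'loop-1a': True}, the operations of the
# bridge attached to guard 0x1a are spliced in at the point of that guard (the
# guard itself is kept) and traversal restarts at the start of the bridge's
# operation list.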
def parse_addresses(part, callback=None):
hex_re = '0x(-?[\da-f]+)'
addrs = {}
if callback is None:
def callback(addr, stop_addr, bootstrap_addr, name, code_name):
addrs.setdefault(bootstrap_addr, []).append(name)
for entry in part:
m = re.search('has address %(hex)s to %(hex)s \(bootstrap %(hex)s' %
{'hex': hex_re}, entry)
if not m:
# a bridge
m = re.search('has address ' + hex_re + ' to ' + hex_re, entry)
addr = int(m.group(1), 16)
bootstrap_addr = addr
stop_addr = int(m.group(2), 16)
entry = entry.lower()
m = re.search('guard ' + hex_re, entry)
name = 'guard ' + m.group(1)
code_name = 'bridge'
else:
name = entry[:entry.find('(') - 1].lower()
addr = int(m.group(1), 16)
stop_addr = int(m.group(2), 16)
bootstrap_addr = int(m.group(3), 16)
code_name = entry[entry.find('(') + 1:m.span(0)[0] - 2]
callback(addr, stop_addr, bootstrap_addr, name, code_name)
return addrs
def import_log(logname, ParserCls=SimpleParser):
log = parse_log_file(logname)
addrs = parse_addresses(extract_category(log, 'jit-backend-addr'))
from rpython.jit.backend.tool.viewcode import World
world = World()
for entry in extract_category(log, 'jit-backend-dump'):
world.parse(entry.splitlines(True))
dumps = {}
for r in world.ranges:
if r.addr in addrs and addrs[r.addr]:
name = addrs[r.addr].pop(0) # they should come in order
data = r.data.encode('hex') # backward compatibility
dumps[name] = (world.backend_name, r.addr, data)
loops = []
cat = extract_category(log, 'jit-log-opt')
if not cat:
cat = extract_category(log, 'jit-log-rewritten')
if not cat:
cat = extract_category(log, 'jit-log-noopt')
for entry in cat:
parser = ParserCls(entry, None, {}, 'lltype', None,
nonstrict=True)
loop = parser.parse()
comm = loop.comment
comm = comm.lower()
if comm.startswith('# bridge'):
m = re.search('guard 0x(-?[\da-f]+)', comm)
name = 'guard ' + m.group(1)
elif "(" in comm:
name = comm[2:comm.find('(')-1]
else:
name = " ".join(comm[2:].split(" ", 2)[:2])
if name in dumps:
bname, start_ofs, dump = dumps[name]
loop.force_asm = (lambda dump=dump, start_ofs=start_ofs,
bname=bname, loop=loop:
parser.postprocess(loop, backend_tp=bname,
backend_dump=dump,
dump_start=start_ofs))
loops += split_trace(loop)
return log, loops
def split_trace(trace):
labels = [0]
if trace.comment and 'Guard' in trace.comment:
descrs = ['bridge %d' % int(
re.search('Guard 0x(-?[\da-f]+)', trace.comment).group(1), 16)]
else:
descrs = ['entry ' + re.search('Loop (\d+)', trace.comment).group(1)]
for i, op in enumerate(trace.operations):
if op.name == 'label':
labels.append(i)
descrs.append(op.descr)
labels.append(len(trace.operations) - 1)
parts = []
for i in range(len(labels) - 1):
start, stop = labels[i], labels[i+1]
part = copy(trace)
part.operations = trace.operations[start : stop + 1]
part.descr = descrs[i]
part.comment = trace.comment
parts.append(part)
return parts
def parse_log_counts(input, loops):
if not input:
return
lines = input[-1].splitlines()
mapping = {}
for loop in loops:
mapping[loop.descr] = loop
for line in lines:
if line:
num, count = line.split(':', 2)
mapping[num].count = int(count)
def mangle_descr(descr):
if descr.startswith('TargetToken('):
return descr[len('TargetToken('):-1]
if descr.startswith('<Guard'):
return 'bridge-' + str(int(descr[len('<Guard0x'):-1], 16))
if descr.startswith('<Loop'):
return 'entry-' + descr[len('<Loop'):-1]
return descr.replace(" ", '-')
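# Examples (sketch) of mangle_descr's mapping:
#   'TargetToken(4401)' -> '4401'
#   '<Guard0x1a>'       -> 'bridge-26'
#   '<Loop0>'           -> 'entry-0'
#   'entry bridge'      -> 'entry-bridge'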
if __name__ == '__main__':
import_log(sys.argv[1])
|
import matplotlib.pyplot as plt
if __name__ == '__main__':
data_x = [-2, -1, 0, 1, 2.5, 3.5, 4, 5, 6, 7]
data_y = [202.5, 122.5, 62.5, 22.5, 0, 10.0, 22.5, 62.5, 122.5, 202.5]
data_der = [-45, -35, -25, -15, 0, 10, 15, 25, 35, 45]
for x, y, d in zip(data_x, data_y, data_der):
plt.plot(x, y, marker='x', color='red' if d < 0 else 'blue')
plt.show()
|
"""
Train an m2vae model.
"""
import os
import sys
from collections import defaultdict
import contextlib
from itertools import combinations
import torch
import numpy as np
from tqdm import tqdm
import pretty_midi
import data
import mvae
import models
import util
import io_util
import wrappers
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
def fast_f1(true, pred):
"""
This is faster than detaching to cpu and using
sklearn.metrics.f1_score
"""
true = true.view(-1)
pred = pred.view(-1)
hits = true == pred
misses = true != pred
    true_positives = (pred * hits).sum().float()
    # False positives: predicted 1 where the truth is 0; false negatives:
    # predicted 0 where the truth is 1.
    false_positives = (pred * misses).sum().float()
    false_negatives = ((1 - pred) * misses).sum().float()
    # Tensor division by zero yields nan/inf rather than raising
    # ZeroDivisionError, so guard the degenerate case explicitly.
    if true_positives == 0:
        return torch.zeros(())
    precision = true_positives / (true_positives + false_positives)
    recall = true_positives / (true_positives + false_negatives)
    return 2 * precision * recall / (precision + recall)
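# Example (sketch): with true = torch.tensor([1, 0, 1]) and
# pred = torch.tensor([1, 1, 1]), precision is 2/3, recall is 1, and
# fast_f1 returns 0.8.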
def init_meters(*metrics):
"""Return an averagemeter for each metric passed"""
return {m: util.AverageMeter() for m in metrics}
def init_metrics():
metrics = defaultdict(list)
metrics['best_f1'] = -10
metrics['best_loss'] = float('inf')
metrics['best_epoch'] = 0
return metrics
def compute_metrics(meters):
"""
Compute averages from meters. Handle tensors vs floats (always return a
float)
"""
metrics = {m: vs.avg for m, vs in meters.items()}
metrics = {m: v if isinstance(v, float) else v.item() for m, v in metrics.items()}
return metrics
def compute_kl_annealing_factor(batch, epoch, n_batches, annealing_epochs):
return (float(batch + (epoch - 1) * n_batches + 1) /
float(annealing_epochs * n_batches))
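# Example (sketch): with n_batches=100 and annealing_epochs=10, the factor is
# (0 + 0 + 1) / 1000 = 0.001 at (epoch=1, batch=0) and reaches
# (99 + 900 + 1) / 1000 = 1.0 at (epoch=10, batch=99).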
def enumerate_combinations(n):
"""Enumerate entire pool of combinations.
We use this to define the domain of ELBO terms,
(the pool of 2^19 ELBO terms).
@param n: integer
number of features (19 for Celeb19)
    @return: a boolean np.array enumerating all combinations of size 2 to n - 1
"""
combos = []
    for i in range(2, n):  # sizes 2 to n - 1 (skip the empty, singleton and full sets)
_combos = list(combinations(range(n), i))
combos += _combos
combos_np = np.zeros((len(combos), n))
for i in range(len(combos)):
for idx in combos[i]:
combos_np[i][idx] = 1
combos_np = combos_np.astype(np.bool)
return combos_np
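# Example (sketch): enumerate_combinations(3) returns the size-2 subsets of
# {0, 1, 2} as boolean rows:
#   [[ True,  True, False],
#    [ True, False,  True],
#    [False,  True,  True]]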
def sample_combinations(pool, random_state=None, size=1):
"""Return boolean list of which data points to use to compute a modality.
Ignore combinations that are all True or only contain a single True.
@param pool: np.array
enumerating all possible combinations.
@param size: integer (default: 1)
number of combinations to sample.
"""
if random_state is None:
random_state = np.random
n_modalities = pool.shape[1]
pool_size = len(pool)
pool_sums = np.sum(pool, axis=1)
pool_dist = np.bincount(pool_sums)
pool_space = np.where(pool_dist > 0)[0]
sample_pool = random_state.choice(pool_space, size, replace=True)
sample_dist = np.bincount(sample_pool)
if sample_dist.size < n_modalities:
zeros_pad = np.zeros(n_modalities - sample_dist.size).astype(np.int)
sample_dist = np.concatenate((sample_dist, zeros_pad))
sample_combo = []
for ix in range(n_modalities):
if sample_dist[ix] > 0:
pool_i = pool[pool_sums == ix]
combo_i = random_state.choice(range(pool_i.shape[0]),
size=sample_dist[ix],
replace=False)
sample_combo.append(pool_i[combo_i])
sample_combo = np.concatenate(sample_combo)
return sample_combo
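# Example (sketch): sample_combinations(pool, size=2) returns a boolean array
# of shape (2, n_modalities); each row marks a subset of modalities containing
# at least two and fewer than all of them.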
def run(split, epoch, model, optimizer, loss, dataloaders, m_combos, args,
random_state=None):
"""
Run the model for a single epoch.
"""
training = split == 'train'
dataloader = dataloaders[split]
if training:
model.train()
context = contextlib.suppress
else:
model.eval()
context = torch.no_grad
report_f1 = (epoch % args.f1_interval == 0)
measures = ['loss', 'annealing_factor', 'recon_loss', 'kl_divergence']
if report_f1:
measures.append('f1')
meters = init_meters(*measures)
with context():
for batch_i, (tracks, notes) in enumerate(dataloader):
if args.no_kl:
annealing_factor = 0.0
elif training and epoch < args.annealing_epochs:
annealing_factor = compute_kl_annealing_factor(batch_i, epoch, len(dataloader),
args.annealing_epochs)
else:
annealing_factor = 1.0 # No annealing at val/test
# tracks: [batch_size, n_bar, n_timesteps, n_pitches, n_tracks]
tracks = tracks[:, :, :, :, :args.n_tracks]
if args.cuda:
tracks = tracks.cuda()
if notes is not None:
notes = notes.cuda()
batch_size = tracks.shape[0]
# Split tracks into list so we can zero them out
tracks = [tracks[:, :, :, :, i] for i in range(args.n_tracks)]
# Refresh the optimizer
if training:
optimizer.zero_grad()
total_loss = 0
total_recon_loss = 0
total_kl_divergence = 0
# Forward pass - all data
tracks_recon, mu, logvar = model(tracks, notes)
this_loss, recon_loss, kl_divergence = loss(tracks_recon, tracks, mu, logvar,
annealing_factor=annealing_factor)
total_loss += this_loss
total_recon_loss += recon_loss
total_kl_divergence += kl_divergence
if training:
# Additional forward passes
# Individual tracks
if not args.no_single_pass:
for i in range(args.n_tracks):
tracks_single = [tracks[t] if t == i else None
for t in range(args.n_tracks)]
tracks_single_recon, mu, logvar = model(tracks_single, notes)
this_loss, recon_loss, kl_divergence = loss(tracks_single_recon, tracks_single, mu, logvar,
annealing_factor=annealing_factor)
total_loss += this_loss
total_recon_loss += recon_loss
total_kl_divergence += kl_divergence
# Subsampled combinations of tracks
if args.approx_m > 0:
sample_combos = sample_combinations(m_combos, random_state=random_state, size=args.approx_m)
for sample_combo in sample_combos:
tracks_samp = [track if i else None for
i, track in zip(sample_combo, tracks)]
tracks_samp_recon, mu, logvar = model(tracks_samp, notes)
this_loss, recon_loss, kl_divergence = loss(tracks_samp_recon, tracks_samp, mu, logvar,
annealing_factor=annealing_factor)
total_loss += this_loss
total_recon_loss += recon_loss
total_kl_divergence += kl_divergence
# SGD step
total_loss.backward()
optimizer.step()
meters['loss'].update(total_loss, batch_size)
meters['annealing_factor'].update(annealing_factor, batch_size)
meters['recon_loss'].update(total_recon_loss, batch_size)
meters['kl_divergence'].update(total_kl_divergence, batch_size)
# All data - F1 score
if report_f1:
f1 = sum(
fast_f1(t.type(torch.ByteTensor), (tr > 0).type(torch.ByteTensor))
for t, tr in zip(tracks, tracks_recon))
f1 /= args.n_tracks
meters['f1'].update(f1, batch_size)
if training and batch_i % args.log_interval == 0:
logging.info('Epoch {}\t[{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
epoch, batch_i * batch_size, len(dataloader.dataset),
100 * batch_i / len(dataloader), meters['loss'].avg, annealing_factor))
metrics = compute_metrics(meters)
if not report_f1:
metrics['f1'] = -1.0
logging.info('Epoch {}\t{} {}'.format(
epoch, split.upper(), ' '.join('{}: {:.4f}'.format(m, v) for m, v in metrics.items())
))
return metrics
if __name__ == '__main__':
args = io_util.parse_args('train', desc=__doc__)
# Make experiment directory
resumable = args.resume and util.is_resumable(args.exp_dir)
os.makedirs(args.exp_dir, exist_ok=True)
if not resumable:
util.save_args(args, args.exp_dir)
# Seed
random = np.random.RandomState(args.seed)
dataloaders, pos_prop = wrappers.load_data(args, random_state=random,
use_random_transpose=True)
model, optimizer, loss = wrappers.build_mvae(args, pos_prop=pos_prop)
# If resume, load metrics; otherwise init metrics
if resumable:
util.restore_checkpoint(model, optimizer, args.exp_dir)
metrics = util.load_metrics(args.exp_dir)
start_epoch = metrics['current_epoch'] + 1
print("Resuming from epoch {}".format(metrics['current_epoch']))
else:
metrics = init_metrics()
start_epoch = 1
if start_epoch > args.epochs:
raise RuntimeError("start_epoch {} > total epochs {}".format(
start_epoch, args.epochs))
# Enumerate subsampled modality combinations
m_combos = enumerate_combinations(args.n_tracks)
for epoch in range(start_epoch, args.epochs + 1):
train_metrics = run('train', epoch, model, optimizer, loss, dataloaders, m_combos, args, random_state=random)
val_metrics = run('val', epoch, model, optimizer, loss, dataloaders, m_combos, args, random_state=random)
for metric, value in train_metrics.items():
try:
metrics['train_{}'.format(metric)].append(value)
except KeyError:
pass # Could be missing due to resuming from older code
for metric, value in val_metrics.items():
try:
metrics['val_{}'.format(metric)].append(value)
except KeyError:
pass
metrics['current_epoch'] = epoch
is_best = val_metrics['f1'] > metrics['best_f1']
if is_best:
metrics['best_f1'] = val_metrics['f1']
metrics['best_loss'] = val_metrics['loss']
metrics['best_epoch'] = epoch
# Save model
util.save_checkpoint({
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'epoch': epoch
}, is_best, args.exp_dir)
# Save metrics
util.save_metrics(metrics, args.exp_dir)
|
#!/usr/bin/env python
"""
This file is part of the package FUNtoFEM for coupled aeroelastic simulation
and design optimization.
Copyright (C) 2015 Georgia Tech Research Corporation.
Additional copyright (C) 2015 Kevin Jacobson, Jan Kiviaho and Graeme Kennedy.
All rights reserved.
FUNtoFEM is licensed under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from .funtofem_driver import *
class FUNtoFEMnlbgs(FUNtoFEMDriver):
def __init__(self,solvers,comm,struct_comm,struct_master,aero_comm,aero_master,transfer_options=None,model=None,
theta_init=0.125,theta_min=0.01,theta_max=1.0):
"""
        The FUNtoFEM driver that uses nonlinear block Gauss-Seidel solvers for the steady and unsteady coupled analysis and adjoint.
Parameters
----------
solvers: dict
the various disciplinary solvers
comm: MPI.comm
MPI communicator
transfer_options: dict
options of the load and displacement transfer scheme
model: :class:`~funtofem_model.FUNtoFEMmodel`
The model containing the design data
theta_init: float
Initial value of theta for the Aitken under-relaxation
theta_min: float
Minimum value of theta for the Aitken under-relaxation
"""
super(FUNtoFEMnlbgs,self).__init__(solvers,comm,struct_comm,struct_master,aero_comm,aero_master,transfer_options=transfer_options,model=model)
# Aitken acceleration settings
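        # Sketch of the scheme (applied via _aitken_relax / _aitken_adjoint_relax
        # in the solve loops): the raw Gauss-Seidel update u* is blended with the
        # previous iterate, u_new = u_old + theta * (u* - u_old), where theta is
        # adapted each iteration from successive update differences and clipped
        # to [theta_min, theta_max].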
self.theta_init = theta_init
self.theta_min = theta_min
self.theta_max = theta_max
self.theta = []
self.aitken_init = None
self.aitken_vec = None
self.up_prev = None
def _initialize_adjoint_variables(self,scenario,bodies):
"""
Initialize the adjoint variables
Parameters
----------
scenario: :class:`~scenario.Scenario`
The scenario
bodies: :class:`~body.Body`
List of FUNtoFEM bodies.
"""
nfunctions = scenario.count_adjoint_functions()
nfunctions_total = len(scenario.functions)
for body in bodies:
body.psi_L = np.zeros((body.struct_nnodes*body.xfer_ndof,nfunctions),
dtype=TransferScheme.dtype)
body.psi_S = np.zeros((body.struct_nnodes*body.xfer_ndof,nfunctions),
dtype=TransferScheme.dtype)
body.struct_rhs = np.zeros((body.struct_nnodes*body.xfer_ndof,nfunctions),
dtype=TransferScheme.dtype)
body.dLdfa = np.zeros((body.aero_nnodes*3,nfunctions),
dtype=TransferScheme.dtype)
body.dGdua = np.zeros((body.aero_nnodes*3,nfunctions),
dtype=TransferScheme.dtype)
body.psi_D = np.zeros((body.aero_nnodes*3,nfunctions),
dtype=TransferScheme.dtype)
if body.shape:
body.aero_shape_term = np.zeros((body.aero_nnodes*3,nfunctions_total),dtype=TransferScheme.dtype)
body.struct_shape_term = np.zeros((body.struct_nnodes*body.xfer_ndof,nfunctions_total),dtype=TransferScheme.dtype)
def _solve_steady_forward(self,scenario,steps=None):
"""
Solve the aeroelastic forward analysis using the nonlinear block Gauss-Seidel algorithm.
        Aitken under-relaxation for stability.
Parameters
----------
scenario: :class:`~scenario.Scenario`
The current scenario
steps: int
Number of iterations if not set by the model
"""
self.aitken_init = True
fail = 0
# Determine if we're using the scenario's number of steps or the argument
if steps is None:
if self.model:
steps = scenario.steps
else:
if self.comm.Get_rank()==0:
print("No number of steps given for the coupled problem. Using default (1000)")
steps = 1000
# Loop over the NLBGS steps
for step in range(1,steps+1):
# Transfer displacements
for body in self.model.bodies:
body.aero_disps = np.zeros(body.aero_nnodes*3,dtype=TransferScheme.dtype)
body.transfer.transferDisps(body.struct_disps, body.aero_disps)
# Take a step in the flow solver
fail = self.solvers['flow'].iterate(scenario,self.model.bodies,step)
fail = self.comm.allreduce(fail)
if fail != 0:
if self.comm.Get_rank() == 0:
print('Flow solver returned fail flag')
return fail
# Transfer the loads
for body in self.model.bodies:
body.struct_loads = np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype)
body.transfer.transferLoads(body.aero_loads, body.struct_loads)
# Take a step in the FEM model
fail = self.solvers['structural'].iterate(scenario,self.model.bodies,step)
fail = self.comm.allreduce(fail)
if fail != 0:
if self.comm.Get_rank() == 0:
print('Structural solver returned fail flag')
return fail
# Under-relaxation for solver stability
self._aitken_relax()
# end solve loop
return fail
def _solve_steady_adjoint(self,scenario):
"""
Solve the aeroelastic adjoint analysis using the linear block Gauss-Seidel algorithm.
        Aitken under-relaxation for stability.
Parameters
----------
scenario: :class:`~scenario.Scenario`
The current scenario
"""
fail = 0
self.aitken_init = True
# how many steps to take
steps = scenario.steps
# Load the current state
for body in self.model.bodies:
aero_disps = np.zeros(body.aero_disps.size,dtype=TransferScheme.dtype)
body.transfer.transferDisps(body.struct_disps, aero_disps)
struct_loads = np.zeros(body.struct_loads.size,dtype=TransferScheme.dtype)
body.transfer.transferLoads(body.aero_loads, struct_loads)
# Initialize the adjoint variables
nfunctions = scenario.count_adjoint_functions()
self._initialize_adjoint_variables(scenario,self.model.bodies)
# loop over the adjoint NLBGS solver
for step in range(1,steps+1):
# Get force terms for the flow solver
for body in self.model.bodies:
for func in range(nfunctions):
# 'Solve' for load transfer adjoint variables
body.psi_L[:,func] = body.psi_S[:,func]
# Transform load transfer adjoint variables using transpose Jacobian from
# funtofem: dLdfA^T * psi_L = dDdus * psi_L
psi_L_r = np.zeros(body.aero_nnodes*3,dtype=TransferScheme.dtype)
body.transfer.applydDduS(body.psi_L[:, func].copy(order='C'), psi_L_r)
body.dLdfa[:,func] = psi_L_r
fail = self.solvers['flow'].iterate_adjoint(scenario,self.model.bodies,step)
fail = self.comm.allreduce(fail)
if fail != 0:
if self.comm.Get_rank() == 0:
print('Flow solver returned fail flag')
return fail
# Get the structural adjoint rhs
for body in self.model.bodies:
for func in range(nfunctions):
# calculate dDdu_s^T * psi_D
psi_D_product = np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype)
body.psi_D = - body.dGdua
body.transfer.applydDduSTrans(body.psi_D[:, func].copy(order='C'), psi_D_product)
# calculate dLdu_s^T * psi_L
psi_L_product = np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype)
body.transfer.applydLduSTrans(body.psi_L[:, func].copy(order='C'), psi_L_product)
body.struct_rhs[:,func] = -psi_D_product - psi_L_product
# take a step in the structural adjoint
fail = self.solvers['structural'].iterate_adjoint(scenario,self.model.bodies,step)
fail = self.comm.allreduce(fail)
if fail != 0:
if self.comm.Get_rank() == 0:
print('Structural solver returned fail flag')
return fail
self._aitken_adjoint_relax(scenario)
# end of solve loop
self._extract_coordinate_derivatives(scenario,self.model.bodies,steps)
return 0
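# --------------------------------------------------------------------------
# Illustrative sketch (standalone, made-up matrices): the discrete-adjoint
# identity that the block Gauss-Seidel adjoint above solves in coupled form.
# For a residual R(u) = A u - b = 0 and objective f(u), the adjoint psi
# satisfies A^T psi = (df/du)^T, and a design derivative through b is psi^T db/dx.
import numpy as np

def _adjoint_identity_sketch():
    A = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([1.0, 2.0])
    dfdu = np.array([1.0, 1.0])                      # f(u) = u_0 + u_1
    psi = np.linalg.solve(A.T, dfdu)                 # adjoint solve with the transpose Jacobian
    dbdx = np.array([1.0, 0.0])                      # design variable enters through b only
    df_dx_adjoint = psi @ dbdx
    df_dx_direct = dfdu @ np.linalg.solve(A, dbdx)   # direct (tangent) computation for comparison
    assert np.isclose(df_dx_adjoint, df_dx_direct)
    return df_dx_adjoint
# --------------------------------------------------------------------------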
def _solve_unsteady_forward(self,scenario,steps=None):
"""
This function solves the unsteady forward problem using NLBGS without FSI subiterations
Parameters
----------
scenario: :class:`~scenario.Scenario`
the current scenario
steps: int
number of time steps if not using the value defined in the scenario
Returns
-------
fail: int
fail flag for the coupled solver
"""
fail = 0
if not steps:
if not self.fakemodel:
steps = scenario.steps
else:
if self.comm.Get_rank()==0:
print("No number of steps given for the coupled problem. Using default (1000)")
steps = 1000
for step in range(1,steps+1):
# Transfer structural displacements to aerodynamic surface
for body in self.model.bodies:
body.aero_disps = np.zeros(body.aero_nnodes*3,dtype=TransferScheme.dtype)
body.transfer.transferDisps(body.struct_disps, body.aero_disps)
if ('rigid' in body.motion_type and
'deform' in body.motion_type):
rotation = np.zeros(9,dtype=TransferScheme.dtype)
translation = np.zeros(3,dtype=TransferScheme.dtype)
u = np.zeros(body.aero_nnodes*3,dtype=TransferScheme.dtype)
body.rigid_transform = np.zeros((4,4),dtype=TransferScheme.dtype)
body.transfer.transformEquivRigidMotion(body.aero_disps,rotation,translation,u)
body.rigid_transform[:3,:3] = rotation.reshape((3,3,),order='F')
body.rigid_transform[:3, 3] = translation
body.rigid_transform[-1,-1] = 1.0
body.aero_disps = u.copy()
elif('rigid' in body.motion_type):
transform = self.solvers['structural'].get_rigid_transform(body)
fail = self.solvers['flow'].iterate(scenario,self.model.bodies,step)
fail = self.comm.allreduce(fail)
if fail != 0:
if self.comm.Get_rank() == 0:
print('Flow solver returned fail flag')
return fail
# Transfer loads from fluid and get loads on structure
for body in self.model.bodies:
body.struct_loads = np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype)
body.transfer.transferLoads(body.aero_loads, body.struct_loads)
# Take a step in the FEM model
fail = self.solvers['structural'].iterate(scenario,self.model.bodies,step)
fail = self.comm.allreduce(fail)
if fail != 0:
if self.comm.Get_rank() == 0:
print('Structural solver returned fail flag')
return fail
# end solve loop
return fail
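# --------------------------------------------------------------------------
# Illustrative sketch (standalone, assumed numbers): how the 4x4 homogeneous
# transform assembled above (rotation block, translation column, trailing 1)
# acts on a point written in homogeneous coordinates.
import numpy as np

def _rigid_transform_sketch():
    theta = np.pi / 2.0                              # 90 degree rotation about z
    rotation = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                         [np.sin(theta),  np.cos(theta), 0.0],
                         [0.0,            0.0,           1.0]])
    translation = np.array([1.0, 2.0, 3.0])
    T = np.zeros((4, 4))
    T[:3, :3] = rotation
    T[:3, 3] = translation
    T[-1, -1] = 1.0
    point = np.array([1.0, 0.0, 0.0, 1.0])           # [x, y, z, 1]
    moved = T @ point                                 # rotate, then translate
    assert np.allclose(moved[:3], [1.0, 3.0, 3.0])
    return moved
# --------------------------------------------------------------------------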
def _solve_unsteady_adjoint(self,scenario):
"""
Solves the unsteady adjoint problem using LBGS without FSI subiterations
Parameters
----------
scenario: :class:`~scenario.Scenario`
the current scenario
steps: int
number of time steps
Returns
-------
fail: int
fail flag
"""
# Initialize the adjoint variables
nfunctions = scenario.count_adjoint_functions()
self._initialize_adjoint_variables(scenario,self.model.bodies)
steps = scenario.steps
for rstep in range(1,steps+1):
step = steps - rstep + 1
self.solvers['flow'].set_states(scenario,self.model.bodies,step)
# Due to the staggering, we linearize the transfer about u_s^(n-1)
self.solvers['structural'].set_states(scenario,self.model.bodies,step-1)
for body in self.model.bodies:
body.aero_disps = np.zeros(body.aero_nnodes*3,dtype=TransferScheme.dtype)
body.transfer.transferDisps(body.struct_disps,body.aero_disps)
struct_loads = np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype)
body.transfer.transferLoads(body.aero_loads,struct_loads)
if ('rigid' in body.motion_type and
'deform' in body.motion_type):
rotation = np.zeros(9,dtype=TransferScheme.dtype)
translation = np.zeros(3,dtype=TransferScheme.dtype)
u = np.zeros(body.aero_nnodes*3,dtype=TransferScheme.dtype)
body.rigid_transform = np.zeros((4,4),dtype=TransferScheme.dtype)
body.transfer.transformEquivRigidMotion(body.aero_disps,rotation,translation,u)
body.rigid_transform[:3,:3] = rotation.reshape((3,3,),order='F')
body.rigid_transform[:3, 3] = translation
body.rigid_transform[-1,-1] = 1.0
body.global_aero_disps = body.aero_disps[:]
body.aero_disps = u.copy()
# take a step in the structural adjoint
fail = self.solvers['structural'].iterate_adjoint(scenario,self.model.bodies,step)
fail = self.comm.allreduce(fail)
if fail != 0:
if self.comm.Get_rank() == 0:
print('Structural solver returned fail flag')
return fail
for body in self.model.bodies:
for func in range(nfunctions):
# 'Solve' for load transfer adjoint variables
body.psi_L[:,func] = body.psi_S[:,func]
# Transform load transfer adjoint variables using transpose Jacobian from
# funtofem: dLdfA^T * psi_L
psi_L_r = np.zeros(body.aero_nnodes*3,dtype=TransferScheme.dtype)
body.transfer.applydDduS(body.psi_L[:, func].copy(order='C'), psi_L_r)
body.dLdfa[:,func] = psi_L_r
fail = self.solvers['flow'].iterate_adjoint(scenario,self.model.bodies,step)
fail = self.comm.allreduce(fail)
if fail != 0:
if self.comm.Get_rank() == 0:
print('Flow solver returned fail flag')
return fail
# From the flow grid adjoint, get to the displacement adjoint
for body in self.model.bodies:
for func in range(nfunctions):
if body.motion_type == 'deform':
# displacement adjoint equation
body.psi_D[:,func] = - body.dGdua[:,func]
elif 'rigid' in body.motion_type and 'deform' in body.motion_type:
# solve the elastic deformation adjoint
psi_E = np.zeros(body.aero_nnodes*3,dtype=TransferScheme.dtype)
tmt = np.linalg.inv(np.transpose(body.rigid_transform))
for node in range(body.aero_nnodes):
for i in range(3):
psi_E[3*node+i] = ( tmt[i,0] * body.dGdua[3*node+0,func]
+ tmt[i,1] * body.dGdua[3*node+1,func]
+ tmt[i,2] * body.dGdua[3*node+2,func]
+ tmt[i,3] )
# get the product dE/dT^T psi_E
dEdTmat = np.zeros((3,4),dtype=TransferScheme.dtype)
for n in range(body.aero_nnodes):
for i in range(3):
for j in range(4):
if j < 3:
dEdTmat[i,j] += -(body.aero_X[3*n+j]+body.aero_disps[3*n+j]) * psi_E[3*n+i]
else:
dEdTmat[i,j] += - psi_E[3*n+i]
dEdT = dEdTmat.flatten(order='F')
dEdT = self.comm.allreduce(dEdT)
# solve the rigid transform adjoint
psi_R = np.zeros(12,dtype=TransferScheme.dtype)
dGdT_func = body.dGdT[:,:,func]
dGdT = dGdT_func[:3,:4].flatten(order='F')
psi_R = -dGdT - dEdT
# now solve the displacement adjoint
dRduA = np.zeros(3*body.aero_nnodes,dtype=TransferScheme.dtype)
body.transfer.applydRduATrans(psi_R, dRduA)
body.psi_D[:,func] = - psi_E - dRduA
# form the RHS for the structural adjoint equation on the next reverse step
for func in range(nfunctions):
# calculate dDdu_s^T * psi_D
psi_D_product = np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype)
body.transfer.applydDduSTrans(body.psi_D[:,func].copy(order='C'), psi_D_product)
# calculate dLdu_s^T * psi_L
psi_L_product = np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype)
body.transfer.applydLduSTrans(body.psi_L[:,func].copy(order='C'), psi_L_product)
body.struct_rhs[:,func] = -psi_D_product - psi_L_product
# extract and accumulate coordinate derivative every step
self._extract_coordinate_derivatives(scenario,self.model.bodies,step)
# end of solve loop
# evaluate the initial conditions
fail = self.solvers['flow'].iterate_adjoint(scenario,self.model.bodies,step=0)
fail = self.comm.allreduce(fail)
if fail != 0:
if self.comm.Get_rank() == 0:
print('Flow solver returned fail flag')
return fail
fail = self.solvers['structural'].iterate_adjoint(scenario,self.model.bodies,step=0)
fail = self.comm.allreduce(fail)
if fail != 0:
if self.comm.Get_rank() == 0:
print('Structural solver returned fail flag')
return fail
# extract coordinate derivative term from initial condition
self._extract_coordinate_derivatives(scenario,self.model.bodies,step=0)
fail = 0
return fail
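# --------------------------------------------------------------------------
# Illustrative sketch (standalone, made-up recurrence): the reverse time sweep
# pattern used above -- the stored forward states are reloaded at each step and
# the adjoint is propagated backwards with the transposed step Jacobian.
import numpy as np

def _unsteady_adjoint_sketch(a=0.9, N=5):
    u = np.zeros(N + 1)
    u[0] = 1.0
    for n in range(N):                               # forward march, storing states
        u[n + 1] = a * u[n] + 0.1 * u[n] ** 2
    lam = 1.0                                        # df/du_N for the objective f = u_N
    for n in reversed(range(N)):                     # reverse sweep over stored states
        lam = (a + 0.2 * u[n]) * lam                 # lam_n = (dg/du_n)^T lam_{n+1}
    # lam is now df/du_0; check against a finite difference
    eps = 1e-6
    u_p = 1.0 + eps
    for n in range(N):
        u_p = a * u_p + 0.1 * u_p ** 2
    assert abs(lam - (u_p - u[N]) / eps) < 1e-4
    return lam
# --------------------------------------------------------------------------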
def _aitken_relax(self):
if self.aitken_init:
self.aitken_init = False
# initialize the 'previous update' to zero
self.up_prev = []
self.aitken_vec = []
self.theta = []
for ind, body in enumerate(self.model.bodies):
self.up_prev.append(np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype))
self.aitken_vec.append(np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype))
self.theta.append(self.theta_init)
# do the Aitken update
for ibody, body in enumerate(self.model.bodies):
if body.struct_nnodes > 0:
up = body.struct_disps - self.aitken_vec[ibody]
norm2 = (np.linalg.norm(up - self.up_prev[ibody])**2.0)
# Only update theta if the displacements changed
if norm2 > 1e-13:
self.theta[ibody] *= 1.0 - (up - self.up_prev[ibody]).dot(up)/norm2
self.theta[ibody] = np.max((np.min((self.theta[ibody],self.theta_max)),self.theta_min))
# handle the min/max for complex step
if type(self.theta[ibody]) == np.complex128 or type(self.theta[ibody]) == complex:
self.theta[ibody] = self.theta[ibody].real + 0.0j
self.aitken_vec[ibody] += self.theta[ibody] * up
self.up_prev[ibody] = up[:]
body.struct_disps = self.aitken_vec[ibody]
return
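# --------------------------------------------------------------------------
# Illustrative sketch (standalone toy): the same Aitken dynamic relaxation
# update used in _aitken_relax, applied to the scalar fixed point x = cos(x).
# theta0 and the clamping bounds are assumed values for the sketch.
import numpy as np

def _aitken_sketch(steps=25, theta0=0.5, theta_min=0.01, theta_max=1.0):
    x = 1.0
    up_prev = 0.0
    theta = theta0
    for k in range(steps):
        up = np.cos(x) - x                           # raw update (analogue of struct_disps - aitken_vec)
        if k > 0:
            diff = up - up_prev
            norm2 = diff * diff
            if norm2 > 1e-13:
                theta *= 1.0 - diff * up / norm2     # same theta update as the method above
                theta = min(max(theta, theta_min), theta_max)
        x += theta * up                              # relaxed update
        up_prev = up
    return x                                         # ~0.739085, the root of x = cos(x)
# --------------------------------------------------------------------------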
def _aitken_adjoint_relax(self,scenario):
nfunctions = scenario.count_adjoint_functions()
if self.aitken_init:
self.aitken_init = False
# initialize the 'previous update' to zero
self.up_prev = []
self.aitken_vec = []
self.theta = []
for ibody, body in enumerate(self.model.bodies):
up_prev_body = []
aitken_vec_body = []
theta_body = []
for func in range(nfunctions):
up_prev_body.append(np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype))
aitken_vec_body.append(np.zeros(body.struct_nnodes*body.xfer_ndof,dtype=TransferScheme.dtype))
theta_body.append(self.theta_init)
self.up_prev.append(up_prev_body)
self.aitken_vec.append(aitken_vec_body)
self.theta.append(theta_body)
# do the Aitken update
for ibody, body in enumerate(self.model.bodies):
if body.struct_nnodes > 0:
for func in range(nfunctions):
up = body.psi_S[:,func] - self.aitken_vec[ibody][func]
norm2 = np.linalg.norm(up - self.up_prev[ibody][func])**2.0
# Only update theta if the vector changed
if norm2 > 1e-13:
self.theta[ibody][func] *= 1.0 - (up - self.up_prev[ibody][func]).dot(up)/norm2
self.theta[ibody][func] = np.max((np.min((self.theta[ibody][func],self.theta_max)),self.theta_min))
self.aitken_vec[ibody][func] += self.theta[ibody][func] * up
self.up_prev[ibody][func] = up[:]
body.psi_S[:,func] = self.aitken_vec[ibody][func][:]
return self.aitken_vec
|
import pygame,sys
import libtcodpy as libtcod
#game files
import constants
#
#( ____ \\__ __/( ____ )|\ /|( ____ \\__ __/
#| ( \/ ) ( | ( )|| ) ( || ( \/ ) (
#| (_____ | | | (____)|| | | || | | |
#(_____ ) | | | __)| | | || | | |
# ) | | | | (\ ( | | | || | | |
#/\____) | | | | ) \ \__| (___) || (____/\ | |
#\_______) )_( |/ \__/(_______)(_______/ )_(
#
class struc_Tile:
def __init__(self, block_path):
self.block_path = block_path
self.explored = False
class struc_Assets:
def __init__(self):
#sprites
self.charspritesheet = obj_Spritesheet("data/reptiles.png")
self.enemyspritesheet = obj_Spritesheet("data/enemys.png")
self.A_PLAYER = self.charspritesheet.get_animation('o', 5, 16, 16, 2, (32, 32))
self.A_ENEMY = self.enemyspritesheet.get_animation('k', 1, 16, 16, 2, (32, 32))
self.S_WALL = pygame.image.load("data/wall.jpg")
self.S_WALLEXPLORED = pygame.image.load("data/wallunseen.png")
self.S_FLOOR = pygame.image.load("data/floor.jpg")
self.S_FLOOREXPLORED = pygame.image.load("data/floorunseen.png")
#FONTS
self.FONT_DEBUG_MESSAGE = pygame.font.Font("data/joystix.ttf", 16)
self.FONT_MESSAGE_TEXT = pygame.font.Font("data/joystix.ttf", 12)
# _______ ______ _________ _______ _______ _________ _______
#( ___ )( ___ \ \__ _/( ____ \( ____ \\__ __/( ____ \
#| ( ) || ( ) ) ) ( | ( \/| ( \/ ) ( | ( \/
#| | | || (__/ / | | | (__ | | | | | (_____
#| | | || __ ( | | | __) | | | | (_____ )
#| | | || ( \ \ | | | ( | | | | ) |
#| (___) || )___) )|\_) ) | (____/\| (____/\ | | /\____) |
#(_______)|/ \___/ (____/ (_______/(_______/ )_( \_______)
#
class obj_Actor:
def __init__(self, x, y, name_object, animation, animation_speed = .5, creature = None, ai = None, container = None):
self.x = x
self.y = y
self.animation = animation #list of images
self.animation_speed = animation_speed / 1.0 # in seconds
#animation flicker speed
self.flicker_speed = self.animation_speed / len(self.animation)
self.flicker_timer = 0.0
self.sprite_image = 0
self.creature = creature
if self.creature:
self.creature.owner = self
self.ai = ai
if self.ai:
self.ai.owner = self
self.container = container
if self.container:
self.container.owner = self
def draw(self):
is_visible = libtcod.map_is_in_fov(FOV_MAP, self.x, self.y)
if is_visible:
if len(self.animation) == 1:
SURFACE_MAIN.blit(self.animation[0], (self.x*constants.CELL_WIDTH, self.y*constants.CELL_HEIGHT))
elif len(self.animation) > 1:
if CLOCK.get_fps() > 0.0:
self.flicker_timer += 1 / CLOCK.get_fps()
if self.flicker_timer >= self.flicker_speed:
self.flicker_timer = 0.0
if self.sprite_image >= len(self.animation) -1:
self.sprite_image = 0
else:
self.sprite_image += 1
SURFACE_MAIN.blit(self.animation[self.sprite_image], (self.x*constants.CELL_WIDTH, self.y*constants.CELL_HEIGHT))
class obj_Game:
def __init__(self):
self.current_map = map_create()
self.current_objects = []
self.message_history = []
class obj_Spritesheet:
#used to grab images out of a sprite sheet
def __init__(self, file_name):
#load the sprite sheet
self.sprite_sheet = pygame.image.load(file_name).convert()
self.tiledict = {'a': 1, 'b' : 2, 'c' : 3, 'd' : 4, 'e' : 5, 'f' : 6,
"g" : 7, "h" : 8, "i" : 9, "j" : 10, "k" : 11, "l" : 12,
"m" : 13, "n" : 14, "o" : 15, "p" : 16}
def get_image(self, column, row, width = constants.CELL_WIDTH, height = constants.CELL_HEIGHT,
scale = None):
### scale is a tuple
image_list = []
image = pygame.Surface([width, height]).convert()
image.blit(self.sprite_sheet, (0, 0), (self.tiledict[column]*width, row * height, width, height))
image.set_colorkey(constants.COLOR_BLACK)
if scale:
(new_w, new_h) = scale
image = pygame.transform.scale(image, (new_w, new_h))
image_list.append(image)
return image_list
def get_animation(self, column, row, width = constants.CELL_WIDTH, height = constants.CELL_HEIGHT, num_sprites = 1 , scale = None):
### scale is a tuple
image_list = []
for i in range(num_sprites):
#create blank image
image = pygame.Surface([width, height]).convert()
#copy image from sheet onto blank
image.blit(self.sprite_sheet, (0, 0), (self.tiledict[column] * width + (width*i), row * height, width, height))
#set transparency to black
image.set_colorkey(constants.COLOR_BLACK)
if scale:
(new_w, new_h) = scale
image = pygame.transform.scale(image, (new_w, new_h))
image_list.append(image)
return image_list
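# usage sketch (assumption: hypothetical helper, never called by the game itself;
# it needs an initialised pygame display and the data/reptiles.png sheet used above)
def _spritesheet_usage_sketch():
    pygame.init()
    pygame.display.set_mode((320, 240))              # .convert() requires a display
    sheet = obj_Spritesheet("data/reptiles.png")
    # two 16x16 frames starting at column 'o', row 5, scaled up to a 32x32 cell
    return sheet.get_animation('o', 5, 16, 16, num_sprites=2, scale=(32, 32))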
#
#( ____ \( ___ )( )( ____ )( ___ )( ( /|( ____ \( ( /|
#| ( \/| ( ) || () () || ( )|| ( ) || \ ( || ( \/| \ ( |
#| | | | | || || || || (____)|| | | || \ | || (__ | \ | |
#| | | | | || |(_)| || _____)| | | || (\ \) || __) | (\ \) |
#| | | | | || | | || ( | | | || | \ || ( | | \ |
#| (____/\| (___) || ) ( || ) | (___) || ) \ || (____/\| ) \ |
#(_______/(_______)|/ \||/ (_______)|/ )_)(_______/|/ )_)
#
class com_Creature:
#creatures have health, can attack other objects, and can die
def __init__(self, name_instance, hp=10, death_function = None):
self.name_instance = name_instance
self.maxhp = hp
self.hp = hp
self.death_function = death_function
def move(self, dx, dy):
tile_is_wall = (GAME.current_map[self.owner.x + dx][self.owner.y + dy].block_path == True)
target = map_check_for_creatures(self.owner.x + dx, self.owner.y + dy, self.owner)
#
if target:
self.attack(target, 3)
if not tile_is_wall and target is None:
self.owner.x += dx
self.owner.y += dy
def attack(self, target, damage):
#print (self.name_instance + " attacks " + target.creature.name_instance + " for " + str(damage) + " damage!")
game_message(self.name_instance + " attacks " + target.creature.name_instance + " for " + str(damage) + " damage!", constants.COLOR_WHITE)
target.creature.take_damage(damage)
def take_damage(self, damage):
self.hp -= damage
#print (self.name_instance + "'s health is " + str(self.hp) + "/" + str(self.maxhp))
game_message(self.name_instance + "'s health is " + str(self.hp) + "/" + str(self.maxhp), constants.COLOR_RED)
if self.hp <= 0:
if self.death_function is not None:
self.death_function(self.owner)
class com_Container:
def __init__(self, volume = 10.0, inventory = None):
#avoid a shared mutable default argument: give each container its own list
self.inventory = inventory if inventory is not None else []
self.base_volume = volume
# get_names_inventory()
#get_volume_container()
#get_current_weight()
class com_Item:
def __init__(self, weight = 0.0, volume = 0.0):
self.weight = weight
self.volume = volume
## pick_up_Item()
def pick_up(self, actor):
if actor.container:
pass
## drop_Item()
## use_Item()
# ___ __
# / \ | |
# / ^ \ | |
# / /_\ \ | |
# / _____ \ | |
# /__/ \__\ |__|
#
class ai_Test:
#once per turn, execute
def take_turn(self):
self.owner.creature.move(libtcod.random_get_int(0,-1, 1,),libtcod.random_get_int(0,-1, 1,))
def death_monster(monster):
#on death, most monsters stop moving
#print (monster.creature.name_instance + " is dead!")
game_message(monster.creature.name_instance + " is dead!", constants.COLOR_GREY)
monster.creature = None
monster.ai = None
#_______ _______ _______
#( )( ___ )( ____ )
#| () () || ( ) || ( )|
#| || || || (___) || (____)|
#| |(_)| || ___ || _____)
#| | | || ( ) || (
#| ) ( || ) ( || )
#|/ \||/ \||/
#
def map_create():
new_map = [[struc_Tile(False) for y in range(0,constants.MAP_HEIGHT)]for x in range(0,constants.MAP_WIDTH)]
new_map[10][10].block_path = True
new_map[10][15].block_path = True
for x in range(constants.MAP_WIDTH):
new_map[x][0].block_path = True
new_map[x][constants.MAP_HEIGHT-1].block_path = True
for y in range(constants.MAP_HEIGHT):
new_map[0][y].block_path = True
new_map[constants.MAP_WIDTH-1][y].block_path = True
map_make_fov(new_map)
return new_map
def map_check_for_creatures(x, y, exclude_object = None):
target = None
if exclude_object:
#check for a creature at that location that isn't the excluded object
for object in GAME.current_objects:
if (object is not exclude_object and
object.x == x and
object.y == y and
object.creature):
target = object
if target:
return target
else:
#check to find creature at that location
for object in GAME.current_objects:
if (object.x == x and
object.y == y and
object.creature):
target = object
if target:
return target
def map_make_fov(incoming_map):
global FOV_MAP
FOV_MAP = libtcod.map_new(constants.MAP_WIDTH, constants.MAP_HEIGHT)
for y in range(constants.MAP_HEIGHT):
for x in range(constants.MAP_WIDTH):
libtcod.map_set_properties(FOV_MAP, x, y, not incoming_map[x][y].block_path, not incoming_map[x][y].block_path)
def map_calculate_fov():
global FOV_CALCULATE
if FOV_CALCULATE:
FOV_CALCULATE = False
libtcod.map_compute_fov(FOV_MAP, PLAYER.x, PLAYER.y, constants.TORCH_RADIUS, constants.FOV_LIGHT_WALLS, constants.FOV_ALGO)
# ______ _______ _______
#( __ \ ( ____ )( ___ )|\ /|
#| ( \ )| ( )|| ( ) || ) ( |
#| | ) || (____)|| (___) || | _ | |
#| | | || __)| ___ || |( )| |
#| | ) || (\ ( | ( ) || || || |
#| (__/ )| ) \ \__| ) ( || () () |
#(______/ |/ \__/|/ \|(_______)
#
def draw_game():
#clear the surface
SURFACE_MAIN.fill(constants.COLOR_DEFAULT_BG)
#draw the map
draw_map(GAME.current_map)
#draw the character
#SURFACE_MAIN.blit(constants.S_PLAYER,(200,200))
#ENEMY.draw()
#PLAYER.draw()
#Draw all objects
for obj in GAME.current_objects:
obj.draw()
draw_debug()
draw_messages()
#Player is drawn last so it sits on the top layer of the display - the player is always on top
#update the display
pygame.display.flip()
def draw_map(map_to_draw):
for x in range(0,constants.MAP_WIDTH):
for y in range(0,constants.MAP_HEIGHT):
is_visible = libtcod.map_is_in_fov(FOV_MAP, x, y)
if is_visible:
map_to_draw[x][y].explored = True
if map_to_draw[x][y].block_path == True:
#draw a wall
SURFACE_MAIN.blit(ASSETS.S_WALL, (x*constants.CELL_WIDTH, y*constants.CELL_HEIGHT))
else:
#draw floor
SURFACE_MAIN.blit(ASSETS.S_FLOOR, (x*constants.CELL_WIDTH, y*constants.CELL_HEIGHT))
elif map_to_draw[x][y].explored:
if map_to_draw[x][y].block_path == True:
#draw a wall
SURFACE_MAIN.blit(ASSETS.S_WALLEXPLORED, (x*constants.CELL_WIDTH, y*constants.CELL_HEIGHT))
else:
#draw floor
SURFACE_MAIN.blit(ASSETS.S_FLOOREXPLORED, (x*constants.CELL_WIDTH, y*constants.CELL_HEIGHT))
def draw_debug():
draw_text(SURFACE_MAIN, "fps: "+ str(int(CLOCK.get_fps())), (0,0), constants.COLOR_WHITE, constants.COLOR_BLACK)
def draw_messages():
if len(GAME.message_history) <= constants.NUM_MESSAGES:
to_draw = GAME.message_history
else:
to_draw = GAME.message_history[-constants.NUM_MESSAGES:]
text_height = helper_text_height(ASSETS.FONT_MESSAGE_TEXT)
start_y = (constants.MAP_HEIGHT * constants.CELL_HEIGHT - (constants.NUM_MESSAGES * text_height)) -5
i = 0
for message,color in to_draw:
draw_text(SURFACE_MAIN, message, (0, start_y + (i * text_height)), color, constants.COLOR_BLACK)
i += 1
def draw_text(display_surface, text_to_display, T_coords, text_color, back_color = None):
#T stands for tuple; this function takes in text and displays it on display_surface
text_surf, text_rect = helper_text_objects(text_to_display, text_color, back_color)
text_rect.topleft = T_coords
display_surface.blit(text_surf, text_rect)
# __ __ _______ __ .______ _______ .______
#| | | | | ____|| | | _ \ | ____|| _ \
#| |__| | | |__ | | | |_) | | |__ | |_) |
#| __ | | __| | | | ___/ | __| | /
#| | | | | |____ | `----.| | | |____ | |\ \----.
#|__| |__| |_______||_______|| _| |_______|| _| `._____|
#
def helper_text_objects(incoming_text, incoming_color, incoming_bg):
if incoming_bg:
Text_surface = ASSETS.FONT_DEBUG_MESSAGE.render(incoming_text, False, incoming_color, incoming_bg)
else:
Text_surface = ASSETS.FONT_DEBUG_MESSAGE.render(incoming_text, False, incoming_color)
return Text_surface, Text_surface.get_rect()
def helper_text_height(font):
font_object = font.render('a', False ,(0 ,0, 0))
font_rect = font_object.get_rect()
return font_rect.height
########################################################################
# _______ _______ _______ _______ _ _______ _______ _______
# ( ____ \( ___ )( )( ____ \ ( \ ( ___ )( ___ )( ____ )
# | ( \/| ( ) || () () || ( \/ | ( | ( ) || ( ) || ( )|
# | | | (___) || || || || (__ | | | | | || | | || (____)|
# | | ____ | ___ || |(_)| || __) | | | | | || | | || _____)
# | | \_ )| ( ) || | | || ( | | | | | || | | || (
# | (___) || ) ( || ) ( || (____/\ | (____/\| (___) || (___) || )
# (_______)|/ \||/ \|(_______/ (_______/(_______)(_______)|/
#
########################################################################
def game_main_loop():
#main game loop
game_quit = False
player_action = "no-action"
while not game_quit:
#player action definition
#handle player input
player_action = game_handle_keys()
map_calculate_fov()
if player_action == "QUIT":
game_quit = True
if player_action != "no-action":
for obj in GAME.current_objects:
if obj.ai:
obj.ai.take_turn()
#draw the game
draw_game()
CLOCK.tick(constants.GAME_FPS)
#quit the game
pygame.quit()
sys.exit()
# _______ _______ _______ _______ _________ _ __________________
#( ____ \( ___ )( )( ____ \ \__ __/( ( /|\__ __/\__ __/
#| ( \/| ( ) || () () || ( \/ ) ( | \ ( | ) ( ) (
#| | | (___) || || || || (__ | | | \ | | | | | |
#| | ____ | ___ || |(_)| || __) | | | (\ \) | | | | |
#| | \_ )| ( ) || | | || ( | | | | \ | | | | |
#| (___) || ) ( || ) ( || (____/\ ___) (___| ) \ |___) (___ | |
#(_______)|/ \||/ \|(_______/ \_______/|/ )_)\_______/ )_(
#
def game_initialize():
global SURFACE_MAIN, GAME, CLOCK, FOV_CALCULATE, PLAYER, ENEMY, ASSETS
#initializes the main window in pygame
pygame.init()
#SURFACE_MAIN = pygame.display.set_mode((constants.GAME_WIDTH,constants.GAME_HEIGHT))
SURFACE_MAIN = pygame.display.set_mode((constants.MAP_WIDTH*constants.CELL_WIDTH, constants.MAP_HEIGHT*constants.CELL_HEIGHT))
GAME = obj_Game()
CLOCK = pygame.time.Clock()
pygame.display.set_caption('Test Game!')
#GAME.current_map = map_create()
#GAME.message_history = []
#test messages
#game_message("test message", constants.COLOR_WHITE)
#game_message("test message2", constants.COLOR_RED)
#game_message("test message3", constants.COLOR_GREY)
#game_message("test message4", constants.COLOR_WHITE)
FOV_CALCULATE = True
ASSETS = struc_Assets()
creature_com1 = com_Creature("greg")
PLAYER = obj_Actor(1, 1, "python", ASSETS.A_PLAYER ,animation_speed = 1.0, creature = creature_com1)
creature_com2 = com_Creature("jackie", death_function = death_monster)
ai_com = ai_Test()
ENEMY = obj_Actor(15, 15, "crab", ASSETS.A_ENEMY, animation_speed = 1.0, creature = creature_com2, ai = ai_com)
#ai_com2 = ai_Test()
#creature_com3 = com_Creature("jackie2", death_function = death_monster )
#ENEMY2 = obj_Actor(5, 10, "crab", constants.S_ENEMY, creature = creature_com3, ai = ai_com2)
GAME.current_objects = [PLAYER, ENEMY]
def game_handle_keys():
global FOV_CALCULATE
#get player input
events_list = pygame.event.get()
#process input
for event in events_list:
if event.type == pygame.QUIT:
return "QUIT"
#game_quit = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
PLAYER.creature.move(0,-1)
FOV_CALCULATE = True
return "player_moved"
if event.key == pygame.K_DOWN:
PLAYER.creature.move(0,1)
FOV_CALCULATE = True
return "player_moved"
if event.key == pygame.K_LEFT:
PLAYER.creature.move(-1,0)
FOV_CALCULATE = True
return "player_moved"
if event.key == pygame.K_RIGHT:
PLAYER.creature.move(1,0)
FOV_CALCULATE = True
return "player_moved"
return "no-action"
def game_message(game_msg, msg_color):#T means tuple
GAME.message_history.append((game_msg, msg_color))
# _______ _______ _________ _ _ _______ _______ _______
#( )( ___ )\__ __/( ( /| ( \ ( ___ )( ___ )( ____ )
#| () () || ( ) | ) ( | \ ( | | ( | ( ) || ( ) || ( )|
#| || || || (___) | | | | \ | | | | | | | || | | || (____)|
#| |(_)| || ___ | | | | (\ \) | | | | | | || | | || _____)
#| | | || ( ) | | | | | \ | | | | | | || | | || (
#| ) ( || ) ( |___) (___| ) \ | | (____/\| (___) || (___) || )
#|/ \||/ \|\_______/|/ )_) (_______/(_______)(_______)|/
if __name__ == '__main__':
game_initialize()
game_main_loop()
|
# coding=utf8
#
# (c) Simon Marlow 2002
#
import io
import shutil
import os
import re
import traceback
import time
import datetime
import copy
import glob
import sys
from math import ceil, trunc
from pathlib import PurePath
import collections.abc
import subprocess
from testglobals import config, ghc_env, default_testopts, brokens, t
from testutil import strip_quotes, lndir, link_or_copy_file, passed, failBecause, str_fail, str_pass
from cpu_features import have_cpu_feature
import perf_notes as Perf
from perf_notes import MetricChange
extra_src_files = {'T4198': ['exitminus1.c']} # TODO: See #12223
global pool_sema
if config.use_threads:
import threading
pool_sema = threading.BoundedSemaphore(value=config.threads)
global wantToStop
wantToStop = False
def stopNow():
global wantToStop
wantToStop = True
def stopping():
return wantToStop
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).
global testopts_local
if config.use_threads:
testopts_local = threading.local()
else:
class TestOpts_Local:
pass
testopts_local = TestOpts_Local()
def getTestOpts():
return testopts_local.x
def setLocalTestOpts(opts):
global testopts_local
testopts_local.x=opts
def isCompilerStatsTest():
opts = getTestOpts()
return bool(opts.is_compiler_stats_test)
def isStatsTest():
opts = getTestOpts()
return bool(opts.stats_range_fields)
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
global thisdir_settings
thisdir_settings = [thisdir_settings, f]
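# For example, a hypothetical all.T file could begin with
#
#   setTestOpts(extra_hc_opts('-O'))
#   setTestOpts(when(compiler_debugged(), skip))
#
# so that every test declared later in that file inherits those options.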
# -----------------------------------------------------------------------------
# Canned setup functions for common cases. eg. for a test you might say
#
# test('test001', normal, compile, [''])
#
# to run it without any options, but change it to
#
# test('test001', expect_fail, compile, [''])
#
# to expect failure for this test.
#
# type TestOpt = (name :: String, opts :: Object) -> IO ()
def normal( name, opts ):
return;
def skip( name, opts ):
opts.skip = True
def expect_fail( name, opts ):
# The compiler, testdriver, OS or platform is missing a certain
# feature, and we don't plan to or can't fix it now or in the
# future.
opts.expect = 'fail';
def reqlib( lib ):
return lambda name, opts, l=lib: _reqlib (name, opts, l )
def stage1(name, opts):
# See Note [Why is there no stage1 setup function?]
framework_fail(name, 'stage1 setup function does not exist',
'add your test to testsuite/tests/stage1 instead')
# Note [Why is there no stage1 setup function?]
#
# Presumably a stage1 setup function would signal that the stage1
# compiler should be used to compile a test.
#
# Trouble is, the path to the compiler + the `ghc --info` settings for
# that compiler are currently passed in from the `make` part of the
# testsuite driver.
#
# Switching compilers in the Python part would be entirely too late, as
# all ghc_with_* settings would be wrong. See config/ghc for possible
# consequences (for example, config.run_ways would still be
# based on the default compiler, quite likely causing ./validate --slow
# to fail).
#
# It would be possible to let the Python part of the testsuite driver
# make the call to `ghc --info`, but doing so would require quite some
# work. Care has to be taken to not affect the run_command tests for
# example, as they also use the `ghc --info` settings:
# quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
#
# If you want a test to run using the stage1 compiler, add it to the
# testsuite/tests/stage1 directory. Validate runs the tests in that
# directory with `make stage=1`.
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib_cache = {}
def have_library(lib):
""" Test whether the given library is available """
if lib in have_lib_cache:
got_it = have_lib_cache[lib]
else:
cmd = strip_quotes(config.ghc_pkg)
p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=ghc_env)
# read from stdout and stderr to avoid blocking due to
# buffers filling
p.communicate()
r = p.wait()
got_it = r == 0
have_lib_cache[lib] = got_it
return got_it
def _reqlib( name, opts, lib ):
if not have_library(lib):
opts.expect = 'missing-lib'
def req_haddock( name, opts ):
if not config.haddock:
opts.expect = 'missing-lib'
def req_profiling( name, opts ):
'''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
if not config.have_profiling:
opts.expect = 'fail'
def req_shared_libs( name, opts ):
if not config.have_shared_libs:
opts.expect = 'fail'
def req_interp( name, opts ):
if not config.have_interp:
opts.expect = 'fail'
def req_smp( name, opts ):
if not config.have_smp:
opts.expect = 'fail'
def ignore_stdout(name, opts):
opts.ignore_stdout = True
def ignore_stderr(name, opts):
opts.ignore_stderr = True
def combined_output( name, opts ):
opts.combined_output = True
# -----
def expect_fail_for( ways ):
return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
def _expect_fail_for( name, opts, ways ):
opts.expect_fail_for = ways
def expect_broken( bug ):
# This test is expected not to work due to the indicated trac bug
# number.
return lambda name, opts, b=bug: _expect_broken (name, opts, b )
def _expect_broken( name, opts, bug ):
record_broken(name, opts, bug)
opts.expect = 'fail';
def expect_broken_for( bug, ways ):
return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
def _expect_broken_for( name, opts, bug, ways ):
record_broken(name, opts, bug)
opts.expect_fail_for = ways
def record_broken(name, opts, bug):
me = (bug, opts.testdir, name)
if not me in brokens:
brokens.append(me)
def _expect_pass(way):
# Helper function. Not intended for use in .T files.
opts = getTestOpts()
return opts.expect == 'pass' and way not in opts.expect_fail_for
# -----
def fragile( bug ):
"""
Indicates that the test should be skipped due to fragility documented in
the given ticket.
"""
def helper( name, opts, bug=bug ):
record_broken(name, opts, bug)
opts.skip = True
return helper
def fragile_for( bug, ways ):
"""
Indicates that the test should be skipped due to fragility in the given
test ways as documented in the given ticket.
"""
def helper( name, opts, bug=bug, ways=ways ):
record_broken(name, opts, bug)
opts.omit_ways += ways
return helper
# -----
def omit_ways( ways ):
return lambda name, opts, w=ways: _omit_ways( name, opts, w )
def _omit_ways( name, opts, ways ):
opts.omit_ways += ways
# -----
def only_ways( ways ):
return lambda name, opts, w=ways: _only_ways( name, opts, w )
def _only_ways( name, opts, ways ):
opts.only_ways = ways
# -----
def extra_ways( ways ):
return lambda name, opts, w=ways: _extra_ways( name, opts, w )
def _extra_ways( name, opts, ways ):
opts.extra_ways = ways
# -----
def set_stdin( file ):
return lambda name, opts, f=file: _set_stdin(name, opts, f);
def _set_stdin( name, opts, f ):
opts.stdin = f
# -----
def exit_code( val ):
return lambda name, opts, v=val: _exit_code(name, opts, v);
def _exit_code( name, opts, v ):
opts.exit_code = v
def signal_exit_code( val ):
if opsys('solaris2'):
return exit_code( val )
else:
# When application running on Linux receives fatal error
# signal, then its exit code is encoded as 128 + signal
# value. See http://www.tldp.org/LDP/abs/html/exitcodes.html
# I assume that Mac OS X behaves in the same way; at least the
# Mac OS X builder's behavior suggests this.
return exit_code( val+128 )
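# For example, signal_exit_code(6) expects a plain exit code of 6 on Solaris,
# but 134 (= 128 + SIGABRT) everywhere else.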
# -----
def compile_timeout_multiplier( val ):
return lambda name, opts, v=val: _compile_timeout_multiplier(name, opts, v)
def _compile_timeout_multiplier( name, opts, v ):
opts.compile_timeout_multiplier = v
def run_timeout_multiplier( val ):
return lambda name, opts, v=val: _run_timeout_multiplier(name, opts, v)
def _run_timeout_multiplier( name, opts, v ):
opts.run_timeout_multiplier = v
# -----
def extra_run_opts( val ):
return lambda name, opts, v=val: _extra_run_opts(name, opts, v);
def _extra_run_opts( name, opts, v ):
opts.extra_run_opts = v
# -----
def extra_hc_opts( val ):
return lambda name, opts, v=val: _extra_hc_opts(name, opts, v);
def _extra_hc_opts( name, opts, v ):
opts.extra_hc_opts = v
# -----
def extra_clean( files ):
# TODO. Remove all calls to extra_clean.
return lambda _name, _opts: None
def extra_files(files):
return lambda name, opts: _extra_files(name, opts, files)
def _extra_files(name, opts, files):
opts.extra_files.extend(files)
# -----
# Defaults to "test everything, and only break on extreme cases"
#
# The inputs to this function are slightly interesting:
# metric can be either:
# - 'all', in which case all 3 possible metrics are collected and compared.
# - The specific metric one wants to use in the test.
# - A list of the metrics one wants to use in the test.
#
# Deviation defaults to 20% because the goal is correctness over performance.
# The testsuite should avoid breaking when there is not an actual error.
# Instead, the testsuite should notify of regressions in a non-breaking manner.
#
# collect_compiler_stats is used when the metrics collected are about the compiler.
# collect_stats is used in the majority case when the metrics to be collected
# are about the performance of the runtime code generated by the compiler.
def collect_compiler_stats(metric='all',deviation=20):
return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m,d, True)
def collect_stats(metric='all', deviation=20):
return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)
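# For example, a hypothetical performance test might be declared in its all.T as
#
#   test('T12345perf', [collect_stats('bytes allocated', 5)], compile_and_run, [''])
#
# which records the runtime 'bytes allocated' metric and only fails if it drifts
# by more than 5% from the baseline stored in the git notes.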
def testing_metrics():
return ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']
# This is an internal function that is used only in the implementation.
# 'is_compiler_stats_test' is somewhat of an unfortunate name.
# If the boolean is set to true, it indicates that this test is one that
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test=False):
if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
failBecause('This test has an invalid name.')
tests = Perf.get_perf_stats('HEAD^')
# Might have multiple metrics being measured for a single test.
test = [t for t in tests if t.test == name]
if tests == [] or test == []:
# There are no prior metrics for this test.
if isinstance(metric, str):
if metric == 'all':
for field in testing_metrics():
opts.stats_range_fields[field] = None
else:
opts.stats_range_fields[metric] = None
if isinstance(metric, list):
for field in metric:
opts.stats_range_fields[field] = None
return
if is_compiler_stats_test:
opts.is_compiler_stats_test = True
# Compiler performance numbers change when debugging is on, making the results
# useless and confusing. Therefore, skip if debugging is on.
if config.compiler_debugged and is_compiler_stats_test:
opts.skip = 1
# get the average value of the given metric from test
def get_avg_val(metric_2):
metric_2_metrics = [float(t.value) for t in test if t.metric == metric_2]
return sum(metric_2_metrics) / len(metric_2_metrics)
# 'all' is a shorthand to test for bytes allocated, peak megabytes allocated, and max bytes used.
if isinstance(metric, str):
if metric == 'all':
for field in testing_metrics():
opts.stats_range_fields[field] = (get_avg_val(field), deviation)
return
else:
opts.stats_range_fields[metric] = (get_avg_val(metric), deviation)
return
if isinstance(metric, list):
for field in metric:
opts.stats_range_fields[field] = (get_avg_val(field), deviation)
# -----
def when(b, f):
# When list_brokens is on, we want to see all expect_broken calls,
# so we always do f
if b or config.list_broken:
return f
else:
return normal
def unless(b, f):
return when(not b, f)
def doing_ghci():
return 'ghci' in config.run_ways
def ghc_dynamic():
return config.ghc_dynamic
def fast():
return config.speed == 2
def platform( plat ):
return config.platform == plat
def opsys( os ):
return config.os == os
def arch( arch ):
return config.arch == arch
def wordsize( ws ):
return config.wordsize == str(ws)
def msys( ):
return config.msys
def cygwin( ):
return config.cygwin
def have_vanilla( ):
return config.have_vanilla
def have_ncg( ):
return config.have_ncg
def have_dynamic( ):
return config.have_dynamic
def have_profiling( ):
return config.have_profiling
def in_tree_compiler( ):
return config.in_tree_compiler
def unregisterised( ):
return config.unregisterised
def compiler_profiled( ):
return config.compiler_profiled
def compiler_debugged( ):
return config.compiler_debugged
def have_gdb( ):
return config.have_gdb
def have_readelf( ):
return config.have_readelf
# ---
def high_memory_usage(name, opts):
opts.alone = True
# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
opts.alone = True
# ---
def literate( name, opts ):
opts.literate = True
def c_src( name, opts ):
opts.c_src = True
def objc_src( name, opts ):
opts.objc_src = True
def objcpp_src( name, opts ):
opts.objcpp_src = True
def cmm_src( name, opts ):
opts.cmm_src = True
def outputdir( odir ):
return lambda name, opts, d=odir: _outputdir(name, opts, d)
def _outputdir( name, opts, odir ):
opts.outputdir = odir;
# ----
def pre_cmd( cmd ):
return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)
def _pre_cmd( name, opts, cmd ):
opts.pre_cmd = cmd
# ----
def cmd_prefix( prefix ):
return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
def _cmd_prefix( name, opts, prefix ):
opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
# ----
def cmd_wrapper( fun ):
return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
def _cmd_wrapper( name, opts, fun ):
opts.cmd_wrapper = fun
# ----
def compile_cmd_prefix( prefix ):
return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
def _compile_cmd_prefix( name, opts, prefix ):
opts.compile_cmd_prefix = prefix
# ----
def check_stdout( f ):
return lambda name, opts, f=f: _check_stdout(name, opts, f)
def _check_stdout( name, opts, f ):
opts.check_stdout = f
def no_check_hp(name, opts):
opts.check_hp = False
# ----
def filter_stdout_lines( regex ):
""" Filter lines of stdout with the given regular expression """
def f( name, opts ):
_normalise_fun(name, opts, lambda s: '\n'.join(re.findall(regex, s)))
return f
def normalise_slashes( name, opts ):
_normalise_fun(name, opts, normalise_slashes_)
def normalise_exe( name, opts ):
_normalise_fun(name, opts, normalise_exe_)
def normalise_fun( *fs ):
return lambda name, opts: _normalise_fun(name, opts, fs)
def _normalise_fun( name, opts, *fs ):
opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)
def normalise_errmsg_fun( *fs ):
return lambda name, opts: _normalise_errmsg_fun(name, opts, fs)
def _normalise_errmsg_fun( name, opts, *fs ):
opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)
def check_errmsg(needle):
def norm(str):
if needle in str:
return "%s contained in -ddump-simpl\n" % needle
else:
return "%s not contained in -ddump-simpl\n" % needle
return normalise_errmsg_fun(norm)
def grep_errmsg(needle):
def norm(str):
return "".join(filter(lambda l: re.search(needle, l), str.splitlines(True)))
return normalise_errmsg_fun(norm)
def normalise_whitespace_fun(f):
return lambda name, opts: _normalise_whitespace_fun(name, opts, f)
def _normalise_whitespace_fun(name, opts, f):
opts.whitespace_normaliser = f
def normalise_version_( *pkgs ):
def normalise_version__( str ):
return re.sub('(' + '|'.join(map(re.escape,pkgs)) + ')-[0-9.]+',
'\\1-<VERSION>', str)
return normalise_version__
def normalise_version( *pkgs ):
def normalise_version__( name, opts ):
_normalise_fun(name, opts, normalise_version_(*pkgs))
_normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
return normalise_version__
def normalise_drive_letter(name, opts):
# Windows only. Change D:\\ to C:\\.
_normalise_fun(name, opts, lambda str: re.sub(r'[A-Z]:\\', r'C:\\', str))
def keep_prof_callstacks(name, opts):
"""Keep profiling callstacks.
Use together with `only_ways(prof_ways)`.
"""
opts.keep_prof_callstacks = True
def join_normalisers(*a):
"""
Compose functions, flattening sequences.
join_normalisers(f1,[f2,f3],f4)
is the same as
lambda x: f1(f2(f3(f4(x))))
"""
def flatten(l):
"""
Taken from http://stackoverflow.com/a/2158532/946226
"""
for el in l:
if (isinstance(el, collections.abc.Iterable)
and not isinstance(el, (bytes, str))):
for sub in flatten(el):
yield sub
else:
yield el
a = flatten(a)
fn = lambda x:x # identity function
for f in a:
assert callable(f)
fn = lambda x,f=f,fn=fn: fn(f(x))
return fn
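# For example, join_normalisers(str.strip, [str.lower]) yields a function that
# lowercases its argument and then strips it:
#
#   join_normalisers(str.strip, [str.lower])('  MiXeD  ')  ==  'mixed'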
# ----
# Function for composing two opt-fns together
def executeSetups(fs, name, opts):
if type(fs) is list:
# If we have a list of setups, then execute each one
for f in fs:
executeSetups(f, name, opts)
else:
# fs is a single function, so just apply it
fs(name, opts)
# -----------------------------------------------------------------------------
# The current directory of tests
def newTestDir(tempdir, dir):
global thisdir_settings
# reset the options for this test directory
def settings(name, opts, tempdir=tempdir, dir=dir):
return _newTestDir(name, opts, tempdir, dir)
thisdir_settings = settings
# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'
def _newTestDir(name, opts, tempdir, dir):
testdir = os.path.join('', *(p for p in PurePath(dir).parts if p != '..'))
opts.srcdir = os.path.join(os.getcwd(), dir)
opts.testdir = os.path.join(tempdir, testdir, name + testdir_suffix)
opts.compiler_always_flags = config.compiler_always_flags
# -----------------------------------------------------------------------------
# Actually doing tests
parallelTests = []
aloneTests = []
allTestNames = set([])
def runTest(watcher, opts, name, func, args):
if config.use_threads:
pool_sema.acquire()
t = threading.Thread(target=test_common_thread,
name=name,
args=(watcher, name, opts, func, args))
t.daemon = False
t.start()
else:
test_common_work(watcher, name, opts, func, args)
# name :: String
# setup :: [TestOpt] -> IO ()
def test(name, setup, func, args):
global aloneTests
global parallelTests
global allTestNames
global thisdir_settings
if name in allTestNames:
framework_fail(name, 'duplicate', 'There are multiple tests with this name')
if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
framework_fail(name, 'bad_name', 'This test has an invalid name')
if config.run_only_some_tests:
if name not in config.only:
return
else:
# Note [Mutating config.only]
# config.only is initially the set of tests requested by
# the user (via 'make TEST='). We then remove all tests that
# we've already seen (in .T files), so that we can later
# report on any tests we couldn't find and error out.
config.only.remove(name)
# Make a deep copy of the default_testopts, as we need our own copy
# of any dictionaries etc inside it. Otherwise, if one test modifies
# them, all tests will see the modified version!
myTestOpts = copy.deepcopy(default_testopts)
executeSetups([thisdir_settings, setup], name, myTestOpts)
thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
if myTestOpts.alone:
aloneTests.append(thisTest)
else:
parallelTests.append(thisTest)
allTestNames.add(name)
if config.use_threads:
def test_common_thread(watcher, name, opts, func, args):
try:
test_common_work(watcher, name, opts, func, args)
finally:
pool_sema.release()
def get_package_cache_timestamp():
if config.package_conf_cache_file == '':
return 0.0
else:
try:
return os.stat(config.package_conf_cache_file).st_mtime
except:
return 0.0
do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112
def test_common_work(watcher, name, opts, func, args):
try:
t.total_tests += 1
setLocalTestOpts(opts)
package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
# All the ways we might run this test
if func == compile or func == multimod_compile:
all_ways = config.compile_ways
elif func == compile_and_run or func == multimod_compile_and_run:
all_ways = config.run_ways
elif func == ghci_script:
if 'ghci' in config.run_ways:
all_ways = ['ghci']
else:
all_ways = []
else:
all_ways = ['normal']
# A test itself can request extra ways by setting opts.extra_ways
all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
t.total_test_cases += len(all_ways)
ok_way = lambda way: \
not getTestOpts().skip \
and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
and (config.cmdline_ways == [] or way in config.cmdline_ways) \
and (not (config.skip_perf_tests and isStatsTest())) \
and (not (config.only_perf_tests and not isStatsTest())) \
and way not in getTestOpts().omit_ways
# The ways we will actually run (i.e. those we were not asked to skip)
do_ways = list(filter (ok_way,all_ways))
# Only run all ways in slow mode.
# See Note [validate and testsuite speed] in toplevel Makefile.
if config.accept:
# Only ever run one way
do_ways = do_ways[:1]
elif config.speed > 0:
# However, if we EXPLICITLY asked for a way (with extra_ways)
# please test it!
explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
do_ways = other_ways[:1] + explicit_ways
# Find all files in the source directory that this test
# depends on. Do this only once for all ways.
# Generously add all filenames that start with the name of
# the test to this set, as a convenience to test authors.
# They will have to use the `extra_files` setup function to
# specify all other files that their test depends on (but
# this seems to be necessary for only about 10% of all
# tests).
files = set(f for f in os.listdir(opts.srcdir)
if f.startswith(name) and not f == name and
not f.endswith(testdir_suffix) and
not os.path.splitext(f)[1] in do_not_copy)
for filename in (opts.extra_files + extra_src_files.get(name, [])):
if filename.startswith('/'):
framework_fail(name, 'whole-test',
'no absolute paths in extra_files please: ' + filename)
elif '*' in filename:
# Don't use wildcards in extra_files too much, as
# globbing is slow.
files.update((os.path.relpath(f, opts.srcdir)
for f in glob.iglob(in_srcdir(filename))))
elif filename:
files.add(filename)
else:
framework_fail(name, 'whole-test', 'extra_file is empty string')
# Run the required tests...
for way in do_ways:
if stopping():
break
try:
do_test(name, way, func, args, files)
except KeyboardInterrupt:
stopNow()
except Exception as e:
framework_fail(name, way, str(e))
traceback.print_exc()
t.n_tests_skipped += len(set(all_ways) - set(do_ways))
if config.cleanup and do_ways:
try:
cleanup()
except Exception as e:
framework_fail(name, 'runTest', 'Unhandled exception during cleanup: ' + str(e))
package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
except Exception as e:
framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
finally:
watcher.notify()
def do_test(name, way, func, args, files):
opts = getTestOpts()
full_name = name + '(' + way + ')'
if_verbose(2, "=====> {0} {1} of {2} {3}".format(
full_name, t.total_tests, len(allTestNames),
[len(t.unexpected_passes),
len(t.unexpected_failures),
len(t.framework_failures)]))
# Clean up prior to the test, so that we can't spuriously conclude
# that it passed on the basis of old run outputs.
cleanup()
os.makedirs(opts.testdir)
# Link all source files for this test into a new directory in
# /tmp, and run the test in that directory. This makes it
# possible to run tests in parallel, without modification, that
# would otherwise (accidentally) write to the same output file.
# It also makes it easier to keep the testsuite clean.
for extra_file in files:
src = in_srcdir(extra_file)
dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
if os.path.isfile(src):
link_or_copy_file(src, dst)
elif os.path.isdir(src):
if os.path.exists(dst):
shutil.rmtree(dst)
os.mkdir(dst)
lndir(src, dst)
else:
if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
# When using a ghc built without haddock support, .t
# files are rightfully missing. Don't
# framework_fail. Test will be skipped later.
pass
else:
framework_fail(name, way,
'extra_file does not exist: ' + extra_file)
if func.__name__ == 'run_command' or opts.pre_cmd:
# When running 'MAKE' make sure 'TOP' still points to the
# root of the testsuite.
src_makefile = in_srcdir('Makefile')
dst_makefile = in_testdir('Makefile')
if os.path.exists(src_makefile):
with io.open(src_makefile, 'r', encoding='utf8') as src:
makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
with io.open(dst_makefile, 'w', encoding='utf8') as dst:
dst.write(makefile)
if opts.pre_cmd:
exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, override_options(opts.pre_cmd)),
stderr = subprocess.STDOUT,
print_output = config.verbose >= 3)
# If user used expect_broken then don't record failures of pre_cmd
if exit_code != 0 and opts.expect not in ['fail']:
framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
if_verbose(1, '** pre_cmd was "{0}".'.format(override_options(opts.pre_cmd)))
result = func(*[name,way] + args)
if opts.expect not in ['pass', 'fail', 'missing-lib']:
framework_fail(name, way, 'bad expected ' + opts.expect)
try:
passFail = result['passFail']
except (KeyError, TypeError):
passFail = 'No passFail found'
directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
if passFail == 'pass':
if _expect_pass(way):
t.expected_passes.append((directory, name, way))
t.n_expected_passes += 1
else:
if_verbose(1, '*** unexpected pass for %s' % full_name)
t.unexpected_passes.append((directory, name, 'unexpected', way))
elif passFail == 'fail':
if _expect_pass(way):
reason = result['reason']
tag = result.get('tag')
if tag == 'stat':
if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
t.unexpected_stat_failures.append((directory, name, reason, way))
else:
if_verbose(1, '*** unexpected failure for %s' % full_name)
t.unexpected_failures.append((directory, name, reason, way))
else:
if opts.expect == 'missing-lib':
t.missing_libs.append((directory, name, 'missing-lib', way))
else:
t.n_expected_failures += 1
else:
framework_fail(name, way, 'bad result ' + passFail)
# Make is often invoked with -s, which means if it fails, we get
# no feedback at all. This is annoying. So let's remove the option
# if found and instead have the testsuite decide on what to do
# with the output.
def override_options(pre_cmd):
if config.verbose >= 5 and bool(re.match('\$make', pre_cmd, re.I)):
return pre_cmd.replace('-s' , '') \
.replace('--silent', '') \
.replace('--quiet' , '')
return pre_cmd
def framework_fail(name, way, reason):
opts = getTestOpts()
directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
full_name = name + '(' + way + ')'
if_verbose(1, '*** framework failure for %s %s ' % (full_name, reason))
t.framework_failures.append((directory, name, way, reason))
def framework_warn(name, way, reason):
opts = getTestOpts()
directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
full_name = name + '(' + way + ')'
if_verbose(1, '*** framework warning for %s %s ' % (full_name, reason))
t.framework_warnings.append((directory, name, way, reason))
def badResult(result):
try:
if result['passFail'] == 'pass':
return False
return True
except (KeyError, TypeError):
return True
# -----------------------------------------------------------------------------
# Generic command tests
# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using the ignore_stdout and ignore_stderr setup
# functions.
def run_command( name, way, cmd ):
return simple_run( name, '', override_options(cmd), '' )
# -----------------------------------------------------------------------------
# GHCi tests
def ghci_script( name, way, script):
flags = ' '.join(get_compiler_flags())
way_flags = ' '.join(config.way_flags[way])
# We pass HC and HC_OPTS as environment variables, so that the
# script can invoke the correct compiler by using ':! $HC $HC_OPTS'
cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {way_flags} {flags}'
).format(flags=flags, way_flags=way_flags)
# NB: put way_flags before flags so that flags in all.T can override others
getTestOpts().stdin = script
return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
# -----------------------------------------------------------------------------
# Compile-only tests
def compile( name, way, extra_hc_opts ):
return do_compile( name, way, 0, '', [], extra_hc_opts )
def compile_fail( name, way, extra_hc_opts ):
return do_compile( name, way, 1, '', [], extra_hc_opts )
def backpack_typecheck( name, way, extra_hc_opts ):
return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
def backpack_typecheck_fail( name, way, extra_hc_opts ):
return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=True )
def backpack_compile( name, way, extra_hc_opts ):
return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=True )
def backpack_compile_fail( name, way, extra_hc_opts ):
return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=True )
def backpack_run( name, way, extra_hc_opts ):
return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=True )
def multimod_compile( name, way, top_mod, extra_hc_opts ):
return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
# print 'Compile only, extra args = ', extra_hc_opts
result = extras_build( way, extra_mods, extra_hc_opts )
if badResult(result):
return result
extra_hc_opts = result['hc_opts']
result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)
if badResult(result):
return result
# the actual stderr should always match the expected, regardless
# of whether we expected the compilation to fail or not (successful
# compilations may generate warnings).
expected_stderr_file = find_expected_file(name, 'stderr')
actual_stderr_file = add_suffix(name, 'comp.stderr')
if not compare_outputs(way, 'stderr',
join_normalisers(getTestOpts().extra_errmsg_normaliser,
normalise_errmsg),
expected_stderr_file, actual_stderr_file,
whitespace_normaliser=getattr(getTestOpts(),
"whitespace_normaliser",
normalise_whitespace)):
return failBecause('stderr mismatch')
# no problems found, this test passed
return passed()
def compile_cmp_asm( name, way, extra_hc_opts ):
print('Compile only, extra args = ', extra_hc_opts)
result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)
if badResult(result):
return result
    # The actual assembly output should always match the expected assembly,
    # regardless of whether we expected the compilation to fail or not.
expected_asm_file = find_expected_file(name, 'asm')
actual_asm_file = add_suffix(name, 's')
if not compare_outputs(way, 'asm',
join_normalisers(normalise_errmsg, normalise_asm),
expected_asm_file, actual_asm_file):
return failBecause('asm mismatch')
# no problems found, this test passed
return passed()
# -----------------------------------------------------------------------------
# Compile-and-run tests
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
# print 'Compile and run, extra args = ', extra_hc_opts
result = extras_build( way, extra_mods, extra_hc_opts )
if badResult(result):
return result
extra_hc_opts = result['hc_opts']
if way.startswith('ghci'): # interpreted...
return interpreter_run(name, way, extra_hc_opts, top_mod)
else: # compiled...
result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
if badResult(result):
return result
cmd = './' + name;
# we don't check the compiler's stderr for a compile-and-run test
return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
def compile_and_run( name, way, extra_hc_opts ):
return compile_and_run__( name, way, '', [], extra_hc_opts)
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
def stats( name, way, stats_file ):
opts = getTestOpts()
return check_stats(name, way, stats_file, opts.stats_range_fields)
def metric_dict(name, way, metric, value):
return Perf.PerfStat(
test_env = config.test_env,
test = name,
way = way,
metric = metric,
value = value)
# -----------------------------------------------------------------------------
# Check test stats. This prints the results for the user.
# name: name of the test.
# way: the way.
# stats_file: the path of the stats_file containing the stats for the test.
# range_fields: a dict mapping metric names to (expected value, tolerance) pairs,
#               or to None for metrics that have no recorded baseline yet.
# Returns a pass/fail object. Passes if the stats are within the expected value ranges.
def check_stats(name, way, stats_file, range_fields):
result = passed()
if range_fields:
try:
f = open(in_testdir(stats_file))
except IOError as e:
return failBecause(str(e))
stats_file_contents = f.read()
f.close()
for (metric, range_val_dev) in range_fields.items():
field_match = re.search('\("' + metric + '", "([0-9]+)"\)', stats_file_contents)
if field_match == None:
print('Failed to find metric: ', metric)
metric_result = failBecause('no such stats metric')
else:
actual_val = int(field_match.group(1))
# Store the metric so it can later be stored in a git note.
perf_stat = metric_dict(name, way, metric, actual_val)
change = None
# If this is the first time running the benchmark, then pass.
if range_val_dev == None:
metric_result = passed()
change = MetricChange.NewMetric
else:
(expected_val, tolerance_dev) = range_val_dev
(change, metric_result) = Perf.check_stats_change(
perf_stat,
expected_val,
tolerance_dev,
config.allowed_perf_changes,
config.verbose >= 4)
t.metrics.append((change, perf_stat))
# If any metric fails then the test fails.
# Note, the remaining metrics are still run so that
# a complete list of changes can be presented to the user.
if metric_result['passFail'] == 'fail':
result = metric_result
return result
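# For reference, the machine-readable stats file written by
# '+RTS -t<file> --machine-readable -RTS' consists of ("name", "value") pairs,
# e.g. (values here are made up):
#
#   [("bytes allocated", "4660048")
#   ,("max_bytes_used", "172056")
#   ...
#
# which is what the regular expression in check_stats matches against.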
# -----------------------------------------------------------------------------
# Build a single-module program
def extras_build( way, extra_mods, extra_hc_opts ):
for mod, opts in extra_mods:
result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
if not (mod.endswith('.hs') or mod.endswith('.lhs')):
extra_hc_opts += ' ' + replace_suffix(mod, 'o')
if badResult(result):
return result
return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
opts = getTestOpts()
# Redirect stdout and stderr to the same file
stdout = in_testdir(name, 'comp.stderr')
stderr = subprocess.STDOUT
if top_mod != '':
srcname = top_mod
elif addsuf:
if backpack:
srcname = add_suffix(name, 'bkp')
else:
srcname = add_hs_lhs_suffix(name)
else:
srcname = name
if top_mod != '':
to_do = '--make '
if link:
to_do = to_do + '-o ' + name
elif backpack:
if link:
to_do = '-o ' + name + ' '
else:
to_do = ''
to_do = to_do + '--backpack '
elif link:
to_do = '-o ' + name
else:
to_do = '-c' # just compile
stats_file = name + '.comp.stats'
if isCompilerStatsTest():
extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
if backpack:
extra_hc_opts += ' -outputdir ' + name + '.out'
# Required by GHC 7.3+, harmless for earlier versions:
if (getTestOpts().c_src or
getTestOpts().objc_src or
getTestOpts().objcpp_src or
getTestOpts().cmm_src):
extra_hc_opts += ' -no-hs-main '
if getTestOpts().compile_cmd_prefix == '':
cmd_prefix = ''
else:
cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
flags = ' '.join(get_compiler_flags() + config.way_flags[way])
cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
'{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
).format(**locals())
exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)
if exit_code != 0 and not should_fail:
if config.verbose >= 1 and _expect_pass(way):
print('Compile failed (exit code {0}) errors were:'.format(exit_code))
actual_stderr_path = in_testdir(name, 'comp.stderr')
dump_file(actual_stderr_path)
# ToDo: if the sub-shell was killed by ^C, then exit
if isCompilerStatsTest():
statsResult = check_stats(name, way, stats_file, opts.stats_range_fields)
if badResult(statsResult):
return statsResult
if should_fail:
if exit_code == 0:
return failBecause('exit code 0')
else:
if exit_code != 0:
return failBecause('exit code non-0')
return passed()
# -----------------------------------------------------------------------------
# Run a program and check its output
#
# If testname.stdin exists, route input from that, else
# from /dev/null. Route output to testname.run.stdout and
# testname.run.stderr. Returns a pass/fail result object.
def simple_run(name, way, prog, extra_run_opts):
opts = getTestOpts()
# figure out what to use for stdin
if opts.stdin:
stdin = in_testdir(opts.stdin)
elif os.path.exists(in_testdir(name, 'stdin')):
stdin = in_testdir(name, 'stdin')
else:
stdin = None
stdout = in_testdir(name, 'run.stdout')
if opts.combined_output:
stderr = subprocess.STDOUT
else:
stderr = in_testdir(name, 'run.stderr')
my_rts_flags = rts_flags(way)
stats_file = name + '.stats'
if isStatsTest() and not isCompilerStatsTest():
stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
else:
stats_args = ''
# Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts
if opts.cmd_wrapper != None:
cmd = opts.cmd_wrapper(cmd)
cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
# run the command
exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)
# check the exit code
if exit_code != opts.exit_code:
if config.verbose >= 1 and _expect_pass(way):
print('Wrong exit code for ' + name + '(' + way + ')' + '(expected', opts.exit_code, ', actual', exit_code, ')')
dump_stdout(name)
dump_stderr(name)
return failBecause('bad exit code')
if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
return failBecause('bad stderr')
if not (opts.ignore_stdout or stdout_ok(name, way)):
return failBecause('bad stdout')
check_hp = '-h' in my_rts_flags and opts.check_hp
check_prof = '-p' in my_rts_flags
# exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
return failBecause('bad heap profile')
if check_prof and not check_prof_ok(name, way):
return failBecause('bad profile')
return check_stats(name, way, stats_file, opts.stats_range_fields)
def rts_flags(way):
args = config.way_rts_flags.get(way, [])
return '+RTS {0} -RTS'.format(' '.join(args)) if args else ''
# -----------------------------------------------------------------------------
# Run a program in the interpreter and check its output
def interpreter_run(name, way, extra_hc_opts, top_mod):
opts = getTestOpts()
stdout = in_testdir(name, 'interp.stdout')
stderr = in_testdir(name, 'interp.stderr')
script = in_testdir(name, 'genscript')
if opts.combined_output:
framework_fail(name, 'unsupported',
'WAY=ghci and combined_output together is not supported')
if (top_mod == ''):
srcname = add_hs_lhs_suffix(name)
else:
srcname = top_mod
delimiter = '===== program output begins here\n'
with io.open(script, 'w', encoding='utf8') as f:
# set the prog name and command-line args to match the compiled
# environment.
f.write(':set prog ' + name + '\n')
f.write(':set args ' + opts.extra_run_opts + '\n')
# Add marker lines to the stdout and stderr output files, so we
# can separate GHCi's output from the program's.
f.write(':! echo ' + delimiter)
f.write(':! echo 1>&2 ' + delimiter)
# Set stdout to be line-buffered to match the compiled environment.
f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
# wrapping in GHC.TopHandler.runIO ensures we get the same output
# in the event of an exception as for the compiled program.
f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
if os.path.exists(stdin):
os.system('cat "{0}" >> "{1}"'.format(stdin, script))
flags = ' '.join(get_compiler_flags() + config.way_flags[way])
cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
).format(**locals())
if getTestOpts().cmd_wrapper != None:
cmd = opts.cmd_wrapper(cmd);
cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)
# split the stdout into compilation/program output
split_file(stdout, delimiter,
in_testdir(name, 'comp.stdout'),
in_testdir(name, 'run.stdout'))
split_file(stderr, delimiter,
in_testdir(name, 'comp.stderr'),
in_testdir(name, 'run.stderr'))
# check the exit code
if exit_code != getTestOpts().exit_code:
print('Wrong exit code for ' + name + '(' + way + ') (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
dump_stdout(name)
dump_stderr(name)
return failBecause('bad exit code')
# ToDo: if the sub-shell was killed by ^C, then exit
if not (opts.ignore_stderr or stderr_ok(name, way)):
return failBecause('bad stderr')
elif not (opts.ignore_stdout or stdout_ok(name, way)):
return failBecause('bad stdout')
else:
return passed()
def split_file(in_fn, delimiter, out1_fn, out2_fn):
# See Note [Universal newlines].
with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
line = infile.readline()
while re.sub('^\s*','',line) != delimiter and line != '':
out1.write(line)
line = infile.readline()
line = infile.readline()
while line != '':
out2.write(line)
line = infile.readline()
# -----------------------------------------------------------------------------
# Utils
def get_compiler_flags():
opts = getTestOpts()
flags = copy.copy(opts.compiler_always_flags)
flags.append(opts.extra_hc_opts)
if opts.outputdir != None:
flags.extend(["-outputdir", opts.outputdir])
return flags
def stdout_ok(name, way):
actual_stdout_file = add_suffix(name, 'run.stdout')
expected_stdout_file = find_expected_file(name, 'stdout')
extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)
check_stdout = getTestOpts().check_stdout
if check_stdout:
actual_stdout_path = in_testdir(actual_stdout_file)
return check_stdout(actual_stdout_path, extra_norm)
return compare_outputs(way, 'stdout', extra_norm,
expected_stdout_file, actual_stdout_file)
def dump_stdout( name ):
with open(in_testdir(name, 'run.stdout'), encoding='utf8') as f:
str = f.read().strip()
if str:
print("Stdout (", name, "):")
print(str)
def stderr_ok(name, way):
actual_stderr_file = add_suffix(name, 'run.stderr')
expected_stderr_file = find_expected_file(name, 'stderr')
return compare_outputs(way, 'stderr',
join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
expected_stderr_file, actual_stderr_file,
whitespace_normaliser=normalise_whitespace)
def dump_stderr( name ):
with open(in_testdir(name, 'run.stderr'), encoding='utf8') as f:
str = f.read().strip()
if str:
print("Stderr (", name, "):")
print(str)
def read_no_crs(file):
str = ''
try:
# See Note [Universal newlines].
with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
str = h.read()
except Exception:
# On Windows, if the program fails very early, it seems the
# files stdout/stderr are redirected to may not get created
pass
return str
def write_file(file, str):
# See Note [Universal newlines].
with io.open(file, 'w', encoding='utf8', newline='') as h:
h.write(str)
# Note [Universal newlines]
#
# We don't want to write any Windows style line endings ever, because
# it would mean that `make accept` would touch every line of the file
# when switching between Linux and Windows.
#
# Furthermore, when reading a file, it is convenient to translate all
# Windows style endings to '\n', as it simplifies searching or massaging
# the content.
#
# Solution: use `io.open` instead of `open`
# * when reading: use newline=None to translate '\r\n' to '\n'
# * when writing: use newline='' to not translate '\n' to '\r\n'
#
# See https://docs.python.org/2/library/io.html#io.open.
#
# This should work with both python2 and python3, and with both mingw*
# and msys2 style Python.
#
# Do note that io.open returns unicode strings. So we have to specify
# the expected encoding. But there is at least one file which is not
# valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
# Another solution would be to open files in binary mode always, and
# operate on bytes.
def check_hp_ok(name):
opts = getTestOpts()
# do not qualify for hp2ps because we should be in the right directory
hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())
hp2psResult = runCmd(hp2psCmd)
actual_ps_path = in_testdir(name, 'ps')
if hp2psResult == 0:
if os.path.exists(actual_ps_path):
if gs_working:
gsResult = runCmd(genGSCmd(actual_ps_path))
if (gsResult == 0):
return (True)
else:
print("hp2ps output for " + name + "is not valid PostScript")
else: return (True) # assume postscript is valid without ghostscript
else:
print("hp2ps did not generate PostScript for " + name)
return (False)
else:
print("hp2ps error when processing heap profile for " + name)
return(False)
def check_prof_ok(name, way):
expected_prof_file = find_expected_file(name, 'prof.sample')
expected_prof_path = in_testdir(expected_prof_file)
# Check actual prof file only if we have an expected prof file to
# compare it with.
if not os.path.exists(expected_prof_path):
return True
actual_prof_file = add_suffix(name, 'prof')
actual_prof_path = in_testdir(actual_prof_file)
if not os.path.exists(actual_prof_path):
print(actual_prof_path + " does not exist")
return(False)
if os.path.getsize(actual_prof_path) == 0:
print(actual_prof_path + " is empty")
return(False)
return compare_outputs(way, 'prof', normalise_prof,
expected_prof_file, actual_prof_file,
whitespace_normaliser=normalise_whitespace)
# Compare expected output to actual output, and optionally accept the
# new output. Returns true if output matched or was accepted, false
# otherwise. See Note [Output comparison] for the meaning of the
# normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
whitespace_normaliser=lambda x:x):
expected_path = in_srcdir(expected_file)
actual_path = in_testdir(actual_file)
if os.path.exists(expected_path):
expected_str = normaliser(read_no_crs(expected_path))
# Create the .normalised file in the testdir, not in the srcdir.
expected_normalised_file = add_suffix(expected_file, 'normalised')
expected_normalised_path = in_testdir(expected_normalised_file)
else:
expected_str = ''
expected_normalised_path = '/dev/null'
actual_raw = read_no_crs(actual_path)
actual_str = normaliser(actual_raw)
# See Note [Output comparison].
if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
return True
else:
if config.verbose >= 1 and _expect_pass(way):
print('Actual ' + kind + ' output differs from expected:')
if expected_normalised_path != '/dev/null':
write_file(expected_normalised_path, expected_str)
actual_normalised_path = add_suffix(actual_path, 'normalised')
write_file(actual_normalised_path, actual_str)
if config.verbose >= 1 and _expect_pass(way):
# See Note [Output comparison].
r = runCmd('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
actual_normalised_path),
print_output=True)
# If for some reason there were no non-whitespace differences,
# then do a full diff
if r == 0:
r = runCmd('diff -u "{0}" "{1}"'.format(expected_normalised_path,
actual_normalised_path),
print_output=True)
if config.accept and (getTestOpts().expect == 'fail' or
way in getTestOpts().expect_fail_for):
if_verbose(1, 'Test is expected to fail. Not accepting new output.')
return False
elif config.accept and actual_raw:
if config.accept_platform:
if_verbose(1, 'Accepting new output for platform "'
+ config.platform + '".')
expected_path += '-' + config.platform
elif config.accept_os:
if_verbose(1, 'Accepting new output for os "'
+ config.os + '".')
expected_path += '-' + config.os
else:
if_verbose(1, 'Accepting new output.')
write_file(expected_path, actual_raw)
return True
elif config.accept:
if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
os.remove(expected_path)
return True
else:
return False
# Note [Output comparison]
#
# We do two types of output comparison:
#
# 1. To decide whether a test has failed. We apply a `normaliser` and an
# optional `whitespace_normaliser` to the expected and the actual
# output, before comparing the two.
#
# 2. To show as a diff to the user when the test indeed failed. We apply
# the same `normaliser` function to the outputs, to make the diff as
# small as possible (only showing the actual problem). But we don't
# apply the `whitespace_normaliser` here, because it might completely
# squash all whitespace, making the diff unreadable. Instead we rely
# on the `diff` program to ignore whitespace changes as much as
# possible (#10152).
def normalise_whitespace( str ):
# Merge contiguous whitespace characters into a single space.
return ' '.join(str.split())
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')
def normalise_callstacks(s):
opts = getTestOpts()
def repl(matches):
location = matches.group(1)
location = normalise_slashes_(location)
return ', called at {0}:<line>:<column> in <package-id>:'.format(location)
# Ignore line number differences in call stacks (#10834).
s = re.sub(callSite_re, repl, s)
# Ignore the change in how we identify implicit call-stacks
s = s.replace('from ImplicitParams', 'from HasCallStack')
if not opts.keep_prof_callstacks:
# Don't output prof callstacks. Test output should be
# independent from the WAY we run the test.
s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)
return s
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)
def normalise_type_reps(str):
""" Normalise out fingerprints from Typeable TyCon representations """
return re.sub(tyCon_re, 'TyCon FINGERPRINT FINGERPRINT ', str)
def normalise_errmsg( str ):
"""Normalise error-messages emitted via stderr"""
# IBM AIX's `ld` is a bit chatty
if opsys('aix'):
str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')
# remove " error:" and lower-case " Warning:" to make patch for
# trac issue #10021 smaller
str = modify_lines(str, lambda l: re.sub(' error:', '', l))
str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
str = normalise_callstacks(str)
str = normalise_type_reps(str)
# If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
# the colon is there because it appears in error messages; this
# hacky solution is used in place of more sophisticated filename
# mangling
str = re.sub('([^\\s])\\.exe', '\\1', str)
# normalise slashes, minimise Windows/Unix filename differences
str = re.sub('\\\\', '/', str)
# The inplace ghc's are called ghc-stage[123] to avoid filename
# collisions, so we need to normalise that to just "ghc"
str = re.sub('ghc-stage[123]', 'ghc', str)
# Error messages sometimes contain integer implementation package
str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)
# Error messages sometimes contain this blurb which can vary
# spuriously depending upon build configuration (e.g. based on integer
# backend)
str = re.sub('...plus ([a-z]+|[0-9]+) instances involving out-of-scope types',
'...plus N instances involving out-of-scope types', str)
    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # division happens.
bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
str = str.replace(bullet, '')
# Windows only, this is a bug in hsc2hs but it is preventing
# stable output for the testsuite. See Trac #9775. For now we filter out this
# warning message to get clean output.
if config.msys:
str = re.sub('Failed to remove file (.*); error= (.*)$', '', str)
str = re.sub('DeleteFile "(.+)": permission denied \(Access is denied\.\)(.*)$', '', str)
return str
# normalise a .prof file, so that we can reasonably compare it against
# a sample. This doesn't compare any of the actual profiling data,
# only the shape of the profile and the number of entries.
def normalise_prof (str):
# strip everything up to the line beginning "COST CENTRE"
str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)
# strip results for CAFs, these tend to change unpredictably
str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)
# XXX Ignore Main.main. Sometimes this appears under CAF, and
# sometimes under MAIN.
str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)
# We have something like this:
#
# MAIN MAIN <built-in> 53 0 0.0 0.2 0.0 100.0
# CAF Main <entire-module> 105 0 0.0 0.3 0.0 62.5
# readPrec Main Main_1.hs:7:13-16 109 1 0.0 0.6 0.0 0.6
# readPrec Main Main_1.hs:4:13-16 107 1 0.0 0.6 0.0 0.6
# main Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
# == Main Main_1.hs:7:25-26 114 1 0.0 0.0 0.0 0.0
# == Main Main_1.hs:4:25-26 113 1 0.0 0.0 0.0 0.0
# showsPrec Main Main_1.hs:7:19-22 112 2 0.0 1.2 0.0 1.2
# showsPrec Main Main_1.hs:4:19-22 111 2 0.0 0.9 0.0 0.9
# readPrec Main Main_1.hs:7:13-16 110 0 0.0 18.8 0.0 18.8
# readPrec Main Main_1.hs:4:13-16 108 0 0.0 19.9 0.0 19.9
#
# then we remove all the specific profiling data, leaving only the cost
# centre name, module, src, and entries, to end up with this: (modulo
# whitespace between columns)
#
# MAIN MAIN <built-in> 0
# readPrec Main Main_1.hs:7:13-16 1
# readPrec Main Main_1.hs:4:13-16 1
# == Main Main_1.hs:7:25-26 1
# == Main Main_1.hs:4:25-26 1
# showsPrec Main Main_1.hs:7:19-22 2
# showsPrec Main Main_1.hs:4:19-22 2
# readPrec Main Main_1.hs:7:13-16 0
# readPrec Main Main_1.hs:4:13-16 0
# Split 9 whitespace-separated groups, take columns 1 (cost-centre), 2
# (module), 3 (src), and 5 (entries). SCC names can't have whitespace, so
# this works fine.
str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
'\\1 \\2 \\3 \\5\n', str)
return str
def normalise_slashes_( str ):
str = re.sub('\\\\', '/', str)
str = re.sub('//', '/', str)
return str
def normalise_exe_( str ):
str = re.sub('\.exe', '', str)
return str
def normalise_output( str ):
# remove " error:" and lower-case " Warning:" to make patch for
# trac issue #10021 smaller
str = modify_lines(str, lambda l: re.sub(' error:', '', l))
str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
# Remove a .exe extension (for Windows)
# This can occur in error messages generated by the program.
str = re.sub('([^\\s])\\.exe', '\\1', str)
str = normalise_callstacks(str)
str = normalise_type_reps(str)
return str
def normalise_asm( str ):
lines = str.split('\n')
# Only keep instructions and labels not starting with a dot.
metadata = re.compile('^[ \t]*\\..*$')
out = []
for line in lines:
# Drop metadata directives (e.g. ".type")
if not metadata.match(line):
line = re.sub('@plt', '', line)
instr = line.lstrip().split()
# Drop empty lines.
if not instr:
continue
# Drop operands, except for call instructions.
elif instr[0] == 'call':
out.append(instr[0] + ' ' + instr[1])
else:
out.append(instr[0])
out = '\n'.join(out)
return out
def if_verbose( n, s ):
if config.verbose >= n:
print(s)
def dump_file(f):
try:
with io.open(f) as file:
print(file.read())
except Exception:
print('')
def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0, print_output=False):
timeout_prog = strip_quotes(config.timeout_prog)
timeout = str(int(ceil(config.timeout * timeout_multiplier)))
# Format cmd using config. Example: cmd='{hpc} report A.tix'
cmd = cmd.format(**config.__dict__)
if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))
stdin_file = io.open(stdin, 'rb') if stdin else None
stdout_buffer = b''
stderr_buffer = b''
hStdErr = subprocess.PIPE
if stderr is subprocess.STDOUT:
hStdErr = subprocess.STDOUT
try:
# cmd is a complex command in Bourne-shell syntax
# e.g (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
# Hence it must ultimately be run by a Bourne shell. It's timeout's job
# to invoke the Bourne shell
r = subprocess.Popen([timeout_prog, timeout, cmd],
stdin=stdin_file,
stdout=subprocess.PIPE,
stderr=hStdErr,
env=ghc_env)
stdout_buffer, stderr_buffer = r.communicate()
finally:
if stdin_file:
stdin_file.close()
if config.verbose >= 1 and print_output:
if stdout_buffer:
sys.stdout.buffer.write(stdout_buffer)
if stderr_buffer:
sys.stderr.buffer.write(stderr_buffer)
if stdout:
with io.open(stdout, 'wb') as f:
f.write(stdout_buffer)
if stderr:
if stderr is not subprocess.STDOUT:
with io.open(stderr, 'wb') as f:
f.write(stderr_buffer)
if r.returncode == 98:
# The python timeout program uses 98 to signal that ^C was pressed
stopNow()
if r.returncode == 99 and getTestOpts().exit_code != 99:
# Only print a message when timeout killed the process unexpectedly.
if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
return r.returncode
# -----------------------------------------------------------------------------
# checking if ghostscript is available for checking the output of hp2ps
def genGSCmd(psfile):
return '{{gs}} -dNODISPLAY -dBATCH -dQUIET -dNOPAUSE "{0}"'.format(psfile)
def gsNotWorking():
global gs_working
print("GhostScript not available for hp2ps tests")
global gs_working
gs_working = False
if config.have_profiling:
if config.gs != '':
resultGood = runCmd(genGSCmd(config.top + '/config/good.ps'));
if resultGood == 0:
resultBad = runCmd(genGSCmd(config.top + '/config/bad.ps') +
' >/dev/null 2>&1')
if resultBad != 0:
print("GhostScript available for hp2ps tests")
gs_working = True
else:
gsNotWorking();
else:
gsNotWorking();
else:
gsNotWorking();
def add_suffix( name, suffix ):
if suffix == '':
return name
else:
return name + '.' + suffix
def add_hs_lhs_suffix(name):
if getTestOpts().c_src:
return add_suffix(name, 'c')
elif getTestOpts().cmm_src:
return add_suffix(name, 'cmm')
elif getTestOpts().objc_src:
return add_suffix(name, 'm')
elif getTestOpts().objcpp_src:
return add_suffix(name, 'mm')
elif getTestOpts().literate:
return add_suffix(name, 'lhs')
else:
return add_suffix(name, 'hs')
def replace_suffix( name, suffix ):
base, suf = os.path.splitext(name)
return base + '.' + suffix
def in_testdir(name, suffix=''):
return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))
def in_srcdir(name, suffix=''):
return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
# Finding the sample output. The filename is of the form
#
# <test>.stdout[-ws-<wordsize>][-<platform>|-<os>]
#
def find_expected_file(name, suff):
basename = add_suffix(name, suff)
files = [basename + ws + plat
for plat in ['-' + config.platform, '-' + config.os, '']
for ws in ['-ws-' + config.wordsize, '']]
for f in files:
if os.path.exists(in_srcdir(f)):
return f
return basename
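# For example, on a hypothetical 64-bit x86_64 Linux configuration
# (config.platform == 'x86_64-unknown-linux', config.os == 'linux',
# config.wordsize == '64'), find_expected_file('T1234', 'stdout') would try,
# in order:
#
#   T1234.stdout-ws-64-x86_64-unknown-linux
#   T1234.stdout-x86_64-unknown-linux
#   T1234.stdout-ws-64-linux
#   T1234.stdout-linux
#   T1234.stdout-ws-64
#   T1234.stdout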
if config.msys:
import stat
def cleanup():
testdir = getTestOpts().testdir
max_attempts = 5
retries = max_attempts
def on_error(function, path, excinfo):
# At least one test (T11489) removes the write bit from a file it
# produces. Windows refuses to delete read-only files with a
# permission error. Try setting the write bit and try again.
os.chmod(path, stat.S_IWRITE)
function(path)
# On Windows we have to retry the delete a couple of times.
# The reason for this is that a FileDelete command just marks a
# file for deletion. The file is really only removed when the last
# handle to the file is closed. Unfortunately there are a lot of
# system services that can have a file temporarily opened using a shared
# readonly lock, such as the built in AV and search indexer.
#
        # We can't really guarantee that these are all off, so what we can do is,
        # whenever the folder still exists after an rmtree, wait a bit and try again.
        #
        # Based on what I've seen from the tests on the CI server, this is relatively
        # rare, so overall we won't be retrying a lot. If the folder is still locked
        # after a reasonable amount of time, abort the current test by throwing an
        # exception, so that it doesn't fail with an even more cryptic error.
#
# See Trac #13162
exception = None
while retries > 0 and os.path.exists(testdir):
time.sleep((max_attempts-retries)*6)
try:
shutil.rmtree(testdir, onerror=on_error, ignore_errors=False)
except Exception as e:
exception = e
retries -= 1
if retries == 0 and os.path.exists(testdir):
raise Exception("Unable to remove folder '%s': %s\nUnable to start current test."
% (testdir, exception))
else:
def cleanup():
testdir = getTestOpts().testdir
if os.path.exists(testdir):
shutil.rmtree(testdir, ignore_errors=False)
# -----------------------------------------------------------------------------
# Return a list of all the files ending in '.T' below directories roots.
def findTFiles(roots):
for root in roots:
for path, dirs, files in os.walk(root, topdown=True):
# Never pick up .T files in uncleaned .run directories.
dirs[:] = [dir for dir in sorted(dirs)
if not dir.endswith(testdir_suffix)]
for filename in files:
if filename.endswith('.T'):
yield os.path.join(path, filename)
# -----------------------------------------------------------------------------
# Output a test summary to the specified file object
def summary(t, file, short=False, color=False):
file.write('\n')
printUnexpectedTests(file,
[t.unexpected_passes, t.unexpected_failures,
t.unexpected_stat_failures, t.framework_failures])
if short:
# Only print the list of unexpected tests above.
return
colorize = lambda s: s
if color:
if len(t.unexpected_failures) > 0 or \
len(t.unexpected_stat_failures) > 0 or \
len(t.framework_failures) > 0:
colorize = str_fail
else:
colorize = str_pass
file.write(colorize('SUMMARY') + ' for test run started at '
+ time.strftime("%c %Z", t.start_time) + '\n'
+ str(datetime.timedelta(seconds=
round(time.time() - time.mktime(t.start_time)))).rjust(8)
+ ' spent to go through\n'
+ repr(t.total_tests).rjust(8)
+ ' total tests, which gave rise to\n'
+ repr(t.total_test_cases).rjust(8)
+ ' test cases, of which\n'
+ repr(t.n_tests_skipped).rjust(8)
+ ' were skipped\n'
+ '\n'
+ repr(len(t.missing_libs)).rjust(8)
+ ' had missing libraries\n'
+ repr(t.n_expected_passes).rjust(8)
+ ' expected passes\n'
+ repr(t.n_expected_failures).rjust(8)
+ ' expected failures\n'
+ '\n'
+ repr(len(t.framework_failures)).rjust(8)
+ ' caused framework failures\n'
+ repr(len(t.framework_warnings)).rjust(8)
+ ' caused framework warnings\n'
+ repr(len(t.unexpected_passes)).rjust(8)
+ ' unexpected passes\n'
+ repr(len(t.unexpected_failures)).rjust(8)
+ ' unexpected failures\n'
+ repr(len(t.unexpected_stat_failures)).rjust(8)
+ ' unexpected stat failures\n'
+ '\n')
if t.unexpected_passes:
file.write('Unexpected passes:\n')
printTestInfosSummary(file, t.unexpected_passes)
if t.unexpected_failures:
file.write('Unexpected failures:\n')
printTestInfosSummary(file, t.unexpected_failures)
if t.unexpected_stat_failures:
file.write('Unexpected stat failures:\n')
printTestInfosSummary(file, t.unexpected_stat_failures)
if t.framework_failures:
file.write('Framework failures:\n')
printTestInfosSummary(file, t.framework_failures)
if t.framework_warnings:
file.write('Framework warnings:\n')
printTestInfosSummary(file, t.framework_warnings)
if stopping():
file.write('WARNING: Testsuite run was terminated early\n')
def printUnexpectedTests(file, testInfoss):
unexpected = set(name for testInfos in testInfoss
for (_, name, _, _) in testInfos
if not name.endswith('.T'))
if unexpected:
file.write('Unexpected results from:\n')
file.write('TEST="' + ' '.join(sorted(unexpected)) + '"\n')
file.write('\n')
def printTestInfosSummary(file, testInfos):
maxDirLen = max(len(directory) for (directory, _, _, _) in testInfos)
for (directory, name, reason, way) in testInfos:
directory = directory.ljust(maxDirLen)
file.write(' {directory} {name} [{reason}] ({way})\n'.format(**locals()))
file.write('\n')
def modify_lines(s, f):
s = '\n'.join([f(l) for l in s.splitlines()])
if s and s[-1] != '\n':
# Prevent '\ No newline at end of file' warnings when diffing.
s += '\n'
return s
|
from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
import Analisis_Ascendente.Tabla_simbolos.TablaSimbolos as TS
import C3D.GeneradorTemporales as GeneradorTemporales
import Analisis_Ascendente.reportes.Reportes as Reportes
class CasePL(Instruccion):
''' #1 Case search
#2 Case '''
def __init__(self, caso, id, cases,elsecaso ,fila, columna):
self.caso = caso
self.id = id
self.cases = cases
self.elsecaso = elsecaso
self.fila = fila
self.columna = columna
    def ejecutar(self, ts, consola, exceptions):
        # caso selects the CASE form declared in the docstring above
        # (1: 'Case search', 2: 'Case').
        if self.caso == 1:
            print(self.id)
            print(self.cases)
            print(self.elsecaso)
        elif self.caso == 2:
            print(self.id)
            print(self.cases)
            print(self.elsecaso)
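# A minimal usage sketch (hypothetical argument values; ts, consola and
# exceptions are expected to come from the surrounding interpreter and are not
# defined here):
#
#   nodo = CasePL(1, 'estado', lista_cases, bloque_else, 1, 1)
#   nodo.ejecutar(ts, consola, exceptions)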
|
# Using Tensorflow 2.x
# Make sure to use the latest version of Tensorflow
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(64, (2, 2), input_shape=(64, 64, 3)),
tf.keras.layers.Conv2D(64, (2, 2)),
tf.keras.layers.Conv2D(32, (2, 2)),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(16, (2, 2), activation='relu'),
tf.keras.layers.Conv2D(8, (2, 2), activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=1, activation='sigmoid')
])
# Compiling the CNN
model.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# PART 2 - FITTING CNN TO IMAGES
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
training_data = 'Data/train/'
testing_data = 'Data/test/'
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
training_data,
target_size=(64, 64),
batch_size=32,
class_mode='binary')
testing_set = test_datagen.flow_from_directory(
testing_data,
target_size=(64, 64),
batch_size=32,
class_mode='binary')
cnn = model.fit(training_set,
steps_per_epoch=160,
epochs=10,
validation_data=testing_set,
validation_steps=20)
saved_model = './Code/saved_model/'
model.save(saved_model)
print("Model saved.")
|
# -*- coding: utf-8 -*-
""""Windows Registry plugin for SAM Users Account information."""
from dfdatetime import filetime as dfdatetime_filetime
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import errors
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import dtfabric_plugin
from plaso.parsers.winreg_plugins import interface
class SAMUsersWindowsRegistryEventData(events.EventData):
"""Class that defines SAM users Windows Registry event data.
Attributes:
account_rid (int): account relative identifier (RID).
comments (str): comments.
fullname (str): full name.
key_path (str): Windows Registry key path.
login_count (int): login count.
username (str): a string containing the username.
"""
DATA_TYPE = 'windows:registry:sam_users'
def __init__(self):
"""Initializes event data."""
super(SAMUsersWindowsRegistryEventData, self).__init__(
data_type=self.DATA_TYPE)
self.account_rid = None
self.comments = None
self.fullname = None
self.key_path = None
self.login_count = None
self.username = None
class SAMUsersWindowsRegistryPlugin(
dtfabric_plugin.DtFabricBaseWindowsRegistryPlugin):
"""Windows Registry plugin for SAM Users Account information."""
NAME = 'windows_sam_users'
DATA_FORMAT = 'Security Accounts Manager (SAM) users Registry data'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_LOCAL_MACHINE\\SAM\\SAM\\Domains\\Account\\Users')])
_DEFINITION_FILE = 'sam_users.yaml'
_V_VALUE_STRINGS_OFFSET = 0xcc
def _ParseFValue(self, registry_key):
"""Parses an F value.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
f_value: F value stored in the Windows Registry key.
Raises:
ParseError: if the Windows Registry key does not contain an F value or
F value cannot be parsed.
"""
registry_value = registry_key.GetValueByName('F')
if not registry_value:
raise errors.ParseError(
'missing value: "F" in Windows Registry key: {0:s}.'.format(
registry_key.name))
f_value_map = self._GetDataTypeMap('f_value')
try:
return self._ReadStructureFromByteStream(
registry_value.data, 0, f_value_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(exception)
def _ParseVValueString(
self, parser_mediator, data, user_information_descriptor):
"""Parses a V value string.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
data (bytes): Windows Registry V value data.
user_information_descriptor (user_information_descriptor): V value
user information descriptor.
Returns:
str: string value stored in the Windows Registry V value data.
"""
data_start_offset = (
user_information_descriptor.offset + self._V_VALUE_STRINGS_OFFSET)
data_end_offset = data_start_offset + user_information_descriptor.size
descriptor_data = data[data_start_offset:data_end_offset]
try:
username = descriptor_data.decode('utf-16-le')
except (UnicodeDecodeError, UnicodeEncodeError) as exception:
username = descriptor_data.decode('utf-16-le', errors='replace')
parser_mediator.ProduceExtractionWarning((
'unable to decode V value string with error: {0!s}. Characters '
'that cannot be decoded will be replaced with "?" or '
'"\\ufffd".').format(exception))
return username
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
names_key = registry_key.GetSubkeyByName('Names')
if not names_key:
parser_mediator.ProduceExtractionWarning('missing subkey: Names.')
return
last_written_time_per_username = {
registry_value.name: registry_value.last_written_time
for registry_value in names_key.GetSubkeys()}
for subkey in registry_key.GetSubkeys():
if subkey.name == 'Names':
continue
try:
f_value = self._ParseFValue(subkey)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse F value with error: {0!s}'.format(exception))
continue
registry_value = subkey.GetValueByName('V')
if not registry_value:
parser_mediator.ProduceExtractionWarning(
'missing Registry value: "V" in subkey: {0:s}.'.format(
subkey.name))
continue
v_value_map = self._GetDataTypeMap('v_value')
try:
v_value = self._ReadStructureFromByteStream(
registry_value.data, 0, v_value_map)
except (ValueError, errors.ParseError) as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse V value with error: {0!s}'.format(exception))
continue
username = self._ParseVValueString(
parser_mediator, registry_value.data, v_value[1])
fullname = self._ParseVValueString(
parser_mediator, registry_value.data, v_value[2])
comments = self._ParseVValueString(
parser_mediator, registry_value.data, v_value[3])
last_written_time = last_written_time_per_username.get(username, None)
# TODO: check if subkey.name == f_value.rid
event_data = SAMUsersWindowsRegistryEventData()
event_data.account_rid = f_value.rid
event_data.comments = comments
event_data.fullname = fullname
event_data.key_path = registry_key.path
event_data.login_count = f_value.number_of_logons
event_data.username = username
event = time_events.DateTimeValuesEvent(
last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
if f_value.last_login_time != 0:
date_time = dfdatetime_filetime.Filetime(
timestamp=f_value.last_login_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_LOGIN)
parser_mediator.ProduceEventWithEventData(event, event_data)
if f_value.last_password_set_time != 0:
date_time = dfdatetime_filetime.Filetime(
timestamp=f_value.last_password_set_time)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_PASSWORD_RESET)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg_parser.WinRegistryParser.RegisterPlugin(SAMUsersWindowsRegistryPlugin)
|
import json
import os
import procrunner
def test_export_mosflm(dials_regression, tmpdir):
dials_regression_escaped = json.dumps(dials_regression).strip('"')
with open(
os.path.join(dials_regression, "experiment_test_data/experiment_1.json")
) as fi:
with (tmpdir / "experiments.json").open("w") as fo:
fo.write(fi.read().replace("$DIALS_REGRESSION", dials_regression_escaped))
result = procrunner.run(
["dials.export", "format=mosflm", "experiments.json"], working_directory=tmpdir
)
assert not result.returncode and not result.stderr
assert os.path.exists(tmpdir / "mosflm" / "index.mat")
with open(tmpdir / "mosflm" / "index.mat") as f:
lines = f.read()
assert (
lines
== """
-0.01210200 -0.01954526 0.00309519
-0.00416605 -0.00080573 -0.02427340
0.01931593 -0.01241956 -0.00329641
0.000 0.000 0.000
-0.52228050 -0.84350975 0.12535704
-0.17980379 -0.03477015 -0.98308781
0.83360283 -0.53598726 -0.13350648
42.2717 42.2720 39.6704 90.0001 89.9993 89.9998
0.000 0.000 0.000
""".strip(
"\n"
)
)
assert os.path.exists(tmpdir / "mosflm" / "mosflm.in")
with open(tmpdir / "mosflm" / "mosflm.in") as f:
lines = f.read()
assert (
lines
== """
DIRECTORY %s%scentroid_test_data
TEMPLATE centroid_####.cbf
SYMMETRY 89
BEAM 220.002 212.478
DISTANCE 190.1800
MATRIX index.mat
""".strip(
"\n"
)
% (dials_regression, os.path.sep)
)
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from unittest import mock
from freshmaker import kojiservice
@mock.patch("freshmaker.kojiservice.koji")
def test_build_container_csv_mods(mock_koji):
mock_session = mock.Mock()
mock_session.buildContainer.return_value = 123
mock_koji.ClientSession.return_value = mock_session
svc = kojiservice.KojiService()
svc.build_container(
"git@domain.local:namespace/repo.git",
"1.0",
"repo-1.0",
operator_csv_modifications_url="https://domain.local/namespace/repo",
)
mock_session.buildContainer.assert_called_once_with(
"git@domain.local:namespace/repo.git",
"repo-1.0",
{
"git_branch": "1.0",
"operator_csv_modifications_url": "https://domain.local/namespace/repo",
"scratch": False,
},
)
@mock.patch("freshmaker.kojiservice.koji")
def test_get_ocp_versions_range(mock_koji):
mock_session = mock.Mock()
mock_session.getBuild.return_value = {"id": 123}
archives = [{
"arch": "x86_64",
"btype": "image",
"extra": {
"docker": {
"config": {
"architecture": "amd64",
"config": {
"Hostname": "c4b105e29878",
"Labels": {
"architecture": "x86_64",
"com.redhat.component": "foobar-bundle-container",
"com.redhat.delivery.backport": "true",
"com.redhat.delivery.operator.bundle": "true",
"com.redhat.openshift.versions": "v4.5,v4.6"
}
},
"os": "linux"
},
"id": "sha256:123"
},
"image": {
"arch": "x86_64"
}
},
"type_name": "tar"
}]
mock_session.listArchives.return_value = archives
mock_koji.ClientSession.return_value = mock_session
svc = kojiservice.KojiService()
assert svc.get_ocp_versions_range('foobar-2-123') == "v4.5,v4.6"
@mock.patch("freshmaker.kojiservice.koji")
@mock.patch("freshmaker.kojiservice.requests.get")
@mock.patch("freshmaker.kojiservice.ZipFile")
@mock.patch("freshmaker.kojiservice.BytesIO")
@mock.patch("freshmaker.kojiservice.yaml")
def test_get_bundle_csv_success(
mock_yaml, mock_bytesio, mock_zipfile, mock_get, mock_koji
):
mock_session = mock.Mock()
mock_session.getBuild.return_value = {
"id": 123,
"nvr": "foobar-bundle-container-2.0-123",
"extra": {"operator_manifests_archive": "operator_manifests.zip"}
}
mock_koji.ClientSession.return_value = mock_session
mock_get.return_value = mock.Mock(ok=True)
mock_zipfile.return_value.namelist.return_value = [
"foobar-v2.0-opr-1.clusterserviceversion.yaml",
"foobar_crd.yaml",
"foobar_artemisaddress_crd.yaml",
"foobar_artemisscaledown_crd.yaml"
]
mock_yaml.safe_load.return_value = {
"apiVersion": "operators.coreos.com/v1alpha1",
"kind": "ClusterServiceVersion",
"spec": {"version": "2.0-opr-1"},
"metadata": {"name": "foobar-2.0-opr-1"}
}
svc = kojiservice.KojiService()
csv = svc.get_bundle_csv("foobar-bundle-container-2.0-123")
assert csv["metadata"]["name"] == "foobar-2.0-opr-1"
assert csv["spec"]["version"] == "2.0-opr-1"
@mock.patch("freshmaker.kojiservice.log")
@mock.patch("freshmaker.kojiservice.koji")
@mock.patch("freshmaker.kojiservice.requests.get")
def test_get_bundle_csv_unavailable(mock_get, mock_koji, mock_log):
mock_session = mock.Mock()
mock_session.getBuild.return_value = {
"id": 123,
"nvr": "foobar-bundle-container-2.0-123",
"extra": {}
}
mock_koji.ClientSession.return_value = mock_session
svc = kojiservice.KojiService()
csv = svc.get_bundle_csv("foobar-bundle-container-2.0-123")
assert csv is None
mock_log.error.assert_any_call(
"Operator manifests archive is unavaiable for build %s", "foobar-bundle-container-2.0-123"
)
|
# -*- coding: utf-8 -*-
"""
Utilities for the CLI functions.
"""
import re
import click
import json
from .instance import import_module
from ..interfaces.base import InputMultiPath, traits
from ..interfaces.base.support import get_trait_desc
# different context options
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
UNKNOWN_OPTIONS = dict(allow_extra_args=True, ignore_unknown_options=True)
# specification of existing ParamTypes
ExistingDirPath = click.Path(exists=True, file_okay=False, resolve_path=True)
ExistingFilePath = click.Path(exists=True, dir_okay=False, resolve_path=True)
UnexistingFilePath = click.Path(dir_okay=False, resolve_path=True)
# validators
def check_not_none(ctx, param, value):
if value is None:
raise click.BadParameter('got {}.'.format(value))
return value
# declare custom click.ParamType
class RegularExpression(click.ParamType):
name = 'regex'
def convert(self, value, param, ctx):
try:
rex = re.compile(value, re.IGNORECASE)
        except (re.error, ValueError):
self.fail('%s is not a valid regular expression.' % value, param,
ctx)
else:
return rex
class PythonModule(click.ParamType):
name = 'Python module path'
def convert(self, value, param, ctx):
try:
module = import_module(value)
except ValueError:
self.fail('%s is not a valid Python module.' % value, param, ctx)
else:
return module
def add_args_options(arg_parser, interface):
"""Add arguments to `arg_parser` to create a CLI for `interface`."""
inputs = interface.input_spec()
for name, spec in sorted(interface.inputs.traits(transient=None).items()):
desc = "\n".join(get_trait_desc(inputs, name, spec))[len(name) + 2:]
# Escape any % signs with a %
desc = desc.replace('%', '%%')
args = {}
has_multiple_inner_traits = False
if spec.is_trait_type(traits.Bool):
args["default"] = getattr(inputs, name)
args["action"] = 'store_true'
# current support is for simple trait types
if not spec.inner_traits:
if not spec.is_trait_type(traits.TraitCompound):
trait_type = type(spec.trait_type.default_value)
if trait_type in (bytes, str, int, float):
if trait_type == bytes:
trait_type = str
args["type"] = trait_type
elif len(spec.inner_traits) == 1:
trait_type = type(spec.inner_traits[0].trait_type.default_value)
if trait_type == bytes:
trait_type = str
if trait_type in (bytes, bool, str, int, float):
args["type"] = trait_type
else:
if len(spec.inner_traits) > 1:
if not spec.is_trait_type(traits.Dict):
has_multiple_inner_traits = True
if getattr(spec, "mandatory", False):
if spec.is_trait_type(InputMultiPath):
args["nargs"] = "+"
elif spec.is_trait_type(traits.List):
if (spec.trait_type.minlen == spec.trait_type.maxlen) and \
spec.trait_type.maxlen:
args["nargs"] = spec.trait_type.maxlen
else:
args["nargs"] = "+"
elif spec.is_trait_type(traits.Dict):
args["type"] = json.loads
if has_multiple_inner_traits:
raise NotImplementedError(
                    ('This interface cannot be used via the'
' command line as multiple inner traits'
' are currently not supported for mandatory'
' argument: {}.'.format(name)))
arg_parser.add_argument(name, help=desc, **args)
else:
if spec.is_trait_type(InputMultiPath):
args["nargs"] = "*"
elif spec.is_trait_type(traits.List):
if (spec.trait_type.minlen == spec.trait_type.maxlen) and \
spec.trait_type.maxlen:
args["nargs"] = spec.trait_type.maxlen
else:
args["nargs"] = "*"
if not has_multiple_inner_traits:
arg_parser.add_argument(
"--%s" % name, dest=name, help=desc, **args)
return arg_parser
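# A minimal usage sketch (illustrative only; assumes nipype's FSL interfaces
# are installed and importable):
#
#   import argparse
#   from nipype.interfaces.fsl import BET
#
#   parser = argparse.ArgumentParser(description='Run FSL BET from the CLI')
#   parser = add_args_options(parser, BET())
#   args = parser.parse_args()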
|
#
# PySNMP MIB module HP-ICF-DHCPv6-RELAY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HP-ICF-DHCPv6-RELAY
# Produced by pysmi-0.3.4 at Mon Apr 29 19:21:12 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
hpSwitch, = mibBuilder.importSymbols("HP-ICF-OID", "hpSwitch")
InterfaceIndex, ifIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Unsigned32, IpAddress, NotificationType, Counter32, ObjectIdentity, ModuleIdentity, Counter64, Bits, MibIdentifier, Integer32, iso, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Unsigned32", "IpAddress", "NotificationType", "Counter32", "ObjectIdentity", "ModuleIdentity", "Counter64", "Bits", "MibIdentifier", "Integer32", "iso", "TimeTicks")
RowStatus, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TextualConvention", "DisplayString")
hpicfDhcpv6Relay = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50))
hpicfDhcpv6Relay.setRevisions(('2014-02-12 00:00', '2012-04-24 00:00', '2008-04-08 06:05',))
if mibBuilder.loadTexts: hpicfDhcpv6Relay.setLastUpdated('201402120000Z')
if mibBuilder.loadTexts: hpicfDhcpv6Relay.setOrganization('HP Networking')
hpicfDhcpv6RelayAdminStatus = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfDhcpv6RelayAdminStatus.setStatus('current')
hpicfDhcpRelayHelperAddressTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 2), )
if mibBuilder.loadTexts: hpicfDhcpRelayHelperAddressTable.setStatus('current')
hpicfDhcpRelayHelperAddressEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayHelperAddressType"), (0, "HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayHelperAddress"))
if mibBuilder.loadTexts: hpicfDhcpRelayHelperAddressEntry.setStatus('current')
hpicfDhcpRelayHelperAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 2, 1, 1), InetAddressType())
if mibBuilder.loadTexts: hpicfDhcpRelayHelperAddressType.setStatus('current')
hpicfDhcpRelayHelperAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 2, 1, 2), InetAddress())
if mibBuilder.loadTexts: hpicfDhcpRelayHelperAddress.setStatus('current')
hpicfDhcpRelayHelperAddressEgressInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 2, 1, 3), InterfaceIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfDhcpRelayHelperAddressEgressInterface.setStatus('current')
hpicfDhcpRelayHelperAddressStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpicfDhcpRelayHelperAddressStatus.setStatus('current')
hpicfDhcpRelayPerInterfaceStatsTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 3), )
if mibBuilder.loadTexts: hpicfDhcpRelayPerInterfaceStatsTable.setStatus('current')
hpicfDhcpRelayPerInterfaceStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hpicfDhcpRelayPerInterfaceStatsEntry.setStatus('current')
hpicfDhcpRelayPerInterfaceClientPktsRecd = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 3, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayPerInterfaceClientPktsRecd.setStatus('current')
hpicfDhcpRelayPerInterfaceClientPktsDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 3, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayPerInterfaceClientPktsDropped.setStatus('current')
hpicfDhcpRelayPerInterfaceClientPktsXmitFail = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayPerInterfaceClientPktsXmitFail.setStatus('current')
hpicfDhcpRelayPerInterfaceServerPktsRecd = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayPerInterfaceServerPktsRecd.setStatus('current')
hpicfDhcpRelayPerInterfaceServerPktsDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayPerInterfaceServerPktsDropped.setStatus('current')
hpicfDhcpRelayPerInterfaceServerPktsXmitFail = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayPerInterfaceServerPktsXmitFail.setStatus('current')
hpicfDhcpRelayGlobalStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5))
hpicfDhcpv6RelayOptions = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 6))
hpicfDhcpRelayPktsDropped = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayPktsDropped.setStatus('current')
hpicfDhcpRelayErrorPktsDropped = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayErrorPktsDropped.setStatus('current')
hpicfDhcpRelayTotalPktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayTotalPktsReceived.setStatus('current')
hpicfDhcpRelaySolicitPktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelaySolicitPktsReceived.setStatus('current')
hpicfDhcpRelayRequestPktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayRequestPktsReceived.setStatus('current')
hpicfDhcpRelayConfirmPktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayConfirmPktsReceived.setStatus('current')
hpicfDhcpRelayRenewPktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayRenewPktsReceived.setStatus('current')
hpicfDhcpRelayRebindPktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayRebindPktsReceived.setStatus('current')
hpicfDhcpRelayReleasePktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayReleasePktsReceived.setStatus('current')
hpicfDhcpRelayDeclinePktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayDeclinePktsReceived.setStatus('current')
hpicfDhcpRelayInformationReqPktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayInformationReqPktsReceived.setStatus('current')
hpicfDhcpRelayRelayForwardPktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayRelayForwardPktsReceived.setStatus('current')
hpicfDhcpRelayRelayReplyPktsReceived = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayRelayReplyPktsReceived.setStatus('current')
hpicfDhcpRelayTotalPktsSent = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayTotalPktsSent.setStatus('current')
hpicfDhcpRelayAdvertisePktsSent = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayAdvertisePktsSent.setStatus('current')
hpicfDhcpRelayReconfigurePktsSent = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayReconfigurePktsSent.setStatus('current')
hpicfDhcpRelayReplyPktsSent = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayReplyPktsSent.setStatus('current')
hpicfDhcpRelayRelayForwardPktsSent = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayRelayForwardPktsSent.setStatus('current')
hpicfDhcpRelayRelayReplyPktsSent = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 5, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfDhcpRelayRelayReplyPktsSent.setStatus('current')
hpicfDhcpv6RelayOption79Status = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 6, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfDhcpv6RelayOption79Status.setStatus('current')
hpicfDhcpRelayConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 4))
hpicfDhcpRelayGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 4, 1))
hpicfDhcpRelayCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 4, 2))
hpicfDhcpRelayConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 4, 1, 1)).setObjects(("HP-ICF-DHCPv6-RELAY", "hpicfDhcpv6RelayAdminStatus"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayHelperAddressEgressInterface"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayHelperAddressStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfDhcpRelayConfigGroup = hpicfDhcpRelayConfigGroup.setStatus('deprecated')
hpicfDhcpRelayConfigGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 4, 1, 4)).setObjects(("HP-ICF-DHCPv6-RELAY", "hpicfDhcpv6RelayAdminStatus"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayHelperAddressEgressInterface"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayHelperAddressStatus"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpv6RelayOption79Status"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfDhcpRelayConfigGroup1 = hpicfDhcpRelayConfigGroup1.setStatus('current')
hpicfDhcpRelayStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 4, 1, 2)).setObjects(("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceClientPktsRecd"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceClientPktsDropped"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceClientPktsXmitFail"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceServerPktsRecd"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceServerPktsDropped"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceServerPktsXmitFail"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfDhcpRelayStatsGroup = hpicfDhcpRelayStatsGroup.setStatus('deprecated')
hpicfDhcpRelayStatsGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 4, 1, 3)).setObjects(("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceClientPktsRecd"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceClientPktsDropped"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceClientPktsXmitFail"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceServerPktsRecd"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceServerPktsDropped"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPerInterfaceServerPktsXmitFail"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayPktsDropped"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayErrorPktsDropped"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayTotalPktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelaySolicitPktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayRequestPktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayConfirmPktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayRenewPktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayRebindPktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayReleasePktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayDeclinePktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayInformationReqPktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayRelayForwardPktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayRelayReplyPktsReceived"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayTotalPktsSent"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayAdvertisePktsSent"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayReconfigurePktsSent"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayReplyPktsSent"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayRelayForwardPktsSent"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayRelayReplyPktsSent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfDhcpRelayStatsGroup1 = hpicfDhcpRelayStatsGroup1.setStatus('current')
hpicfDhcpRelayCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 4, 2, 1)).setObjects(("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayConfigGroup"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayStatsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfDhcpRelayCompliance = hpicfDhcpRelayCompliance.setStatus('deprecated')
hpicfDhcpRelayCompliance1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 4, 2, 2)).setObjects(("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayConfigGroup"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayStatsGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfDhcpRelayCompliance1 = hpicfDhcpRelayCompliance1.setStatus('deprecated')
hpicfDhcpRelayCompliance2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 50, 4, 2, 3)).setObjects(("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayConfigGroup1"), ("HP-ICF-DHCPv6-RELAY", "hpicfDhcpRelayStatsGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfDhcpRelayCompliance2 = hpicfDhcpRelayCompliance2.setStatus('current')
mibBuilder.exportSymbols("HP-ICF-DHCPv6-RELAY", hpicfDhcpRelayCompliance2=hpicfDhcpRelayCompliance2, hpicfDhcpRelayRebindPktsReceived=hpicfDhcpRelayRebindPktsReceived, hpicfDhcpRelayTotalPktsReceived=hpicfDhcpRelayTotalPktsReceived, hpicfDhcpRelayPerInterfaceClientPktsDropped=hpicfDhcpRelayPerInterfaceClientPktsDropped, hpicfDhcpRelayPerInterfaceClientPktsXmitFail=hpicfDhcpRelayPerInterfaceClientPktsXmitFail, hpicfDhcpRelayHelperAddress=hpicfDhcpRelayHelperAddress, hpicfDhcpRelayTotalPktsSent=hpicfDhcpRelayTotalPktsSent, hpicfDhcpRelayCompliance=hpicfDhcpRelayCompliance, hpicfDhcpv6Relay=hpicfDhcpv6Relay, hpicfDhcpv6RelayOption79Status=hpicfDhcpv6RelayOption79Status, hpicfDhcpv6RelayAdminStatus=hpicfDhcpv6RelayAdminStatus, hpicfDhcpRelayReconfigurePktsSent=hpicfDhcpRelayReconfigurePktsSent, hpicfDhcpRelayConfirmPktsReceived=hpicfDhcpRelayConfirmPktsReceived, hpicfDhcpRelayStatsGroup1=hpicfDhcpRelayStatsGroup1, hpicfDhcpRelayRelayReplyPktsReceived=hpicfDhcpRelayRelayReplyPktsReceived, hpicfDhcpRelayConfigGroup=hpicfDhcpRelayConfigGroup, hpicfDhcpRelayGlobalStatistics=hpicfDhcpRelayGlobalStatistics, hpicfDhcpRelayHelperAddressEgressInterface=hpicfDhcpRelayHelperAddressEgressInterface, hpicfDhcpRelayDeclinePktsReceived=hpicfDhcpRelayDeclinePktsReceived, hpicfDhcpRelayPerInterfaceStatsTable=hpicfDhcpRelayPerInterfaceStatsTable, hpicfDhcpRelayAdvertisePktsSent=hpicfDhcpRelayAdvertisePktsSent, hpicfDhcpRelayRelayForwardPktsReceived=hpicfDhcpRelayRelayForwardPktsReceived, hpicfDhcpRelayHelperAddressStatus=hpicfDhcpRelayHelperAddressStatus, hpicfDhcpRelayPerInterfaceServerPktsDropped=hpicfDhcpRelayPerInterfaceServerPktsDropped, hpicfDhcpRelayPerInterfaceServerPktsRecd=hpicfDhcpRelayPerInterfaceServerPktsRecd, hpicfDhcpRelayCompliance1=hpicfDhcpRelayCompliance1, hpicfDhcpRelayGroups=hpicfDhcpRelayGroups, hpicfDhcpRelayRelayReplyPktsSent=hpicfDhcpRelayRelayReplyPktsSent, hpicfDhcpRelayConformance=hpicfDhcpRelayConformance, PYSNMP_MODULE_ID=hpicfDhcpv6Relay, hpicfDhcpRelayRequestPktsReceived=hpicfDhcpRelayRequestPktsReceived, hpicfDhcpRelayHelperAddressEntry=hpicfDhcpRelayHelperAddressEntry, hpicfDhcpRelayHelperAddressTable=hpicfDhcpRelayHelperAddressTable, hpicfDhcpRelayRenewPktsReceived=hpicfDhcpRelayRenewPktsReceived, hpicfDhcpRelayConfigGroup1=hpicfDhcpRelayConfigGroup1, hpicfDhcpRelayInformationReqPktsReceived=hpicfDhcpRelayInformationReqPktsReceived, hpicfDhcpRelaySolicitPktsReceived=hpicfDhcpRelaySolicitPktsReceived, hpicfDhcpRelayStatsGroup=hpicfDhcpRelayStatsGroup, hpicfDhcpRelayPktsDropped=hpicfDhcpRelayPktsDropped, hpicfDhcpRelayCompliances=hpicfDhcpRelayCompliances, hpicfDhcpRelayPerInterfaceServerPktsXmitFail=hpicfDhcpRelayPerInterfaceServerPktsXmitFail, hpicfDhcpRelayReleasePktsReceived=hpicfDhcpRelayReleasePktsReceived, hpicfDhcpRelayPerInterfaceClientPktsRecd=hpicfDhcpRelayPerInterfaceClientPktsRecd, hpicfDhcpRelayHelperAddressType=hpicfDhcpRelayHelperAddressType, hpicfDhcpRelayReplyPktsSent=hpicfDhcpRelayReplyPktsSent, hpicfDhcpv6RelayOptions=hpicfDhcpv6RelayOptions, hpicfDhcpRelayRelayForwardPktsSent=hpicfDhcpRelayRelayForwardPktsSent, hpicfDhcpRelayPerInterfaceStatsEntry=hpicfDhcpRelayPerInterfaceStatsEntry, hpicfDhcpRelayErrorPktsDropped=hpicfDhcpRelayErrorPktsDropped)
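# A minimal query sketch (not part of the generated module): assuming pysnmp's
# high-level API is installed, this compiled MIB is on the builder's search path,
# and the agent address/community below are placeholders, the admin status scalar
# defined above could be read roughly like this:
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#   errInd, errStat, errIdx, varBinds = next(getCmd(
#       SnmpEngine(), CommunityData('public'),
#       UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
#       ObjectType(ObjectIdentity('HP-ICF-DHCPv6-RELAY', 'hpicfDhcpv6RelayAdminStatus', 0))))
#   if not errInd and not errStat:
#       print(varBinds[0].prettyPrint())  # e.g. "... = enabled(1)"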
|
from PyQt5 import QtTest
from Functions import WatchStoriesAction
from random import randint
from PyQt5.QtCore import QThread
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
def watchStoriesFromAccount(browser, targetAccount, TargetAmount):
pageMovement = ActionChains(browser)
pageMovement.send_keys(Keys.ARROW_RIGHT)
acc_count = 0
try:
while acc_count < len(targetAccount):
browser.get('https://www.instagram.com/' + targetAccount[acc_count] + '/')
QThread.sleep(5)
browser.find_element_by_xpath("//a[contains(@href,'/follower')]").click()
QThread.sleep(5)
count = 0
while count < TargetAmount:
count = WatchStoriesAction.watchStoriesAction(browser, count, targetAccount[acc_count], pageMovement)
if count == TargetAmount:
print('Done, returning to Instagram...')
browser.get('https://www.instagram.com')
acc_count += 1
except Exception:
print('Account is private or does not exist. Exiting.')
browser.get('https://www.instagram.com')
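# Minimal usage sketch (assumes an already logged-in Selenium session; the account
# name below is a placeholder, not a real target):
#
#   from selenium import webdriver
#   browser = webdriver.Chrome()
#   # ... log in to instagram.com with your own credentials/helper first ...
#   watchStoriesFromAccount(browser, ['some_public_account'], TargetAmount=10)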
|
################################################################################
# BSD LICENSE
#
# Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import subprocess
import re
import time
import test
import pytest
from priority import PRIORITY_HIGH
class TestPqosCMT(test.Test):
## @cond
@pytest.fixture(autouse=True)
def init(self, request):
super(TestPqosCMT, self).init(request)
yield
super(TestPqosCMT, self).fini()
## @endcond
## PQOS - CMT Detection
#
# \b Priority: High
#
# \b Objective:
# Verify CMT capability is detected on platform
#
# \b Instruction:
# Run "pqos [-I] -d" to print supported capabilities
#
# \b Result:
# Observe "LLC Occupancy" in "Cache Monitoring Technology (CMT) events" section
@PRIORITY_HIGH
@pytest.mark.rdt_supported("cqm_occup_llc")
def test_pqos_cmt_detection(self, iface):
(stdout, _, exitstatus) = self.run_pqos(iface, "-d")
assert exitstatus == 0
assert re.search(r"Cache Monitoring Technology \(CMT\) events:\s*LLC Occupancy", stdout)
## PQOS - CMT Monitor LLC occupancy (cores)
#
# \b Priority: High
#
# \b Objective:
# Verify CMT values for core
#
# \b Instruction:
# 1. Run "taskset -c 4 memtester 100M" in the background
# 2. Run "pqos [-I] -m llc:0-15" to start CMT monitoring
# 3. Terminate memtester
#
# \b Result:
# Value in LLC[KB] column for core 4 is much higher than for other cores
@PRIORITY_HIGH
@pytest.mark.rdt_supported("cqm_occup_llc")
def test_pqos_cmt_llc_occupancy_cores(self, iface):
def get_cmt(output, core):
cmt = None
lines = output.split("\n")
for line in lines:
match = re.search(r"^\s*([0-9]*)\s*[0-9]*\.[0-9]*\s*[0-9]*k\s*([0-9]*\.[0-9])\s*$",
line)
if match:
curr_core = int(match.group(1))
if curr_core == core:
cmt = float(match.group(2))
return cmt
command = "taskset -c 4 memtester 100M"
subprocess.Popen(command.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
time.sleep(2)
(stdout, _, exitcode) = self.run_pqos(iface, "-m llc:0-15 -t 1")
assert exitcode == 0
assert re.search(r"CORE\s*IPC\s*MISSES\s*LLC\[KB\]", stdout)
cmt = get_cmt(stdout, 4)
assert cmt > 1000
for core in range(16):  # cores 0-15 are monitored above
if core == 4:
continue
assert get_cmt(stdout, core) < cmt / 2
## PQOS - CMT Monitor LLC occupancy (tasks)
#
# \b Priority: High
#
# \b Objective:
# Verify CMT values for task id
#
# \b Instruction:
# 1. Run "memtester 100M" in the background
# 2. Run "pqos -I -p llc:<memtester pid> -p llc:1" to start CMT monitoring
# 3. Terminate memtester
#
# \b Result:
# LLC column present in output. LLC value for memtester is much higher than for other PID
@PRIORITY_HIGH
@pytest.mark.iface_os
@pytest.mark.rdt_supported("cqm_occup_llc")
def test_pqos_cmt_llc_occupancy_tasks(self, iface):
def get_cmt(output, pid):
cmt = None
lines = output.split("\n")
for line in lines:
# pylint: disable=line-too-long
match = re.search(r"^\s*([0-9]*)\s*[0-9,]*\s*[0-9]*\.[0-9]*\s*[0-9]*k\s*([0-9]*\.[0-9])\s*$", line)
if match:
curr_pid = int(match.group(1))
if curr_pid == pid:
cmt = float(match.group(2))
return cmt
command = "memtester 100M"
memtester = subprocess.Popen(command.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
time.sleep(2)
(stdout, _, exitcode) = self.run_pqos(iface, "-p llc:1 -p llc:%d -t 1" % memtester.pid)
assert exitcode == 0
assert re.search(r"PID\s*CORE\s*IPC\s*MISSES\s*LLC\[KB\]", stdout) is not None
assert get_cmt(stdout, memtester.pid) > 1000
assert get_cmt(stdout, 1) < 500
## PQOS - CMT Monitor LLC occupancy - percentage LLC for tasks
#
# \b Priority: High
#
# \b Objective:
# Verify CMT values for task id - value should be displayed as percentage of total cache
#
# \b Instruction:
# 1. Run "memtester 100M" in the background
# 2. Run "pqos -I -p llc:<memtester pid> -p llc:1 -P" to start CMT monitoring and LLC
# displayed as percentage value
# 3. Terminate memtester
#
# \b Result:
# LLC column present in output and is shown as a percentage. LLC value for memtester is much
# higher than for other PID
@PRIORITY_HIGH
@pytest.mark.iface_os
@pytest.mark.rdt_supported("cqm_occup_llc")
def test_pqos_cmt_llc_occupancy_tasks_percent(self, iface):
def get_cmt_percent(output, pid):
cmt = None
lines = output.split("\n")
for line in lines:
match = re.match(r"^\s*(\d+)" # PID number
r"(?:\s+\S+){3}" # CORE, IPC and MISSES (ignored)
r"\s+(\d{1,3}\.\d+)\s*$", # LLC[%] value
line)
if match:
curr_pid = int(match.group(1))
if curr_pid == pid:
cmt = float(match.group(2))
return cmt
command = "memtester 100M"
memtester = subprocess.Popen(command.split(), stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
time.sleep(2)
(stdout, _, exitcode) = self.run_pqos(iface,
"-p llc:1 -p llc:%d -t 2 -P" % memtester.pid)
assert exitcode == 0
assert re.search(r"PID\s*CORE\s*IPC\s*MISSES\s*LLC\[%\]", stdout) is not None
memtester_percent = get_cmt_percent(stdout, memtester.pid)
pid_one_percent = get_cmt_percent(stdout, 1)
# assuming that memtester will show higher LLC load than other pid
assert memtester_percent > pid_one_percent
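# Sketch of the monitoring output the get_cmt()/get_cmt_percent() helpers above are
# written against (all numbers are made up; real pqos output varies by platform):
#
#   CORE     IPC   MISSES     LLC[KB]
#      4    0.55      12k      2040.0
#
# and, for the task (-p) variants:
#
#   PID     CORE     IPC   MISSES     LLC[KB]
#   1234       4    0.55      12k      2040.0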
|
# -*- coding: utf-8 -*-
'''
Copyright (C) 2012-2018 Diego Torres Milano
Created on Jan 5, 2015
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Diego Torres Milano
'''
from __future__ import print_function
__version__ = '20.0.0b6'
import ast
import os
import platform
import re
def _nd(name):
'''
@return: Returns a named decimal regex
'''
return r'(?P<%s>\d+)' % name
def _nh(name):
'''
@return: Returns a named hex regex
'''
return r'(?P<%s>[0-9a-f]+)' % name
def _ns(name, greedy=False):
'''
NOTICE: this is using a non-greedy (or minimal) regex
@type name: str
@param name: the name used to tag the expression
@type greedy: bool
@param greedy: Whether the regex is greedy or not
@return: Returns a named string regex (only non-whitespace characters allowed)
'''
return r'(?P<%s>\S+%s)' % (name, '' if greedy else '?')
def obtainPxPy(m):
px = int(m.group('px'))
py = int(m.group('py'))
return (px, py)
def obtainVxVy(m):
wvx = int(m.group('vx'))
wvy = int(m.group('vy'))
return wvx, wvy
def obtainVwVh(m):
(wvx, wvy) = obtainVxVy(m)
wvx1 = int(m.group('vx1'))
wvy1 = int(m.group('vy1'))
return (wvx1 - wvx, wvy1 - wvy)
def which(program, isWindows=False):
import os
def is_exe(_fpath, _isWindows):
return os.path.isfile(_fpath) and os.access(_fpath, os.X_OK if not _isWindows else os.F_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program, isWindows):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file, isWindows):
return exe_file
return None
def obtainAdbPath():
'''
Obtains the ADB path by attempting known locations for different OSs
'''
FORCE_FAIL = False
''' Sometimes, you want it to fail to check the error messages '''
osName = platform.system()
isWindows = False
adb = 'adb'
if (osName.startswith('Windows')) or (osName.startswith('Java')):
envOSName = os.getenv('os') # this should work as it has been set since xp.
if envOSName.startswith('Windows'):
adb = 'adb.exe'
isWindows = True
exeFile = which(adb, isWindows)
if exeFile:
return exeFile
ANDROID_HOME = os.environ['ANDROID_HOME'] if 'ANDROID_HOME' in os.environ else '/opt/android-sdk'
HOME = os.environ['HOME'] if 'HOME' in os.environ else ''
possibleChoices = [os.path.join(ANDROID_HOME, 'platform-tools', adb),
os.path.join(HOME, "android", 'platform-tools', adb),
os.path.join(HOME, "android-sdk", 'platform-tools', adb),
]
if osName.startswith('Windows'):
possibleChoices.append(os.path.join("""C:\Program Files\Android\android-sdk\platform-tools""", adb))
possibleChoices.append(os.path.join("""C:\Program Files (x86)\Android\android-sdk\platform-tools""", adb))
elif osName.startswith('Linux'):
possibleChoices.append(os.path.join(os.sep, "opt", "android-sdk-linux", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "opt", "android-sdk-linux", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "android-sdk-linux", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, 'Android', 'Sdk', 'platform-tools', adb))
elif osName.startswith('Mac'):
possibleChoices.append(os.path.join(HOME, "Library", "Android", "sdk", 'platform-tools', adb))
possibleChoices.append(os.path.join(os.sep, "opt", "android-sdk-mac_x86", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "opt", "android-sdk-mac", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "android-sdk-mac", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "opt", "android-sdk-mac_x86", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "android-sdk-mac_x86", 'platform-tools', adb))
else:
# Unsupported OS
pass
possibleChoices.append(adb)
checkedFiles = []
for exeFile in possibleChoices:
checkedFiles.append(exeFile)
if not FORCE_FAIL and os.access(exeFile, os.X_OK):
return exeFile
for path in os.environ["PATH"].split(os.pathsep):
exeFile = os.path.join(path, adb)
checkedFiles.append(exeFile)
if not FORCE_FAIL and exeFile is not None and os.access(exeFile, os.X_OK if not isWindows else os.F_OK):
return exeFile
if 'ANDROID_HOME' not in os.environ:
helpMsg = 'Did you forget to set ANDROID_HOME in the environment?'
else:
helpMsg = ''
raise Exception('''adb="%s" is not executable. %s
These files were checked unsuccessfully while looking for a suitable '%s' executable:
%s
''' % (adb, helpMsg, adb, "\n ".join(checkedFiles)))
def profileStart():
import cProfile
global profile
profile = cProfile.Profile()
profile.enable()
def profileEnd():
profile.disable()
import io, pstats
import sys
s = io.StringIO()
ps = pstats.Stats(profile, stream=s).sort_stats('cumulative')
ps.print_stats()
print('.' * 60, file=sys.stderr)
print("STATS:\n", s.getvalue(), file=sys.stderr)
print('.' * 60, file=sys.stderr)
def debugArgsToDict(a):
"""
Converts a string representation of debug arguments to a dictionary.
The string can be of the form
IDENTIFIER1=val1,IDENTIFIER2=val2
:param a: the argument string
:return: the dictionary
"""
s = a.replace('+', ' ')
s = s.replace('=', ':')
s = re.sub(r'([A-Z][A-Z_]+)', r"'\1'", s)
return ast.literal_eval('{ ' + s + ' }')
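# A minimal, hypothetical illustration of debugArgsToDict(); the identifiers below
# are placeholders, not flags defined by this module.
if __name__ == '__main__':
    print(debugArgsToDict('DEBUG_LOG=True,DEBUG_DEVICE=False'))
    # -> {'DEBUG_LOG': True, 'DEBUG_DEVICE': False}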
|
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Find email addresses that match the person's name
"""
# end_pymotw_header
import re
address = re.compile(
"""
# The regular name
(?P<first_name>\w+)
\s+
(([\w.]+)\s+)? # optional middle name or initial
(?P<last_name>\w+)
\s+
<
# The address: first_name.last_name@domain.tld
(?P<email>
(?P=first_name)
\.
(?P=last_name)
@
([\w\d.]+\.)+ # domain name prefix
(com|org|edu) # limit the allowed top-level domains
)
>
""",
re.VERBOSE | re.IGNORECASE,
)
candidates = [
u"First Last <first.last@example.com>",
u"Different Name <first.last@example.com>",
u"First Middle Last <first.last@example.com>",
u"First M. Last <first.last@example.com>",
]
for candidate in candidates:
print("Candidate:", candidate)
match = address.search(candidate)
if match:
print(" Match name :", match.groupdict()["first_name"], end=" ")
print(match.groupdict()["last_name"])
print(" Match email:", match.groupdict()["email"])
else:
print(" No match")
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Script to generate fake Kitti files with random data for testing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import zipfile
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.utils import py_utils
from tensorflow_datasets.object_detection import kitti
from tensorflow_datasets.testing import fake_data_utils
flags.DEFINE_string("tfds_dir", py_utils.tfds_dir(),
"Path to tensorflow_datasets directory")
FLAGS = flags.FLAGS
NUM_IMAGES = 10
NUM_VIDEOS = 5
HEIGHT = 375
WIDTH = 1242
OBJECTS = [
"Car",
"Van",
"Truck",
"Pedestrian",
"Person_sitting",
"Cyclist",
"Tram",
"Misc",
]
def _get_png():
"""Returns a random png image."""
image = fake_data_utils.get_random_picture(HEIGHT, WIDTH)
png = tf.image.encode_png(image)
with utils.nogpu_session() as sess:
res = sess.run(png)
return res
def _list_f2s(float_list):
"""Converts a list of floats to strings."""
return ["{:0.2f}".format(x) for x in float_list]
def _list_d2s(int_list):
"""Converts a list of ints to strings."""
return ["{:d}".format(x) for x in int_list]
def _get_object_annotation():
"""Returns a annotation for a random object."""
objects = kitti._OBJECT_LABELS # pylint: disable=protected-access
obj_type = list(np.random.choice(objects, size=1))
truncated = _list_f2s(np.random.rand(1))
occluded = _list_d2s(np.random.choice(range(4), size=1))
alpha = _list_f2s(np.random.uniform(low=-np.pi, high=np.pi, size=1))
lr = np.random.uniform(low=0, high=WIDTH, size=2)
tb = np.random.uniform(low=0, high=HEIGHT, size=2)
# Left, top, right, bottom. Origin is the top-left pixel.
bbox = _list_f2s([min(lr), HEIGHT - max(tb), max(lr), HEIGHT - min(tb)])
# Height, width, length.
dimensions = _list_f2s(np.random.uniform(low=0, high=5, size=3))
location = _list_f2s(np.random.uniform(low=0, high=30, size=3))
rotation = _list_f2s(np.random.uniform(low=-np.pi, high=np.pi, size=1))
return " ".join(obj_type + truncated + occluded + alpha + bbox + dimensions +
location + rotation)
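# Illustration only (all values below are made up): each string produced above follows
# the 15-field KITTI label layout
#   "type truncated occluded alpha x1 y1 x2 y2 h w l x y z rotation_y"
# for example:
#   "Car 0.42 1 -1.57 100.00 150.00 300.00 250.00 1.50 1.60 3.70 2.00 1.50 20.00 0.30"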
def _get_dontcare_object_annotation():
"""Returns a annotation for a random object in class `DontCare`."""
obj_type = ["DontCare"]
truncated = _list_f2s([-1])
occluded = _list_d2s([-1])
alpha = _list_f2s([-10])
lr = np.random.uniform(low=0, high=WIDTH, size=2)
tb = np.random.uniform(low=0, high=HEIGHT, size=2)
# Left, top, right, bottom. Origin is the top-left pixel.
bbox = _list_f2s([min(lr), HEIGHT - max(tb), max(lr), HEIGHT - min(tb)])
# Height, width, length.
dimensions = _list_f2s([-1] * 3)
location = _list_f2s([-1000] * 3)
rotation = _list_f2s([-10])
return " ".join(obj_type + truncated + occluded + alpha + bbox + dimensions +
location + rotation)
def _get_annotations():
"""Generates annotations for a random number of objects in the image."""
annotation = []
for _ in range(np.random.choice(range(1, 10))):
annotation.append(_get_object_annotation())
# Add some DontCare objects.
for _ in range(np.random.choice(range(1, 3))):
annotation.append(_get_dontcare_object_annotation())
return annotation
def _output_dir():
"""Returns output directory."""
return os.path.join(FLAGS.tfds_dir, "testing", "test_data", "fake_examples",
"kitti")
def _get_label_file(annotation):
"""Returns path to label files."""
fobj = tempfile.NamedTemporaryFile(delete=False, mode="wb", suffix=".txt")
for row in annotation:
fobj.write((row + "\n").encode())
fobj.close()
return fobj.name
def _get_mapping_files():
"""Returns dummy image to video mapping files."""
# Random indices file.
train_rand = np.random.permutation(range(1, NUM_IMAGES + 1)) # 1-based index
fobj_rand = tempfile.NamedTemporaryFile(
delete=False, mode="wb", suffix=".txt")
fobj_rand.write(",".join([str(x) for x in train_rand])) # pytype: disable=wrong-arg-types
fobj_rand.close()
# Mapping file.
fobj_map = tempfile.NamedTemporaryFile(delete=False, mode="wb", suffix=".txt")
assert NUM_IMAGES > NUM_VIDEOS
assert NUM_IMAGES % NUM_VIDEOS == 0
vid_ids = list(range(NUM_VIDEOS)) * (NUM_IMAGES // NUM_VIDEOS)
for vid in vid_ids:
row = "2011_09_26 2011_09_26_drive_00{:02d}_sync 0000000123".format(vid)
fobj_map.write((row + "\n").encode())
fobj_map.close()
return fobj_rand.name, fobj_map.name
def _create_zip_files():
"""Saves png and label using name index."""
if not os.path.exists(_output_dir()):
os.makedirs(_output_dir())
images_out_path = os.path.join(_output_dir(), "data_object_image_2.zip")
with zipfile.ZipFile(images_out_path, "w") as image_zip:
for i in range(NUM_IMAGES):
png = fake_data_utils.get_random_png(HEIGHT, WIDTH)
image_zip.write(
png, os.path.join("training", "image_2",
"image_{:06d}.png".format(i)))
label_out_path = os.path.join(_output_dir(), "data_object_label_2.zip")
with zipfile.ZipFile(label_out_path, "w") as label_zip:
for i in range(NUM_IMAGES):
annotation = _get_annotations()
label = _get_label_file(annotation)
label_zip.write(
label,
os.path.join("training", "label_2", "label_{:06d}.txt".format(i)))
devkit_out_path = os.path.join(_output_dir(), "devkit_object.zip")
with zipfile.ZipFile(devkit_out_path, "w") as devkit_zip:
train_rand, train_mapping = _get_mapping_files()
devkit_zip.write(train_rand, os.path.join("mapping", "train_rand.txt"))
devkit_zip.write(train_mapping, os.path.join("mapping",
"train_mapping.txt"))
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
_create_zip_files()
if __name__ == "__main__":
app.run(main)
|
import asyncio
import logging
import os
from cryptoxlib.CryptoXLib import CryptoXLib
from cryptoxlib.Pair import Pair
from cryptoxlib.clients.bitvavo import enums
from cryptoxlib.clients.bitvavo.exceptions import BitvavoException
LOG = logging.getLogger("cryptoxlib")
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
print(f"Available loggers: {[name for name in logging.root.manager.loggerDict]}")
async def order_book_update(response: dict) -> None:
print(f"Callback order_book_update: [{response}]")
async def run():
api_key = os.environ['BITVAVOAPIKEY']
sec_key = os.environ['BITVAVOSECKEY']
client = CryptoXLib.create_bitvavo_client(api_key, sec_key)
print("Time:")
await client.get_time()
print("Exchange info:")
await client.get_exchange_info()
print("Assets:")
await client.get_assets()
print("Open orders:")
await client.get_open_orders()
print("Create order:")
try:
await client.create_order(pair = Pair("BTC", "EUR"), side = enums.OrderSide.BUY, type = enums.OrderType.LIMIT,
amount = "10000", price = "1")
except BitvavoException as e:
print(e)
print("Cancel order:")
try:
await client.cancel_order(pair = Pair("BTC", "EUR"), order_id = "1")
except BitvavoException as e:
print(e)
print("Balance:")
await client.get_balance()
await client.close()
if __name__ == "__main__":
asyncio.run(run())
|
# -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import json
def read_json(filename, default):
path = os.path.join(os.path.dirname(__file__), filename)
try:
with open(path, 'r') as f:
r = json.loads(f.read())
except:
r = default
return r
class AbstractNet:
@classmethod
def max_checkpoint(cls) -> int:
return max(0, len(cls.CHECKPOINTS) * 2016 - 1)
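    # Illustration (hypothetical checkpoint count): each entry in CHECKPOINTS covers one
    # 2016-block retarget interval, so a subclass with 10 checkpoints reports
    # max_checkpoint() == 10 * 2016 - 1 == 20159, and an empty list yields 0.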
class BitcoinMainnet(AbstractNet):
TESTNET = False
WIF_PREFIX = 128
ADDRTYPE_P2PKH = 60
ADDRTYPE_P2SH = 122
ADDRTYPE_P2SH_ALT = 122
SEGWIT_HRP = ""
GENESIS = "0000006b444bc2f2ffe627be9d9e7e7a0730000870ef6eb6da46c8eae389df90"
DEFAULT_PORTS = {'t': '50001', 's': '50002'}
DEFAULT_SERVERS = read_json('servers.json', {})
CHECKPOINTS = read_json('checkpoints.json', [])
XPRV_HEADERS = {
'standard': 0x0488ade4, # xprv
'p2wpkh-p2sh': 0x049d7878, # yprv
'p2wsh-p2sh': 0x0295b005, # Yprv
'p2wpkh': 0x04b2430c, # zprv
'p2wsh': 0x02aa7a99, # Zprv
}
XPUB_HEADERS = {
'standard': 0x0488b21e, # xpub
'p2wpkh-p2sh': 0x049d7cb2, # ypub
'p2wsh-p2sh': 0x0295b43f, # Ypub
'p2wpkh': 0x04b24746, # zpub
'p2wsh': 0x02aa7ed3, # Zpub
}
BIP44_COIN_TYPE = 175
class BitcoinTestnet(AbstractNet):
TESTNET = True
WIF_PREFIX = 239
ADDRTYPE_P2PKH = 111
ADDRTYPE_P2SH = 196
ADDRTYPE_P2SH_ALT = 196
SEGWIT_HRP = ""
GENESIS = "0000006b444bc2f2ffe627be9d9e7e7a0730000870ef6eb6da46c8eae389df90"
DEFAULT_PORTS = {'t': '51001', 's': '51002'}
DEFAULT_SERVERS = read_json('servers_testnet.json', {})
CHECKPOINTS = read_json('checkpoints_testnet.json', [])
XPRV_HEADERS = {
'standard': 0x04358394, # tprv
'p2wpkh-p2sh': 0x044a4e28, # uprv
'p2wsh-p2sh': 0x024285b5, # Uprv
'p2wpkh': 0x045f18bc, # vprv
'p2wsh': 0x02575048, # Vprv
}
XPUB_HEADERS = {
'standard': 0x043587cf, # tpub
'p2wpkh-p2sh': 0x044a5262, # upub
'p2wsh-p2sh': 0x024289ef, # Upub
'p2wpkh': 0x045f1cf6, # vpub
'p2wsh': 0x02575483, # Vpub
}
BIP44_COIN_TYPE = 175
class BitcoinRegtest(BitcoinTestnet):
SEGWIT_HRP = ""
GENESIS = "7543a69d7c2fcdb29a5ebec2fc064c074a35253b6f3072c8a749473aa590a29c"
DEFAULT_SERVERS = read_json('servers_regtest.json', {})
CHECKPOINTS = []
class BitcoinSimnet(BitcoinTestnet):
SEGWIT_HRP = "sb"
GENESIS = "683e86bd5c6d110d91b94b97137ba6bfe02dbbdb8e3dff722a669b5d69d77af6"
DEFAULT_SERVERS = read_json('servers_regtest.json', {})
CHECKPOINTS = []
# don't import net directly, import the module instead (so that net is singleton)
net = BitcoinMainnet
def set_simnet():
global net
net = BitcoinSimnet
def set_mainnet():
global net
net = BitcoinMainnet
def set_testnet():
global net
net = BitcoinTestnet
def set_regtest():
global net
net = BitcoinRegtest
|
import datetime as dt
from paraview import servermanager
from paraview.simple import *
from paraview.benchmark import *
#import logbase, logparser
logbase.maximize_logs()
records = []
n0 = dt.datetime.now()
def get_render_view(size):
'''Similar to GetRenderView except if a new view is created, it's
created with the specified size instead of having to resize afterwards
'''
view = active_objects.view
if not view:
# it's possible that there's no active view, but a render view exists.
# If so, locate that and return it (before trying to create a new one).
view = servermanager.GetRenderView()
if not view:
view = CreateRenderView(ViewSize=size)
return view
def save_render_buffer(fname):
'''Similar to SaveScreenshot except a re-render will not be triggered'''
from vtkmodules.vtkRenderingCore import vtkWindowToImageFilter
w = GetRenderView().SMProxy.GetRenderWindow()
w2i = vtkWindowToImageFilter()
w2i.ReadFrontBufferOff()
w2i.ShouldRerenderOff()
w2i.SetInput(w)
w2i.Modified()
png = PNGWriter()
png.Input = w2i.GetOutput()
png.FileName = fname
png.UpdatePipeline()
def flush_render_buffer():
'''When running as a single process use the WindowToImage filter to
force a framebuffer read. This bypasses driver optimizations that
perform lazy rendering and allows you to get actual frame rates for
a single process with a GPU. Multi-process doesn't need this since
compositing forces the frame buffer read.
'''
# If we're not using off-screen rendering then we can bypass this since
# the frame buffer display will force a GL flush
w = GetRenderView().SMProxy.GetRenderWindow()
if not w.GetOffScreenRendering():
return
from vtkmodules.vtkParallelCore import vtkMultiProcessController
from vtkmodules.vtkRenderingCore import vtkWindowToImageFilter
# If we're using MPI we can also bypass this since compositing will
# force a GL flush
controller = vtkMultiProcessController.GetGlobalController()
if controller.GetNumberOfProcesses() > 1:
return
# Force a GL flush by retrieving the frame buffer image
w2i = vtkWindowToImageFilter()
w2i.ReadFrontBufferOff()
w2i.ShouldRerenderOff()
w2i.SetInput(w)
w2i.Modified()
w2i.Update()
def memtime_stamp():
global records
global n0
m = logbase.get_memuse()
n1 = dt.datetime.now()
et = n1 - n0
print(et, m)
n0 = n1
records.append([et, m])
def run(output_basename='log', dimension=100, view_size=(1920, 1080),
num_frames=10, save_logs=True, ospray=False):
from vtkmodules.vtkParallelCore import vtkMultiProcessController
from vtkmodules.vtkCommonSystem import vtkTimerLog
controller = vtkMultiProcessController.GetGlobalController()
view = get_render_view(view_size)
if ospray:
view.EnableOSPRay = 1
print('Generating wavelet')
wavelet = Wavelet()
d2 = dimension/2
wavelet.WholeExtent = [-d2, d2, -d2, d2, -d2, d2]
wavelet.Maximum = 100.0
waveletDisplay = Show()
waveletDisplay.SetRepresentationType('Volume')
print('Repositioning initial camera')
c = GetActiveCamera()
c.Azimuth(22.5)
c.Elevation(22.5)
print('Rendering first frame')
Render()
print('Saving frame 0 screenshot')
import math
fdigits = int(math.ceil(math.log(num_frames, 10)))
frame_fname_fmt = output_basename + '.scene.f%(f)0' + str(fdigits) + 'd.png'
SaveScreenshot(frame_fname_fmt % {'f': 0})
print('Gathering geometry counts')
vtkTimerLog.MarkStartEvent('GetViewItemStats')
num_voxels = 0
for r in view.Representations:
num_voxels += r.GetRepresentedDataInformation().GetNumberOfCells()
vtkTimerLog.MarkEndEvent('GetViewItemStats')
print('Beginning benchmark loop')
deltaAz = 45.0 / num_frames
deltaEl = 45.0 / num_frames
memtime_stamp()
fpsT0 = dt.datetime.now()
for frame in range(1, num_frames):
c.Azimuth(deltaAz)
c.Elevation(deltaEl)
Render()
flush_render_buffer()
memtime_stamp()
fpsT1 = dt.datetime.now()
if controller.GetLocalProcessId() == 0:
if save_logs:
# Save the arguments this was executed with
with open(output_basename + '.args.txt', 'w') as argfile:
argfile.write(str({
'output_basename': output_basename,
'dimension': dimension,
'view_size': view_size,
'num_frames': num_frames,
'ospray' : ospray,
'save_logs': save_logs}))
# Save the memory statistics collected
with open(output_basename + '.mem.txt', 'w') as ofile:
ofile.write('\n'.join([str(x) for x in records]))
# Process frame timing statistics
logparser.summarize_results(num_frames, (fpsT1-fpsT0).total_seconds(),
num_voxels, 'Voxels', save_logs,
output_basename)
def main(argv):
import argparse
parser = argparse.ArgumentParser(
description='Benchmark ParaView geometry rendering')
parser.add_argument('-o', '--output-basename', default='log', type=str,
help='Basename to use for generated output files')
parser.add_argument('-d', '--dimension', default=100, type=int,
help='The dimension of each side of the cubic volume')
parser.add_argument('-v', '--view-size', default=[400, 400],
type=lambda s: [int(x) for x in s.split(',')],
help='View size used to render')
parser.add_argument('-f', '--frames', default=10, type=int,
help='Number of frames')
parser.add_argument('-y', '--ospray', action='store_true',
help='Use OSPRAY to render')
args = parser.parse_args(argv)
options = servermanager.vtkProcessModule.GetProcessModule().GetOptions()
url = options.GetServerURL()
if url:
import re
m = re.match('([^:/]*://)?([^:]*)(:([0-9]+))?', url)
if m.group(4):
Connect(m.group(2), m.group(4))
else:
Connect(m.group(2))
run(output_basename=args.output_basename, dimension=args.dimension,
view_size=args.view_size, num_frames=args.frames, ospray=args.ospray)
if __name__ == "__main__":
import sys
main(sys.argv[1:])
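# Typical invocation sketch (assumes a ParaView Python environment such as pvpython;
# the script name and numbers are placeholders):
#   pvpython volume_benchmark.py -d 128 -v 1920,1080 -f 20 -o bench
# This renders 20 frames of a wavelet volume of dimension 128 at 1920x1080 and writes
# bench.args.txt, bench.mem.txt, a frame-0 screenshot and the parsed timing summary.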
|
from csv_address_expander import CsvAddressExpander
def test_expand_row():
row = {"address": "61 Wellfield Rd. R. Cardiff", "country": "Wales"}
other_fields = ["country"]
expanded_rows = CsvAddressExpander.expand_row(row, other_fields)
expanded_rows = sorted(expanded_rows, key=lambda row: row["normalized_address"])
expected_normalized_addresses = [
'61 wellfield road r cardiff',
'61 wellfield road rear cardiff',
'61 wellfield road right cardiff',
'61 wellfield road road cardiff'
]
assert 4 == len(expanded_rows)
for index, expanded_row in enumerate(expanded_rows):
assert expected_normalized_addresses[index] == expanded_row["normalized_address"]
|
import sys
try:
from .tabcmd import main
except ImportError:
print("Tabcmd needs to be run as a module, it cannot be run as a script")
print("Try running python -m tabcmd")
sys.exit(1)
if __name__ == "__main__":
main()
|
"""
Base Django settings
====================
For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
import pathlib
from django.urls import reverse_lazy
PROJECT_PACKAGE_NAME = 'example'
# BASE DIRECTORIES
# ------------------------------------------------------------------------------
# Two base directories are considered for this project:
# The PROJECT_PATH corresponds to the path towards the root of this project (the root of the
# repository).
# The INSTALL_PATH corresponds to the path towards the directory where the project's repository
# is present on the filesystem.
# By default INSTALL_PATH is the same as PROJECT_PATH.
PROJECT_PATH = pathlib.Path(__file__).parents[2]
INSTALL_PATH = pathlib.Path(os.environ.get('DJANGO_INSTALL_PATH')) \
if 'DJANGO_INSTALL_PATH' in os.environ else PROJECT_PATH
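# For example (paths are illustrative only), deploying with
#   DJANGO_INSTALL_PATH=/srv/example python manage.py collectstatic
# makes INSTALL_PATH (and hence STATIC_ROOT and MEDIA_ROOT below) resolve under
# /srv/example while PROJECT_PATH still points at the repository root.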
# APP CONFIGURATION
# ------------------------------------------------------------------------------
INSTALLED_APPS = (
# Django apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.sitemaps',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third-party apps
'oidc_rp',
# Django's admin app
'django.contrib.admin',
)
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'oidc_rp.middleware.OIDCRefreshIDTokenMiddleware',
)
# DEBUG CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': str(PROJECT_PATH / 'example.db'),
},
}
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'EST'
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*', ]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#languages
LANGUAGES = (
('en', 'English'),
('fr', 'Français'),
)
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = 'INSECURE'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (
str(PROJECT_PATH / PROJECT_PACKAGE_NAME / 'templates'),
),
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
'loaders': [
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
]
},
},
]
# FILE STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(INSTALL_PATH / 'static')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(PROJECT_PATH / PROJECT_PACKAGE_NAME / 'static' / 'build'),
str(PROJECT_PATH / PROJECT_PACKAGE_NAME / 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-STATICFILES_STORAGE
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(INSTALL_PATH / 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL CONFIGURATION
# ------------------------------------------------------------------------------
ROOT_URLCONF = PROJECT_PACKAGE_NAME + '_project.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
# AUTH CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = reverse_lazy('oidc_auth_request')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'oidc_rp.backends.OIDCAuthBackend',
'django.contrib.auth.backends.ModelBackend',
]
# OIDC RELYING PARTY CONFIGURATION
# ------------------------------------------------------------------------------
OIDC_RP_PROVIDER_ENDPOINT = 'https://example.com/a/'
OIDC_RP_CLIENT_ID = 'CLIENT_ID'
OIDC_RP_CLIENT_SECRET = 'INSECURE_CLIENT_SECRET'
OIDC_RP_SIGNUP_URL = 'https://example.com/signup/'
|
"""Tools for setting up interactive sessions. """
from sympy.interactive.printing import init_printing
preexec_source = """\
from __future__ import division
from sympy import *
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
"""
verbose_message = """\
These commands were executed:
%(source)s
Documentation can be found at http://www.sympy.org
"""
no_ipython = """\
Couldn't locate IPython. Having IPython installed is greatly recommended.
See http://ipython.scipy.org for more details. If you use Debian/Ubuntu,
just install the 'ipython' package and start isympy again.
"""
def _make_message(ipython=True, quiet=False, source=None):
"""Create a banner for an interactive session. """
from sympy import __version__ as sympy_version
from sympy.polys.domains import GROUND_TYPES
from sympy.utilities.misc import ARCH
from sympy import SYMPY_DEBUG
import sys
import os
python_version = "%d.%d.%d" % sys.version_info[:3]
if ipython:
shell_name = "IPython"
else:
shell_name = "Python"
info = ['ground types: %s' % GROUND_TYPES]
cache = os.getenv('SYMPY_USE_CACHE')
if cache is not None and cache.lower() == 'no':
info.append('cache: off')
if SYMPY_DEBUG:
info.append('debugging: on')
args = shell_name, sympy_version, python_version, ARCH, ', '.join(info)
message = "%s console for SymPy %s (Python %s-%s) (%s)\n" % args
if not quiet:
if source is None:
source = preexec_source
_source = ""
for line in source.split('\n')[:-1]:
if not line:
_source += '\n'
else:
_source += '>>> ' + line + '\n'
message += '\n' + verbose_message % {'source': _source}
return message
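# Illustration of the banner assembled above (version strings, architecture and ground
# types are placeholders, not real output):
#   IPython console for SymPy 0.7.2 (Python 2.7.3-64-bit) (ground types: python)
#
#   These commands were executed:
#   >>> from __future__ import division
#   >>> from sympy import *
#   >>> x, y, z, t = symbols('x y z t')
#   ...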
def int_to_Integer(s):
"""
Wrap integer literals with Integer.
This is based on the decistmt example from
http://docs.python.org/library/tokenize.html.
Only integer literals are converted. Float literals are left alone.
Example
=======
>>> from sympy.interactive.session import int_to_Integer
>>> from sympy import Integer
>>> s = '1.2 + 1/2 - 0x12 + a1'
>>> int_to_Integer(s)
'1.2 +Integer (1 )/Integer (2 )-Integer (0x12 )+a1 '
>>> s = 'print (1/2)'
>>> int_to_Integer(s)
'print (Integer (1 )/Integer (2 ))'
>>> exec(s) #doctest: +SKIP
0.5
>>> exec(int_to_Integer(s))
1/2
"""
from tokenize import generate_tokens, untokenize, NUMBER, NAME, OP
from StringIO import StringIO
def _is_int(num):
"""
Returns true if string value num (with token NUMBER) represents an integer.
"""
# XXX: Is there something in the standard library that will do this?
if '.' in num or 'j' in num.lower() or 'e' in num.lower():
return False
return True
result = []
g = generate_tokens(StringIO(s).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
if toknum == NUMBER and _is_int(tokval): # replace NUMBER tokens
result.extend([
(NAME, 'Integer'),
(OP, '('),
(NUMBER, tokval),
(OP, ')')
])
else:
result.append((toknum, tokval))
return untokenize(result)
# XXX: Something like this might be used, but it only works on single line
# inputs. See
# http://mail.scipy.org/pipermail/ipython-user/2012-August/010846.html and
# https://github.com/ipython/ipython/issues/1491. So instead we are forced to
# just monkey-patch run_cell until IPython builds a better API.
#
# class IntTransformer(object):
# """
# IPython command line transformer that recognizes and replaces int
# literals.
#
# Based on
# https://bitbucket.org/birkenfeld/ipython-physics/src/71b2d850da00/physics.py.
#
# """
# priority = 99
# enabled = True
# def transform(self, line, continue_prompt):
# import re
# from tokenize import TokenError
# leading_space = re.compile(' *')
# spaces = re.match(leading_space, line).span()[1]
# try:
# return ' '*spaces + int_to_Integer(line)
# except TokenError:
# return line
#
# int_transformer = IntTransformer()
#
# def enable_automatic_int_sympification(app):
# """
# Allow IPython to automatically convert integer literals to Integer.
#
# This lets things like 1/2 be executed as (essentially) Rational(1, 2).
# """
# app.shell.prefilter_manager.register_transformer(int_transformer)
def enable_automatic_int_sympification(app):
"""
Allow IPython to automatically convert integer literals to Integer.
"""
hasshell = hasattr(app, 'shell')
import ast
if hasshell:
old_run_cell = app.shell.run_cell
else:
old_run_cell = app.run_cell
def my_run_cell(cell, *args, **kwargs):
try:
# Check the cell for syntax errors. This way, the syntax error
# will show the original input, not the transformed input. The
# downside here is that IPython magic like %timeit will not work
# with transformed input (but on the other hand, IPython magic
# that doesn't expect transformed input will continue to work).
ast.parse(cell)
except SyntaxError:
pass
else:
cell = int_to_Integer(cell)
old_run_cell(cell, *args, **kwargs)
if hasshell:
app.shell.run_cell = my_run_cell
else:
app.run_cell = my_run_cell
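# Illustration (hypothetical IPython session): with the wrapper installed, an input
# cell such as
#   In [1]: 1/3 + 1/6
# is passed through int_to_Integer() before execution, so it evaluates to the exact
# Rational 1/2 rather than the float 0.5.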
def enable_automatic_symbols(app):
"""Allow IPython to automatially create symbols (``isympy -a``). """
# XXX: This should perhaps use tokenize, like int_to_Integer() above.
# This would avoid re-executing the code, which can lead to subtle
# issues. For example:
#
# In [1]: a = 1
#
# In [2]: for i in range(10):
# ...: a += 1
# ...:
#
# In [3]: a
# Out[3]: 11
#
# In [4]: a = 1
#
# In [5]: for i in range(10):
# ...: a += 1
# ...: print b
# ...:
# b
# b
# b
# b
# b
# b
# b
# b
# b
# b
#
# In [6]: a
# Out[6]: 12
#
# Note how the for loop is executed again because `b` was not defined, but `a`
# was already incremented once, so the result is that it is incremented
# multiple times.
import re
re_nameerror = re.compile(
"name '(?P<symbol>[A-Za-z_][A-Za-z0-9_]*)' is not defined")
def _handler(self, etype, value, tb, tb_offset=None):
"""Handle :exc:`NameError` exception and allow injection of missing symbols. """
if etype is NameError and tb.tb_next and not tb.tb_next.tb_next:
match = re_nameerror.match(str(value))
if match is not None:
# XXX: Make sure Symbol is in scope. Otherwise you'll get infinite recursion.
self.run_cell("%(symbol)s = Symbol('%(symbol)s')" %
{'symbol': match.group("symbol")}, store_history=False)
try:
code = self.user_ns['In'][-1]
except (KeyError, IndexError):
pass
else:
self.run_cell(code, store_history=False)
return None
finally:
self.run_cell("del %s" % match.group("symbol"),
store_history=False)
stb = self.InteractiveTB.structured_traceback(
etype, value, tb, tb_offset=tb_offset)
self._showtraceback(etype, value, stb)
if hasattr(app, 'shell'):
app.shell.set_custom_exc((NameError,), _handler)
else:
# This was restructured in IPython 0.13
app.set_custom_exc((NameError,), _handler)
def init_ipython_session(argv=[], auto_symbols=False, auto_int_to_Integer=False):
"""Construct new IPython session. """
import IPython
if IPython.__version__ >= '0.11':
# use an app to parse the command line, and init config
from IPython.frontend.terminal import ipapp
app = ipapp.TerminalIPythonApp()
# don't draw IPython banner during initialization:
app.display_banner = False
app.initialize(argv)
if auto_symbols:
enable_automatic_symbols(app)
if auto_int_to_Integer:
enable_automatic_int_sympification(app)
return app.shell
else:
from IPython.Shell import make_IPython
return make_IPython(argv)
def init_python_session():
"""Construct new Python session. """
from code import InteractiveConsole
class SymPyConsole(InteractiveConsole):
"""An interactive console with readline support. """
def __init__(self):
InteractiveConsole.__init__(self)
try:
import readline
except ImportError:
pass
else:
import os
import atexit
readline.parse_and_bind('tab: complete')
if hasattr(readline, 'read_history_file'):
history = os.path.expanduser('~/.sympy-history')
try:
readline.read_history_file(history)
except IOError:
pass
atexit.register(readline.write_history_file, history)
return SymPyConsole()
def init_session(ipython=None, pretty_print=True, order=None,
use_unicode=None, use_latex=None, quiet=False, auto_symbols=False,
auto_int_to_Integer=False, argv=[]):
"""
Initialize an embedded IPython or Python session. The IPython session is
initiated with the --pylab option, without the numpy imports, so that
matplotlib plotting can be interactive.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify;
if False, use sstrrepr to stringify.
order: string or None
There are a few different settings for this parameter:
        lex (default), which is lexicographic order;
        grlex, which is graded lexicographic order;
        grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: boolean or None
        If True, use latex rendering in IPython GUIs;
if False, do not use latex rendering.
quiet: boolean
If True, init_session will not print messages regarding its status;
if False, init_session will print messages regarding its status.
auto_symbols: boolean
If True, IPython will automatically create symbols for you.
If False, it will not.
The default is False.
auto_int_to_Integer: boolean
If True, IPython will automatically wrap int literals with Integer, so
that things like 1/2 give Rational(1, 2).
If False, it will not.
The default is False.
ipython: boolean or None
If True, printing will initialize for an IPython console;
if False, printing will initialize for a normal console;
The default is None, which does what False does.
argv: list of arguments for IPython
See sympy.bin.isympy for options that can be used to initialize IPython.
See Also
========
sympy.interactive.printing.init_printing: for examples and the rest of the parameters.
Examples
========
>>> from sympy import init_session, Symbol, sin, sqrt
>>> sin(x) #doctest: +SKIP
NameError: name 'x' is not defined
>>> init_session() #doctest: +SKIP
>>> sin(x) #doctest: +SKIP
sin(x)
>>> sqrt(5) #doctest: +SKIP
___
\/ 5
>>> init_session(pretty_print=False) #doctest: +SKIP
>>> sqrt(5) #doctest: +SKIP
sqrt(5)
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + x + y**2 + y
>>> init_session(order='grlex') #doctest: +SKIP
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + y**2 + x + y
>>> init_session(order='grevlex') #doctest: +SKIP
>>> y * x**2 + x * y**2 #doctest: +SKIP
x**2*y + x*y**2
>>> init_session(order='old') #doctest: +SKIP
>>> x**2 + y**2 + x + y #doctest: +SKIP
x + y + x**2 + y**2
>>> theta = Symbol('theta') #doctest: +SKIP
>>> theta #doctest: +SKIP
theta
>>> init_session(use_unicode=True) #doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
"""
import sys
in_ipython = False
if ipython is False:
ip = init_python_session()
mainloop = ip.interact
else:
try:
import IPython
except ImportError:
if ipython is not True:
if not quiet:
print no_ipython
ip = init_python_session()
mainloop = ip.interact
else:
raise RuntimeError("IPython is not available on this system")
else:
ipython = True
if IPython.__version__ >= '0.11':
try:
ip = get_ipython()
except NameError:
ip = None
else:
ip = IPython.ipapi.get()
if ip:
ip = ip.IP
if ip is not None:
in_ipython = True
else:
ip = init_ipython_session(argv=argv,
auto_symbols=auto_symbols, auto_int_to_Integer=auto_int_to_Integer)
if IPython.__version__ >= '0.11':
# runsource is gone, use run_cell instead, which doesn't
# take a symbol arg. The second arg is `store_history`,
# and False means don't add the line to IPython's history.
ip.runsource = lambda src, symbol='exec': ip.run_cell(
src, False)
#Enable interactive plotting using pylab.
try:
ip.enable_pylab(import_all=False)
except Exception:
# Causes an import error if matplotlib is not installed.
# Causes other errors (depending on the backend) if there
# is no display, or if there is some problem in the
# backend, so we have a bare "except Exception" here
pass
if not in_ipython:
mainloop = ip.mainloop
if auto_symbols and (not ipython or IPython.__version__ < '0.11'):
raise RuntimeError("automatic construction of symbols is possible only in IPython 0.11 or above")
if auto_int_to_Integer and (not ipython or IPython.__version__ < '0.11'):
raise RuntimeError("automatic int to Integer transformation is possible only in IPython 0.11 or above")
_preexec_source = preexec_source
ip.runsource(_preexec_source, symbol='exec')
init_printing(pretty_print=pretty_print, order=order,
use_unicode=use_unicode, use_latex=use_latex, ip=ip)
message = _make_message(ipython, quiet, _preexec_source)
if not in_ipython:
mainloop(message)
sys.exit('Exiting ...')
else:
ip.write(message)
ip.set_hook('shutdown_hook', lambda ip: ip.write("Exiting ...\n"))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import logging
import sys
from contextlib import contextmanager
import base64
import datetime
from cryptography.fernet import Fernet
from ..compat import *
from ..exception import InterfaceError
logger = logging.getLogger(__name__)
interfaces = {}
"""holds all configured interfaces"""
def get_interfaces():
global interfaces
return interfaces
def get_interface(connection_name=""):
"""get the configured interface that corresponds to connection_name"""
global interfaces
i = interfaces[connection_name]
return i
def set_interface(interface, connection_name=""):
"""bind an .interface.Interface() instance to connection_name"""
global interfaces
interfaces[connection_name] = interface
class InterfaceMessage(object):
"""this is a thin wrapper around all received interface messages
An instance of this class exposes these properties:
    .fields -- the fields that will be passed to the backend interface
._count -- how many times this message has been received from the backend interface
._created -- when this message was first sent
"""
@property
def _id(self):
raise NotImplementedError()
@property
def body(self):
"""Return the body of the current internal fields"""
d = self.depart()
return self._encode(d)
@body.setter
def body(self, b):
"""this will take a body and convert it to fields"""
d = self._decode(b)
self.update(fields=d)
def __init__(self, name, interface, raw):
"""
interface -- Interface -- the specific interface to send/receive messages
raw -- mixed -- the raw message the interface returned
"""
self.name = name
self.fields = {} # the original fields you passed to the Interface send method
self.interface = interface
self.raw = raw
self.update()
def send(self):
return self.interface.send(self.name, self)
def ack(self):
return self.interface.ack(self.name, self)
def depart(self):
"""whatever is returned from this method is serialized and placed in the body
of the message that is actually sent through the interface. This can return
anything since it is serialized but you will probably need to mess with
populate() also since the default implementations expect dicts.
return -- mixed -- anything you want to send in the message in the form you
want to send it
"""
return {
"fields": self.fields,
"_count": self._count + 1,
"_created": self._created if self._created else datetime.datetime.utcnow()
}
def populate(self, fields):
"""when a message is read from the interface, the unserialized "fields" of
        the returned body will pass through this method.
fields -- mixed -- the body, unserialized, read from the backend interface
"""
if not fields: fields = {}
self.fields = fields.get("fields", fields)
self._count = fields.get("_count", 0)
self._created = fields.get("_created", None)
def update(self, fields=None, body=""):
"""this is the public wrapper around populate(), usually, when you want to
customize functionality you would override populate() and depart() and leave
this method alone"""
# we call this regardless to set defaults
self.populate(fields)
if body:
# this will override any fields (and defaults) that were set in populate
self.body = body
def _encode(self, fields):
"""prepare a message to be sent over the backend
fields -- dict -- the fields to be converted to a string
return -- string -- the message all ready to be sent
"""
ret = pickle.dumps(fields, pickle.HIGHEST_PROTOCOL)
key = self.interface.connection_config.key
if key:
logger.debug("Encrypting fields")
f = Fernet(key)
ret = String(f.encrypt(ByteString(ret)))
else:
ret = String(base64.b64encode(ret))
return ret
def _decode(self, body):
"""this turns a message body back to the original fields
body -- string -- the body to be converted to a dict
return -- dict -- the fields of the original message
"""
key = self.interface.connection_config.key
if key:
logger.debug("Decoding encrypted body")
f = Fernet(key)
ret = f.decrypt(ByteString(body))
else:
ret = base64.b64decode(body)
ret = pickle.loads(ret)
return ret
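# A minimal standalone sketch (not part of the original class) of the round trip that
# _encode()/_decode() above perform, assuming the compat String/ByteString helpers simply
# convert between text and bytes. Defining the function has no side effects.
def _example_body_roundtrip():
    import base64
    import pickle
    from cryptography.fernet import Fernet
    fields = {"foo": 1, "bar": "baz"}
    # encrypted path: the connection config carries a Fernet key
    key = Fernet.generate_key()
    f = Fernet(key)
    body = f.encrypt(pickle.dumps(fields, pickle.HIGHEST_PROTOCOL))
    assert pickle.loads(f.decrypt(body)) == fields
    # unencrypted path: plain base64 around the pickled fields
    body = base64.b64encode(pickle.dumps(fields, pickle.HIGHEST_PROTOCOL))
    assert pickle.loads(base64.b64decode(body)) == fields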
class Interface(object):
"""base class for interfaces to messaging"""
connected = False
"""true if a connection has been established, false otherwise"""
connection_config = None
"""a config.Connection() instance"""
message_class = InterfaceMessage
"""the interface message class that is used to send/receive the actual messages,
this is different than the message.Message classes, see .create_msg()"""
def __init__(self, connection_config=None):
self.connection_config = connection_config
def create_message(self, name, fields=None, body=None, raw=None):
"""create an interface message that is used to send/receive to the backend
interface, this message is used to keep the api similar across the different
methods and backends"""
interface_msg = self.message_class(name, interface=self, raw=raw)
interface_msg.update(fields=fields, body=body)
return interface_msg
create_msg = create_message
def _connect(self, connection_config): raise NotImplementedError()
def connect(self, connection_config=None):
"""connect to the interface
        establishes the backend connection and marks this interface as connected
"""
if self.connected: return self.connected
if connection_config: self.connection_config = connection_config
self.connection_config.options.setdefault('max_timeout', 3600) # 1 hour to process message
self.connection_config.options.setdefault('backoff_multiplier', 5) # failure backoff multiplier
try:
self.connected = False
self._connect(self.connection_config)
self.connected = True
self.log("Connected")
except Exception as e:
raise self.raise_error(e)
return self.connected
def get_connection(self): raise NotImplementedError()
def _close(self): raise NotImplementedError()
def close(self):
"""
close an open connection
"""
if not self.connected: return;
self._close()
self.connected = False
self.log("Closed Connection")
@contextmanager
def connection(self, connection=None, **kwargs):
try:
if connection:
yield connection
else:
if not self.connected: self.connect()
try:
connection = self.get_connection()
yield connection
except:
raise
except Exception as e:
self.raise_error(e)
def _send(self, name, body, connection, **kwargs):
"""similar to self.send() but this takes a body, which is the message
completely encoded and ready to be sent by the backend, instead of an
interface_msg() instance"""
raise NotImplementedError()
def send(self, name, interface_msg, **kwargs):
"""send a message to message queue name
name -- string -- the queue name
interface_msg -- InterfaceMessage() -- an instance of InterfaceMessage, see self.create_message()
**kwargs -- dict -- anything else, this gets passed to self.connection()
"""
if not interface_msg.fields:
raise ValueError("the interface_msg has no fields to send")
with self.connection(**kwargs) as connection:
self._send(name, interface_msg.body, connection=connection, **kwargs)
self.log("Message sent to {} -- {}", name, interface_msg.fields)
def _count(self, name, connection, **kwargs): raise NotImplementedError()
def count(self, name, **kwargs):
"""count how many messages are in queue name"""
with self.connection(**kwargs) as connection:
ret = int(self._count(name, connection=connection))
return ret
def _recv(self, name, connection, **kwargs):
"""return -- tuple -- (body, raw) where body is the string of the
message that needs to be decrypted, and raw is the backend message
object instance, this is returned because things like .ack() might need
it to get an id or something"""
raise NotImplementedError()
def recv(self, name, timeout=None, **kwargs):
"""receive a message from queue name
timeout -- integer -- seconds to try and receive a message before returning None
return -- InterfaceMessage() -- an instance containing fields and raw
"""
with self.connection(**kwargs) as connection:
interface_msg = None
body, raw = self._recv(
name,
connection=connection,
timeout=timeout,
**kwargs
)
if body:
interface_msg = self.create_message(name, body=body, raw=raw)
self.log(
"Message {} received from {} -- {}",
interface_msg._id,
name,
interface_msg.fields
)
return interface_msg
def _release(self, name, interface_msg, connection, **kwargs): raise NotImplementedError()
def release(self, name, interface_msg, **kwargs):
"""release the message back into the queue, this is usually for when processing
the message has failed and so a new attempt to process the message should be made"""
#interface_msg.raw.load()
with self.connection(**kwargs) as connection:
delay_seconds = max(kwargs.get('delay_seconds', 0), 0)
if delay_seconds == 0:
cnt = interface_msg._count
if cnt:
max_timeout = self.connection_config.options.get("max_timeout")
backoff = self.connection_config.options.get("backoff_multiplier")
delay_seconds = min(
max_timeout,
(cnt * backoff) * cnt
)
self._release(name, interface_msg, connection=connection, delay_seconds=delay_seconds)
self.log(
"Message {} released back to {} count {}, with delay {}s",
interface_msg._id,
name,
interface_msg._count,
delay_seconds
)
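    # With the defaults set in connect() (backoff_multiplier=5, max_timeout=3600),
    # the re-delivery delay above grows quadratically with the receive count:
    # count 1 -> 5s, 2 -> 20s, 3 -> 45s, 10 -> 500s, and from count 27 onward it is
    # capped at 3600s (one hour).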
def _ack(self, name, interface_msg, connection, **kwargs): raise NotImplementedError()
def ack(self, name, interface_msg, **kwargs):
"""this will acknowledge that the interface message was received successfully"""
with self.connection(**kwargs) as connection:
self._ack(name, interface_msg, connection=connection)
self.log("Message {} acked from {}", interface_msg._id, name)
def _clear(self, name, connection, **kwargs): raise NotImplementedError()
def clear(self, name, **kwargs):
"""cliear the queue name"""
with self.connection(**kwargs) as connection:
self._clear(name, connection=connection)
self.log("Messages cleared from {}", name)
def _delete(self, name, connection, **kwargs): raise NotImplementedError()
def unsafe_delete(self, name, **kwargs):
with self.connection(**kwargs) as connection:
self._delete(name, connection=connection)
self.log("Queue {} deleted", name)
def log(self, format_str, *format_args, **log_options):
"""
wrapper around the module's logger
format_str -- string -- the message to log
        *format_args -- list -- if format_str is a string containing {}, then format_str.format(*format_args) is run
**log_options --
level -- something like logging.DEBUG
"""
log_level = getattr(logging, log_options.get('level', "DEBUG").upper())
if logger.isEnabledFor(log_level):
try:
if isinstance(format_str, Exception):
logger.exception(format_str, *format_args)
else:
if format_args:
logger.log(log_level, format_str.format(*format_args))
else:
logger.log(log_level, format_str)
except UnicodeError as e:
logger.error("Unicode error while logging", exc_info=True)
def raise_error(self, e, exc_info=None):
"""this is just a wrapper to make the passed in exception an InterfaceError"""
if not exc_info:
exc_info = sys.exc_info()
if not isinstance(e, InterfaceError):
e = InterfaceError(e, exc_info)
reraise(e.__class__, e, exc_info[2])
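# A minimal in-memory subclass sketch (illustrative only, not part of the original
# module) showing which hooks a concrete backend has to supply. A real backend (SQS,
# RabbitMQ, ...) would call its client library here, and would usually also provide a
# message_class whose _id property is implemented so recv()/ack() logging works.
class ExampleMemoryInterface(Interface):
    def _connect(self, connection_config):
        self.queues = {}
    def get_connection(self):
        return self.queues
    def _close(self):
        self.queues = {}
    def _send(self, name, body, connection, **kwargs):
        connection.setdefault(name, []).append(body)
    def _count(self, name, connection, **kwargs):
        return len(connection.get(name, []))
    def _recv(self, name, connection, timeout=None, **kwargs):
        msgs = connection.get(name, [])
        return (msgs.pop(0), None) if msgs else ("", None)
    def _ack(self, name, interface_msg, connection, **kwargs):
        pass  # _recv() already removed the message from the in-memory queue
    def _release(self, name, interface_msg, connection, delay_seconds=0, **kwargs):
        # the in-memory sketch ignores delay_seconds
        connection.setdefault(name, []).append(interface_msg.body)
    def _clear(self, name, connection, **kwargs):
        connection.pop(name, None)
    def _delete(self, name, connection, **kwargs):
        connection.pop(name, None)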
|
import time
from . import serial_connection
class StepperMotorDriver(object):
"""
Controls stepper motors.
"""
DEVICE_IDENTIFIER = "SMD"
def __init__(self, device_path: str = ""):
if device_path == "":
device_path = (
serial_connection.search_for_serial_devices(self.DEVICE_IDENTIFIER)
)[0]
print("Connected to", device_path)
self._com = serial_connection.SerialConnection(device_path)
# setvolt 0 1.5; interpol 0 2; setspeed 0 170; zero 0; on 0
def init_motor(self, channel: int):
self._com.write(
"setvolt {} 1.5; interpol {} 2; setspeed {} 170; zero {}; on {}\r\n".format(
channel, channel, channel, channel, channel
).encode()
)
self._com.write(
"setvolt {} 1.5; interpol {} 2; setspeed {} 170; zero {}; on {}\r\n".format(
channel, channel, channel, channel, channel
).encode()
)
def identity(self) -> str:
# self._com.write(b'*IDN?\r\n')
return self._com.getresponse("*idn?")
def help(self) -> str:
return self._com.get_help()
def on(self, channel: int):
"""
Locks the magnet in the stepper motor
"""
self._com.write("on {0}\r\n".format(channel).encode("ascii"))
self._com.write("on {0}\r\n".format(channel).encode("ascii"))
def off(self, channel: int):
"""
Unlocks the magnet in the stepper motor
"""
self._com.write("off {0}\r\n".format(channel).encode("ascii"))
self._com.write("off {0}\r\n".format(channel).encode("ascii"))
def go(self, channel: int, position: int):
"""
Go to absolute position
"""
self._com.write("go {0} {1}\r\n".format(channel, position).encode("ascii"))
self._com.write("go {0} {1}\r\n".format(channel, position).encode("ascii"))
# def set_voltage(self, channel, voltage):
# self._com.write('setvolt {0} {1};'.format(channel, voltage).encode('ascii'))
def get_position(self, channel: int) -> int:
pos1 = int(self._com.getresponse("pos?"))
pos2 = int(self._com.getresponse("pos?"))
if pos1 == pos2:
return pos2
return self.get_position(channel)
def zero(self, channel: int):
"""
Sets current position as zero
"""
self._com.write("zero {}\r\n".format(channel).encode("ascii"))
self._com.write("zero {}\r\n".format(channel).encode("ascii"))
def go_wait(self, channel: int, position: int):
"""
        Sends the move command to the motor and polls the motor position
until the motor reaches the desired position.
:param position: desired position in steps
"""
self.go(channel, position)
while self.get_position(channel) != position:
time.sleep(0.1)
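# Example usage sketch (assumes a controller answering to the "SMD" identifier is
# attached; the channel and target position below are illustrative):
#
#     driver = StepperMotorDriver()   # auto-detects the serial device
#     driver.init_motor(0)            # configure and energize channel 0
#     driver.go_wait(0, 2000)         # move to absolute position 2000 and block
#     driver.off(0)                   # release the holding current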
|
from __future__ import print_function
import json
import sys
import irods_six
try:
import jsonschema
except ImportError:
pass
try:
import requests
except ImportError:
pass
class ValidationError(Exception):
pass
class ValidationWarning(Warning):
pass
def load_and_validate(config_file, schema_uri, verbose=False):
try:
# load configuration file
with open(config_file, 'r') as f:
config_dict = json.load(f)
except BaseException as e:
irods_six.reraise(ValidationError, ValidationError('\n\t'.join([
'ERROR: Validation Failed for [{0}]:'.format(config_file),
'against [{0}]'.format(schema_uri),
'{0}: {1}'.format(e.__class__.__name__, e)])),
sys.exc_info()[2])
validate_dict(config_dict, schema_uri, name=config_file, verbose=verbose)
return config_dict
def validate_dict(config_dict, schema_uri, name=None, verbose=False):
if name is None:
name = schema_uri.rpartition('/')[2]
try:
e = jsonschema.exceptions
except AttributeError:
irods_six.reraise(ValidationWarning, ValidationWarning(
'WARNING: Validation failed for {0} -- jsonschema too old v[{1}]'.format(
name, jsonschema.__version__)),
sys.exc_info()[2])
except NameError:
irods_six.reraise(ValidationWarning, ValidationWarning(
'WARNING: Validation failed for {0} -- jsonschema not installed'.format(
name)),
sys.exc_info()[2])
try:
# load the schema url
try:
response = requests.get(schema_uri)
except NameError:
irods_six.reraise(ValidationError, ValidationError(
'WARNING: Validation failed for {0} -- requests not installed'.format(
name)),
sys.exc_info()[2])
# check response values
try:
# modern requests
schema = json.loads(response.text)
except AttributeError:
# requests pre-v1.0.0
response.encoding = 'utf8'
schema = json.loads(response.content)
# validate
jsonschema.validate(config_dict, schema)
except (
jsonschema.exceptions.RefResolutionError, # could not resolve recursive schema $ref
ValueError # most network errors and 404s
) as e:
irods_six.reraise(ValidationWarning, ValidationWarning('\n\t'.join([
'WARNING: Validation Failed for [{0}]:'.format(name),
'against [{0}]'.format(schema_uri),
'{0}: {1}'.format(e.__class__.__name__, e)])),
sys.exc_info()[2])
except (
jsonschema.exceptions.ValidationError,
jsonschema.exceptions.SchemaError,
BaseException
) as e:
irods_six.reraise(ValidationError, ValidationError('\n\t'.join([
'ERROR: Validation Failed for [{0}]:'.format(name),
'against [{0}]'.format(schema_uri),
'{0}: {1}'.format(e.__class__.__name__, e)])),
sys.exc_info()[2])
if verbose and name:
print("Validating [{0}]... Success".format(name))
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage: {0} <configuration_file> <schema_url>'.format(sys.argv[0]),
file=sys.stderr)
sys.exit(1)
config_file = sys.argv[1]
schema_uri = sys.argv[2]
try:
load_and_validate(config_file, schema_uri, verbose=True)
except ValidationError as e:
print(e, file=sys.stderr)
sys.exit(1)
except ValidationWarning as e:
print(e, file=sys.stderr)
sys.exit(0)
|
from typing import Dict
import blspy
from venidium.full_node.bundle_tools import simple_solution_generator
from venidium.types.blockchain_format.coin import Coin
from venidium.types.blockchain_format.program import Program
from venidium.types.coin_spend import CoinSpend
from venidium.types.condition_opcodes import ConditionOpcode
from venidium.types.generator_types import BlockGenerator
from venidium.types.spend_bundle import SpendBundle
from venidium.util.ints import uint64
from venidium.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk, solution_for_conditions
GROUP_ORDER = 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000001
def int_to_public_key(index: int) -> blspy.G1Element:
index = index % GROUP_ORDER
private_key_from_int = blspy.PrivateKey.from_bytes(index.to_bytes(32, "big"))
return private_key_from_int.get_g1()
def puzzle_hash_for_index(index: int, puzzle_hash_db: dict) -> bytes:
public_key = bytes(int_to_public_key(index))
puzzle = puzzle_for_pk(public_key)
puzzle_hash = puzzle.get_tree_hash()
puzzle_hash_db[puzzle_hash] = puzzle
return puzzle_hash
def make_fake_coin(index: int, puzzle_hash_db: dict) -> Coin:
"""
    Make a fake coin with parent id equal to the index (i.e. a genesis block coin)
"""
parent = index.to_bytes(32, "big")
puzzle_hash = puzzle_hash_for_index(index, puzzle_hash_db)
amount = 100000
return Coin(parent, puzzle_hash, uint64(amount))
def conditions_for_payment(coin) -> Program:
d: Dict = {} # a throwaway db since we don't care
new_puzzle_hash = puzzle_hash_for_index(int.from_bytes(coin.puzzle_hash, "big"), d)
return Program.to([[ConditionOpcode.CREATE_COIN, new_puzzle_hash, coin.amount]])
def make_spend_bundle(count: int) -> SpendBundle:
puzzle_hash_db: Dict = dict()
coins = [make_fake_coin(_, puzzle_hash_db) for _ in range(count)]
coin_spends = []
for coin in coins:
puzzle_reveal = puzzle_hash_db[coin.puzzle_hash]
conditions = conditions_for_payment(coin)
solution = solution_for_conditions(conditions)
coin_spend = CoinSpend(coin, puzzle_reveal, solution)
coin_spends.append(coin_spend)
spend_bundle = SpendBundle(coin_spends, blspy.G2Element())
return spend_bundle
def make_block_generator(count: int) -> BlockGenerator:
spend_bundle = make_spend_bundle(count)
return simple_solution_generator(spend_bundle)
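# Usage sketch: make_block_generator(2) builds a BlockGenerator whose spend bundle
# contains two fake coin spends. Each fake coin's parent id is its index, its puzzle
# comes from a deterministic BLS key for that index, and the spend simply re-creates
# a coin of the same amount under a freshly derived puzzle hash.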
|
"""STTP package root."""
from . import errors
from . import ext
from . import subst
from . import pkg_meta
from . import core
from .parser import Parser
__version__ = pkg_meta.version
__all__ = [
'Parser',
'errors',
'ext',
'subst',
'pkg_meta',
'core',
]
|
import numpy as np
import torch
import torch.utils.data as data
import data.util as util
class LQ_Dataset(data.Dataset):
'''Read LQ images only in the test phase.'''
def __init__(self, opt):
super(LQ_Dataset, self).__init__()
self.opt = opt
self.paths_LQ = None
self.LQ_env = None # environment for lmdb
# read image list from lmdb or image files
print(opt['data_type'],opt['dataroot_LQ'])
self.LQ_env, self.paths_LQ = util.get_image_paths(opt['data_type'], opt['dataroot_LQ'])
assert self.paths_LQ, 'Error: LQ paths are empty.'
def __getitem__(self, index):
LQ_path = None
# get LQ image
LQ_path = self.paths_LQ[index]
img_LQ = util.read_img(self.LQ_env, LQ_path)
H, W, C = img_LQ.shape
# condition
if self.opt['condition'] == 'image':
cond = img_LQ.copy()
elif self.opt['condition'] == 'gradient':
cond = util.calculate_gradient(img_LQ)
# BGR to RGB, HWC to CHW, numpy to tensor
if img_LQ.shape[2] == 3:
img_LQ = img_LQ[:, :, [2, 1, 0]]
cond = cond[:, :, [2, 1, 0]]
img_LQ = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQ, (2, 0, 1)))).float()
cond = torch.from_numpy(np.ascontiguousarray(np.transpose(cond, (2, 0, 1)))).float()
return {'LQ': img_LQ, 'LQ_path': LQ_path, 'cond': cond}
def __len__(self):
return len(self.paths_LQ)
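# A minimal example of the options dict this dataset expects (keys taken from the
# lookups above; the concrete values are illustrative):
#
#     opt = {
#         'data_type': 'img',            # how util.get_image_paths should read paths (e.g. 'img' or 'lmdb')
#         'dataroot_LQ': '/path/to/LQ',  # folder (or lmdb) holding the low-quality images
#         'condition': 'image',          # or 'gradient'
#     }
#     dataset = LQ_Dataset(opt)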
|
#!/usr/bin/env python
"""
Parse the NCES Data File Format data files. Either return a list/dict of items
from the data files or cache a reduced set of the data back into much smaller
data files.
Can be run from the command line to pre-filter the NCES raw data into a reduced dataset
to speed up run time. This can also be done by importing the class itself, but I
exposed it to the command line because this is handy for doing quick experiments.
"""
import argparse
import re
import os
import csv
# Unit testing
import unittest
from fips import fips_to_st
from filters.urban import urban_dist
from filters.big import big_dist
from filters.tuda import tuda_dist
from filters.ca_big import ca_big_dist
from filters.city_zips import sjzips
from data.nces_get import FIRST_YEAR
from data.nces_get import LAST_YEAR
# ==============================================================================
# Constants and RegEx
# ==============================================================================
re_idx_header = re.compile(r'Name\s+Order\s+Type\s+Description')
# Name Type Position Size Description
re_definition = re.compile(r'^(\w+)\s+(\w+)\s+(\d+)[-](\d+)\s+(\d+)[*]?\s+(.*)$')
re_sub_definition = re.compile(r'^\s*[+](\w+)\s+(\w+)\s+(\d+)[-](\d+)\s+(\d+)\s+(.*)$')
# Variable Start End Field Data
# Name Pos. Pos. Length Type Description
re_alt_definition = re.compile(r'^(\w+)\s+(\d+)\s+(\d+)\s+(\d+)*?\s+(\w+)\s+(.*)$')
re_alt_sub_definition = re.compile(r'^\s*[+](\w+)\s+(\d+)\s+(\d+)\s+(\d+)*?\s+(\w+)\s+(.*)$')
# Index Data Format
# Variable Data
# Name Order Type Description
re_idx_definition = re.compile(r'^[+]?(\w+)\s+(\d+)[*]?\s+(\w+)\s+(.*)$')
datafile_name = "nces%02d-%02d.txt"
saved_datafile_name = "nces%02d-%02d.csv"
formatfile_name = "nces%02d-%02d_layout.txt"
# ==============================================================================
# Utility Functions
# ==============================================================================
# --------------------------------------
def strip_comment(line, comment_char='#'):
"""
    Strip inline comments.
"""
idx = line.find(comment_char)
if idx != -1:
line = line[:idx]
return line.strip()
# --------------------------------------
def longlines(rawdata):
"""
Generator to merge lines in a text file that end with a "\"
"""
lines = []
for i in rawdata.splitlines():
if i.endswith("\\"):
lines.append(i.rstrip("\\"))
else:
lines.append(i)
yield "".join(lines)
lines = []
if len(lines) > 0: yield "".join(lines)
# ==============================================================================
# Parser Class
# ==============================================================================
class NCESParser(object):
"""
Parsing Instructions for an NCES data file.
The instructions consist of list of columns in the database along
with information needed to pull the column from the file. Typically
it will be a string index:
[('COLUMN_NAME', idx), ('COLUMN_NAME', idx), ...]
"""
def __init__(self, year, debug=False):
self.debug = debug
self.parse_instr = []
self.header_count = 0
self.headers = []
self.descriptions = {}
self.index_mode = 0
self.save_names = [
"FIPS", # State FIPS numerical representation
"LEAID", # School District ID Number
"LEANM", # School District Name
"SCHNAM", # School name
"CITY", # School City
"STATE", # School State
"ZIP", # School Zip
"BLACK", # Number of African American Students
"HISP", # Number of Hispanic (non-white) Students
"ASIAN", # Number of Students of Asian Decent (South and East)
"AM", # Number of American Indian Students
"WHITE", # Number of Caucasian Students
"MEMBER", # Number of Students
"FRELCH", # Number of Free Lunch Eligible students
"REDLCH", # Number of Reduced Price Lunch Eligible Students
"TYPE", # School Type (Regular, Special Ed, Alternative, etc...)
"STATUS", # School Status
"GSHI", # Highest Grade Offered
"GSLO", # Lowest Grade Offered
"CHARTR", # Charter School
"MAGNET", # Magnet School
"LOCALE", # School Urban Level (Rural, Suburban, Small City, Urban...)
"ULOCAL" # Urban School (located within a urban area)
]
self.year = year
self.formatfile = self.get_formatfile_name()
def __repr__(self):
results = ""
for instr in self.parse_instr:
if self.index_mode:
results += "Name: %s, Index: %d\n" % (instr[0], instr[2])
else:
results += "Name: %s, Size: %d\n" % (instr[0], instr[3] - instr[2] + 1)
return results
# --------------------------------------
def get_formatfile_name(self):
return self.get_filename(formatfile_name)
# --------------------------------------
def get_datafile_name(self):
return self.get_filename(datafile_name)
# --------------------------------------
def get_saved_datafile_name(self):
return self.get_filename(saved_datafile_name)
# --------------------------------------
def get_filename(self, name_str):
"""
Construct a filename from the base name, a year and
the local directory structure.
"""
fname = name_str % (self.year%100, (self.year+1)%100)
this_dir, this_filename = os.path.split(__file__)
fname = os.path.join(this_dir, 'data', fname)
return fname
# ==============================================================================
# Read the Format file into a usable data structure
# ==============================================================================
# --------------------------------------
def read_formatfile(self, formatfile):
if self.debug:
print "=" * 80
print "Reading Format File: %s" % formatfile
print "=" * 80
fh = open(formatfile, 'rb')
for line in fh:
if re_idx_header.search(line):
if self.index_mode == 0:
print "Switching to Index MODE!!!"
self.index_mode = 1
if re_definition.search(line):
col_name, type, loidx, hiidx, size, description = re_definition.search(line).groups()
elif re_sub_definition.search(line):
col_name, type, loidx, hiidx, size, description = re_sub_definition.search(line).groups()
elif re_alt_definition.search(line):
col_name, loidx, hiidx, size, type, description = re_alt_definition.search(line).groups()
elif re_alt_sub_definition.search(line):
col_name, loidx, hiidx, size, type, description = re_alt_sub_definition.search(line).groups()
elif self.index_mode and re_idx_definition.search(line):
col_name, loidx, type, description = re_idx_definition.search(line).groups()
hiidx = loidx
size = 0
else:
if self.debug:
print line
continue
# Filter out a problematic NCES year to year changes
if col_name[:5] == "FIPST":
col_name = "FIPS"
type = 'AN'
if col_name[:4] == "FIPS":
type = 'AN'
if col_name[:3] == "ZIP" and col_name[:4] != "ZIP4":
col_name = "ZIP"
if col_name[:4] == "LZIP" and col_name[:5] != "LZIP4" : # ZIP or Location ZIP (not Mailing ZIP)
col_name = "ZIP"
if col_name[:4] == "GSL0": # 1994 typo in the format file.
col_name = "GSLO"
if col_name == "FLE%02d" % (self.year%100):
col_name = "FRELCH"
if col_name == ("IND%02d" % (self.year%100)):
col_name = "AM"
elif col_name == "IND":
col_name = "AM"
if col_name[:4] == "CITY" or col_name[:5] == "LCITY":
col_name = "CITY"
if col_name[:4] == "STATE" or col_name[:5] == "LSTATE":
col_name = "STATE"
if (col_name[:5] == "LEAID" or
col_name[:4] == "TYPE" or
col_name[:6] == "STATUS"):
type = 'AN'
self.add_instr(col_name, type, loidx, hiidx, size, description)
if self.debug:
print "=" * 80
print "Format File Parsing Complete"
print "=" * 80
import pprint
pprint.pprint(self.parse_instr)
print "=" * 80
print "\n"
# --------------------------------------
def add_instr(self, col_name, type, loidx, hiidx, size, description):
if self.debug:
print "Found Column: %s - %s" % (col_name, size)
# Strip the year off the column if it is present
# We store the year in the main data object
if col_name[-2:].isdigit():
col_name = col_name[:-2]
# Is it a number?
if type == 'N':
pass
if col_name in self.save_names:
self.parse_instr.append((col_name, type, int(loidx)-1, int(hiidx), description.strip()))
self.add_column(col_name, description)
# --------------------------------------
def add_column(self, col_name, description):
self.headers.append(col_name)
desc = description.strip().split('\t')[0] # Filter out any Tab characters
self.descriptions[col_name] = desc
self.header_count += 1
# --------------------------------------
def get_headers(self):
return self.headers
# --------------------------------------
def get_idx(self, col_name):
try:
return self.name_idx_dict[col_name]
except AttributeError:
self.name_idx_dict = {}
for i, name in enumerate(self.headers):
self.name_idx_dict[name] = i
return self.name_idx_dict[col_name]
# --------------------------------------
def get_descriptions(self):
return ",".join(self.descriptions)
# --------------------------------------
def parse_line(self, line):
entry = []
for instr in self.parse_instr:
if self.index_mode:
field = line[instr[2]]
else:
field = line[instr[2]:instr[3]] # Python array slicing rules low_idx : high_idx + 1
if self.debug:
print field
if instr[1] == 'N': # Number Type
try:
field = float(field)
except ValueError:
field = -1.0 # Squash invalid values to -1 for fields of type 'Number'
else:
field = field.strip() # Otherwise clean up the string
entry.append(field)
return entry
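    # For example, an (illustrative) instruction ('ZIP', 'AN', 108, 113, 'Location ZIP')
    # selects line[108:113] -- the layout file's 1-based columns 109-113 -- and strips it;
    # an instruction of type 'N' is instead converted to float, with unparsable values
    # squashed to -1.0.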
# --------------------------------------
def parse_orig(self, datafile="", make_dict=False):
# Read the format file
self.read_formatfile(self.formatfile)
# Now open the data file
if datafile:
fname = datafile
else:
fname = self.get_datafile_name()
fh = open(fname, 'rb')
if self.index_mode:
fh = csv.reader(fh, dialect='excel-tab')
line = fh.next() # Pop the header line
skip_count = 0
self.schools = []
for line in fh:
if make_dict:
school = self.make_dict(self.parse_line(line))
if school['LEANM'].startswith("NEW YORK CITY GEOGRAPHIC DISTRICT"):
school['LEAID'] = "3620580"
school['LEANM'] = "NEW YORK CITY GEOGRAPHIC DISTRICTS"
if school['FIPS'] in fips_to_st.keys():
self.schools.append(school)
else:
skip_count += 1
else:
school = self.parse_line(line)
if school[self.get_idx('LEANM')].startswith("NEW YORK CITY GEOGRAPHIC DISTRICT"):
school[self.get_idx('LEAID')] = "3620580"
school[self.get_idx('LEANM')] = "NEW YORK CITY GEOGRAPHIC DISTRICTS"
if school[self.get_idx('FIPS')] in fips_to_st.keys():
self.schools.append(school)
else:
skip_count += 1
if self.debug:
print "Found %d Schools" % len(self.schools)
print "Skipped %d Schools" % skip_count
return self.schools
# --------------------------------------
def parse_saved(self, make_dict=False):
saved_fname = self.get_saved_datafile_name()
fh = open(saved_fname, 'rb')
if make_dict:
cfh = csv.DictReader(fh, quoting=csv.QUOTE_NONNUMERIC)
self.headers = cfh.fieldnames
else:
cfh = csv.reader(fh, quoting=csv.QUOTE_NONNUMERIC)
self.headers = cfh.next()
self.schools = []
for line in cfh:
self.schools.append(line)
if self.debug:
print len(self.schools)
return self.schools
# --------------------------------------
def parse(self, datafile="", make_dict=False, forced_orig=False):
if forced_orig or datafile:
return self.parse_orig(datafile, make_dict)
else:
saved_fname = self.get_saved_datafile_name()
try:
open(saved_fname, 'rb')
print "Loading Previously Saved CSV Data Set"
return self.parse_saved(make_dict)
except IOError:
print "Parsing the NCES Data Set"
return self.parse_orig(datafile, make_dict)
# --------------------------------------
def make_dict(self, school):
if self.debug:
print school
return dict(zip(self.headers, school))
# --------------------------------------
def save_parsed_data(self, filter=False, idx="", idx_list=[]):
"""
Save out the parsed data
"""
fname = self.get_saved_datafile_name()
fh = open(fname, 'wb')
cfh = csv.writer(fh, quoting=csv.QUOTE_NONNUMERIC)
cfh.writerow(self.get_headers())
count = 0
for school in self.schools:
if (not filter or
filter and school[self.get_idx(idx)] in idx_list):
if self.debug:
print school
cfh.writerow(school)
count += 1
print "Saved %d Entries to CSV File %s" % (count, fname)
# *****************************************************************************
# Unit Tests
# *****************************************************************************
class TestBasicNetwork(unittest.TestCase):
def setUp(self):
self.parse = NCESParser('data/school.test', 'data/school.format')
def test_old_style(self):
self.assertEqual(self.parse.something, ["Test"])
# *****************************************************************************
# Program Flow
# *****************************************************************************
# -------------------------------------
# Parse the command line options
# -------------------------------------
def main():
parser = argparse.ArgumentParser(description='NCES Data File Parser')
# dataset idx/val matching - if data[idx] == val then keep this data, filter out all other (e.g. data[state] == CA)
parser.add_argument('--match_idx', action='store', dest='match_idx', required=False,
help='Only use data points that match some criterion')
parser.add_argument('--match_val', nargs='+', action='store', dest='match_val', required=False,
help='Value to match when using --match_idx')
# Prepackaged match-idx/val pairs
parser.add_argument('-urban_only', action='store_true', dest='urban_only', required=False,
help='Filter out non-Urban Districts')
parser.add_argument('-big_only', action='store_true', dest='big_only', required=False,
help='Biggest 50 Districts')
parser.add_argument('-ca_big_only', action='store_true', dest='ca_big_only', required=False,
help='Biggest CA Districts')
parser.add_argument('-tuda_only', action='store_true', dest='tuda_only', required=False,
help='Select only Districts in the NAEP TUDA List')
parser.add_argument('-sjzips', action='store_true', dest='sjzips', required=False,
help='Select only Districts in San Jose, CA')
# Other Options
parser.add_argument('-debug', action='store_true',
help='Print Debug Messages')
args = parser.parse_args()
# print args
# -------------------------------------
# Actually do the work we intend to do here
# -------------------------------------
for year in range(FIRST_YEAR, LAST_YEAR+1):
print "=" * 80
print "Saving out a reduced dataset for %d" % year
print "=" * 80
parser = NCESParser(year=year, debug=args.debug)
parser.parse(forced_orig=True)
if args.urban_only:
parser.save_parsed_data(filter=True, idx="LEAID", idx_list=urban_dist)
        elif args.big_only:
parser.save_parsed_data(filter=True, idx="LEAID", idx_list=big_dist)
        elif args.ca_big_only:
parser.save_parsed_data(filter=True, idx="LEAID", idx_list=ca_big_dist)
elif args.tuda_only:
parser.save_parsed_data(filter=True, idx="LEAID", idx_list=tuda_dist)
elif args.sjzips:
parser.save_parsed_data(filter=True, idx="ZIP", idx_list=sjzips)
elif args.match_idx:
parser.save_parsed_data(filter=True, idx=args.match_idx, idx_list=args.match_val)
else:
parser.save_parsed_data()
# -------------------------------------
# Drop the script name from the args
# and call our command line parser
# -------------------------------------
if __name__ == "__main__":
main()
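# Command-line usage sketch (the flags are defined in main() above; the script name and
# the filter values are illustrative):
#
#     python nces_parser.py -urban_only                        # keep only the urban districts
#     python nces_parser.py --match_idx STATE --match_val CA TX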
|
from typing import Optional
from hypothesis import given
from tests.base_test_case import BaseTestCase
from electionguard.constants import (
get_small_prime,
get_large_prime,
get_generator,
get_cofactor,
)
from electionguard.group import (
ElementModP,
ElementModQ,
a_minus_b_q,
mult_inv_p,
ONE_MOD_P,
mult_p,
ZERO_MOD_P,
ONE_MOD_Q,
g_pow_p,
ZERO_MOD_Q,
int_to_p,
int_to_q,
add_q,
div_q,
div_p,
a_plus_bc_q,
)
from electionguard.utils import (
flatmap_optional,
get_or_else_optional,
match_optional,
get_optional,
)
from electionguardtest.group import (
elements_mod_p_no_zero,
elements_mod_p,
elements_mod_q,
elements_mod_q_no_zero,
)
class TestEquality(BaseTestCase):
"""Math equality tests"""
@given(elements_mod_q(), elements_mod_q())
def test_p_not_equal_to_q(self, q: ElementModQ, q2: ElementModQ):
p = ElementModP(q)
p2 = ElementModP(q2)
# same value should imply they're equal
self.assertEqual(p, q)
self.assertEqual(q, p)
if q != q2:
# these are genuinely different numbers
self.assertNotEqual(q, q2)
self.assertNotEqual(p, p2)
self.assertNotEqual(q, p2)
self.assertNotEqual(p, q2)
# of course, we're going to make sure that a number is equal to itself
self.assertEqual(p, p)
self.assertEqual(q, q)
class TestModularArithmetic(BaseTestCase):
"""Math Modular Arithmetic tests"""
@given(elements_mod_q())
def test_add_q(self, q: ElementModQ):
as_int = add_q(q, 1)
as_elem = add_q(q, ElementModQ(1))
self.assertEqual(as_int, as_elem)
@given(elements_mod_q())
def test_a_plus_bc_q(self, q: ElementModQ):
as_int = a_plus_bc_q(q, 1, 1)
as_elem = a_plus_bc_q(q, ElementModQ(1), ElementModQ(1))
self.assertEqual(as_int, as_elem)
@given(elements_mod_q())
def test_a_minus_b_q(self, q: ElementModQ):
as_int = a_minus_b_q(q, 1)
as_elem = a_minus_b_q(q, ElementModQ(1))
self.assertEqual(as_int, as_elem)
@given(elements_mod_q())
def test_div_q(self, q: ElementModQ):
as_int = div_q(q, 1)
as_elem = div_q(q, ElementModQ(1))
self.assertEqual(as_int, as_elem)
@given(elements_mod_p())
def test_div_p(self, p: ElementModQ):
as_int = div_p(p, 1)
as_elem = div_p(p, ElementModP(1))
self.assertEqual(as_int, as_elem)
def test_no_mult_inv_of_zero(self):
self.assertRaises(Exception, mult_inv_p, ZERO_MOD_P)
@given(elements_mod_p_no_zero())
def test_mult_inverses(self, elem: ElementModP):
inv = mult_inv_p(elem)
self.assertEqual(mult_p(elem, inv), ONE_MOD_P)
@given(elements_mod_p())
def test_mult_identity(self, elem: ElementModP):
self.assertEqual(elem, mult_p(elem))
def test_mult_noargs(self):
self.assertEqual(ONE_MOD_P, mult_p())
def test_add_noargs(self):
self.assertEqual(ZERO_MOD_Q, add_q())
def test_properties_for_constants(self):
self.assertNotEqual(get_generator(), 1)
self.assertEqual(
(get_cofactor() * get_small_prime()) % get_large_prime(),
get_large_prime() - 1,
)
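        # This congruence is consistent with the defining relation
        # large_prime = cofactor * small_prime + 1, i.e. cofactor * small_prime
        # is congruent to -1 (mod large_prime).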
self.assertLess(get_small_prime(), get_large_prime())
self.assertLess(get_generator(), get_large_prime())
self.assertLess(get_cofactor(), get_large_prime())
def test_simple_powers(self):
gp = int_to_p(get_generator())
self.assertEqual(gp, g_pow_p(ONE_MOD_Q))
self.assertEqual(ONE_MOD_P, g_pow_p(ZERO_MOD_Q))
@given(elements_mod_q())
def test_in_bounds_q(self, q: ElementModQ):
self.assertTrue(q.is_in_bounds())
too_big = q + get_small_prime()
too_small = q - get_small_prime()
self.assertFalse(ElementModQ(too_big, False).is_in_bounds())
self.assertFalse(ElementModQ(too_small, False).is_in_bounds())
self.assertEqual(None, int_to_q(too_big))
self.assertEqual(None, int_to_q(too_small))
with self.assertRaises(OverflowError):
ElementModQ(too_big)
with self.assertRaises(OverflowError):
ElementModQ(too_small)
@given(elements_mod_p())
def test_in_bounds_p(self, p: ElementModP):
self.assertTrue(p.is_in_bounds())
too_big = p + get_large_prime()
too_small = p - get_large_prime()
self.assertFalse(ElementModP(too_big, False).is_in_bounds())
self.assertFalse(ElementModP(too_small, False).is_in_bounds())
self.assertEqual(None, int_to_p(too_big))
self.assertEqual(None, int_to_p(too_small))
with self.assertRaises(OverflowError):
ElementModP(too_big)
with self.assertRaises(OverflowError):
ElementModP(too_small)
@given(elements_mod_q_no_zero())
def test_in_bounds_q_no_zero(self, q: ElementModQ):
self.assertTrue(q.is_in_bounds_no_zero())
self.assertFalse(ZERO_MOD_Q.is_in_bounds_no_zero())
self.assertFalse(
ElementModQ(q + get_small_prime(), False).is_in_bounds_no_zero()
)
self.assertFalse(
ElementModQ(q - get_small_prime(), False).is_in_bounds_no_zero()
)
@given(elements_mod_p_no_zero())
def test_in_bounds_p_no_zero(self, p: ElementModP):
self.assertTrue(p.is_in_bounds_no_zero())
self.assertFalse(ZERO_MOD_P.is_in_bounds_no_zero())
self.assertFalse(
ElementModP(p + get_large_prime(), False).is_in_bounds_no_zero()
)
self.assertFalse(
ElementModP(p - get_large_prime(), False).is_in_bounds_no_zero()
)
@given(elements_mod_q())
def test_large_values_rejected_by_int_to_q(self, q: ElementModQ):
oversize = q + get_small_prime()
self.assertEqual(None, int_to_q(oversize))
class TestOptionalFunctions(BaseTestCase):
"""Math Optional Functions tests"""
def test_unwrap(self):
good: Optional[int] = 3
bad: Optional[int] = None
self.assertEqual(get_optional(good), 3)
self.assertRaises(Exception, get_optional, bad)
def test_match(self):
good: Optional[int] = 3
bad: Optional[int] = None
self.assertEqual(5, match_optional(good, lambda: 1, lambda x: x + 2))
self.assertEqual(1, match_optional(bad, lambda: 1, lambda x: x + 2))
def test_get_or_else(self):
good: Optional[int] = 3
bad: Optional[int] = None
self.assertEqual(3, get_or_else_optional(good, 5))
self.assertEqual(5, get_or_else_optional(bad, 5))
def test_flatmap(self):
good: Optional[int] = 3
bad: Optional[int] = None
self.assertEqual(5, get_optional(flatmap_optional(good, lambda x: x + 2)))
self.assertIsNone(flatmap_optional(bad, lambda x: x + 2))
|
from output.models.nist_data.list_pkg.time.schema_instance.nistschema_sv_iv_list_time_length_2_xsd.nistschema_sv_iv_list_time_length_2 import NistschemaSvIvListTimeLength2
__all__ = [
"NistschemaSvIvListTimeLength2",
]
|
import numpy as np
from pybrain.tools.shortcuts import buildNetwork
import pygame
class CoopGame(object):
""" Class that runs and renders the game """
window = None
DIM = (600, 600)
FPS = 24
DT = 1.0/FPS
players = []
bullets = []
def __init__(self, render=False, max_moves=200):
super(CoopGame, self).__init__()
self.render = render
self.max_moves = max_moves
if(self.render):
pygame.init()
self.window = pygame.display.set_mode(self.DIM)
self.play = self._render_and_play
def setup(self, players):
self.players = players
self.results = np.zeros(len(players))
self.moves = np.zeros(len(players))
self.rewards = np.zeros((2, len(players)))
radius = float(min(self.DIM)) / 3
d_theta = 2 * np.pi / len(players)
center = np.array(self.DIM) / 2
for ind, player in enumerate(players):
assert(player.object_id == ind)
pos = center + np.array([radius * np.cos(d_theta * ind), radius * np.sin(d_theta * ind)])
player.setup(pos, -d_theta * ind, np.pi / 3)
self.bullets = []
def _turn(self, current_turn):
self.rewards[0,:].fill(0)
for i, obj in enumerate(self.players):
move = obj.pre_update(self.players, self.bullets)
self.moves[i] = move
if(move not in obj.past_moves):
obj.past_moves.add(move)
self.rewards[0, i] += 3*(len(obj.past_moves)+1)
for i, obj in enumerate(self.players):
obj.update(self.moves[i], self.players, self.bullets, self.DIM, self.DT)
for obj in self.bullets:
obj.update(self.players, self.bullets, self.DIM, self.DT)
# Check for collisions
for player in self.players:
for bullet in self.bullets:
if(player.collision(bullet.position)):
if(bullet.firer_id == player.object_id):
continue
firer = filter(lambda x: x.object_id == bullet.firer_id, self.players)[0]
self.rewards[0, bullet.firer_id] += 10 if player.team != firer.team else -10
self.rewards[0, player.object_id] -= 10
bullet.active = False
        #Remove inactive objects
self.bullets = filter(lambda x: x.active, self.bullets)
#Reward players
for i, player in enumerate(self.players):
player.reward(self.rewards[int(current_turn==self.max_moves-1),i])
#Update scores
self.rewards[1,:] += self.rewards[0,:]
def _render(self):
for obj in self.players:
obj.render(self.window)
for obj in self.bullets:
obj.render(self.window)
def play(self, players, results):
self.setup(players)
#Main loop
current_turn = 0
while current_turn < self.max_moves:
self._turn(current_turn)
current_turn+=1
#Update the results
for player in self.players:
results[player.team, player.individual_id, 0] += self.rewards[1,player.object_id]
results[player.team, player.individual_id, 1] += 1
def _render_and_play(self, players, results):
self.setup(players)
render_clock = pygame.time.Clock()
#Main loop
current_turn = 0
while current_turn < self.max_moves:
pygame.display.flip()
self.window.fill((0, 0, 0))
self.window.lock()
self._turn(current_turn)
self._render()
current_turn+=1
self.window.unlock()
#Tick the clock
render_clock.tick(self.FPS)
#Update the results
for player in self.players:
results[player.team, player.individual_id, 0] += player.score
results[player.team, player.individual_id, 1] += 1
|
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'subdir_file',
'type': 'none',
'msvs_cygwin_shell': 0,
'actions': [
{
'action_name': 'make-subdir-file',
'inputs': [
'make-subdir-file.py',
],
'outputs': [
'<(PRODUCT_DIR)/subdir_file.out',
],
'action': [
'python', '<(_inputs)', '<@(_outputs)',
],
'process_outputs_as_sources': 1,
}
],
},
],
}
|
from itertools import permutations
from operator import (
add,
ge,
gt,
le,
lt,
methodcaller,
mul,
ne,
)
from unittest import TestCase
import numpy
from numpy import (
arange,
array,
eye,
float64,
full,
isnan,
zeros,
)
from pandas import (
DataFrame,
date_range,
Int64Index,
)
from zipline.pipeline import Factor, Filter
from zipline.pipeline.expression import (
NumericalExpression,
NUMEXPR_MATH_FUNCS,
)
from zipline.utils.numpy_utils import datetime64ns_dtype, float64_dtype
from zipline.utils.test_utils import check_arrays
class F(Factor):
dtype = float64_dtype
inputs = ()
window_length = 0
class G(Factor):
dtype = float64_dtype
inputs = ()
window_length = 0
class H(Factor):
dtype = float64_dtype
inputs = ()
window_length = 0
class NonExprFilter(Filter):
inputs = ()
window_length = 0
class DateFactor(Factor):
dtype = datetime64ns_dtype
inputs = ()
window_length = 0
class NumericalExpressionTestCase(TestCase):
def setUp(self):
self.dates = date_range('2014-01-01', periods=5, freq='D')
self.assets = Int64Index(range(5))
self.f = F()
self.g = G()
self.h = H()
self.d = DateFactor()
self.fake_raw_data = {
self.f: full((5, 5), 3, float),
self.g: full((5, 5), 2, float),
self.h: full((5, 5), 1, float),
self.d: full((5, 5), 0, dtype='datetime64[ns]'),
}
self.mask = DataFrame(True, index=self.dates, columns=self.assets)
def check_output(self, expr, expected):
result = expr._compute(
[self.fake_raw_data[input_] for input_ in expr.inputs],
self.mask.index,
self.mask.columns,
self.mask.values,
)
check_arrays(result, expected)
def check_constant_output(self, expr, expected):
self.assertFalse(isnan(expected))
return self.check_output(expr, full((5, 5), expected, float))
def test_validate_good(self):
f = self.f
g = self.g
NumericalExpression("x_0", (f,), dtype=float64_dtype)
NumericalExpression("x_0 ", (f,), dtype=float64_dtype)
NumericalExpression("x_0 + x_0", (f,), dtype=float64_dtype)
NumericalExpression("x_0 + 2", (f,), dtype=float64_dtype)
NumericalExpression("2 * x_0", (f,), dtype=float64_dtype)
NumericalExpression("x_0 + x_1", (f, g), dtype=float64_dtype)
NumericalExpression("x_0 + x_1 + x_0", (f, g), dtype=float64_dtype)
NumericalExpression("x_0 + 1 + x_1", (f, g), dtype=float64_dtype)
def test_validate_bad(self):
f, g, h = self.f, self.g, self.h
# Too few inputs.
with self.assertRaises(ValueError):
NumericalExpression("x_0", (), dtype=float64_dtype)
with self.assertRaises(ValueError):
NumericalExpression("x_0 + x_1", (f,), dtype=float64_dtype)
# Too many inputs.
with self.assertRaises(ValueError):
NumericalExpression("x_0", (f, g), dtype=float64_dtype)
with self.assertRaises(ValueError):
NumericalExpression("x_0 + x_1", (f, g, h), dtype=float64_dtype)
# Invalid variable name.
with self.assertRaises(ValueError):
NumericalExpression("x_0x_1", (f,), dtype=float64_dtype)
with self.assertRaises(ValueError):
NumericalExpression("x_0x_1", (f, g), dtype=float64_dtype)
# Variable index must start at 0.
with self.assertRaises(ValueError):
NumericalExpression("x_1", (f,), dtype=float64_dtype)
# Scalar operands must be numeric.
with self.assertRaises(TypeError):
"2" + f
with self.assertRaises(TypeError):
f + "2"
with self.assertRaises(TypeError):
f > "2"
# Boolean binary operators must be between filters.
with self.assertRaises(TypeError):
f + (f > 2)
with self.assertRaises(TypeError):
(f > f) > f
def test_combine_datetimes(self):
with self.assertRaises(TypeError) as e:
self.d + self.d
message = e.exception.args[0]
expected = (
"Don't know how to compute datetime64[ns] + datetime64[ns].\n"
"Arithmetic operators are only supported on Factors of dtype "
"'float64'."
)
self.assertEqual(message, expected)
# Confirm that * shows up in the error instead of +.
with self.assertRaises(TypeError) as e:
self.d * self.d
message = e.exception.args[0]
expected = (
"Don't know how to compute datetime64[ns] * datetime64[ns].\n"
"Arithmetic operators are only supported on Factors of dtype "
"'float64'."
)
self.assertEqual(message, expected)
def test_combine_datetime_with_float(self):
# Test with both float-type factors and numeric values.
for float_value in (self.f, float64(1.0), 1.0):
for op, sym in ((add, '+'), (mul, '*')):
with self.assertRaises(TypeError) as e:
op(self.f, self.d)
message = e.exception.args[0]
expected = (
"Don't know how to compute float64 {sym} datetime64[ns].\n"
"Arithmetic operators are only supported on Factors of "
"dtype 'float64'."
).format(sym=sym)
self.assertEqual(message, expected)
with self.assertRaises(TypeError) as e:
op(self.d, self.f)
message = e.exception.args[0]
expected = (
"Don't know how to compute datetime64[ns] {sym} float64.\n"
"Arithmetic operators are only supported on Factors of "
"dtype 'float64'."
).format(sym=sym)
self.assertEqual(message, expected)
def test_negate_datetime(self):
with self.assertRaises(TypeError) as e:
-self.d
message = e.exception.args[0]
expected = (
"Can't apply unary operator '-' to instance of "
"'DateFactor' with dtype 'datetime64[ns]'.\n"
"'-' is only supported for Factors of dtype 'float64'."
)
self.assertEqual(message, expected)
def test_negate(self):
f, g = self.f, self.g
self.check_constant_output(-f, -3.0)
self.check_constant_output(--f, 3.0)
self.check_constant_output(---f, -3.0)
self.check_constant_output(-(f + f), -6.0)
self.check_constant_output(-f + -f, -6.0)
self.check_constant_output(-(-f + -f), 6.0)
self.check_constant_output(f + -g, 1.0)
self.check_constant_output(f - -g, 5.0)
self.check_constant_output(-(f + g) + (f + g), 0.0)
self.check_constant_output((f + g) + -(f + g), 0.0)
self.check_constant_output(-(f + g) + -(f + g), -10.0)
def test_add(self):
f, g = self.f, self.g
self.check_constant_output(f + g, 5.0)
self.check_constant_output((1 + f) + g, 6.0)
self.check_constant_output(1 + (f + g), 6.0)
self.check_constant_output((f + 1) + g, 6.0)
self.check_constant_output(f + (1 + g), 6.0)
self.check_constant_output((f + g) + 1, 6.0)
self.check_constant_output(f + (g + 1), 6.0)
self.check_constant_output((f + f) + f, 9.0)
self.check_constant_output(f + (f + f), 9.0)
self.check_constant_output((f + g) + f, 8.0)
self.check_constant_output(f + (g + f), 8.0)
self.check_constant_output((f + g) + (f + g), 10.0)
self.check_constant_output((f + g) + (g + f), 10.0)
self.check_constant_output((g + f) + (f + g), 10.0)
self.check_constant_output((g + f) + (g + f), 10.0)
def test_subtract(self):
f, g = self.f, self.g
self.check_constant_output(f - g, 1.0) # 3 - 2
self.check_constant_output((1 - f) - g, -4.) # (1 - 3) - 2
self.check_constant_output(1 - (f - g), 0.0) # 1 - (3 - 2)
self.check_constant_output((f - 1) - g, 0.0) # (3 - 1) - 2
self.check_constant_output(f - (1 - g), 4.0) # 3 - (1 - 2)
self.check_constant_output((f - g) - 1, 0.0) # (3 - 2) - 1
self.check_constant_output(f - (g - 1), 2.0) # 3 - (2 - 1)
self.check_constant_output((f - f) - f, -3.) # (3 - 3) - 3
self.check_constant_output(f - (f - f), 3.0) # 3 - (3 - 3)
self.check_constant_output((f - g) - f, -2.) # (3 - 2) - 3
self.check_constant_output(f - (g - f), 4.0) # 3 - (2 - 3)
self.check_constant_output((f - g) - (f - g), 0.0) # (3 - 2) - (3 - 2)
self.check_constant_output((f - g) - (g - f), 2.0) # (3 - 2) - (2 - 3)
self.check_constant_output((g - f) - (f - g), -2.) # (2 - 3) - (3 - 2)
self.check_constant_output((g - f) - (g - f), 0.0) # (2 - 3) - (2 - 3)
def test_multiply(self):
f, g = self.f, self.g
self.check_constant_output(f * g, 6.0)
self.check_constant_output((2 * f) * g, 12.0)
self.check_constant_output(2 * (f * g), 12.0)
self.check_constant_output((f * 2) * g, 12.0)
self.check_constant_output(f * (2 * g), 12.0)
self.check_constant_output((f * g) * 2, 12.0)
self.check_constant_output(f * (g * 2), 12.0)
self.check_constant_output((f * f) * f, 27.0)
self.check_constant_output(f * (f * f), 27.0)
self.check_constant_output((f * g) * f, 18.0)
self.check_constant_output(f * (g * f), 18.0)
self.check_constant_output((f * g) * (f * g), 36.0)
self.check_constant_output((f * g) * (g * f), 36.0)
self.check_constant_output((g * f) * (f * g), 36.0)
self.check_constant_output((g * f) * (g * f), 36.0)
self.check_constant_output(f * f * f * 0 * f * f, 0.0)
def test_divide(self):
f, g = self.f, self.g
self.check_constant_output(f / g, 3.0 / 2.0)
self.check_constant_output(
(2 / f) / g,
(2 / 3.0) / 2.0
)
self.check_constant_output(
2 / (f / g),
2 / (3.0 / 2.0),
)
self.check_constant_output(
(f / 2) / g,
(3.0 / 2) / 2.0,
)
self.check_constant_output(
f / (2 / g),
3.0 / (2 / 2.0),
)
self.check_constant_output(
(f / g) / 2,
(3.0 / 2.0) / 2,
)
self.check_constant_output(
f / (g / 2),
3.0 / (2.0 / 2),
)
self.check_constant_output(
(f / f) / f,
(3.0 / 3.0) / 3.0
)
self.check_constant_output(
f / (f / f),
3.0 / (3.0 / 3.0),
)
self.check_constant_output(
(f / g) / f,
(3.0 / 2.0) / 3.0,
)
self.check_constant_output(
f / (g / f),
3.0 / (2.0 / 3.0),
)
self.check_constant_output(
(f / g) / (f / g),
(3.0 / 2.0) / (3.0 / 2.0),
)
self.check_constant_output(
(f / g) / (g / f),
(3.0 / 2.0) / (2.0 / 3.0),
)
self.check_constant_output(
(g / f) / (f / g),
(2.0 / 3.0) / (3.0 / 2.0),
)
self.check_constant_output(
(g / f) / (g / f),
(2.0 / 3.0) / (2.0 / 3.0),
)
def test_pow(self):
f, g = self.f, self.g
self.check_constant_output(f ** g, 3.0 ** 2)
self.check_constant_output(2 ** f, 2.0 ** 3)
self.check_constant_output(f ** 2, 3.0 ** 2)
self.check_constant_output((f + g) ** 2, (3.0 + 2.0) ** 2)
self.check_constant_output(2 ** (f + g), 2 ** (3.0 + 2.0))
self.check_constant_output(f ** (f ** g), 3.0 ** (3.0 ** 2.0))
self.check_constant_output((f ** f) ** g, (3.0 ** 3.0) ** 2.0)
self.check_constant_output((f ** g) ** (f ** g), 9.0 ** 9.0)
self.check_constant_output((f ** g) ** (g ** f), 9.0 ** 8.0)
self.check_constant_output((g ** f) ** (f ** g), 8.0 ** 9.0)
self.check_constant_output((g ** f) ** (g ** f), 8.0 ** 8.0)
def test_mod(self):
f, g = self.f, self.g
self.check_constant_output(f % g, 3.0 % 2.0)
self.check_constant_output(f % 2.0, 3.0 % 2.0)
self.check_constant_output(g % f, 2.0 % 3.0)
self.check_constant_output((f + g) % 2, (3.0 + 2.0) % 2)
self.check_constant_output(2 % (f + g), 2 % (3.0 + 2.0))
self.check_constant_output(f % (f % g), 3.0 % (3.0 % 2.0))
self.check_constant_output((f % f) % g, (3.0 % 3.0) % 2.0)
self.check_constant_output((f + g) % (f * g), 5.0 % 6.0)
def test_math_functions(self):
f, g = self.f, self.g
fake_raw_data = self.fake_raw_data
alt_fake_raw_data = {
self.f: full((5, 5), .5),
self.g: full((5, 5), -.5),
}
for funcname in NUMEXPR_MATH_FUNCS:
method = methodcaller(funcname)
func = getattr(numpy, funcname)
# These methods have restricted domains (arcsin/arccos on [-1, 1] and
# arctanh on (-1, 1)), so we need alternate inputs that lie in the domain.
if funcname in ('arcsin', 'arccos', 'arctanh'):
self.fake_raw_data = alt_fake_raw_data
else:
self.fake_raw_data = fake_raw_data
f_val = self.fake_raw_data[f][0, 0]
g_val = self.fake_raw_data[g][0, 0]
self.check_constant_output(method(f), func(f_val))
self.check_constant_output(method(g), func(g_val))
self.check_constant_output(method(f) + 1, func(f_val) + 1)
self.check_constant_output(1 + method(f), 1 + func(f_val))
self.check_constant_output(method(f + .25), func(f_val + .25))
self.check_constant_output(method(.25 + f), func(.25 + f_val))
self.check_constant_output(
method(f) + method(g),
func(f_val) + func(g_val),
)
self.check_constant_output(
method(f + g),
func(f_val + g_val),
)
def test_comparisons(self):
f, g, h = self.f, self.g, self.h
self.fake_raw_data = {
f: arange(25, dtype=float).reshape(5, 5),
g: arange(25, dtype=float).reshape(5, 5) - eye(5),
h: full((5, 5), 5, dtype=float),
}
f_data = self.fake_raw_data[f]
g_data = self.fake_raw_data[g]
cases = [
# Sanity check with hand-computed values: g equals f minus eye(5), so
# f > g holds exactly on the diagonal.
(f, g, eye(5), zeros((5, 5))),
(f, 10, f_data, 10),
(10, f, 10, f_data),
(f, f, f_data, f_data),
(f + 1, f, f_data + 1, f_data),
(1 + f, f, 1 + f_data, f_data),
(f, g, f_data, g_data),
(f + 1, g, f_data + 1, g_data),
(f, g + 1, f_data, g_data + 1),
(f + 1, g + 1, f_data + 1, g_data + 1),
((f + g) / 2, f ** 2, (f_data + g_data) / 2, f_data ** 2),
]
for op in (gt, ge, lt, le, ne):
for expr_lhs, expr_rhs, expected_lhs, expected_rhs in cases:
self.check_output(
op(expr_lhs, expr_rhs),
op(expected_lhs, expected_rhs),
)
def test_boolean_binops(self):
f, g, h = self.f, self.g, self.h
# Add a non-numexpr filter to ensure that we correctly handle
# delegation to NumericalExpression.
custom_filter = NonExprFilter()
custom_filter_mask = array(
[[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 1, 0]],
dtype=bool,
)
self.fake_raw_data = {
f: arange(25, dtype=float).reshape(5, 5),
g: arange(25, dtype=float).reshape(5, 5) - eye(5),
h: full((5, 5), 5, dtype=float),
custom_filter: custom_filter_mask,
}
# Should be True on the diagonal.
eye_filter = (f > g)
# Should be True in the first row only.
first_row_filter = f < h
eye_mask = eye(5, dtype=bool)
first_row_mask = zeros((5, 5), dtype=bool)
first_row_mask[0] = 1
self.check_output(eye_filter, eye_mask)
self.check_output(first_row_filter, first_row_mask)
def gen_boolops(x, y, z):
"""
Generate all possible interleavings of & and | between all possible
orderings of x, y, and z.
"""
for a, b, c in permutations([x, y, z]):
yield (a & b) & c
yield (a & b) | c
yield (a | b) & c
yield (a | b) | c
yield a & (b & c)
yield a & (b | c)
yield a | (b & c)
yield a | (b | c)
exprs = gen_boolops(eye_filter, custom_filter, first_row_filter)
arrays = gen_boolops(eye_mask, custom_filter_mask, first_row_mask)
for expr, expected in zip(exprs, arrays):
self.check_output(expr, expected)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pook documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 4 18:59:54 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import pook # noqa
import sphinx_rtd_theme # noqa
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pook'
copyright = '2016, Tomas Aparicio'
author = 'Tomas Aparicio'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pook.__version__
# The full version, including alpha/beta/rc tags.
release = pook.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'pook v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pookdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pook.tex', 'pook Documentation',
'Tomas Aparicio', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pook', 'pook Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pook', 'pook Documentation',
author, 'pook', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest import TestCase
import numpy as np
import runtime.testing as testing
from runtime.db import (XGBOOST_NULL_MAGIC, buffered_db_writer,
connect_with_data_source, db_generator,
get_table_schema, limit_select, read_feature,
read_features_from_row, selected_columns_and_types)
from runtime.dbapi import connect
from runtime.dbapi.mysql import MYSQL_FIELD_TYPE_DICT
def execute(conn, statement):
rs = conn.query(statement)
field_names = [c[0] for c in rs.column_info()]
rows = [r for r in rs]
return field_names, rows
class TestDB(TestCase):
create_statement = "create table test_db (features text, label int)"
hive_create_statement = 'create table test_db (features string, ' \
'label int) ROW FORMAT DELIMITED FIELDS ' \
'TERMINATED BY "\001"'
select_statement = "select * from test_db"
drop_statement = "drop table if exists test_db"
@unittest.skipUnless(testing.get_driver() == "mysql",
"skip non mysql tests")
def test_mysql(self):
conn = connect(testing.get_datasource())
self._do_test(conn)
conn.close()
@unittest.skipUnless(testing.get_driver() == "hive", "skip non hive tests")
def test_hive(self):
uri = testing.get_datasource()
conn = connect(uri)
self._do_test(conn)
self._do_test_hive_specified_db(conn)
def _do_test_hive_specified_db(self, conn):
create_db = '''create database if not exists test_db'''
create_tbl = '''create table test_db.tbl (features string, label int)
ROW FORMAT DELIMITED FIELDS TERMINATED BY "\001"'''
drop_tbl = '''drop table if exists test_db.tbl'''
select_tbl = '''select * from test_db.tbl'''
table_schema = ["label", "features"]
values = [(1, '5,6,1,2')] * 10
self.assertTrue(conn.execute(create_db))
self.assertTrue(conn.execute(drop_tbl))
self.assertTrue(conn.execute(create_tbl))
with buffered_db_writer(conn,
"test_db.tbl",
table_schema,
buff_size=10) as w:
for row in values:
w.write(row)
field_names, data = execute(conn, select_tbl)
expect_result = [('5,6,1,2', 1)] * 10
self.assertEqual(field_names, ['features', 'label'])
self.assertEqual(expect_result, data)
def _do_test(self, conn):
table_name = "test_db"
table_schema = ["features", "label"]
values = [('5,6,1,2', 1)] * 10
conn.execute(self.drop_statement)
if conn.driver == "hive":
conn.execute(self.hive_create_statement)
else:
conn.execute(self.create_statement)
with buffered_db_writer(conn, table_name, table_schema,
buff_size=10) as w:
for row in values:
w.write(row)
field_names, data = execute(conn, self.select_statement)
self.assertEqual(table_schema, field_names)
self.assertEqual(values, data)
class TestGenerator(TestCase):
create_statement = """create table test_table_float_fea
(f1 float, f2 int, f3str VARCHAR(255),
f4sparse VARCHAR(255), f5dense VARCHAR(255), label int)"""
drop_statement = "drop table if exists test_table_float_fea"
insert_statement = """insert into test_table_float_fea
(f1,f2,f3str,f4sparse,f5dense,label)
values(1.0,1,'a','1:1.0 2:2.0','1,2,3',0), (NULL,NULL,NULL,NULL,'1,2,3',1)"""
@unittest.skipUnless(testing.get_driver() == "mysql",
"skip non mysql tests")
def test_generator(self):
conn = connect(testing.get_datasource())
# prepare test data
conn.execute(self.drop_statement)
conn.execute(self.create_statement)
conn.execute(self.insert_statement)
column_name_to_type = {
"f1": {
"feature_name": "f1",
"delimiter": "",
"dtype": "float32",
"is_sparse": False,
"shape": []
},
"f2": {
"feature_name": "f2",
"delimiter": "",
"dtype": "int64",
"is_sparse": False,
"shape": []
},
"f3str": {
"feature_name": "f3str",
"delimiter": "",
"dtype": "string",
"is_sparse": False,
"shape": []
},
"f4sparse": {
"feature_name": "f4sparse",
"delimiter": "",
"dtype": "float32",
"is_sparse": True,
"shape": [],
"format": "kv"
},
"f5dense": {
"feature_name": "f5dense",
"delimiter": ",",
"dtype": "int64",
"is_sparse": False,
"shape": [3]
}
}
label_meta = {"feature_name": "label", "shape": [], "delimiter": ""}
gen = db_generator(conn, "SELECT * FROM test_table_float_fea",
label_meta)
idx = 0
for row, label in gen():
if idx == 0:
features = read_features_from_row(
row, ["f1", "f2", "f3str", "f4sparse", "f5dense"],
["f1", "f2", "f3str", "f4sparse", "f5dense"],
column_name_to_type)
self.assertEqual(1.0, features[0][0])
self.assertEqual(1, features[1][0])
self.assertEqual('a', features[2][0])
self.assertTrue(
np.array_equal(np.array([[1], [2]]), features[3][0]))
self.assertTrue(
np.array_equal(np.array([1., 2.], dtype=np.float32),
features[3][1]))
self.assertTrue(
np.array_equal(np.array([1, 2, 3]), features[4][0]))
self.assertEqual(0, label)
elif idx == 1:
try:
features = read_features_from_row(
row, ["f1", "f2", "f3str", "f4sparse", "f5dense"],
["f1", "f2", "f3str", "f4sparse", "f5dense"],
column_name_to_type)
except Exception as e:
self.assertTrue(isinstance(e, ValueError))
features = read_features_from_row(
row, ["f1", "f2", "f3str", "f4sparse", "f5dense"],
["f1", "f2", "f3str", "f4sparse", "f5dense"],
column_name_to_type,
is_xgboost=True)
self.assertEqual(XGBOOST_NULL_MAGIC, features[0][0])
self.assertEqual(int(XGBOOST_NULL_MAGIC), features[1][0])
self.assertEqual("", features[2][0])
self.assertTrue(np.array_equal(np.array([]), features[3][0]))
self.assertTrue(np.array_equal(np.array([]), features[3][1]))
self.assertTrue(
np.array_equal(np.array([1, 2, 3]), features[4][0]))
self.assertEqual(1, label)
idx += 1
self.assertEqual(idx, 2)
@unittest.skipUnless(testing.get_driver() == "mysql",
"skip non mysql tests")
def test_generate_fetch_size(self):
label_meta = {"feature_name": "label", "shape": [], "delimiter": ""}
gen = db_generator(testing.get_singleton_db_connection(),
'SELECT * FROM iris.train limit 10', label_meta)
self.assertEqual(len([g for g in gen()]), 10)
class TestConnectWithDataSource(TestCase):
def test_kv_feature_column(self):
feature_spec = {
"name": "kv_feature_name",
"is_sparse": True,
"format": "kv",
"dtype": "float32",
"shape": [10],
"delimiter": ""
}
raw_val = "0:1 3:4 4:6"
indices, values, shape = read_feature(raw_val, feature_spec,
feature_spec["name"], True)
self.assertTrue(
np.array_equal(indices, np.array([0, 3, 4], dtype='int64')))
self.assertTrue(
np.array_equal(values, np.array([1, 4, 6], dtype='float32')))
self.assertTrue(np.array_equal(shape, np.array([10], dtype='float32')))
class TestGetTableSchema(TestCase):
def test_get_table_schema(self):
conn = testing.get_singleton_db_connection()
if conn.driver == "mysql":
schema = get_table_schema(conn, "iris.train")
expect = [
('sepal_length', 'FLOAT'),
('sepal_width', 'FLOAT'),
('petal_length', 'FLOAT'),
('petal_width', 'FLOAT'),
('class', 'INT'),
]
self.assertEqual(expect, schema)
schema = selected_columns_and_types(
conn,
"SELECT sepal_length, petal_width * 2.3 new_petal_width, "
"class FROM iris.train")
expect = [
("sepal_length", "FLOAT"),
("new_petal_width", "DOUBLE"),
("class", "INT"),
]
self.assertEqual(expect, schema)
elif conn.driver == "hive":
schema = get_table_schema(conn, "iris.train")
expect = (
('sepal_length', 'FLOAT'),
('sepal_width', 'FLOAT'),
('petal_length', 'FLOAT'),
('petal_width', 'FLOAT'),
('class', 'INT'),
)
self.assertTrue(np.array_equal(expect, schema))
schema = selected_columns_and_types(
conn,
"SELECT sepal_length, petal_width * 2.3 AS new_petal_width, "
"class FROM iris.train")
expect = [
("sepal_length", "FLOAT"),
("new_petal_width", "FLOAT"),
("class", "INT"),
]
self.assertTrue(np.array_equal(expect, schema))
elif conn.driver == "maxcompute":
case_db = os.getenv("SQLFLOW_TEST_DB_MAXCOMPUTE_PROJECT")
table = "%s.sqlflow_iris_train" % case_db
schema = get_table_schema(conn, table)
expect = [
('sepal_length', 'DOUBLE'),
('sepal_width', 'DOUBLE'),
('petal_length', 'DOUBLE'),
('petal_width', 'DOUBLE'),
('class', 'BIGINT'),
]
self.assertTrue(np.array_equal(expect, schema))
schema = selected_columns_and_types(
conn,
"SELECT sepal_length, petal_width * 2.3 new_petal_width, "
"class FROM %s" % table)
expect = [
("sepal_length", "DOUBLE"),
("new_petal_width", "DOUBLE"),
("class", "BIGINT"),
]
self.assertTrue(np.array_equal(expect, schema))
class TestMySQLFieldType(TestCase):
@unittest.skipUnless(
os.getenv("SQLFLOW_TEST_DB") == "mysql", "run only in mysql")
def test_field_type(self):
self.assertGreater(len(MYSQL_FIELD_TYPE_DICT), 0)
conn = connect_with_data_source(testing.get_datasource())
table_name = "iris.test_mysql_field_type_table"
drop_table_sql = "DROP TABLE IF EXISTS %s" % table_name
create_table_sql = "CREATE TABLE IF NOT EXISTS " + \
table_name + "(a %s)"
select_sql = "SELECT * FROM %s" % table_name
for int_type, str_type in MYSQL_FIELD_TYPE_DICT.items():
if str_type in ["VARCHAR", "CHAR"]:
str_type += "(255)"
conn.execute(drop_table_sql)
conn.execute(create_table_sql % str_type)
# we are meant to use the low-level cursor here to check the type value
# against the real value returned by mysql
cursor = conn.cursor()
cursor.execute(select_sql)
int_type_actual = cursor.description[0][1]
cursor.close()
conn.execute(drop_table_sql)
self.assertEqual(int_type_actual, int_type,
"%s not match" % str_type)
class TestLimitSelect(TestCase):
def test_limit_select(self):
self.assertEqual("SELECT * FROM t LIMIT 2",
limit_select("SELECT * FROM t LIMIT 30", 2))
self.assertEqual("SELECT * FROM t LIMIT 30; \t",
limit_select("SELECT * FROM t LIMIT 30; \t", 100))
self.assertEqual("SELECT * FROM t LIMIT 3",
limit_select("SELECT * FROM t", 3))
self.assertEqual("SELECT * FROM t \t LIMIT 4; ",
limit_select("SELECT * FROM t \t ; ", 4))
@unittest.skipIf(testing.get_driver() == "maxcompute", "skip maxcompute tests")
class TestQuery(TestCase):
def test_query(self):
conn = connect_with_data_source(testing.get_datasource())
rs = conn.query("select * from iris.train limit 1")
rows = [row for row in rs]
self.assertEqual(1, len(rows))
conn.execute("drop table if exists A")
conn.execute("create table A(a int);")
conn.execute("insert into A values(1)")
rs = conn.query("select * from A;")
rows = [row for row in rs]
self.assertEqual(1, len(rows))
conn.query("truncate table A")
rs = conn.query("select * from A;")
rows = [row for row in rs]
self.assertEqual(0, len(rows))
columns = rs.column_info()
self.assertEqual(1, len(columns))
self.assertEqual("a", columns[0][0])
self.assertEqual("INT", columns[0][1])
self.assertTrue(conn.execute("drop table if exists A"))
if __name__ == "__main__":
unittest.main()
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: plugin_data
:platform: Unix
:synopsis: Contains the PluginData class. Each Data set used in a plugin \
has a PluginData object encapsulated within it, for the duration of a \
plugin run.
.. moduleauthor:: Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
import sys
import copy
import h5py
import logging
import numpy as np
from fractions import gcd
from savu.data.meta_data import MetaData
from savu.data.data_structures.data_add_ons import Padding
class PluginData(object):
""" The PluginData class contains plugin specific information about a Data
object for the duration of a plugin. An instance of the class is
encapsulated inside the Data object during the plugin run
"""
def __init__(self, data_obj, plugin=None):
self.data_obj = data_obj
self._preview = None
self.data_obj._set_plugin_data(self)
self.meta_data = MetaData()
self.padding = None
self.pad_dict = None
self.shape = None
self.shape_transfer = None
self.core_shape = None
self.multi_params = {}
self.extra_dims = []
self._plugin = plugin
self.fixed_dims = True
self.split = None
self.boundary_padding = None
self.no_squeeze = False
self.pre_tuning_shape = None
self._frame_limit = None
def _get_preview(self):
return self._preview
def get_total_frames(self):
""" Get the total number of frames to process (all MPI processes).
:returns: Number of frames
:rtype: int
"""
temp = 1
slice_dir = \
self.data_obj.get_data_patterns()[
self.get_pattern_name()]["slice_dims"]
for tslice in slice_dir:
temp *= self.data_obj.get_shape()[tslice]
return temp
def __set_pattern(self, name):
""" Set the pattern related information in the meta data dict.
"""
pattern = self.data_obj.get_data_patterns()[name]
self.meta_data.set("name", name)
self.meta_data.set("core_dims", pattern['core_dims'])
self.__set_slice_dimensions()
def get_pattern_name(self):
""" Get the pattern name.
:returns: the pattern name
:rtype: str
"""
try:
name = self.meta_data.get("name")
return name
except KeyError:
raise Exception("The pattern name has not been set.")
def get_pattern(self):
""" Get the current pattern.
:returns: dict of the pattern name against the pattern.
:rtype: dict
"""
pattern_name = self.get_pattern_name()
return {pattern_name: self.data_obj.get_data_patterns()[pattern_name]}
def __set_shape(self):
""" Set the shape of the plugin data processing chunk.
"""
core_dir = self.data_obj.get_core_dimensions()
slice_dir = self.data_obj.get_slice_dimensions()
dirs = list(set(core_dir + (slice_dir[0],)))
slice_idx = dirs.index(slice_dir[0])
dshape = self.data_obj.get_shape()
shape = []
for core in set(core_dir):
shape.append(dshape[core])
self.__set_core_shape(tuple(shape))
mfp = self._get_max_frames_process()
if mfp > 1 or self._get_no_squeeze():
shape.insert(slice_idx, mfp)
self.shape = tuple(shape)
def _set_shape_transfer(self, slice_size):
dshape = self.data_obj.get_shape()
shape_before_tuning = self._get_shape_before_tuning()
add = [1]*(len(dshape) - len(shape_before_tuning))
slice_size = slice_size + add
core_dir = self.data_obj.get_core_dimensions()
slice_dir = self.data_obj.get_slice_dimensions()
shape = [None]*len(dshape)
for dim in core_dir:
shape[dim] = dshape[dim]
i = 0
for dim in slice_dir:
shape[dim] = slice_size[i]
i += 1
self.shape_transfer = tuple(shape)
def get_bytes_per_frame(self):
""" Return the size of a single frame in bytes. """
dtype = self.data_obj.dtype
if dtype is None:
data = self.data_obj.data
if hasattr(data, 'dtype'):
dtype = data.dtype
else:
h5 = h5py._hl.dataset.Dataset
dtype = data.dtype if isinstance(data, h5) else data.data.dtype
else:
dtype = np.dtype(dtype)
nBytes = dtype.itemsize
dims = self.get_pattern().values()[0]['core_dims']
return np.prod([self.data_obj.get_shape()[d] for d in dims])*nBytes
def get_shape(self):
""" Get the shape of the data (without padding) that is passed to the
plugin process_frames method.
"""
return self.shape
def _set_padded_shape(self):
pass
def get_padded_shape(self):
""" Get the shape of the data (with padding) that is passed to the
plugin process_frames method.
"""
return self.shape
def get_shape_transfer(self):
""" Get the shape of the plugin data to be transferred each time.
"""
return self.shape_transfer
def __set_core_shape(self, shape):
""" Set the core shape to hold only the shape of the core dimensions
"""
self.core_shape = shape
def get_core_shape(self):
""" Get the shape of the core dimensions only.
:returns: shape of core dimensions
:rtype: tuple
"""
return self.core_shape
def _set_shape_before_tuning(self, shape):
""" Set the shape of the full dataset used during each run of the \
plugin (i.e. ignore extra dimensions due to parameter tuning). """
self.pre_tuning_shape = shape
def _get_shape_before_tuning(self):
""" Return the shape of the full dataset used during each run of the \
plugin (i.e. ignore extra dimensions due to parameter tuning). """
return self.pre_tuning_shape if self.pre_tuning_shape else\
self.data_obj.get_shape()
def __check_dimensions(self, indices, core_dir, slice_dir, nDims):
if len(indices) != len(slice_dir):
sys.exit("Incorrect number of indices specified when accessing "
"data.")
if (len(core_dir)+len(slice_dir)) != nDims:
sys.exit("Incorrect number of data dimensions specified.")
def __set_slice_dimensions(self):
""" Set the slice dimensions in the pluginData meta data dictionary.
"""
slice_dirs = self.data_obj.get_data_patterns()[
self.get_pattern_name()]['slice_dims']
self.meta_data.set('slice_dims', slice_dirs)
def get_slice_dimension(self):
"""
Return the position of the slice dimension in relation to the data
handed to the plugin.
"""
core_dirs = self.data_obj.get_core_dimensions()
slice_dir = self.data_obj.get_slice_dimensions()[0]
return list(set(core_dirs + (slice_dir,))).index(slice_dir)
def get_data_dimension_by_axis_label(self, label, contains=False):
"""
Return the dimension of the data in the plugin that has the specified
axis label.
"""
label_dim = self.data_obj.get_data_dimension_by_axis_label(
label, contains=contains)
plugin_dims = self.data_obj.get_core_dimensions()
if self._get_max_frames_process() > 1 or self.max_frames == 'multiple':
plugin_dims += (self.get_slice_dimension(),)
return list(set(plugin_dims)).index(label_dim)
def set_slicing_order(self, order):
"""
Reorder the slice dimensions. The fastest changing slice dimension
will always be the first one stated in the pattern key ``slice_dir``.
The input param is a tuple stating the desired order of slicing
dimensions relative to the current order.
"""
slice_dirs = self.get_slice_directions()
if len(slice_dirs) < len(order):
raise Exception("Incorrect number of dimensions specifed.")
ordered = [slice_dirs[o] for o in order]
remaining = [s for s in slice_dirs if s not in ordered]
new_slice_dirs = tuple(ordered + remaining)
self.get_current_pattern()['slice_dir'] = new_slice_dirs
def get_core_dimensions(self):
"""
Return the position of the core dimensions in relation to the data
handed to the plugin.
"""
core_dims = self.data_obj.get_core_dimensions()
first_slice_dim = (self.data_obj.get_slice_dimensions()[0],)
plugin_dims = np.sort(core_dims + first_slice_dim)
return np.searchsorted(plugin_dims, np.sort(core_dims))
def set_fixed_dimensions(self, dims, values):
""" Fix a data direction to the index in values list.
:param list(int) dims: Directions to fix
:param list(int) value: Index of fixed directions
"""
slice_dirs = self.data_obj.get_slice_dimensions()
if set(dims).difference(set(slice_dirs)):
raise Exception("You are trying to fix a direction that is not"
" a slicing direction")
self.meta_data.set("fixed_dimensions", dims)
self.meta_data.set("fixed_dimensions_values", values)
self.__set_slice_dimensions()
shape = list(self.data_obj.get_shape())
for dim in dims:
shape[dim] = 1
self.data_obj.set_shape(tuple(shape))
self.__set_shape()
def _get_fixed_dimensions(self):
""" Get the fixed data directions and their indices
:returns: Fixed directions and their associated values
:rtype: list(list(int), list(int))
"""
fixed = []
values = []
if 'fixed_dimensions' in self.meta_data.get_dictionary():
fixed = self.meta_data.get("fixed_dimensions")
values = self.meta_data.get("fixed_dimensions_values")
return [fixed, values]
def _get_data_slice_list(self, plist):
""" Convert a plugin data slice list to a slice list for the whole
dataset, i.e. add in any missing dimensions.
"""
nDims = len(self.get_shape())
all_dims = self.get_core_dimensions() + self.get_slice_dimension()
extra_dims = all_dims[nDims:]
dlist = list(plist)
for i in extra_dims:
dlist.insert(i, slice(None))
return tuple(dlist)
def _get_max_frames_process(self):
""" Get the number of frames to process for each run of process_frames.
If the number of frames is not divisible by the previewing ``chunk``
value then amend the number of frames to gcd(frames, chunk)
:returns: Number of frames to process
:rtype: int
"""
if self._plugin and self._plugin.chunk > 1:
frame_chunk = self.meta_data.get("max_frames_process")
chunk = self.data_obj.get_preview().get_starts_stops_steps(
key='chunks')[self.get_slice_directions()[0]]
self.meta_data.set('max_frames_process', gcd(frame_chunk, chunk))
return self.meta_data.get("max_frames_process")
def _get_max_frames_transfer(self):
""" Get the number of frames to transfer for each run of
process_frames. """
return self.meta_data.get('max_frames_transfer')
def _set_no_squeeze(self):
self.no_squeeze = True
def _get_no_squeeze(self):
return self.no_squeeze
def _get_max_frames_parameters(self):
fixed, _ = self._get_fixed_dimensions()
sdir = \
[s for s in self.data_obj.get_slice_dimensions() if s not in fixed]
shape = self.data_obj.get_shape()
shape_before_tuning = self._get_shape_before_tuning()
diff = len(shape) - len(shape_before_tuning)
if diff:
shape = shape_before_tuning
sdir = sdir[:-diff]
frames = np.prod([shape[d] for d in sdir])
base_names = [p.__name__ for p in self._plugin.__class__.__bases__]
processes = self.data_obj.exp.meta_data.get('processes')
if 'GpuPlugin' in base_names:
n_procs = len([n for n in processes if 'GPU' in n])
else:
n_procs = len(processes)
f_per_p = np.ceil(frames/n_procs)
params_dict = {'shape': shape, 'sdir': sdir, 'total_frames': frames,
'mpi_procs': n_procs, 'frames_per_process': f_per_p}
return params_dict
def __log_max_frames(self, mft, mfp, check=True):
logging.debug("Setting max frames transfer for plugin %s to %d" %
(self._plugin, mft))
logging.debug("Setting max frames process for plugin %s to %d" %
(self._plugin, mfp))
self.meta_data.set('max_frames_process', mfp)
if check:
self.__check_distribution(mft)
# (((total_frames/mft)/mpi_procs) % 1)
def __check_distribution(self, mft):
self.params = self._get_max_frames_parameters()
warn_threshold = 0.85
nprocs = self.params['mpi_procs']
nframes = self.params['total_frames']
temp = (((nframes/mft)/float(nprocs)) % 1)
if temp != 0.0 and temp < warn_threshold:
logging.warn('UNEVEN FRAME DISTRIBUTION: shape %s, nframes %s ' +
'sdir %s, nprocs %s', self.params['shape'],
nframes, self.params['sdir'], nprocs)
def _set_padding_dict(self):
if self.padding and not isinstance(self.padding, Padding):
self.pad_dict = copy.deepcopy(self.padding)
self.padding = Padding(self)
for key in self.pad_dict.keys():
getattr(self.padding, key)(self.pad_dict[key])
def plugin_data_setup(self, pattern, nFrames, split=None):
""" Setup the PluginData object.
# add more information into here via a decorator!
:param str pattern: A pattern name
:param int nFrames: How many frames to process at a time. Choose from\
'single', 'multiple', 'fixed_multiple' or an integer (an integer \
should only ever be passed in exceptional circumstances)
"""
self.__set_pattern(pattern)
mData = self.data_obj.exp.meta_data
if 'dawn_runner' in mData.get_dictionary().keys():
return
chunks = \
self.data_obj.get_preview().get_starts_stops_steps(key='chunks')
if isinstance(nFrames, list):
nFrames, self._frame_limit = nFrames
self.__set_max_frames(nFrames)
mft = self.meta_data.get('max_frames_transfer')
if self._plugin and mft \
and (chunks[self.data_obj.get_slice_dimensions()[0]] % mft):
self._plugin.chunk = True
self.__set_shape()
self.split = split
def __set_max_frames(self, nFrames):
self.max_frames = nFrames
self.__perform_checks(nFrames)
td = self.data_obj._get_transport_data()
mft, mft_shape = td._calc_max_frames_transfer(nFrames)
self.meta_data.set('max_frames_transfer', mft)
if mft:
self._set_shape_transfer(mft_shape)
mfp = td._calc_max_frames_process(nFrames)
self.meta_data.set('max_frames_process', mfp)
self.__log_max_frames(mft, mfp)
# Retain the shape if the first slice dimension has length 1
if mfp == 1 and nFrames == 'multiple':
self._set_no_squeeze()
def __perform_checks(self, nFrames):
options = ['single', 'multiple']
if not isinstance(nFrames, int) and nFrames not in options:
e_str = "The value of nFrames is not recognised. Please choose "
"from 'single' and 'multiple' (or an integer in exceptional "
"circumstances)."
raise Exception(e_str)
def get_frame_limit(self):
return self._frame_limit
def get_current_frame_idx(self):
""" Returns the index of the frames currently being processed.
"""
global_index = self._plugin.get_global_frame_index()
count = self._plugin.get_process_frames_counter()
mfp = self.meta_data.get('max_frames_process')
start = global_index[count]*mfp
index = np.arange(start, start + mfp)
nFrames = self.get_total_frames()
index[index >= nFrames] = nFrames - 1
return index
|
import numpy as np
import os
import path_config
from datasets.data import Sequence, BaseDataset, SequenceList
def GOT10KDatasetTest():
""" GOT-10k official test set"""
return GOT10KDatasetClass("test").get_sequence_list()
def GOT10KDatasetVal():
""" GOT-10k official val set"""
return GOT10KDatasetClass("val").get_sequence_list()
class GOT10KDatasetClass(BaseDataset):
""" GOT-10k dataset.
Publication:
GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild
Lianghua Huang, Xin Zhao, and Kaiqi Huang
arXiv:1810.11981, 2018
https://arxiv.org/pdf/1810.11981.pdf
Download dataset from http://got-10k.aitestunion.com/downloads
"""
def __init__(self, split):
"""
args:
split - Split to use. Can be i) 'test': official test set, ii) 'val': official val set, and iii) 'ltrval':
a custom validation set, a subset of the official train set.
"""
super().__init__()
# Split can be test, val, or ltrval
if split == "test" or split == "val":
self.base_path = os.path.join(path_config.GOT10K_PATH, split)
else:
self.base_path = os.path.join(path_config.GOT10K_PATH, "train")
self.sequence_list = self._get_sequence_list(split)
self.split = split
def get_sequence_list(self):
return SequenceList([self._construct_sequence(s) for s in self.sequence_list])
def _construct_sequence(self, sequence_name):
anno_path = "{}/{}/groundtruth.txt".format(self.base_path, sequence_name)
try:
ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64)
except Exception:
ground_truth_rect = np.loadtxt(
str(anno_path), delimiter=",", dtype=np.float64
)
frames_path = "{}/{}".format(self.base_path, sequence_name)
frame_list = [
frame for frame in os.listdir(frames_path) if frame.endswith(".jpg")
]
frame_list.sort(key=lambda f: int(f[:-4]))
frames_list = [os.path.join(frames_path, frame) for frame in frame_list]
return Sequence(
sequence_name, frames_list, "got10k", ground_truth_rect.reshape(-1, 4)
)
def __len__(self):
"""Overload this function in your evaluation. This should return number of sequences in the evaluation """
return len(self.sequence_list)
def _get_sequence_list(self, split):
with open("{}/list.txt".format(self.base_path)) as f:
sequence_list = f.read().splitlines()
return sequence_list
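# --------------------------------------------------------------------------
# Minimal usage sketch (commented out; an illustration rather than part of
# the original module, and it assumes path_config.GOT10K_PATH points at a
# local GOT-10k download):
#
#     sequences = GOT10KDatasetVal()        # SequenceList for the official val split
#     first = sequences[0]                  # assumes SequenceList supports indexing
#     print(first.name, len(first.frames))  # attribute names are assumptions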
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import copy
from datetime import datetime
from typing import Any, Iterator, Mapping, MutableMapping, Type
from airbyte_protocol import (
AirbyteCatalog,
AirbyteConnectionStatus,
AirbyteMessage,
AirbyteRecordMessage,
AirbyteStateMessage,
ConfiguredAirbyteCatalog,
ConfiguredAirbyteStream,
Status,
SyncMode,
)
from airbyte_protocol import Type as MessageType
from .client import BaseClient
from .integration import Source
from .logger import AirbyteLogger
class BaseSource(Source):
"""Base source that designed to work with clients derived from BaseClient"""
client_class: Type[BaseClient] = None
@property
def name(self) -> str:
"""Source name"""
return self.__class__.__name__
def _get_client(self, config: Mapping):
"""Construct client"""
client = self.client_class(**config)
return client
def discover(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> AirbyteCatalog:
"""Discover streams"""
client = self._get_client(config)
return AirbyteCatalog(streams=[stream for stream in client.streams])
def check(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
"""Check connection"""
client = self._get_client(config)
alive, error = client.health_check()
if not alive:
return AirbyteConnectionStatus(status=Status.FAILED, message=str(error))
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
def read(
self, logger: AirbyteLogger, config: Mapping[str, Any], catalog: ConfiguredAirbyteCatalog, state: MutableMapping[str, Any] = None
) -> Iterator[AirbyteMessage]:
state = state or {}
client = self._get_client(config)
logger.info(f"Starting syncing {self.name}")
total_state = copy.deepcopy(state)
for configured_stream in catalog.streams:
try:
yield from self._read_stream(logger=logger, client=client, configured_stream=configured_stream, state=total_state)
except Exception:
logger.exception(f"Encountered an exception while reading stream {self.name}")
raise
logger.info(f"Finished syncing {self.name}")
def _read_stream(
self, logger: AirbyteLogger, client: BaseClient, configured_stream: ConfiguredAirbyteStream, state: MutableMapping[str, Any]
):
stream_name = configured_stream.stream.name
use_incremental = configured_stream.sync_mode == SyncMode.incremental and client.stream_has_state(stream_name)
if use_incremental and state.get(stream_name):
logger.info(f"Set state of {stream_name} stream to {state.get(stream_name)}")
client.set_stream_state(stream_name, state.get(stream_name))
logger.info(f"Syncing {stream_name} stream")
for record in client.read_stream(configured_stream.stream):
now = int(datetime.now().timestamp()) * 1000
message = AirbyteRecordMessage(stream=stream_name, data=record, emitted_at=now)
yield AirbyteMessage(type=MessageType.RECORD, record=message)
if use_incremental and client.get_stream_state(stream_name):
state[stream_name] = client.get_stream_state(stream_name)
# output state object only together with other stream states
yield AirbyteMessage(type=MessageType.STATE, state=AirbyteStateMessage(data=state))
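# --------------------------------------------------------------------------
# Minimal wiring sketch (commented out; an illustration rather than part of
# the original module). A concrete source only needs to point client_class
# at a BaseClient subclass; the names below are hypothetical:
#
#     class MyApiClient(BaseClient):
#         """Implements streams, health_check() and read_stream() for some API."""
#
#     class SourceMyApi(BaseSource):
#         client_class = MyApiClient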
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.data_class import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
add_checkpoint_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
gen_parser_from_dataclass(group, CommonEvalConfig())
return parser
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
if args.arch in ARCH_MODEL_REGISTRY:
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
elif args.arch in MODEL_REGISTRY:
MODEL_REGISTRY[args.arch].add_args(model_specific_group)
else:
raise RuntimeError()
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
elif hasattr(cls, "__dataclass"):
gen_parser_from_dataclass(parser, cls.__dataclass())
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if (
hasattr(args, "batch_size_valid") and args.batch_size_valid is None
) or not hasattr(args, "batch_size_valid"):
args.batch_size_valid = args.batch_size
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
if args.bf16:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
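# Illustrative usage sketch (commented out; an illustration rather than part
# of the original module). The data directory and architecture name below
# are hypothetical:
#
#     parser = get_training_parser()
#     args = parse_args_and_arch(
#         parser, input_args=["data-bin/example", "--arch", "transformer"]
#     )
#     # args now also carries the architecture-, task- and registry-specific
#     # arguments added during the second parsing pass.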
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonConfig())
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("dataset_data_loading")
gen_parser_from_dataclass(group, DatasetConfig())
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("distributed_training")
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
gen_parser_from_dataclass(
group, DistributedTrainingConfig(distributed_world_size=default_world_size)
)
return group
def add_optimization_args(parser):
group = parser.add_argument_group("optimization")
# fmt: off
gen_parser_from_dataclass(group, OptimizationConfig())
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("checkpoint")
# fmt: off
gen_parser_from_dataclass(group, CheckpointConfig())
# fmt: on
return group
def add_common_eval_args(group):
gen_parser_from_dataclass(group, CommonEvalConfig())
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMConfig())
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, GenerationConfig())
return group
def add_interactive_args(parser):
group = parser.add_argument_group("Interactive")
gen_parser_from_dataclass(group, InteractiveConfig())
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from fairseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture')
# fmt: on
return group
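# Illustrative sketch, not part of fairseq itself: given the priority order
# documented in add_model_args above, a hypothetical training invocation such as
#
#   fairseq-train data-bin/example --arch transformer --encoder-embed-dim 1024
#
# would start from the defaults registered for the chosen --arch (here
# 'transformer') and then let the explicit --encoder-embed-dim flag override
# that single value; anything left unspecified falls back to the model defaults.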
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-25 16:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address_type', models.CharField(choices=[('BU', 'Business'), ('PE', 'Personal')], max_length=2)),
('city', models.CharField(max_length=32, null=True)),
('country_code', models.CharField(max_length=2, null=True)),
('cc_id', models.CharField(max_length=36)),
('line1', models.CharField(max_length=100, null=True)),
('line2', models.CharField(max_length=100, null=True)),
('line3', models.CharField(max_length=100, null=True)),
('postal_code', models.CharField(max_length=10, null=True)),
('state', models.CharField(max_length=20, null=True)),
('state_code', models.CharField(max_length=2, null=True)),
('sub_postal_code', models.CharField(max_length=20, null=True)),
],
),
migrations.CreateModel(
name='ConstantContactList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cc_id', models.IntegerField()),
('status', models.CharField(choices=[('AC', 'Active'), ('HI', 'Hidden')], max_length=2)),
('name', models.CharField(max_length=48)),
('created_date', models.DateTimeField()),
('modified_date', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('confirmed', models.NullBooleanField()),
('company_name', models.CharField(max_length=100, null=True)),
('created_date', models.DateTimeField()),
('first_name', models.CharField(max_length=50, null=True)),
('middle_name', models.CharField(max_length=50, null=True)),
('last_name', models.CharField(max_length=50, null=True)),
('cc_id', models.IntegerField()),
('cc_modified_date', models.DateTimeField()),
('prefix_name', models.CharField(max_length=10, null=True)),
('job_title', models.CharField(max_length=50, null=True)),
('source', models.CharField(max_length=50, null=True)),
('status', models.CharField(choices=[('UN', 'Unconfirmed'), ('AC', 'Active'), ('OP', 'Optout'), ('RE', 'Removed'), ('NO', 'Non Subscriber')], max_length=2)),
('addresses', models.ManyToManyField(to='datacombine.Address')),
],
),
migrations.CreateModel(
name='EmailAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('confirm_status', models.CharField(choices=[('CO', 'Confirmed'), ('NC', 'No Confirmation Required')], max_length=3)),
('cc_id', models.CharField(max_length=36)),
('status', models.CharField(choices=[('UN', 'Unconfirmed'), ('AC', 'Active'), ('OP', 'Optout'), ('RE', 'Removed'), ('NO', 'Non Subscriber')], max_length=2)),
('opt_in_date', models.DateTimeField(null=True)),
('opt_out_date', models.DateTimeField(null=True)),
('email_address', models.EmailField(max_length=254)),
('opt_in_source', models.CharField(choices=[('AO', 'Action by Owner'), ('AV', 'Action by Visitor')], max_length=2)),
],
),
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField()),
('cc_id', models.CharField(max_length=36)),
('modified_date', models.DateTimeField()),
('note', models.TextField()),
],
),
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('area_code', models.CharField(max_length=3, null=True)),
('number', models.CharField(max_length=7)),
('extension', models.CharField(max_length=7, null=True)),
],
),
migrations.CreateModel(
name='UserStatusOnCCList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('AC', 'Active'), ('HI', 'Hidden')], max_length=2)),
('cclist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='datacombine.ConstantContactList')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='datacombine.Contact')),
],
),
migrations.AddField(
model_name='contact',
name='cc_lists',
field=models.ManyToManyField(through='datacombine.UserStatusOnCCList', to='datacombine.ConstantContactList'),
),
migrations.AddField(
model_name='contact',
name='cell_phone',
field=models.ManyToManyField(related_name='_contact_cell_phone_+', to='datacombine.Phone'),
),
migrations.AddField(
model_name='contact',
name='email_addresses',
field=models.ManyToManyField(to='datacombine.EmailAddress'),
),
migrations.AddField(
model_name='contact',
name='fax',
field=models.ManyToManyField(related_name='_contact_fax_+', to='datacombine.Phone'),
),
migrations.AddField(
model_name='contact',
name='home_phone',
field=models.ManyToManyField(related_name='_contact_home_phone_+', to='datacombine.Phone'),
),
migrations.AddField(
model_name='contact',
name='notes',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='datacombine.Note'),
),
migrations.AddField(
model_name='contact',
name='work_phone',
field=models.ManyToManyField(related_name='_contact_work_phone_+', to='datacombine.Phone'),
),
]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(revisions=["issue583-v1", "issue583-v2"])
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imports for problem modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import six
from six.moves import range # pylint: disable=redefined-builtin
MODULES = [
"tensor2tensor.data_generators.algorithmic",
"tensor2tensor.data_generators.algorithmic_math",
"tensor2tensor.data_generators.algorithmic_math_deepmind",
"tensor2tensor.data_generators.algorithmic_math_two_variables",
"tensor2tensor.data_generators.allen_brain",
"tensor2tensor.data_generators.audio",
"tensor2tensor.data_generators.babi_qa",
"tensor2tensor.data_generators.bair_robot_pushing",
"tensor2tensor.data_generators.celeba",
"tensor2tensor.data_generators.celebahq",
"tensor2tensor.data_generators.cifar",
"tensor2tensor.data_generators.cipher",
"tensor2tensor.data_generators.cnn_dailymail",
"tensor2tensor.data_generators.cola",
"tensor2tensor.data_generators.common_voice",
"tensor2tensor.data_generators.desc2code",
"tensor2tensor.data_generators.fsns",
"tensor2tensor.data_generators.function_docstring",
"tensor2tensor.data_generators.gene_expression",
"tensor2tensor.data_generators.google_robot_pushing",
"tensor2tensor.data_generators.gym_env",
"tensor2tensor.data_generators.ice_parsing",
"tensor2tensor.data_generators.imagenet",
"tensor2tensor.data_generators.image_lsun",
"tensor2tensor.data_generators.imdb",
"tensor2tensor.data_generators.lambada",
"tensor2tensor.data_generators.librispeech",
"tensor2tensor.data_generators.lm1b",
"tensor2tensor.data_generators.lm1b_imdb",
"tensor2tensor.data_generators.lm1b_mnli",
"tensor2tensor.data_generators.mnist",
"tensor2tensor.data_generators.mrpc",
"tensor2tensor.data_generators.mscoco",
"tensor2tensor.data_generators.multinli",
"tensor2tensor.data_generators.paraphrase_ms_coco",
"tensor2tensor.data_generators.program_search",
"tensor2tensor.data_generators.ocr",
"tensor2tensor.data_generators.pointer_generator_word",
"tensor2tensor.data_generators.problem_hparams",
"tensor2tensor.data_generators.ptb",
"tensor2tensor.data_generators.qnli",
"tensor2tensor.data_generators.quora_qpairs",
"tensor2tensor.data_generators.rte",
"tensor2tensor.data_generators.scitail",
"tensor2tensor.data_generators.snli",
"tensor2tensor.data_generators.stanford_nli",
"tensor2tensor.data_generators.style_transfer",
"tensor2tensor.data_generators.squad",
"tensor2tensor.data_generators.sst_binary",
"tensor2tensor.data_generators.subject_verb_agreement",
"tensor2tensor.data_generators.timeseries",
"tensor2tensor.data_generators.transduction_problems",
"tensor2tensor.data_generators.translate_encs",
"tensor2tensor.data_generators.translate_ende",
"tensor2tensor.data_generators.translate_enet",
"tensor2tensor.data_generators.translate_enfr",
"tensor2tensor.data_generators.translate_enid",
"tensor2tensor.data_generators.translate_enmk",
"tensor2tensor.data_generators.translate_envi",
"tensor2tensor.data_generators.translate_enzh",
"tensor2tensor.data_generators.video_generated",
"tensor2tensor.data_generators.vqa",
"tensor2tensor.data_generators.wiki",
"tensor2tensor.data_generators.wiki_lm",
"tensor2tensor.data_generators.wiki_revision",
"tensor2tensor.data_generators.wiki_multi_problems",
"tensor2tensor.data_generators.wikisum.wikisum",
"tensor2tensor.data_generators.wikitext103",
"tensor2tensor.data_generators.wsj_parsing",
"tensor2tensor.data_generators.wnli",
"tensor2tensor.data_generators.yelp_polarity",
"tensor2tensor.data_generators.yelp_full",
"tensor2tensor.envs.mujoco_problems",
"tensor2tensor.envs.tic_tac_toe_env_problem",
]
ALL_MODULES = list(MODULES)
def _is_import_err_msg(err_str, module):
parts = module.split(".")
suffixes = [".".join(parts[i:]) for i in range(len(parts))]
return err_str in (
["No module named %s" % suffix for suffix in suffixes] +
["No module named '%s'" % suffix for suffix in suffixes])
def _handle_errors(errors):
"""Log out and possibly reraise errors during import."""
if not errors:
return
  log_all = True  # when True, also print each module that was skipped and why
err_msg = "T2T: skipped importing {num_missing} data_generators modules."
print(err_msg.format(num_missing=len(errors)))
for module, err in errors:
err_str = str(err)
if not _is_import_err_msg(err_str, module):
print("From module %s" % module)
raise err
if log_all:
print("Did not import module: %s; Cause: %s" % (module, err_str))
def import_modules(modules):
errors = []
for module in modules:
try:
importlib.import_module(module)
except ImportError as error:
errors.append((module, error))
_handle_errors(errors)
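# Minimal usage sketch (an assumption about typical use; the public tensor2tensor
# entry points call this for you): importing every module in ALL_MODULES registers
# its problems as an import side effect, and modules whose optional dependencies
# are missing are merely reported and skipped rather than aborting the import.
def _example_import_all_problems():  # illustrative only, never called here
    import_modules(ALL_MODULES)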
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')x35iofgd9whv29s3y2(o2sifgs$c5l(r4*9tm+hu)%daqpxs6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
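# Sketch of a local configuration (placeholder values, not real credentials):
# the database settings above are read entirely from the environment, so values
# along these lines must be exported before Django starts.
#
#   DB_HOST=localhost DB_NAME=app DB_USER=postgres DB_PASS=changeme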
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = '/vol/web/static'
MEDIA_ROOT = '/vol/web/media'
AUTH_USER_MODEL = 'core.User'
|
"""
.. module:: dataset
:synopsis: dataset for sequence labeling
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import pickle
import random
import functools
import itertools
from tqdm import tqdm
class SeqDataset(object):
"""
Dataset for Sequence Labeling
Parameters
----------
dataset : ``list``, required.
The encoded dataset (outputs of preprocess scripts).
w_pad : ``int``, required.
The pad index for the word-level inputs.
c_con : ``int``, required.
The index of connect character token for character-level inputs.
c_pad : ``int``, required.
The pad index for the character-level inputs.
y_start : ``int``, required.
The index of the start label token.
y_pad : ``int``, required.
The index of the pad label token.
y_size : ``int``, required.
The size of the tag set.
batch_size: ``int``, required.
Batch size.
"""
def __init__(self,
dataset: list,
w_pad: int,
c_con: int,
c_pad: int,
y_start: int,
y_pad: int,
y_size: int,
batch_size: int,
if_shuffle: bool = True):
super(SeqDataset, self).__init__()
self.w_pad = w_pad
self.c_con = c_con
self.c_pad = c_pad
self.y_pad = y_pad
self.y_size = y_size
self.y_start = y_start
self.batch_size = batch_size
self.if_shuffle = if_shuffle
self.construct_index(dataset)
if self.if_shuffle:
self.shuffle()
def shuffle(self):
"""
shuffle dataset
"""
random.shuffle(self.shuffle_list)
def get_tqdm(self, device):
"""
construct dataset reader and the corresponding tqdm.
Parameters
----------
device: ``torch.device``, required.
the target device for the dataset loader.
"""
return tqdm(self.reader(device), mininterval=2, total=self.index_length // self.batch_size, leave=False, file=sys.stdout, ncols=80)
def construct_index(self, dataset):
"""
construct index for the dataset.
Parameters
----------
dataset: ``list``, required.
the encoded dataset (outputs of preprocess scripts).
"""
for instance in dataset:
c_len = [len(tup)+1 for tup in instance[1]]
c_ins = [tup for ins in instance[1] for tup in (ins + [self.c_con])]
instance[1] = c_ins
instance.append(c_len)
self.dataset = dataset
self.index_length = len(dataset)
self.shuffle_list = list(range(0, self.index_length))
def reader(self, device):
"""
construct dataset reader.
Parameters
----------
device: ``torch.device``, required.
the target device for the dataset loader.
Returns
-------
reader: ``iterator``.
A lazy iterable object
"""
cur_idx = 0
while cur_idx < self.index_length:
end_index = min(cur_idx + self.batch_size, self.index_length)
batch = [self.dataset[self.shuffle_list[index]] for index in range(cur_idx, end_index)]
cur_idx = end_index
yield self.batchify(batch, device)
if self.if_shuffle:
self.shuffle()
def batchify(self, batch, device):
"""
batchify a batch of data and move to a device.
Parameters
----------
batch: ``list``, required.
            a list of samples from the encoded dataset (outputs of preprocess scripts).
device: ``torch.device``, required.
the target device for the dataset loader.
"""
cur_batch_size = len(batch)
char_padded_len = max([len(tup[1]) for tup in batch])
word_padded_len = max([len(tup[0]) for tup in batch])
tmp_batch = [list() for ind in range(8)]
for instance_ind in range(cur_batch_size):
instance = batch[instance_ind]
"""
instance[0]: a list of words
[1]: a list of chars
[2]: a list of tags
[3]: a list of word_lens
tmp_batch[0]: f_c, forward character
[1]: f_p, forward char pos
[2]: b_c, back char
[3]: b_p, back char pos
[4]: f_w, forw word
[5]: f_y, forw tag
                     [6]: f_y_m, forw tag mask
[7]: g_y, a list of tags
"""
char_padded_len_ins = char_padded_len - len(instance[1])
word_padded_len_ins = word_padded_len - len(instance[0])
tmp_batch[0].append(instance[1] + [self.c_pad] + [self.c_pad] * char_padded_len_ins)
tmp_batch[2].append([self.c_pad] + instance[1][::-1] + [self.c_pad] * char_padded_len_ins)
tmp_p = list( itertools.accumulate(instance[3]+[1]+[0]* word_padded_len_ins) )
tmp_batch[1].append([(x - 1) * cur_batch_size + instance_ind for x in tmp_p])
tmp_p = list(itertools.accumulate([1]+instance[3][::-1]))[::-1] + [1]*word_padded_len_ins
tmp_batch[3].append([(x - 1) * cur_batch_size + instance_ind for x in tmp_p])
tmp_batch[4].append(instance[0] + [self.w_pad] + [self.w_pad] * word_padded_len_ins)
tmp_batch[5].append([self.y_start * self.y_size + instance[2][0]] + [instance[2][ind] * self.y_size + instance[2][ind+1] for ind in range(len(instance[2]) - 1)] + [instance[2][-1] * self.y_size + self.y_pad] + [self.y_pad * self.y_size + self.y_pad] * word_padded_len_ins)
tmp_batch[6].append([1] * len(instance[2]) + [1] + [0] * word_padded_len_ins)
tmp_batch[7].append(instance[2])
tbt = [torch.LongTensor(v).transpose(0, 1).contiguous() for v in tmp_batch[0:6]] + [torch.ByteTensor(tmp_batch[6]).transpose(0, 1).contiguous()]
tbt[1] = tbt[1].view(-1)
tbt[3] = tbt[3].view(-1)
return [ten.to(device) for ten in tbt] + [tmp_batch[7]]
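# Usage sketch. The concrete index values, batch size and the model interface
# below are assumptions for illustration only; the real encoded dataset comes
# from the preprocess scripts mentioned in the docstrings above.
def _example_epoch(encoded_dataset, model, device):  # illustrative only
    loader = SeqDataset(encoded_dataset,
                        w_pad=0, c_con=1, c_pad=2,
                        y_start=0, y_pad=1, y_size=10,
                        batch_size=32)
    for f_c, f_p, b_c, b_p, f_w, f_y, f_y_m, g_y in loader.get_tqdm(device):
        # every tensor in the batch is already padded and moved to `device`;
        # g_y is the list of raw tag sequences, useful for evaluation
        loss = model(f_c, f_p, b_c, b_p, f_w, f_y, f_y_m)
        loss.backward()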
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# performance scenario configuration for various languages
import math
WARMUP_SECONDS = 5
JAVA_WARMUP_SECONDS = 15 # Java needs more warmup time for JIT to kick in.
BENCHMARK_SECONDS = 30
SMOKETEST = 'smoketest'
SCALABLE = 'scalable'
INPROC = 'inproc'
SWEEP = 'sweep'
DEFAULT_CATEGORIES = (SCALABLE, SMOKETEST)
SECURE_SECARGS = {
'use_test_ca': True,
'server_host_override': 'foo.test.google.fr'
}
HISTOGRAM_PARAMS = {
'resolution': 0.01,
'max_possible': 60e9,
}
# target number of RPCs outstanding across all client channels in
# non-ping-pong tests (since we can only specify per-channel numbers, the
# actual target will be slightly higher)
OUTSTANDING_REQUESTS = {'async': 6400, 'async-limited': 800, 'sync': 1000}
# wide is the number of client channels in multi-channel tests (1 otherwise)
WIDE = 64
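# For example, an unconstrained 'async' scenario targets 6400 outstanding RPCs;
# with the default WIDE=64 channels that becomes ceil(6400 / 64) = 100 RPCs
# outstanding per channel (see the wide/deep computation in _ping_pong_scenario).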
def _get_secargs(is_secure):
if is_secure:
return SECURE_SECARGS
else:
return None
def remove_nonproto_fields(scenario):
"""Removes special-purpose fields that don't belong in the protobuf.
This function removes additional information about the scenario that is not
included in the ScenarioConfig protobuf message.
"""
scenario.pop('CATEGORIES', None)
scenario.pop('CLIENT_LANGUAGE', None)
scenario.pop('SERVER_LANGUAGE', None)
scenario.pop('EXCLUDED_POLL_ENGINES', None)
return scenario
def geometric_progression(start, stop, step):
n = start
while n < stop:
yield int(round(n))
n *= step
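# For example, geometric_progression(1, 20, 10) yields 1, 10 and
# geometric_progression(1, 200, math.sqrt(10)) yields roughly 1, 3, 10, 32, 100
# (rounded to ints); the scenarios below use it to sweep messages_per_stream,
# channel counts and message sizes.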
def _payload_type(use_generic_payload, req_size, resp_size):
r = {}
sizes = {
'req_size': req_size,
'resp_size': resp_size,
}
if use_generic_payload:
r['bytebuf_params'] = sizes
else:
r['simple_params'] = sizes
return r
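# e.g. _payload_type(False, 1024, 2048) returns
# {'simple_params': {'req_size': 1024, 'resp_size': 2048}}, while passing
# use_generic_payload=True puts the same sizes under 'bytebuf_params'.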
def _load_params(offered_load):
r = {}
if offered_load is None:
r['closed_loop'] = {}
else:
load = {}
load['offered_load'] = offered_load
r['poisson'] = load
return r
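# e.g. _load_params(None) returns {'closed_loop': {}} (the client simply keeps
# the configured number of RPCs outstanding), while _load_params(37500) returns
# {'poisson': {'offered_load': 37500}}, as used by the 75Kqps scenario below.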
def _add_channel_arg(config, key, value):
if 'channel_args' in config:
channel_args = config['channel_args']
else:
channel_args = []
config['channel_args'] = channel_args
arg = {'name': key}
if isinstance(value, int):
arg['int_value'] = value
else:
arg['str_value'] = value
channel_args.append(arg)
def _ping_pong_scenario(name,
rpc_type,
client_type,
server_type,
secure=True,
use_generic_payload=False,
req_size=0,
resp_size=0,
unconstrained_client=None,
client_language=None,
server_language=None,
async_server_threads=0,
client_processes=0,
server_processes=0,
server_threads_per_cq=0,
client_threads_per_cq=0,
warmup_seconds=WARMUP_SECONDS,
categories=None,
channels=None,
outstanding=None,
num_clients=None,
resource_quota_size=None,
messages_per_stream=None,
excluded_poll_engines=None,
minimal_stack=False,
offered_load=None):
"""Creates a basic ping pong scenario."""
scenario = {
'name': name,
'num_servers': 1,
'num_clients': 1,
'client_config': {
'client_type': client_type,
'security_params': _get_secargs(secure),
'outstanding_rpcs_per_channel': 1,
'client_channels': 1,
'async_client_threads': 1,
'client_processes': client_processes,
'threads_per_cq': client_threads_per_cq,
'rpc_type': rpc_type,
'histogram_params': HISTOGRAM_PARAMS,
'channel_args': [],
},
'server_config': {
'server_type': server_type,
'security_params': _get_secargs(secure),
'async_server_threads': async_server_threads,
'server_processes': server_processes,
'threads_per_cq': server_threads_per_cq,
'channel_args': [],
},
'warmup_seconds': warmup_seconds,
'benchmark_seconds': BENCHMARK_SECONDS,
'CATEGORIES': list(DEFAULT_CATEGORIES),
'EXCLUDED_POLL_ENGINES': [],
}
if resource_quota_size:
scenario['server_config']['resource_quota_size'] = resource_quota_size
if use_generic_payload:
if server_type != 'ASYNC_GENERIC_SERVER':
raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
scenario['server_config']['payload_config'] = _payload_type(
use_generic_payload, req_size, resp_size)
scenario['client_config']['payload_config'] = _payload_type(
use_generic_payload, req_size, resp_size)
    # Note: an optimization target of 'throughput' reportedly does not work well
    # with the epoll1 polling engine; ping-pong and offered-load scenarios below
    # switch the target to 'latency' instead.
optimization_target = 'throughput'
if unconstrained_client:
outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[
unconstrained_client]
# clamp buffer usage to something reasonable (16 gig for now)
MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
            outstanding_calls = max(1,
                                    MAX_MEMORY_USE // max(req_size, resp_size))
wide = channels if channels is not None else WIDE
deep = int(math.ceil(1.0 * outstanding_calls / wide))
scenario[
'num_clients'] = num_clients if num_clients is not None else 0 # use as many clients as available.
scenario['client_config']['outstanding_rpcs_per_channel'] = deep
scenario['client_config']['client_channels'] = wide
scenario['client_config']['async_client_threads'] = 0
if offered_load is not None:
optimization_target = 'latency'
else:
scenario['client_config']['outstanding_rpcs_per_channel'] = 1
scenario['client_config']['client_channels'] = 1
scenario['client_config']['async_client_threads'] = 1
optimization_target = 'latency'
scenario['client_config']['load_params'] = _load_params(offered_load)
optimization_channel_arg = {
'name': 'grpc.optimization_target',
'str_value': optimization_target
}
scenario['client_config']['channel_args'].append(optimization_channel_arg)
scenario['server_config']['channel_args'].append(optimization_channel_arg)
if minimal_stack:
_add_channel_arg(scenario['client_config'], 'grpc.minimal_stack', 1)
_add_channel_arg(scenario['server_config'], 'grpc.minimal_stack', 1)
if messages_per_stream:
scenario['client_config']['messages_per_stream'] = messages_per_stream
if client_language:
# the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
scenario['CLIENT_LANGUAGE'] = client_language
if server_language:
# the SERVER_LANGUAGE field is recognized by run_performance_tests.py
scenario['SERVER_LANGUAGE'] = server_language
if categories:
scenario['CATEGORIES'] = categories
if excluded_poll_engines:
# The polling engines for which this scenario is excluded
scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
return scenario
class Language(object):
@property
def safename(self):
return str(self)
class CXXLanguage(Language):
@property
def safename(self):
return 'cxx'
def worker_cmdline(self):
return ['cmake/build/qps_worker']
def worker_port_offset(self):
return 0
def scenarios(self):
# TODO(ctiller): add 70% load latency test
yield _ping_pong_scenario(
'cpp_protobuf_async_unary_1channel_100rpcs_1MB',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
req_size=1024 * 1024,
resp_size=1024 * 1024,
unconstrained_client='async',
outstanding=100,
channels=1,
num_clients=1,
secure=False,
categories=[INPROC] + [SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_streaming_from_client_1channel_1MB',
rpc_type='STREAMING_FROM_CLIENT',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
req_size=1024 * 1024,
resp_size=1024 * 1024,
unconstrained_client='async',
outstanding=1,
channels=1,
num_clients=1,
secure=False,
categories=[SMOKETEST] + [INPROC] + [SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
req_size=300,
resp_size=50,
unconstrained_client='async',
outstanding=30000,
channels=300,
offered_load=37500,
secure=False,
async_server_threads=16,
server_threads_per_cq=1,
categories=[SCALABLE])
for secure in [True, False]:
secstr = 'secure' if secure else 'insecure'
smoketest_categories = ([SMOKETEST] if secure else [])
inproc_categories = ([INPROC] if not secure else [])
yield _ping_pong_scenario(
'cpp_generic_async_streaming_ping_pong_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
use_generic_payload=True,
async_server_threads=1,
secure=secure,
categories=smoketest_categories + inproc_categories +
[SCALABLE])
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async',
use_generic_payload=True,
secure=secure,
minimal_stack=not secure,
categories=smoketest_categories + inproc_categories +
[SCALABLE])
for mps in geometric_progression(1, 20, 10):
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
(mps, secstr),
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async',
use_generic_payload=True,
secure=secure,
messages_per_stream=mps,
minimal_stack=not secure,
categories=smoketest_categories + inproc_categories +
[SCALABLE])
for mps in geometric_progression(1, 200, math.sqrt(10)):
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
(mps, secstr),
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async',
use_generic_payload=True,
secure=secure,
messages_per_stream=mps,
minimal_stack=not secure,
categories=[SWEEP])
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
rpc_type='STREAMING',
req_size=1024 * 1024,
resp_size=1024 * 1024,
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async',
use_generic_payload=True,
secure=secure,
minimal_stack=not secure,
categories=inproc_categories + [SCALABLE],
channels=1,
outstanding=100)
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' %
secstr,
rpc_type='STREAMING',
req_size=64 * 1024,
resp_size=64 * 1024,
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async',
use_generic_payload=True,
secure=secure,
minimal_stack=not secure,
categories=inproc_categories + [SCALABLE])
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async-limited',
use_generic_payload=True,
secure=secure,
client_threads_per_cq=1000000,
server_threads_per_cq=1000000,
categories=smoketest_categories + inproc_categories +
[SCALABLE])
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s'
% secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async',
use_generic_payload=True,
secure=secure,
client_threads_per_cq=2,
server_threads_per_cq=2,
categories=inproc_categories + [SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' %
secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async-limited',
secure=secure,
client_threads_per_cq=1000000,
server_threads_per_cq=1000000,
categories=inproc_categories + [SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s'
% secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
secure=secure,
client_threads_per_cq=2,
server_threads_per_cq=2,
categories=inproc_categories + [SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async-limited',
secure=secure,
client_threads_per_cq=1000000,
server_threads_per_cq=1000000,
categories=smoketest_categories + inproc_categories +
[SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' %
secstr,
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
secure=secure,
client_threads_per_cq=2,
server_threads_per_cq=2,
categories=inproc_categories + [SCALABLE])
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async-limited',
use_generic_payload=True,
async_server_threads=1,
minimal_stack=not secure,
secure=secure)
yield _ping_pong_scenario(
'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s'
% (secstr),
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='SYNC_SERVER',
unconstrained_client='async',
secure=secure,
minimal_stack=not secure,
categories=smoketest_categories + inproc_categories +
[SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s'
% (secstr),
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
channels=1,
outstanding=64,
req_size=128,
resp_size=8 * 1024 * 1024,
secure=secure,
minimal_stack=not secure,
categories=inproc_categories + [SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s'
% secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='SYNC_SERVER',
unconstrained_client='async',
secure=secure,
minimal_stack=not secure,
categories=smoketest_categories + inproc_categories +
[SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_unary_ping_pong_%s_1MB' % secstr,
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
req_size=1024 * 1024,
resp_size=1024 * 1024,
secure=secure,
minimal_stack=not secure,
categories=smoketest_categories + inproc_categories +
[SCALABLE])
for rpc_type in [
'unary', 'streaming', 'streaming_from_client',
'streaming_from_server'
]:
for synchronicity in ['sync', 'async']:
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_ping_pong_%s' %
(synchronicity, rpc_type, secstr),
rpc_type=rpc_type.upper(),
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
async_server_threads=1,
minimal_stack=not secure,
secure=secure)
for size in geometric_progression(1, 1024 * 1024 * 1024 + 1,
8):
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' %
(synchronicity, rpc_type, secstr, size),
rpc_type=rpc_type.upper(),
req_size=size,
resp_size=size,
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
unconstrained_client=synchronicity,
secure=secure,
minimal_stack=not secure,
categories=[SWEEP])
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_qps_unconstrained_%s' %
(synchronicity, rpc_type, secstr),
rpc_type=rpc_type.upper(),
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
unconstrained_client=synchronicity,
secure=secure,
minimal_stack=not secure,
server_threads_per_cq=3,
client_threads_per_cq=3,
categories=inproc_categories + [SCALABLE])
# TODO(vjpai): Re-enable this test. It has a lot of timeouts
# and hasn't yet been conclusively identified as a test failure
# or race in the library
# yield _ping_pong_scenario(
# 'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
# rpc_type=rpc_type.upper(),
# client_type='%s_CLIENT' % synchronicity.upper(),
# server_type='%s_SERVER' % synchronicity.upper(),
# unconstrained_client=synchronicity,
# secure=secure,
# categories=smoketest_categories+[SCALABLE],
# resource_quota_size=500*1024)
if rpc_type == 'streaming':
for mps in geometric_progression(1, 20, 10):
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
% (synchronicity, rpc_type, mps, secstr),
rpc_type=rpc_type.upper(),
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
unconstrained_client=synchronicity,
secure=secure,
messages_per_stream=mps,
minimal_stack=not secure,
categories=inproc_categories + [SCALABLE])
for mps in geometric_progression(1, 200, math.sqrt(10)):
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
% (synchronicity, rpc_type, mps, secstr),
rpc_type=rpc_type.upper(),
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
unconstrained_client=synchronicity,
secure=secure,
messages_per_stream=mps,
minimal_stack=not secure,
categories=[SWEEP])
for channels in geometric_progression(
1, 20000, math.sqrt(10)):
for outstanding in geometric_progression(
1, 200000, math.sqrt(10)):
if synchronicity == 'sync' and outstanding > 1200:
continue
if outstanding < channels:
continue
yield _ping_pong_scenario(
'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding'
% (synchronicity, rpc_type, secstr, channels,
outstanding),
rpc_type=rpc_type.upper(),
client_type='%s_CLIENT' % synchronicity.upper(),
server_type='%s_SERVER' % synchronicity.upper(),
unconstrained_client=synchronicity,
secure=secure,
minimal_stack=not secure,
categories=[SWEEP],
channels=channels,
outstanding=outstanding)
def __str__(self):
return 'c++'
class CSharpLanguage(Language):
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_csharp.sh']
def worker_port_offset(self):
return 100
def scenarios(self):
yield _ping_pong_scenario('csharp_generic_async_streaming_ping_pong',
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
use_generic_payload=True,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'csharp_generic_async_streaming_ping_pong_insecure_1MB',
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
req_size=1024 * 1024,
resp_size=1024 * 1024,
use_generic_payload=True,
secure=False,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'csharp_generic_async_streaming_qps_unconstrained_insecure',
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async',
use_generic_payload=True,
secure=False,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario('csharp_protobuf_async_streaming_ping_pong',
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER')
yield _ping_pong_scenario('csharp_protobuf_async_unary_ping_pong',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'csharp_protobuf_sync_to_async_unary_ping_pong',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER')
yield _ping_pong_scenario(
'csharp_protobuf_async_unary_qps_unconstrained',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'csharp_protobuf_async_streaming_qps_unconstrained',
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
categories=[SCALABLE])
yield _ping_pong_scenario('csharp_to_cpp_protobuf_sync_unary_ping_pong',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
server_language='c++',
async_server_threads=1,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'csharp_to_cpp_protobuf_async_streaming_ping_pong',
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
server_language='c++',
async_server_threads=1)
yield _ping_pong_scenario(
'csharp_to_cpp_protobuf_async_unary_qps_unconstrained',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
server_language='c++',
categories=[SCALABLE])
yield _ping_pong_scenario(
'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='sync',
server_language='c++',
categories=[SCALABLE])
yield _ping_pong_scenario(
'cpp_to_csharp_protobuf_async_unary_qps_unconstrained',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
client_language='c++',
categories=[SCALABLE])
yield _ping_pong_scenario('csharp_protobuf_async_unary_ping_pong_1MB',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
req_size=1024 * 1024,
resp_size=1024 * 1024,
categories=[SMOKETEST, SCALABLE])
def __str__(self):
return 'csharp'
class PythonLanguage(Language):
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_python.sh']
def worker_port_offset(self):
return 500
def scenarios(self):
yield _ping_pong_scenario('python_generic_sync_streaming_ping_pong',
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
use_generic_payload=True,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario('python_protobuf_sync_streaming_ping_pong',
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER')
yield _ping_pong_scenario('python_protobuf_async_unary_ping_pong',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER')
yield _ping_pong_scenario('python_protobuf_sync_unary_ping_pong',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_protobuf_sync_unary_qps_unconstrained',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='sync')
yield _ping_pong_scenario(
'python_protobuf_sync_streaming_qps_unconstrained',
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='sync')
yield _ping_pong_scenario('python_to_cpp_protobuf_sync_unary_ping_pong',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER',
server_language='c++',
async_server_threads=0,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_to_cpp_protobuf_sync_streaming_ping_pong',
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER',
server_language='c++',
async_server_threads=1)
yield _ping_pong_scenario('python_protobuf_sync_unary_ping_pong_1MB',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER',
req_size=1024 * 1024,
resp_size=1024 * 1024,
categories=[SMOKETEST, SCALABLE])
def __str__(self):
return 'python'
class PythonAsyncIOLanguage(Language):
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_python_asyncio.sh']
def worker_port_offset(self):
return 1200
def scenarios(self):
for outstanding in [64, 128, 256, 512]:
for channels in [1, 4]:
yield _ping_pong_scenario(
'python_asyncio_protobuf_async_unary_ping_pong_%dx%d_max' %
(
outstanding,
channels,
),
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
outstanding=outstanding * channels,
channels=channels,
client_processes=0,
server_processes=0,
unconstrained_client='async',
categories=[SCALABLE])
yield _ping_pong_scenario(
'python_asyncio_protobuf_async_unary_ping_pong_%d_1thread' %
outstanding,
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
outstanding=outstanding,
channels=1,
client_processes=1,
server_processes=1,
unconstrained_client='async',
categories=[SCALABLE])
yield _ping_pong_scenario(
'python_asyncio_generic_async_streaming_ping_pong',
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
channels=1,
client_processes=1,
server_processes=1,
use_generic_payload=True,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_asyncio_protobuf_async_streaming_ping_pong',
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
channels=1,
client_processes=1,
server_processes=1,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_asyncio_protobuf_async_unary_ping_pong',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
client_processes=1,
server_processes=1,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_asyncio_protobuf_async_unary_ping_pong',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
channels=1,
client_processes=1,
server_processes=1,
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_asyncio_protobuf_async_unary_qps_unconstrained',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
channels=1,
unconstrained_client='async')
yield _ping_pong_scenario(
'python_asyncio_protobuf_async_streaming_qps_unconstrained',
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
channels=1,
unconstrained_client='async')
yield _ping_pong_scenario(
'python_asyncio_to_cpp_protobuf_async_unary_ping_pong_1thread',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
server_language='c++',
channels=1,
client_processes=1,
unconstrained_client='async',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_asyncio_to_cpp_protobuf_async_unary_ping_pong_max',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
channels=1,
client_processes=0,
server_language='c++',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario(
'python_asyncio_to_cpp_protobuf_sync_streaming_ping_pong_1thread',
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
channels=1,
client_processes=1,
server_processes=1,
unconstrained_client='async',
server_language='c++')
yield _ping_pong_scenario(
'python_asyncio_protobuf_async_unary_ping_pong_1MB',
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
req_size=1024 * 1024,
resp_size=1024 * 1024,
channels=1,
client_processes=1,
server_processes=1,
categories=[SMOKETEST, SCALABLE])
def __str__(self):
return 'python_asyncio'
class RubyLanguage(Language):
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_ruby.sh']
def worker_port_offset(self):
return 300
def scenarios(self):
yield _ping_pong_scenario('ruby_protobuf_sync_streaming_ping_pong',
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario('ruby_protobuf_unary_ping_pong',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
categories=[SMOKETEST, SCALABLE])
yield _ping_pong_scenario('ruby_protobuf_sync_unary_qps_unconstrained',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
unconstrained_client='sync')
yield _ping_pong_scenario(
'ruby_protobuf_sync_streaming_qps_unconstrained',
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
unconstrained_client='sync')
yield _ping_pong_scenario('ruby_to_cpp_protobuf_sync_unary_ping_pong',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
server_language='c++',
async_server_threads=1)
yield _ping_pong_scenario(
'ruby_to_cpp_protobuf_sync_streaming_ping_pong',
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
server_language='c++',
async_server_threads=1)
yield _ping_pong_scenario('ruby_protobuf_unary_ping_pong_1MB',
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
req_size=1024 * 1024,
resp_size=1024 * 1024,
categories=[SMOKETEST, SCALABLE])
def __str__(self):
return 'ruby'
class Php7Language(Language):
def __init__(self, php7_protobuf_c=False):
super().__init__()
self.php7_protobuf_c = php7_protobuf_c
def worker_cmdline(self):
if self.php7_protobuf_c:
return [
'tools/run_tests/performance/run_worker_php.sh',
'--use_protobuf_c_extension'
]
return ['tools/run_tests/performance/run_worker_php.sh']
def worker_port_offset(self):
if self.php7_protobuf_c:
return 900
return 800
def scenarios(self):
php7_extension_mode = 'php7_protobuf_php_extension'
if self.php7_protobuf_c:
php7_extension_mode = 'php7_protobuf_c_extension'
yield _ping_pong_scenario('%s_to_cpp_protobuf_sync_unary_ping_pong' %
php7_extension_mode,
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
server_language='c++',
async_server_threads=1)
yield _ping_pong_scenario(
'%s_to_cpp_protobuf_sync_streaming_ping_pong' % php7_extension_mode,
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
server_language='c++',
async_server_threads=1)
        # TODO(ddyihai): Investigate why QPS is better with async_server_threads=1
        # (CPU usage 340%) than with async_server_threads=0 (CPU usage 490%).
yield _ping_pong_scenario(
'%s_to_cpp_protobuf_sync_unary_qps_unconstrained' %
php7_extension_mode,
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER',
server_language='c++',
outstanding=1,
async_server_threads=1,
unconstrained_client='sync')
yield _ping_pong_scenario(
'%s_to_cpp_protobuf_sync_streaming_qps_unconstrained' %
php7_extension_mode,
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='ASYNC_SERVER',
server_language='c++',
outstanding=1,
async_server_threads=1,
unconstrained_client='sync')
def __str__(self):
if self.php7_protobuf_c:
return 'php7_protobuf_c'
return 'php7'
class JavaLanguage(Language):
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_java.sh']
def worker_port_offset(self):
return 400
def scenarios(self):
for secure in [True, False]:
secstr = 'secure' if secure else 'insecure'
smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
yield _ping_pong_scenario(
'java_generic_async_streaming_ping_pong_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
use_generic_payload=True,
async_server_threads=1,
secure=secure,
warmup_seconds=JAVA_WARMUP_SECONDS,
categories=smoketest_categories)
yield _ping_pong_scenario(
'java_protobuf_async_streaming_ping_pong_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
async_server_threads=1,
secure=secure,
warmup_seconds=JAVA_WARMUP_SECONDS)
yield _ping_pong_scenario('java_protobuf_async_unary_ping_pong_%s' %
secstr,
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
async_server_threads=1,
secure=secure,
warmup_seconds=JAVA_WARMUP_SECONDS,
categories=smoketest_categories)
yield _ping_pong_scenario('java_protobuf_unary_ping_pong_%s' %
secstr,
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
async_server_threads=1,
secure=secure,
warmup_seconds=JAVA_WARMUP_SECONDS)
yield _ping_pong_scenario(
'java_protobuf_async_unary_qps_unconstrained_%s' % secstr,
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
secure=secure,
warmup_seconds=JAVA_WARMUP_SECONDS,
categories=smoketest_categories + [SCALABLE])
yield _ping_pong_scenario(
'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
secure=secure,
warmup_seconds=JAVA_WARMUP_SECONDS,
categories=[SCALABLE])
yield _ping_pong_scenario(
'java_generic_async_streaming_qps_unconstrained_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async',
use_generic_payload=True,
secure=secure,
warmup_seconds=JAVA_WARMUP_SECONDS,
categories=[SCALABLE])
yield _ping_pong_scenario(
'java_generic_async_streaming_qps_one_server_core_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async-limited',
use_generic_payload=True,
async_server_threads=1,
secure=secure,
warmup_seconds=JAVA_WARMUP_SECONDS)
# TODO(jtattermusch): add scenarios java vs C++
def __str__(self):
return 'java'
class GoLanguage(Language):
def worker_cmdline(self):
return ['tools/run_tests/performance/run_worker_go.sh']
def worker_port_offset(self):
return 600
def scenarios(self):
for secure in [True, False]:
secstr = 'secure' if secure else 'insecure'
smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
# ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
            # but that's mostly because of the lack of a better name for the enum value.
yield _ping_pong_scenario('go_generic_sync_streaming_ping_pong_%s' %
secstr,
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
use_generic_payload=True,
async_server_threads=1,
secure=secure,
categories=smoketest_categories)
yield _ping_pong_scenario(
'go_protobuf_sync_streaming_ping_pong_%s' % secstr,
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
async_server_threads=1,
secure=secure)
yield _ping_pong_scenario('go_protobuf_sync_unary_ping_pong_%s' %
secstr,
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
async_server_threads=1,
secure=secure,
categories=smoketest_categories)
# unconstrained_client='async' is intended (client uses goroutines)
yield _ping_pong_scenario(
'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr,
rpc_type='UNARY',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
unconstrained_client='async',
secure=secure,
categories=smoketest_categories + [SCALABLE])
# unconstrained_client='async' is intended (client uses goroutines)
yield _ping_pong_scenario(
'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr,
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='SYNC_SERVER',
unconstrained_client='async',
secure=secure,
categories=[SCALABLE])
# unconstrained_client='async' is intended (client uses goroutines)
# ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
            # but that's mostly because of the lack of a better name for the enum value.
yield _ping_pong_scenario(
'go_generic_sync_streaming_qps_unconstrained_%s' % secstr,
rpc_type='STREAMING',
client_type='SYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async',
use_generic_payload=True,
secure=secure,
categories=[SCALABLE])
# TODO(jtattermusch): add scenarios go vs C++
def __str__(self):
return 'go'
class NodeLanguage(Language):
def __init__(self, node_purejs=False):
super().__init__()
self.node_purejs = node_purejs
def worker_cmdline(self):
fixture = 'native_js' if self.node_purejs else 'native_native'
return [
'tools/run_tests/performance/run_worker_node.sh', fixture,
'--benchmark_impl=grpc'
]
def worker_port_offset(self):
if self.node_purejs:
return 1100
return 1000
def scenarios(self):
node_implementation = 'node_purejs' if self.node_purejs else 'node'
for secure in [True, False]:
secstr = 'secure' if secure else 'insecure'
smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
yield _ping_pong_scenario(
'%s_to_node_generic_async_streaming_ping_pong_%s' %
(node_implementation, secstr),
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
server_language='node',
use_generic_payload=True,
async_server_threads=1,
secure=secure,
categories=smoketest_categories)
yield _ping_pong_scenario(
'%s_to_node_protobuf_async_streaming_ping_pong_%s' %
(node_implementation, secstr),
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
server_language='node',
async_server_threads=1,
secure=secure)
yield _ping_pong_scenario(
'%s_to_node_protobuf_async_unary_ping_pong_%s' %
(node_implementation, secstr),
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
server_language='node',
async_server_threads=1,
secure=secure,
categories=smoketest_categories)
yield _ping_pong_scenario(
'%s_to_node_protobuf_async_unary_qps_unconstrained_%s' %
(node_implementation, secstr),
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
server_language='node',
unconstrained_client='async',
secure=secure,
categories=smoketest_categories + [SCALABLE])
yield _ping_pong_scenario(
'%s_to_node_protobuf_async_streaming_qps_unconstrained_%s' %
(node_implementation, secstr),
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
server_language='node',
unconstrained_client='async',
secure=secure,
categories=[SCALABLE])
yield _ping_pong_scenario(
'%s_to_node_generic_async_streaming_qps_unconstrained_%s' %
(node_implementation, secstr),
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
server_language='node',
unconstrained_client='async',
use_generic_payload=True,
secure=secure,
categories=[SCALABLE])
# TODO(murgatroid99): add scenarios node vs C++
def __str__(self):
if self.node_purejs:
return 'node_purejs'
return 'node'
LANGUAGES = {
'c++': CXXLanguage(),
'csharp': CSharpLanguage(),
'ruby': RubyLanguage(),
'php7': Php7Language(),
'php7_protobuf_c': Php7Language(php7_protobuf_c=True),
'java': JavaLanguage(),
'python': PythonLanguage(),
'python_asyncio': PythonAsyncIOLanguage(),
'go': GoLanguage(),
'node': NodeLanguage(),
'node_purejs': NodeLanguage(node_purejs=True)
}
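# Illustrative use (a sketch; the driver script that actually consumes this map is
# not shown here): each Language instance exposes worker_cmdline(),
# worker_port_offset() and a scenarios() generator of scenario configurations, e.g.
#
#   for scenario in LANGUAGES['go'].scenarios():
#       print(scenario)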
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
try:
from ez_setup import use_setuptools
use_setuptools()
except:
pass
from setuptools import setup
try:
import six
py3 = six.PY3
except:
py3 = sys.version_info[0] >= 3
# metadata
import re
_version_re = re.compile(r'__version__\s*=\s*"(.*)"')
_authors_re = re.compile(r'__authors__\s*=\s*"(.*)"')
_url_re = re.compile(r'__url__\s*=\s*"(.*)"')
for line in open('SPARQLWrapper/__init__.py'):
version_match = _version_re.match(line)
if version_match:
version = version_match.group(1)
authors_match = _authors_re.match(line)
if authors_match:
authors = authors_match.group(1)
url_match = _url_re.match(line)
if url_match:
url = url_match.group(1)
# requirements
with open('requirements.txt', 'r') as f:
_install_requires = [line.rstrip('\n') for line in f]
setup(
name = 'SPARQLWrapper',
version = version,
description = 'SPARQL Endpoint interface to Python',
    long_description = 'This is a wrapper around a SPARQL service. It helps in creating the query URI and, possibly, converting the result into a more manageable format.',
license = 'W3C SOFTWARE NOTICE AND LICENSE',
author = authors,
url = url,
download_url = 'https://github.com/RDFLib/sparqlwrapper/releases',
platforms = ['any'],
packages = ['SPARQLWrapper'],
install_requires = _install_requires,
extras_require = {
'keepalive': ['keepalive>=0.5'],
},
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: W3C License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords = ['python', 'sparql', 'rdf', 'rdflib'],
use_2to3 = True,
project_urls={
'Home': 'https://rdflib.github.io/sparqlwrapper/',
'Documentation': 'https://rdflib.github.io/sparqlwrapper/doc/',
'Source': 'https://github.com/RDFLib/sparqlwrapper',
'Tracker': 'https://github.com/RDFLib/sparqlwrapper/issues',
}
)
|
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Any, Dict, Iterable, Mapping, MutableMapping
from airbyte_cdk.connector import Connector
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import AirbyteCatalog, AirbyteMessage, ConfiguredAirbyteCatalog
class Source(Connector, ABC):
# can be overridden to change an input state
def read_state(self, state_path: str) -> Dict[str, Any]:
if state_path:
state_obj = json.loads(open(state_path, "r").read())
else:
state_obj = {}
state = defaultdict(dict, state_obj)
return state
# can be overridden to change an input catalog
def read_catalog(self, catalog_path: str) -> ConfiguredAirbyteCatalog:
return ConfiguredAirbyteCatalog.parse_obj(self.read_config(catalog_path))
@abstractmethod
def read(
self, logger: AirbyteLogger, config: Mapping[str, Any], catalog: ConfiguredAirbyteCatalog, state: MutableMapping[str, Any] = None
) -> Iterable[AirbyteMessage]:
"""
Returns a generator of the AirbyteMessages generated by reading the source with the given configuration, catalog, and state.
"""
@abstractmethod
def discover(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> AirbyteCatalog:
"""
Returns an AirbyteCatalog representing the available streams and fields in this integration. For example, given valid credentials to a
Postgres database, returns an Airbyte catalog where each postgres table is a stream, and each table column is a field.
"""
|
# Gumowski-Mira Strange Attractor
# http://en.wikipedia.org/wiki/Attractor
# FB - 201012072
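# The map iterated below is the Gumowski-Mira recurrence
#   x_{n+1} = b*y_n + f(x_n)
#   y_{n+1} = -x_n + f(x_{n+1})
# with f(x) = a*x + 2*(1 - a)*x^2 / (1 + x^2); a and b are re-drawn at random
# on every attempt until an attractor covers enough of the image.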
import random
from PIL import Image
imgx = 800
imgy = 600
maxIt = 50000 # number of pixels to draw
# drawing area (xa < xb and ya < yb)
xa = -20.0
xb = 20.0
ya = -20.0
yb = 20.0
def f(x):
return a * x + 2.0 * (1.0 - a) * x * x / (1.0 + x * x)
def gm(x, y):
xnew = b * y + f(x)
y = -x + f(xnew)
x = xnew
return (x, y)
while True:
image = Image.new("L", (imgx, imgy)) # clear the image
a = random.random() * 1.5 - 1.0
b = random.random() * 0.1 + 0.9
x = random.random() * (xb - xa) + xa
y = random.random() * (yb - ya) + ya
pixelCtr = 0
for i in range(maxIt):
(x, y) = gm(x, y)
xi = int((imgx - 1) * (x - xa) / (xb - xa))
yi = int((imgy - 1) * (y - ya) / (yb - ya))
if xi >=0 and xi < imgx and yi >= 0 and yi < imgy:
if image.getpixel((xi, yi)) == 0:
image.putpixel((xi, yi), 255)
pixelCtr += 1
if 100 * pixelCtr / maxIt > 10: # retry until a good attractor is found
break
image.save("strange_attractor.png", "PNG")
|
KWARGS_MANAGER_SECRET = 'FXV/#X=>fMT,pc-wm3BYaxqoZ7VOA+'
class KwargsManager:
def purify(self, **kwargs):
_dict = {}
for key, value in kwargs.items():
if value is not KWARGS_MANAGER_SECRET:
_dict[key] = value
return _dict
def build(self, _dict, data, data_key, dict_key=None):
if dict_key:
_dict[dict_key] = data.get(data_key, KWARGS_MANAGER_SECRET)
else:
_dict[data_key] = data.get(data_key, KWARGS_MANAGER_SECRET)
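# Usage sketch (illustrative, not part of the original module): build() copies
# data[data_key] into _dict, falling back to the module-level sentinel when the
# key is absent, and purify() later drops every entry still equal to the sentinel,
# so optional values that were never supplied do not leak into the final dict.
#
#   km = KwargsManager()
#   kwargs = {}
#   km.build(kwargs, {"name": "demo"}, "name")
#   km.build(kwargs, {"name": "demo"}, "missing_key")
#   km.purify(**kwargs)   # -> {"name": "demo"}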
|
from __future__ import print_function, unicode_literals
import importlib
import os
import sys
from django.apps import apps
from django.db.models.fields import NOT_PROVIDED
from django.utils import datetime_safe, six, timezone
from django.utils.six.moves import input
from .loader import MigrationLoader
class MigrationQuestioner(object):
"""
Gives the autodetector responses to questions it might have.
This base class has a built-in noninteractive mode, but the
interactive subclass is what the command-line arguments will use.
"""
def __init__(self, defaults=None, specified_apps=None, dry_run=None):
self.defaults = defaults or {}
self.specified_apps = specified_apps or set()
self.dry_run = dry_run
def ask_initial(self, app_label):
"Should we create an initial migration for the app?"
# If it was specified on the command line, definitely true
if app_label in self.specified_apps:
return True
# Otherwise, we look to see if it has a migrations module
# without any Python files in it, apart from __init__.py.
# Apps from the new app template will have these; the python
# file check will ensure we skip South ones.
try:
app_config = apps.get_app_config(app_label)
except LookupError: # It's a fake app.
return self.defaults.get("ask_initial", False)
migrations_import_path = MigrationLoader(None, load=False).migrations_module(app_config.label)
if migrations_import_path is None:
# It's an application with migrations disabled.
return self.defaults.get("ask_initial", False)
try:
migrations_module = importlib.import_module(migrations_import_path)
except ImportError:
return self.defaults.get("ask_initial", False)
else:
if hasattr(migrations_module, "__file__"):
filenames = os.listdir(os.path.dirname(migrations_module.__file__))
elif hasattr(migrations_module, "__path__"):
if len(migrations_module.__path__) > 1:
return False
filenames = os.listdir(list(migrations_module.__path__)[0])
return not any(x.endswith(".py") for x in filenames if x != "__init__.py")
def ask_not_null_addition(self, field_name, model_name):
"Adding a NOT NULL field to a model"
# None means quit
return None
def ask_not_null_alteration(self, field_name, model_name):
"Changing a NULL field to NOT NULL"
# None means quit
return None
def ask_rename(self, model_name, old_name, new_name, field_instance):
"Was this field really renamed?"
return self.defaults.get("ask_rename", False)
def ask_rename_model(self, old_model_state, new_model_state):
"Was this model really renamed?"
return self.defaults.get("ask_rename_model", False)
def ask_merge(self, app_label):
"Do you really want to merge these migrations?"
return self.defaults.get("ask_merge", False)
class InteractiveMigrationQuestioner(MigrationQuestioner):
def _boolean_input(self, question, default=None):
result = input("%s " % question)
if not result and default is not None:
return default
while len(result) < 1 or result[0].lower() not in "yn":
result = input("Please answer yes or no: ")
return result[0].lower() == "y"
def _choice_input(self, question, choices):
print(question)
for i, choice in enumerate(choices):
print(" %s) %s" % (i + 1, choice))
result = input("Select an option: ")
while True:
try:
value = int(result)
if 0 < value <= len(choices):
return value
except ValueError:
pass
result = input("Please select a valid option: ")
def _ask_default(self):
print("Please enter the default value now, as valid Python")
print("The datetime and django.utils.timezone modules are available, so you can do e.g. timezone.now")
while True:
if six.PY3:
# Six does not correctly abstract over the fact that
# py3 input returns a unicode string, while py2 raw_input
# returns a bytestring.
code = input(">>> ")
else:
code = input(">>> ").decode(sys.stdin.encoding)
if not code:
print("Please enter some code, or 'exit' (with no quotes) to exit.")
elif code == "exit":
sys.exit(1)
else:
try:
return eval(code, {}, {"datetime": datetime_safe, "timezone": timezone})
except (SyntaxError, NameError) as e:
print("Invalid input: %s" % e)
def ask_not_null_addition(self, field_name, model_name):
"Adding a NOT NULL field to a model"
if not self.dry_run:
choice = self._choice_input(
"You are trying to add a non-nullable field '%s' to %s without a default; "
"we can't do that (the database needs something to populate existing rows).\n"
"Please select a fix:" % (field_name, model_name),
[
("Provide a one-off default now (will be set on all existing "
"rows with a null value for this column)"),
"Quit, and let me add a default in models.py",
]
)
if choice == 2:
sys.exit(3)
else:
return self._ask_default()
return None
def ask_not_null_alteration(self, field_name, model_name):
"Changing a NULL field to NOT NULL"
if not self.dry_run:
choice = self._choice_input(
"You are trying to change the nullable field '%s' on %s to non-nullable "
"without a default; we can't do that (the database needs something to "
"populate existing rows).\n"
"Please select a fix:" % (field_name, model_name),
[
("Provide a one-off default now (will be set on all existing "
"rows with a null value for this column)"),
("Ignore for now, and let me handle existing rows with NULL myself "
"(e.g. because you added a RunPython or RunSQL operation to handle "
"NULL values in a previous data migration)"),
"Quit, and let me add a default in models.py",
]
)
if choice == 2:
return NOT_PROVIDED
elif choice == 3:
sys.exit(3)
else:
return self._ask_default()
return None
def ask_rename(self, model_name, old_name, new_name, field_instance):
"Was this field really renamed?"
msg = "Did you rename %s.%s to %s.%s (a %s)? [y/N]"
return self._boolean_input(msg % (model_name, old_name, model_name, new_name,
field_instance.__class__.__name__), False)
def ask_rename_model(self, old_model_state, new_model_state):
"Was this model really renamed?"
msg = "Did you rename the %s.%s model to %s? [y/N]"
return self._boolean_input(msg % (old_model_state.app_label, old_model_state.name,
new_model_state.name), False)
def ask_merge(self, app_label):
return self._boolean_input(
"\nMerging will only work if the operations printed above do not conflict\n" +
"with each other (working on different fields or models)\n" +
"Do you want to merge these migration branches? [y/N]",
False,
)
class NonInteractiveMigrationQuestioner(MigrationQuestioner):
def ask_not_null_addition(self, field_name, model_name):
# We can't ask the user, so act like the user aborted.
sys.exit(3)
def ask_not_null_alteration(self, field_name, model_name):
# We can't ask the user, so set as not provided.
return NOT_PROVIDED
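# Usage sketch (illustrative): the defaults dict controls the non-interactive
# answers, e.g. a questioner built with {"ask_rename": True} confirms field
# renames without prompting (model and field names below are hypothetical):
#
#   questioner = MigrationQuestioner(defaults={"ask_rename": True})
#   questioner.ask_rename("Author", "name", "full_name", None)  # -> True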
|
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy import stats
from src.tools.poi import select_poi
from src.data.ascad import ASCAD, TraceCategory
def statistical_moment(traces: np.array, moment=1):
"""
    Retrieves a statistical moment of a given order for a given set of traces.
    The moment order maps to the mean (1), variance (2), skewness (3) and
    kurtosis (4); other orders raise an exception.
"""
if moment == 1:
return traces.mean(axis=0)
if moment == 2:
return traces.var(axis=0)
if moment == 3:
return stats.skew(traces, axis=0)
if moment == 4:
return stats.kurtosis(traces, axis=0)
raise Exception("Moment not implemented.")
def calc_moment_difference(left_1, left_2, right, moment=1):
"""
Calculates the difference in statistical moment between power traces with
equal keys and power traces with different keys.
"""
def smt(a):
return statistical_moment(a, moment)
dist_neq = abs(smt(left_1) - smt(right))
dist_eq = abs(smt(left_1) - smt(left_2))
return dist_neq - dist_eq
def random_slice(traces, num_slices, even_slices=True):
"""
Randomly slices up a given NumPy array.
"""
total = len(traces)
if even_slices:
total -= total % num_slices
indexes = list(range(total))
np.random.shuffle(indexes)
ixs_sliced = np.array_split(indexes, num_slices)
return np.array([traces[s] for s in ixs_sliced])
def get_moment_differences(tc: TraceCategory, trace_size=ASCAD.trace_len, max_moment=3):
"""
Calculates the difference in statistical moment between power traces with
equal keys and power traces with different keys, up to a given order of
statistical moment.
"""
mdiff = np.zeros((max_moment + 1, trace_size))
for stat_moment in range(1, max_moment + 1):
low, high = tc.filter_by_hw(False), tc.filter_by_hw(True)
low_1, low_2 = random_slice(low, 2)
        mdiff[stat_moment] = calc_moment_difference(low_1, low_2, high, stat_moment)
return mdiff
def plot_poi(mdiff, moment):
"""
Plots moment difference with points of interest.
"""
fig, ax = plt.subplots()
title = f"Difference in statistical moment ({moment}) between traces with" \
f"equal and\ntraces with different keys, Points of Interest are highlighted.\n"
sns.lineplot(data=mdiff[moment]).set_title(title)
for a, b in select_poi(mdiff[moment]):
ax.axvspan(a, b, alpha=0.3, color=sns.color_palette()[3])
plt.show()
def plot_poi_trace(trace, poi):
"""
Plots power trace with points of interest.
"""
fig, ax = plt.subplots()
title = f"Some power trace, Points of Interest from\nstatistical moment (1) are highlighted.\n"
sns.lineplot(data=trace, palette=[sns.color_palette()[4]]).set_title(title)
for a, b in poi:
ax.axvspan(a, b, alpha=0.3, color=sns.color_palette()[3])
plt.show()
if __name__ == '__main__':
ascad = ASCAD()
moment_diff = get_moment_differences(ascad.default.profile)
plot_poi(moment_diff, 1)
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Arrayfire(CMakePackage, CudaPackage):
"""ArrayFire is a high performance software library for parallel computing
with an easy-to-use API. Its array based function set makes parallel
programming more accessible."""
homepage = "http://arrayfire.org/docs/index.htm"
git = "https://github.com/arrayfire/arrayfire.git"
version('master', submodules=True)
version('3.7.3', submodules=True, tag='v3.7.3')
version('3.7.2', submodules=True, tag='v3.7.2')
version('3.7.0', submodules=True, tag='v3.7.0')
variant('cuda', default=False, description='Enable Cuda backend')
variant('forge', default=False, description='Enable graphics library')
variant('opencl', default=False, description='Enable OpenCL backend')
depends_on('boost@1.65:')
depends_on('fftw-api@3:')
depends_on('blas')
depends_on('cuda@7.5:', when='+cuda')
depends_on('cudnn', when='+cuda')
depends_on('opencl +icd', when='+opencl')
# TODO add more opencl backends:
# currently only Cuda backend is enabled
# https://github.com/arrayfire/arrayfire/wiki/Build-Instructions-for-Linux#opencl-backend-dependencies
depends_on('fontconfig', when='+forge')
depends_on('glfw@3.1.4:', when='+forge')
@property
def libs(self):
query_parameters = self.spec.last_query.extra_parameters
libraries = []
if 'cpu' in query_parameters:
libraries.append('libafcpu')
if 'cuda' in query_parameters and '+cuda' in self.spec:
libraries.append('libafcuda')
if 'opencl' in query_parameters and '+opencl' in self.spec:
libraries.append('libafopencl')
if not query_parameters or 'unified' in query_parameters:
libraries.append('libaf')
return find_libraries(libraries, root=self.prefix, recursive=True)
def cmake_args(self):
args = []
args.extend([
self.define_from_variant('AF_BUILD_CUDA', 'cuda'),
self.define_from_variant('AF_BUILD_FORGE', 'forge'),
self.define_from_variant('AF_BUILD_OPENCL', 'opencl'),
])
if '^mkl' in self.spec:
args.append('-DUSE_CPU_MKL=ON')
if '%intel' not in self.spec:
args.append('-DMKL_THREAD_LAYER=GNU OpenMP')
return args
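# Example spec (hypothetical, for illustration only): the variants above map
# directly onto CMake options via define_from_variant, e.g.
#   spack install arrayfire@3.7.3 +cuda +forge
# enables AF_BUILD_CUDA and AF_BUILD_FORGE.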
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import tensorflow as tf
from fedopt_guide.stackoverflow_transformer import centralized_main
class CentralizedMainTest(tf.test.TestCase):
def test_run_centralized(self):
num_epochs = 1
batch_size = 16
root_output_dir = self.create_tempdir()
exp_name = 'test_run_centralized'
centralized_main.run_centralized(
tf.keras.optimizers.SGD(learning_rate=0.01),
num_epochs,
batch_size,
vocab_size=10,
dim_embed=2,
dim_model=2,
dim_hidden=2,
num_heads=1,
num_layers=1,
experiment_name=exp_name,
root_output_dir=root_output_dir,
max_batches=100)
self.assertTrue(tf.io.gfile.exists(root_output_dir))
log_dir = os.path.join(root_output_dir, 'logdir', exp_name)
train_log_dir = os.path.join(log_dir, 'train')
validation_log_dir = os.path.join(log_dir, 'validation')
self.assertTrue(tf.io.gfile.exists(log_dir))
self.assertTrue(tf.io.gfile.exists(train_log_dir))
self.assertTrue(tf.io.gfile.exists(validation_log_dir))
results_dir = os.path.join(root_output_dir, 'results', exp_name)
self.assertTrue(tf.io.gfile.exists(results_dir))
metrics_file = os.path.join(results_dir, 'metric_results.csv')
self.assertTrue(tf.io.gfile.exists(metrics_file))
metrics_csv = pd.read_csv(metrics_file)
self.assertLen(
metrics_csv.index,
num_epochs,
msg='The output metrics CSV should have {} rows, equal to the number of'
'training epochs.'.format(num_epochs))
self.assertIn(
'loss',
metrics_csv.columns,
msg='The output metrics CSV should have a column "loss" if training is'
'successful.')
self.assertIn(
'val_loss',
metrics_csv.columns,
msg='The output metrics CSV should have a column "val_loss" if '
'validation metric computation is successful.')
if __name__ == '__main__':
tf.test.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import unittest
import k3modutil
import k3ut
dd = k3ut.dd
class TestModutil(unittest.TestCase):
def setUp(self):
sys.path.append(os.path.dirname(__file__))
module_tree = [
'root0',
'root0.mod0',
'root0.mod0.mod00',
'root0.mod0.mod01',
'root0.mod1',
'root0.mod1.mod10',
'root0.mod2',
'root1',
'root2',
]
for module in module_tree:
__import__(module)
def tearDown(self):
sys.path.remove(os.path.dirname(__file__))
def test_submodules(self):
test_cases = [
(sys.modules['root0'], {
'mod0': sys.modules['root0.mod0'],
'mod1': sys.modules['root0.mod1'],
'mod2': sys.modules['root0.mod2'], }, None),
(sys.modules['root1'], {}, None),
(sys.modules['root2'], None, None),
({}, None, AttributeError),
]
for root, rst_expected, error in test_cases:
dd('case: ', root, rst_expected, error)
try:
rst = k3modutil.submodules(root)
except Exception as e:
self.assertEqual(type(e), error)
else:
dd('rst: ', rst)
self.assertEqual(rst, rst_expected)
def test_submodule_tree(self):
test_cases = [
( sys.modules['root0'],
{
'mod0': {'module': sys.modules['root0.mod0'],
'children': {
'mod00': {'module': sys.modules['root0.mod0.mod00'],
'children': {},
},
'mod01': {'module': sys.modules['root0.mod0.mod01'],
'children': None,
},
},
},
'mod1': {'module': sys.modules['root0.mod1'],
'children': {
'mod10': {'module': sys.modules['root0.mod1.mod10'],
'children': None,
},
},
},
'mod2': {'module': sys.modules['root0.mod2'],
'children': None,
},
},
None, ),
( sys.modules['root1'],
{},
None, ),
( sys.modules['root2'],
None,
None, ),
( {},
None,
AttributeError,),
]
for root, rst_expected, error in test_cases:
dd('case: ', root, rst_expected, error)
try:
rst = k3modutil.submodule_tree(root)
except Exception as e:
self.assertEqual(type(e), error)
else:
dd('rst: ', rst)
self.assertEqual(rst, rst_expected)
def test_submodule_leaf_tree(self):
test_cases = [
( sys.modules['root0'],
{
'mod0': { 'mod00': {},
'mod01':sys.modules['root0.mod0.mod01'],
},
'mod1': { 'mod10': sys.modules['root0.mod1.mod10'], },
'mod2': sys.modules['root0.mod2'],
},
None, ),
( sys.modules['root1'],
{},
None, ),
( sys.modules['root2'],
None,
None, ),
( {},
None,
AttributeError, ),
]
for root, rst_expected, error in test_cases:
dd('case: ', root, rst_expected, error)
try:
rst = k3modutil.submodule_leaf_tree(root)
except Exception as e:
self.assertEqual(type(e), error)
else:
dd('rst: ', rst)
self.assertEqual(rst, rst_expected)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Fibonacci sequence calculation
def fbi(n):
if n == 1 or n == 2:
return 1
return fbi(n-1) + fbi(n-2)
n = int(input())
print(fbi(n))
|
class DirectMeta(type):
def __init__(cls, arg1, arg2):
print a<caret>rg1
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
# Job platform (JOB) task status reference table
TASK_RESULT = [
    (0, 'status unknown'),
    (1, 'not executed'),
    (2, 'running'),
    (3, 'succeeded'),
    (4, 'failed'),
    (5, 'skipped'),
    (6, 'error ignored'),
    (7, 'waiting for user'),
    (8, 'ended manually'),
    (9, 'abnormal status'),
    (10, 'step force-termination in progress'),
    (11, 'step force-termination succeeded'),
    (12, 'step force-termination failed'),
    (-1, 'API call failed'),
]
"""
import traceback
import re
from functools import partial
from django.utils.translation import ugettext_lazy as _
from pipeline.core.flow import StaticIntervalGenerator
from pipeline.core.flow.activity import Service
from pipeline.core.flow.io import (
StringItemSchema,
IntItemSchema,
)
from env import JOB_LOG_VAR_SEARCH_CUSTOM_PATTERNS
from gcloud.conf import settings
from gcloud.utils.handlers import handle_api_error
from pipeline_plugins.components.utils.common import batch_execute_func
# Job status codes: 1. not executed; 2. running; 3. succeeded; 4. failed; 5. skipped; 6. error ignored; 7. waiting for user; 8. ended manually;
# 9. abnormal status; 10. step force-termination in progress; 11. step force-termination succeeded; 12. step force-termination failed
JOB_SUCCESS = {3}
JOB_VAR_TYPE_IP = 2
LOG_VAR_SEARCH_CONFIGS = [{"re": r"<SOPS_VAR>(.+?)</SOPS_VAR>", "kv_sep": ":"}]
for custom_patterns in JOB_LOG_VAR_SEARCH_CUSTOM_PATTERNS:
LOG_VAR_SEARCH_CONFIGS.append(custom_patterns)
__group_name__ = _("作业平台(JOB)")
get_client_by_user = settings.ESB_GET_CLIENT_BY_USER
job_handle_api_error = partial(handle_api_error, __group_name__)
def get_sops_var_dict_from_log_text(log_text, service_logger):
"""
    Extract global variables from the log text.
    :param service_logger:
    :param log_text: log text, for example:
    "<SOPS_VAR>key1:value1</SOPS_VAR>\ngsectl\n-rwxr-xr-x 1 root<SOPS_VAR>key2:value2</SOPS_VAR>\n"
    or log text that has already been HTML-escaped:
    &lt;SOPS_VAR&gt;key2:value2&lt;/SOPS_VAR&gt;
:return:
{"key1": "value1", "key2": "value2"}
"""
sops_var_dict = {}
    # Match line by line so that the line containing each global variable can be logged
service_logger.info("search log var with config: {}".format(LOG_VAR_SEARCH_CONFIGS))
for index, log_line in enumerate(log_text.splitlines(), 1):
for var_search_config in LOG_VAR_SEARCH_CONFIGS:
reg = var_search_config["re"]
excape_reg = reg.replace("<", "<").replace(">", ">")
kv_sep = var_search_config["kv_sep"]
sops_key_val_list = re.findall(reg, log_line)
            sops_key_val_list.extend(re.findall(escape_reg, log_line))
if len(sops_key_val_list) == 0:
continue
for sops_key_val in sops_key_val_list:
if kv_sep not in sops_key_val:
continue
sops_key, sops_val = sops_key_val.split(kv_sep, 1)
                # Require the variable name to be non-empty
if len(sops_key) == 0:
continue
sops_var_dict.update({sops_key: sops_val})
service_logger.info(
_("[{group}]提取日志中全局变量,匹配行[{index}]:[{line}]").format(group=__group_name__, index=index, line=log_line)
)
return sops_var_dict
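# Illustrative call (a sketch only; it assumes a configured Django environment for
# the lazy translations, and any object with an .info() method, e.g.
# logging.getLogger(__name__), can stand in for the component logger):
#
#   get_sops_var_dict_from_log_text(
#       "<SOPS_VAR>key1:value1</SOPS_VAR>\nother output",
#       logging.getLogger(__name__),
#   )  # -> {"key1": "value1"}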
def get_job_sops_var_dict(client, service_logger, job_instance_id, bk_biz_id):
"""
    Parse job logs: by default, take the first entry of ip_logs for each step/node.
    :param client:
    :param service_logger: component logger object
    :param job_instance_id: job instance id
    :param bk_biz_id: business ID
    Example of the job_logs obtained:
[
{
"status": 3,
"step_results": [
{
"tag": "",
"ip_logs": [
{
"total_time": 0.363,
"ip": "1.1.1.1",
"start_time": "2020-06-15 17:23:11 +0800",
"log_content": "<SOPS_VAR>key1:value1</SOPS_VAR>\ngsectl\n-rwxr-xr-x 1",
"exit_code": 0,
"bk_cloud_id": 0,
"retry_count": 0,
"end_time": "2020-06-15 17:23:11 +0800",
"error_code": 0
},
],
"ip_status": 9
}
],
"is_finished": true,
"step_instance_id": 12321,
"name": "查看文件"
},
]
:return:
- success { "result": True, "data": {"key1": "value1"}}
- fail { "result": False, "message": message}
"""
get_job_instance_status_kwargs = {
"job_instance_id": job_instance_id,
"bk_biz_id": bk_biz_id,
"return_ip_result": True,
}
get_job_instance_status_return = client.jobv3.get_job_instance_status(get_job_instance_status_kwargs)
if not get_job_instance_status_return["result"]:
message = handle_api_error(
__group_name__,
"jobv3.get_job_instance_status",
get_job_instance_status_kwargs,
get_job_instance_status_return,
)
service_logger.warning(message)
return {"result": False, "message": message}
    # Query the job execution log in a loop for each step's IP (there may be several)
log_list = []
for step_instance in get_job_instance_status_return["data"]["step_instance_list"]:
if "step_ip_result_list" not in step_instance:
continue
        # To keep the query time down, only record the log of a single IP per step
if step_instance["step_ip_result_list"]:
step_ip_result = step_instance["step_ip_result_list"][0]
get_job_instance_ip_log_kwargs = {
"job_instance_id": job_instance_id,
"bk_biz_id": bk_biz_id,
"step_instance_id": step_instance["step_instance_id"],
"bk_cloud_id": step_ip_result["bk_cloud_id"],
"ip": step_ip_result["ip"],
}
get_job_instance_ip_log_kwargs_return = client.jobv3.get_job_instance_ip_log(get_job_instance_ip_log_kwargs)
if not get_job_instance_ip_log_kwargs_return["result"]:
message = handle_api_error(
__group_name__,
"jobv3.get_job_instance_ip_log_kwargs",
get_job_instance_ip_log_kwargs,
get_job_instance_ip_log_kwargs_return,
)
service_logger.warning(message)
return {"result": False, "message": message}
log_content = get_job_instance_ip_log_kwargs_return["data"]["log_content"]
if log_content:
log_list.append(str(log_content))
log_text = "\n".join(log_list)
return {"result": True, "data": get_sops_var_dict_from_log_text(log_text, service_logger)}
class JobService(Service):
__need_schedule__ = True
reload_outputs = True
need_get_sops_var = False
def execute(self, data, parent_data):
pass
def schedule(self, data, parent_data, callback_data=None):
try:
job_instance_id = callback_data.get("job_instance_id", None)
status = callback_data.get("status", None)
except Exception as e:
err_msg = "invalid callback_data: {}, err: {}"
self.logger.error(err_msg.format(callback_data, traceback.format_exc()))
data.outputs.ex_data = err_msg.format(callback_data, e)
return False
if not job_instance_id or not status:
data.outputs.ex_data = "invalid callback_data, job_instance_id: %s, status: %s" % (job_instance_id, status)
self.finish_schedule()
return False
if status in JOB_SUCCESS:
if self.reload_outputs:
client = data.outputs.client
                # Reload global variables
get_var_kwargs = {
"bk_biz_id": data.get_one_of_inputs("biz_cc_id", parent_data.inputs.biz_cc_id),
"job_instance_id": job_instance_id,
}
global_var_result = client.job.get_job_instance_global_var_value(get_var_kwargs)
self.logger.info("get_job_instance_global_var_value return: {}".format(global_var_result))
if not global_var_result["result"]:
message = job_handle_api_error(
"job.get_job_instance_global_var_value", get_var_kwargs, global_var_result,
)
self.logger.error(message)
data.outputs.ex_data = message
self.finish_schedule()
return False
global_var_list = global_var_result["data"].get("job_instance_var_values", [])
if global_var_list:
for global_var in global_var_list[-1]["step_instance_var_values"]:
if global_var["category"] != JOB_VAR_TYPE_IP:
data.set_outputs(global_var["name"], global_var["value"])
            # Services that do not need to extract global variables return directly
if not self.need_get_sops_var:
self.finish_schedule()
return True
get_job_sops_var_dict_return = get_job_sops_var_dict(
data.outputs.client,
self.logger,
job_instance_id,
data.get_one_of_inputs("biz_cc_id", parent_data.inputs.biz_cc_id),
)
if not get_job_sops_var_dict_return["result"]:
self.logger.warning(
_("{group}.{job_service_name}: 提取日志失败,{message}").format(
group=__group_name__,
job_service_name=self.__class__.__name__,
message=get_job_sops_var_dict_return["message"],
)
)
data.set_outputs("log_outputs", {})
self.finish_schedule()
return True
log_outputs = get_job_sops_var_dict_return["data"]
self.logger.info(
_("{group}.{job_service_name}:输出日志提取变量为:{log_outputs}").format(
group=__group_name__, job_service_name=self.__class__.__name__, log_outputs=log_outputs
)
)
data.set_outputs("log_outputs", log_outputs)
self.finish_schedule()
return True
else:
data.set_outputs(
"ex_data",
{
"exception_msg": _("任务执行失败,<a href='{job_inst_url}' target='_blank'>前往作业平台(JOB)查看详情</a>").format(
job_inst_url=data.outputs.job_inst_url
),
"task_inst_id": job_instance_id,
"show_ip_log": True,
},
)
self.finish_schedule()
return False
def outputs_format(self):
return [
self.OutputItem(
name=_("JOB任务ID"),
key="job_inst_id",
type="int",
schema=IntItemSchema(description=_("提交的任务在 JOB 平台的实例 ID")),
),
self.OutputItem(
name=_("JOB任务链接"),
key="job_inst_url",
type="string",
schema=StringItemSchema(description=_("提交的任务在 JOB 平台的 URL")),
),
]
class JobScheduleService(JobService):
__need_schedule__ = True
interval = StaticIntervalGenerator(5)
def schedule(self, data, parent_data, callback_data=None):
if hasattr(data.outputs, "requests_error") and data.outputs.requests_error:
data.outputs.ex_data = "{}\n Get Result Error:\n".format(data.outputs.requests_error)
else:
data.outputs.ex_data = ""
params_list = [
{"bk_biz_id": data.inputs.biz_cc_id, "job_instance_id": job_id}
for job_id in data.outputs.job_id_of_batch_execute
]
client = get_client_by_user(parent_data.inputs.executor)
batch_result_list = batch_execute_func(client.job.get_job_instance_log, params_list, interval_enabled=True)
        # Reset the job_ids to query
data.outputs.job_id_of_batch_execute = []
        # Parse the query results
running_task_list = []
for job_result in batch_result_list:
result = job_result["result"]
job_id_str = job_result["params"]["job_instance_id"]
job_urls = [url for url in data.outputs.job_inst_url if str(job_id_str) in url]
job_detail_url = job_urls[0] if job_urls else ""
if result["result"]:
log_content = "{}\n".format(result["data"][0]["step_results"][0]["ip_logs"][0]["log_content"])
job_status = result["data"][0]["status"]
                # success status
if job_status == 3:
data.outputs.success_count += 1
                # failure status
elif job_status > 3:
data.outputs.ex_data += (
"任务执行失败,<a href='{}' target='_blank'>前往作业平台(JOB)查看详情</a>"
"\n错误信息:{}\n".format(job_detail_url, log_content)
)
else:
running_task_list.append(job_id_str)
else:
data.outputs.ex_data += "任务执行失败,<a href='{}' target='_blank'>前往作业平台(JOB)查看详情</a>\n".format(
job_detail_url
)
        # Tasks that still need to be polled
data.outputs.job_id_of_batch_execute = running_task_list
        # Finish scheduling
if not data.outputs.job_id_of_batch_execute:
            # No error message
if not data.outputs.ex_data:
del data.outputs.ex_data
self.finish_schedule()
return data.outputs.final_res and data.outputs.success_count == data.outputs.request_success_count
|
from setuptools import setup
setup(
name = 'ftpknocker',
packages = ['ftpknocker'],
version = '1.1.1',
license = 'MIT',
description = 'ftpknocker is a multi-threaded scanner for finding anonymous FTP servers',
author = 'Kevin Kennell',
author_email = 'kevin@kennell.de',
install_requires=[
'click',
'netaddr'
],
url = 'https://github.com/kennell/ftpknocker',
keywords = ['ftp', 'security'],
classifiers = [],
entry_points={
'console_scripts': [
'ftpknocker = ftpknocker.cli:main'
]
}
)
|
""" smoke.py: smoke tests for JS9, calling much of the public API """
import time
import sys
import json
import pyjs9
from astropy.io import fits
from smokesubs import *
def fitsioTest(j, file):
"""
test FITS IO routines
"""
tfits = "foo.fits"
hdul = fits.open(file)
hdul.info()
displayMessage(j, "j.SetFITS(hdul, %s)" % tfits)
j.SetFITS(hdul, tfits)
waitStatus(j)
displayMessage(j, 'j.SetColormap("cool")')
j.SetColormap("cool")
sleep(2)
def pixTest(j, file=None):
"""
pixel conversion
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"log", "colormap": "heat"}')
displayMessage(j, 'j.GetImageData()')
imdata = j.GetImageData(False)
displayMessage(j,
" id: %s type: %s width: %d height: %d bitpix: %d"
% (imdata["id"], imdata["imtab"],
imdata["width"], imdata["height"],
imdata["bitpix"]))
displayMessage(j,
" CRPIX: %f %f: CRVAL: %f %f"
% (imdata["header"]["CRPIX1"], imdata["header"]["CRPIX2"],
imdata["header"]["CRVAL1"], imdata["header"]["CRVAL2"]))
displayMessage(j, "j.WCSToPix(CRVAL1, CRVAL2)")
obj = j.WCSToPix(imdata["header"]["CRVAL1"], imdata["header"]["CRVAL2"])
abs1 = abs(obj["x"] - imdata["header"]["CRPIX1"])
abs2 = abs(obj["y"] - imdata["header"]["CRPIX2"])
if abs1 < 1 and abs2 < 1:
displayMessage(j, " %f %f" % (obj["x"], obj["y"]))
else:
raise ValueError("wrong WCSToPix")
displayMessage(j, "j.ImageToDisplayPos(obj.x, obj.y)")
dpos = j.ImageToDisplayPos({"x": obj["x"], "y": obj["y"]})
displayMessage(j, " %f %f" % (dpos["x"], dpos["y"]))
displayMessage(j, "j.DisplayToImagePos(dpos.x, dpos.y)")
ipos = j.DisplayToImagePos({"x": dpos["x"], "y": dpos["y"]})
displayMessage(j, " %f %f" % (ipos["x"], ipos["y"]))
displayMessage(j, "j.PixToWCS(CRPIX1, CRPIX2)")
obj = j.PixToWCS(imdata["header"]["CRPIX1"], imdata["header"]["CRPIX2"])
abs1 = abs(obj["ra"] - imdata["header"]["CRVAL1"])
abs2 = abs(obj["dec"] - imdata["header"]["CRVAL2"])
if abs1 < 0.001 and abs2 < 0.001:
displayMessage(j, " %f %f" % (obj["ra"], obj["dec"]))
else:
raise ValueError("wrong WCSToPix")
displayMessage(j, 'j.GetValPos(ipos)')
valpos = j.GetValPos(ipos, False)
displayMessage(j, ' %s'
% valpos["vstr"].replace(" ", " "))
sleep()
def headerTest(j, file=None):
"""
get header
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"log", "colormap": "heat"}')
displayMessage(j, 'j.GetFITSHeader(True)')
header = j.GetFITSHeader(True).split("\n")
displayMessage(j, " found %d cards" % len(header))
sleep()
def dispCoordsTest(j, file=None):
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"cool"}')
displayMessage(j, 'j.LoadRegions("tests/dcoords.reg")')
j.LoadRegions("tests/dcoords.reg")
waitStatus(j, "LoadRegions")
displayMessage(j, 'j.GetRegions()')
obj = j.GetRegions()
if len(obj) == 13:
displayMessage(j, " found 13 regions")
else:
raise ValueError("incorrect number of regions (%d)" % len(obj))
sleep(2)
def zoomTest(j, file=None):
"""
bin an image (binary table)
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"log", "colormap": "heat"}')
for i in [0.5, 2, 4, 2, 1, 0.5]:
displayMessage(j, 'j.SetZoom(zoom: %f)' % (i))
j.SetZoom(i)
sleep()
def binTest(j, file=None):
"""
bin an image (binary table)
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"log", "colormap": "heat"}')
for i in [0.5, 2, 4, 2, 1, 0.5]:
if i in (0.5, 2):
xfilter = "pi == pha"
else:
xfilter = ""
displayMessage(j, 'j.DisplaySection(bin: %f, filter: %s)'
% (i, xfilter))
j.DisplaySection({"bin":i, "filter": xfilter})
waitStatus(j, "DisplaySection")
def rotateTest(j, file=None):
"""
rotate an image
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"log", "colormap": "heat"}')
displayMessage(j, 'j.RotateData(45)')
j.RotateData(45)
waitStatus(j, "ReprojectData")
sleep()
displayMessage(j, 'j.RotateData(0)')
j.RotateData(0)
waitStatus(j, "ReprojectData")
sleep()
def filterRGBTest(j, file=None):
"""
image processing filters (changes RGB data, not image data)
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"log", "colormap": "heat"}')
displayMessage(j, 'j.FilterRGBImage("emboss")')
j.FilterRGBImage("emboss")
sleep()
def loadWindowTest(j, xfrom, xto):
"""
load a new window, move image to/from
"""
displayMessage(j, 'j.LoadWindow({"id": "%s"}, "light")' % xto)
j.LoadWindow("",
{"id": xto, "clone": xfrom},
"light",
"",
"width=512px,height=598px,left=10,top=10,resize=1,scrolling=1")
sleep()
displayMessage(j, 'j.MoveToDisplay("%s")' % xto)
j.MoveToDisplay(xto)
sleep()
displayMessage(j, 'j.MoveToDisplay(%s, "{display: %s"})' % (xfrom, xto))
j.MoveToDisplay(xfrom, {"display": xto})
sleep()
def wcsTest(j, file=None):
"""
change WCS system and units
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"cool"}')
displayMessage(j, 'j.GetImageData()')
imdata = j.GetImageData(False)
displayMessage(j,
" id: %s type: %s width: %d height: %d bitpix: %d"
% (imdata["id"], imdata["imtab"],
imdata["width"], imdata["height"], imdata["bitpix"]))
displayMessage(j, 'j.GetWCSSys()')
sysstr = j.GetWCSSys()
displayMessage(j, 'j.GetWCSUnits()')
unitsstr = j.GetWCSUnits()
displayMessage(j, " sys: %s units: %s" % (sysstr, unitsstr))
displayMessage(j, 'j.SetWCSSys("galactic")')
j.SetWCSSys("galactic")
if j.GetWCSSys() != "galactic":
raise ValueError("wrong wcs sys")
displayMessage(j, 'j.SetWCSSys("native")')
j.SetWCSSys("native")
displayMessage(j, 'j.SetWCSUnits("degrees")')
j.SetWCSUnits("degrees")
if j.GetWCSUnits() != "degrees":
raise ValueError("wrong wcs units")
sleep()
displayMessage(j, 'j.SetWCSUnits("sexagesimal")')
j.SetWCSUnits("sexagesimal")
sleep()
def countsTest(j, file=None):
"""
internal counts in regions routine
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"cool"}')
displayMessage(j, 'j.AddRegions("circle")')
j.AddRegions("circle")
displayMessage(j, 'j.CountsinRegions()')
s = j.CountsInRegions("$sregions", {"cmdswitches":"-j"})
if type(s) is dict:
obj = s
else:
obj = json.loads(s)
c = obj["backgroundSubtractedResults"][0]["netCounts"]
displayMessage(j, " counts: %f" % c)
if c != 16703.0:
raise ValueError("wrong counts")
sleep()
displayMessage(j, 'j.RemoveRegions()')
j.RemoveRegions()
def colormapTest(j, file=None):
"""
change colormap in various ways
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"cool"}')
cmap = "data/cmaps/purple_mm.cmap"
displayMessage(j, 'j.LoadColormap(%s)' % cmap)
j.LoadColormap(cmap)
displayMessage(j, 'j.AddColormap("cyan")')
j.AddColormap("cyan",
[[0, 0], [0, 0]], [[0, 0], [1, 1]], [[0, 0], [1, 1]],
{"toplevel": False})
displayMessage(j, 'j.SetColormap(3.4, 0.15)')
j.SetColormap(3.4, 0.15)
sleep()
color0 = j.GetParam("colormap")
displayMessage(j, 'j.SetColormap("purplish")')
j.SetColormap("purplish")
sleep()
displayMessage(j, 'j.GetParam("colormap")')
cmap = j.GetParam("colormap")
displayMessage(j, ' colormap: %s' % j.GetColormap())
displayMessage(j, 'j.SetParam("colormap", "cyan")')
j.SetParam("colormap", "cyan")
displayMessage(j, ' colormap: %s' % j.GetColormap())
sleep()
displayMessage(j, 'j.SetParam("colormap", color0)')
j.SetParam("colormap", color0)
cmap = j.GetParam("colormap")
displayMessage(j, ' colormap: %s' % j.GetColormap())
j.SetColormap(color0)
sleep()
def regionsTest(j, file=None):
"""
manipulate regions
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"cool"}')
displayMessage(j, 'j.LoadRegions("data/casa/casa.reg")')
j.LoadRegions("data/casa/casa.reg")
waitStatus(j, "LoadRegions")
displayMessage(j, 'j.GetRegions()')
obj = j.GetRegions()
if len(obj) == 13:
displayMessage(j, " found 13 regions")
else:
raise ValueError("incorrect number of regions (%d)" % len(obj))
displayMessage(j, 'j.ChangeRegions()')
j.ChangeRegions("text", {"color": "cyan"})
sleep()
displayMessage(j, 'j.RemoveRegions()')
j.RemoveRegions("text")
sleep()
displayMessage(j, 'j.UnremoveRegions()')
j.UnremoveRegions()
sleep()
def shapesTest(j, file=None):
"""
manipulate shapes (like regions, but in arbitrary layers)
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"cool"}')
displayMessage(j, 'j.NewShapeLayer("reg2")')
j.NewShapeLayer("reg2")
displayMessage(j, 'j.AddShapes("reg2" "box, circle")')
# pylint: disable=line-too-long
j.AddShapes("reg2", 'ICRS; box(23:23:12.7,+58:51:07.6,29",29",0); circle(23:23:35.2,+58:50:04.6, 14")')
displayMessage(j, 'j.GetShapes("reg2")')
obj = j.GetShapes("reg2")
if len(obj) == 2:
displayMessage(j, ' added 2 shapes')
else:
raise ValueError("incorrect number of shapes")
displayMessage(j, 'j.ChangeShapes("reg2", {"color": "red"})')
j.ChangeShapes("reg2", {"color": "red"})
sleep()
displayMessage(j, 'j.ShowShapeLayer("reg2", False)')
j.ShowShapeLayer("reg2", False)
sleep()
displayMessage(j, 'j.ShowShapeLayer("reg2", True)')
j.ShowShapeLayer("reg2", True)
sleep()
displayMessage(j, 'j.RemoveShapes("reg2")')
j.RemoveShapes("reg2")
sleep()
def catalogTest(j, file=None):
"""
load a catalog
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"cool"}')
displayMessage(j, 'j.LoadCatalog("cat", "data/casa/casa.cat")')
j.LoadCatalog("cat", "data/casa/casa.cat")
waitStatus(j, "LoadRegions")
sleep()
def blurTest(j, file=None):
"""
    gaussian blur
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"cool"}')
displayMessage(j, 'j.GaussBlurData(2)')
j.GaussBlurData(2)
sleep()
def panTest(j, file=None):
"""
get and set pan
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"cool"}')
displayMessage(j, 'j.SetPan({"px": 4006, "py": 3928})')
j.SetPan({"px": 4006, "py": 3928})
displayMessage(j, 'j.GetPan()')
obj = j.GetPan()
if abs(obj["x"] - 1958) > 2 or (obj["y"] - 2216) > 2:
raise ValueError("incorrect pan")
sleep()
def gridTest(j, file=None):
"""
display coord grid
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"cool"}')
displayMessage(j, 'j.DisplayCoordGrid(True)')
j.DisplayCoordGrid(True)
sleep()
displayMessage(j, 'j.DisplayCoordGrid(False)')
j.DisplayCoordGrid(False)
sleep()
def cubeTest(j, file=None):
"""
display 3D cube data
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"log","colormap":"viridis"}')
imdata = j.GetImageData(False)
for i in range(2, 6):
sleep()
displayMessage(j, 'j.DisplaySlice(%d)' % i)
j.DisplaySlice(i)
waitStatus(j, "DisplaySection")
imdata2 = j.GetImageData(False)
if imdata["width"] != imdata2["width"] or imdata["height"] != imdata2["height"]:
raise ValueError("wrong image cube dimensions [%d,%d] [%d,%d]" % (imdata["width"], imdata["height"], imdata2["width"], imdata2["height"]))
displayMessage(j,
" id: %s type: %s width: %d height: %d bitpix: %d"
% (imdata2["id"], imdata2["imtab"],
imdata2["width"], imdata2["height"],
imdata2["bitpix"]))
sleep()
def cubeTest2(j, file=None):
"""
display 3D cube data
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"log","colormap":"viridis"}')
j.DisplaySection({"cubecol":"energy:1000:5000:1000", "bitpix":16});
waitStatus(j, "DisplaySection")
j.SetColormap("heat");
j.SetScale("log");
for i in range(2, 5):
sleep()
displayMessage(j, 'j.DisplaySlice(%d)' % i)
j.DisplaySlice(i)
waitStatus(j, "DisplaySection")
sleep()
def analysisTest(j, file=None):
"""
run an server-side analysis test
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"log","colormap":"viridis"}')
displayMessage(j, 'j.GetAnalysis()')
x = j.GetAnalysis()
displayMessage(j, ' found %d analysis routines' % len(x))
displayMessage(j, 'j.RunAnalysis("counts")')
x = j.RunAnalysis("counts").split("\n")[13].split()[1]
if abs(float(x) - 933) < 0.01:
displayMessage(j, " counts: %s" % x)
else:
raise ValueError("incorrect counts: %s" % x)
sleep()
def extTest(j, file=None):
"""
multi-extension FITS
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"viridis"}')
imdata = j.GetImageData(False)
for i in range(3, 5):
sleep()
displayMessage(j, 'j.DisplayExtension(%d)' % i)
j.DisplayExtension(i)
waitStatus(j, "DisplaySection")
imdata2 = j.GetImageData(False)
if imdata["width"] != imdata2["width"] or imdata["height"] != imdata2["height"]:
raise ValueError("wrong image extdimensions [%d,%d] [%d,%d]" % (imdata["width"], imdata["height"], imdata2["width"], imdata2["height"]))
displayMessage(j,
" id: %s type: %s width: %d height: %d bitpix: %d"
% (imdata2["id"], imdata2["imtab"],
imdata2["width"], imdata2["height"],
imdata2["bitpix"]))
sleep()
def xmmProxyTest(j):
"""
retrieve data from XMM archive, blend and display
"""
# pylint: disable=line-too-long
xmmurl = "http://nxsa.esac.esa.int/nxsa-sl/servlet/data-action-aio?obsno=0791580701&name=3COLIM&level=PPS&extension=FTZ"
closeImage(j)
displayMessage(j, "load xmm archive via proxy ...")
j.LoadProxy(xmmurl,
{"colormap":"red", "scale":"log",
"scalemin": 0, "contrast": 9.2, "bias": 0.047})
waitStatus(j)
displayMessage(j, 'j.DisplaySlice(2)')
j.DisplaySlice(2,
{"separate": True, "colormap":"green", "scale":"log",
"scalemin": 0, "contrast": 9.2, "bias": 0.047})
sleep()
displayMessage(j, 'j.DisplaySlice(3)')
j.DisplaySlice(3,
{"separate": True, "colormap":"blue", "scale":"log",
"scalemin": 0, "contrast": 9.2, "bias": 0.047})
sleep()
displayMessage(j, 'j.SetRGBMode(True)')
j.SetRGBMode(True)
sleep(4)
displayMessage(j, 'j.SetRGBMode(False)')
j.SetRGBMode(False)
sleep()
closeDisplay(j)
def mosaicTest(j, file=None):
"""
create a mosaic from a file
"""
if file:
closeImage(j)
loadImage(j, file, '{"scale":"linear","colormap":"viridis"}')
displayMessage(j, 'j.CreateMosaic("current")')
j.CreateMosaic("current")
waitStatus(j, "CreateMosaic")
displayMessage(j, 'j.SetColormap("magma", 5.13, 0.04)')
j.SetColormap("magma", 5.13, 0.04)
displayMessage(j, 'j.DisplayPlugin("JS9Panner")')
j.DisplayPlugin("JS9Panner")
sleep(2)
displayMessage(j, 'j.DisplayPlugin("panner")')
j.DisplayPlugin("panner")
closeDisplay(j)
def flipAll(j, rots=[90, 10, -90, 15], flips=["x", "y", "x", "y"], bins=[]):
"""
flip and rotate in all combinations
"""
timeout = 1
for ix in range(len(rots)):
rot = rots[ix]
if rot % 90 == 0:
j.SetRot90(rot)
xrot = j.GetRot90()
displayMessage(j, 'j.SetRot90: %d' % (xrot))
else:
j.SetRotate(rot)
xrot = j.GetRotate()
displayMessage(j, 'j.SetRotate: %d' % (xrot))
sleep(timeout)
for iy in range(len(flips)):
flip = flips[iy]
j.SetFlip(flip)
xflip = j.GetFlip()
displayMessage(j, 'j.SetFlip: %s' % (xflip))
sleep(timeout)
if iy < len(bins):
bin = bins[iy]
displayMessage(j, 'j.DisplaySection(bin: %f)' % bin)
j.DisplaySection({"bin":bin, "xcen":0, "ycen":0})
waitStatus(j, "DisplaySection")
sleep(timeout)
def flipRotateTest(j):
"""
flip and rotate image (the regions show wcs/physical update)
"""
# pylint: disable=line-too-long
loadImage(j, 'data/fits/ngc1316.fits', '{"scale":"linear", "contrast":2.93, "bias":0.643}')
displayMessage(j, 'j.AddRegions("circle; ellipse")')
# pylint: disable=line-too-long
j.AddRegions("FK4; ellipse(03:20:47.200, -37:23:08.221, 2.916667', 1.750000', 322.431400); circle(03:22:25.384, -37:14:17.178, 1.051199')")
# pylint: disable=line-too-long
j.AddRegions('physical; ellipse(226.00, 147.00, 28.00, 18.00, 322.4314) {"color":"red"}; circle(58.50, 222.50, 12) {"color":"red"}')
flipAll(j, rots=[90, 15, -90, -100], flips=["x", "y"])
loadImage(j, 'data/fits/sipsample.fits', '{"scale":"log", "colormap": "heat", "contrast": 4.84, "bias": 0.48}')
displayMessage(j, 'j.AddRegions("ellipse; circle")')
# pylint: disable=line-too-long
j.AddRegions('FK5; ellipse(13:29:44.577, +47:10:11.644, 36.686718", 21.417932", 81.620827); circle(13:29:52.660, +47:11:42.560, 36.545208")')
# pylint: disable=line-too-long
j.AddRegions('physical; circle(149.00, 67.00, 33) {"color":"red"}; ellipse(49.00, 76.00, 33, 20, 81.6208) {"color":"red"}')
flipAll(j, rots=[10, 90, -10, -90], flips=["x", "y"])
loadImage(j, 'data/orion/orion_1.fits', {"colormap":"grey"})
# pylint: disable=line-too-long
j.AddRegions('physical; ellipse(414.00, 109.00, 53.75, 20.00, 328.8843); box(500.00, 344.00, 22.00, 22.00, 0.0000); circle(245.00, 392.00, 14.00)')
flipAll(j, rots=[12, 51, 90, -90], flips=["x", "y"])
loadImage(j, 'data/fits/casa.fits.gz', '{"scale":"log", "colormap": "cool"}')
displayMessage(j, 'j.LoadRegions("data/casa/casa.reg")')
j.LoadRegions("data/casa/casa.reg")
flipAll(j, bins=[0.5, 2, 4])
loadImage(j, 'data/fits/squares.fits', {"colormap":"grey"})
# pylint: disable=line-too-long
j.AddRegions('physical; polygon(438.00, 24.00, 498.00, 24.00, 468.00, 84.00) {"text":"white","textOpts":{"px":466,"py":97}}; box(52.00, 452.00, 60.00, 60.00, 0.0000) {"text":"black","textOpts":{"px":52,"py":470}}; circle(459.00, 462.00, 30.00) {"text":"darkgrey","textOpts":{"px":459,"py":421}}; ellipse(57.00, 43.00, 30.00, 20.00, 0.0000) {"text":"lightgrey","textOpts":{"px":57,"py":77}}')
flipAll(j, rots=[90, 95, -90, -95], flips=["x", "y"])
sleep()
closeDisplay(j)
# pylint: disable=too-many-statements
def blendTest(j):
"""
blend images
"""
closeDisplay(j)
j.BlendDisplay(False)
loadImage(j, 'data/blend/chandra.fits',
'{"scale":"linear","colormap":"sls","contrast":5.78,"bias":0.15}')
displayMessage(j, 'j.SetScale(log)')
j.SetScale("log")
displayMessage(j, 'j.GetScale()')
obj = j.GetScale()
if obj["scale"] != "log":
raise ValueError("incorrect scale")
displayMessage(j, 'j.SetZoom(2)')
j.SetZoom(2)
displayMessage(j, 'j.GetZoom()')
val = j.GetZoom()
if val != 2:
raise ValueError("incorrect zoom")
sleep()
displayMessage(j, 'j.SetColormap("red")')
j.SetColormap("red")
displayMessage(j, 'j.GetColormap()')
obj = j.GetColormap()
if obj["colormap"] != "red":
raise ValueError("incorrect colormap")
displayMessage(j, 'j.BlendImage("screen", 1, True)')
j.BlendImage('screen', 1, True)
sleep()
# pylint: disable=line-too-long
loadImage(j, 'data/blend/galex.fits',
'{"scale":"log","colormap":"green","contrast":6.25,"bias":0.25}')
displayMessage(j, 'j.ReprojectData("chandra.fits")')
j.ReprojectData('chandra.fits')
waitStatus(j, "ReprojectData")
displayMessage(j, 'j.SetColormap("green", 5.6, 0.74)')
j.SetColormap('green', 5.6, 0.74)
displayMessage(j, 'j.SetZoom(2)')
j.SetZoom(2)
displayMessage(j, 'j.BlendImage("screen", 1, True)')
j.BlendImage('screen', 1, True)
sleep()
# pylint: disable=line-too-long
loadImage(j, 'data/blend/spitzer.fits', '{"scale":"log","colormap":"blue","contrast":6.3,"bias":0.54}')
displayMessage(j, 'j.ReprojectData("chandra.fits")')
j.ReprojectData('chandra.fits')
waitStatus(j, "ReprojectData")
displayMessage(j, 'j.SetColormap("blue", 6.3, 0.54)')
j.SetColormap('blue', 6.3, 0.54)
displayMessage(j, 'j.SetZoom(2)')
j.SetZoom(2)
displayMessage(j, 'j.BlendImage("screen", 1, True)')
j.BlendImage('screen', 1, True)
sleep()
# pylint: disable=line-too-long
loadImage(j, 'data/blend/hst.fits', '{"scale":"log","scaleclipping":"user","scalemin":0,"scalemax":5,"colormap":"heat","contrast":4.0,"bias":0.67}')
displayMessage(j, 'j.ReprojectData("chandra.fits")')
j.ReprojectData('chandra.fits')
waitStatus(j, "ReprojectData")
displayMessage(j, 'j.SetColormap("heat", 3.0, 0.535)')
j.SetColormap('heat', 3.0, 0.535)
displayMessage(j, 'j.SetZoom(2)')
j.SetZoom(2)
displayMessage(j, 'j.BlendImage("screen", 1, True)')
j.BlendImage('screen', 1, True)
displayMessage(j, 'j.Addregions("ellipse; circle")')
j.AddRegions('FK5; ellipse(06:16:27.2, -21:22:31.1, 35.97", 19.19", 20.25) {"color":"cyan"}; circle(06:16:22.1, -21:22:22.8, 14.8")')
displayMessage(j, 'j.GetRegions()')
obj = j.GetRegions()
if len(obj) != 2:
raise ValueError("incorrect number of regions")
displayMessage(j, 'j.CopyRegions("chandra.fits")')
j.CopyRegions("chandra.fits")
displayMessage(j, 'j.RemoveRegions()')
j.RemoveRegions()
displayMessage(j, 'blend the images ...')
displayMessage(j, 'j.BlendDisplay(True)')
j.BlendDisplay(True)
sleep()
displayMessage(j, 'j.GetDisplayData()')
imarr = j.GetDisplayData()
for imdata in imarr:
displayMessage(j, " id: %s type: %s width: %d height: %d bitpix: %d"
% (imdata["id"], imdata["imtab"],
imdata["width"], imdata["height"], imdata["bitpix"]))
displayMessage(j, 'j.DisplayImage("chandra.fits")')
j.DisplayImage("colormap", {"display":"chandra.fits"})
sleep()
def resizeSeparateTest(j):
"""
resize display, separate, gather images in a display
"""
displayMessage(j, 'gather, resize ...')
    displayMessage(j, 'j.ResizeDisplay(300,300)')
j.ResizeDisplay(300, 300)
sleep()
displayMessage(j, 'j.SeparateDisplay()')
j.SeparateDisplay()
sleep(2)
displayMessage(j, 'j.GatherDisplay()')
j.GatherDisplay()
sleep(2)
displayMessage(j, 'j.ResizeDisplay("reset")')
j.ResizeDisplay("reset")
def staticColormapTest(j):
"""
add colormaps
"""
if j:
j.AddColormap("mask", [["#ff000080", 1, 31], ["cyan", 32, 32], ["rgba(0,255,0,0.5)", 37, 99], ["blue", 100, "Infinity"]])
def maskBlendTest(j):
"""
load masks
"""
imageFile = 'data/fits/lsst1.fits'
imageId = 'lsst1.fits[1]'
maskId = 'lsst1.fits[2]'
# load the mask first, so it can blend into the image
loadImage(j, imageFile + '[2]')
# use the lsst mask
displayMessage(j, 'j.SetColormap("mask")')
j.SetColormap("mask")
# the mask will be blended using source-atop composition
displayMessage(j, 'j.BlendImage()')
j.BlendImage("source-atop")
# load extension #1: the image data itself
displayMessage(j, 'j.Displayextension(1)')
j.DisplayExtension(1, {"separate": True})
displayMessage(j, "waiting for DisplayExtension ...")
waitStatus(j, "DisplayExtension")
# set some nice image params
displayMessage(j, 'j.SetScale("log")')
j.SetScale("log")
displayMessage(j, 'j.SetColormap("grey")')
j.SetColormap("grey", 6.37, 0.3)
# blend the two images
j.BlendDisplay(True)
# and sync them
displayMessage(j, 'j.SyncImages()')
j.SyncImages(["flip", "pan", "rot90", "zoom"], [maskId])
displayMessage(j, 'j.DisplayPlugin(JS9Blend)')
j.DisplayPlugin("JS9Blend")
sleep(2)
j.DisplayPlugin("JS9Blend")
def maskOverlayTest(j):
"""
load masks
"""
imageFile = 'data/fits/lsst1.fits'
imageId = 'lsst1.fits[1]'
maskId = 'lsst1.fits[2]'
    # turn off blending, just in case we were just running maskblend ...
j.BlendDisplay(False)
loadImage(j, imageFile, '{"scale":"log"}')
displayMessage(j, 'j.SetColormap("grey")')
j.SetColormap("grey", 5.57, 0.28)
displayMessage(j, 'j.Displayextension(2)')
j.DisplayExtension(2, {"separate": True})
displayMessage(j, "waiting for DisplayExtension ...")
waitStatus(j, "DisplayExtension")
displayMessage(j, 'j.SetColormap("mask")')
j.SetColormap("mask")
displayMessage(j, 'j.DisplayImage()')
j.DisplayImage({"display": imageId})
displayMessage(j, 'j.MaskImage()')
j.MaskImage(maskId, '{"mode":"overlay"}')
displayMessage(j, 'j.SyncImages()')
j.SyncImages(["flip", "pan", "rot90", "zoom"], [maskId])
sleep(2)
def smokeTests():
"""
all the tests
"""
j = init()
fitsioTest(j, "data/fits/casa.fits.gz")
pixTest(j, "data/fits/snr.fits")
headerTest(j)
dispCoordsTest(j)
zoomTest(j)
binTest(j)
rotateTest(j)
loadWindowTest(j, "JS9", "myJS9")
filterRGBTest(j)
flipRotateTest(j)
wcsTest(j, "data/fits/casa.fits")
countsTest(j)
colormapTest(j)
regionsTest(j)
shapesTest(j)
catalogTest(j)
blurTest(j)
panTest(j)
gridTest(j)
cubeTest(j, "data/fits/jupiter_cube.fits")
analysisTest(j)
cubeTest2(j, "data/fits/casa.fits")
extTest(j, "data/fits/nicmos.fits")
mosaicTest(j, "data/fits/mosaicimage.fits")
staticColormapTest(j)
maskBlendTest(j)
maskOverlayTest(j)
blendTest(j)
resizeSeparateTest(j)
j.BlendDisplay(False)
closeDisplay(j)
xmmProxyTest(j)
sleep(2)
j.close()
sleep(2)
sys.exit()
if __name__ == '__main__':
smokeTests()
|